publish master branch snapshot, revision 0110d9c98fd7209589d06344f0d836f61d81f4b3
author Alexey Suhov <alexey.suhov@intel.com>
Mon, 27 Apr 2020 18:21:29 +0000 (21:21 +0300)
committer Alexey Suhov <alexey.suhov@intel.com>
Mon, 27 Apr 2020 18:21:29 +0000 (21:21 +0300)
1456 files changed:
cmake/coverage/coverage.cmake
cmake/developer_package.cmake
cmake/download/dependency_solver.cmake
cmake/download/download_and_extract.cmake
cmake/fuzzing.cmake [moved from inference-engine/cmake/fuzzing.cmake with 100% similarity]
cmake/os_flags.cmake
cmake/sanitizer.cmake
inference-engine/CMakeLists.txt
inference-engine/cmake/clang_format.cmake
inference-engine/cmake/config.cmake.in
inference-engine/cmake/cpplint.cmake
inference-engine/cmake/cpplint_run.cmake
inference-engine/cmake/developer_package_config.cmake.in
inference-engine/cmake/features_ie.cmake
inference-engine/cmake/plugins/plugins.cmake
inference-engine/cmake/share/InferenceEngineConfig.cmake.in
inference-engine/cmake/vpu_dependencies.cmake
inference-engine/ie_bridges/c/CMakeLists.txt
inference-engine/ie_bridges/c/samples/common/CMakeLists.txt
inference-engine/ie_bridges/c/samples/hello_classification/CMakeLists.txt
inference-engine/ie_bridges/c/samples/hello_nv12_input_classification/CMakeLists.txt
inference-engine/ie_bridges/c/samples/hello_nv12_input_classification/main.c
inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/CMakeLists.txt
inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/main.c
inference-engine/ie_bridges/c/tests/CMakeLists.txt [new file with mode: 0644]
inference-engine/ie_bridges/c/tests/ie_c_api_test.cpp [new file with mode: 0644]
inference-engine/ie_bridges/c/tests/test_model_repo.hpp [new file with mode: 0644]
inference-engine/ie_bridges/python/docs/api_overview.md
inference-engine/ie_bridges/python/sample/classification_sample_async/classification_sample_async.py
inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt
inference-engine/ie_bridges/python/src/openvino/inference_engine/__init__.py
inference-engine/ie_bridges/python/src/openvino/inference_engine/constants.pyx [new file with mode: 0644]
inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pxd
inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx
inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp
inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp
inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl_defs.pxd
inference-engine/ie_bridges/python/tests/conftest.py [new file with mode: 0644]
inference-engine/ie_bridges/python/tests/test_CDataPtr.py [new file with mode: 0644]
inference-engine/ie_bridges/python/tests/test_DataPtr.py [new file with mode: 0644]
inference-engine/ie_bridges/python/tests/test_ExecutableNetwork.py [new file with mode: 0644]
inference-engine/ie_bridges/python/tests/test_IEBlob.py [new file with mode: 0644]
inference-engine/ie_bridges/python/tests/test_IECore.py [new file with mode: 0644]
inference-engine/ie_bridges/python/tests/test_IENetLayer.py [new file with mode: 0644]
inference-engine/ie_bridges/python/tests/test_IENetwork.py [new file with mode: 0644]
inference-engine/ie_bridges/python/tests/test_IEPlugin.py [new file with mode: 0644]
inference-engine/ie_bridges/python/tests/test_IETensorDesk.py [new file with mode: 0644]
inference-engine/ie_bridges/python/tests/test_InferRequest.py [new file with mode: 0644]
inference-engine/ie_bridges/python/tests/test_NGraph.py [new file with mode: 0644]
inference-engine/include/builders/ie_argmax_layer.hpp [deleted file]
inference-engine/include/builders/ie_batch_normalization_layer.hpp [deleted file]
inference-engine/include/builders/ie_clamp_layer.hpp [deleted file]
inference-engine/include/builders/ie_concat_layer.hpp [deleted file]
inference-engine/include/builders/ie_const_layer.hpp [deleted file]
inference-engine/include/builders/ie_convolution_layer.hpp [deleted file]
inference-engine/include/builders/ie_crop_layer.hpp [deleted file]
inference-engine/include/builders/ie_ctc_greedy_decoder_layer.hpp [deleted file]
inference-engine/include/builders/ie_deconvolution_layer.hpp [deleted file]
inference-engine/include/builders/ie_deformable_convolution_layer.hpp [deleted file]
inference-engine/include/builders/ie_detection_output_layer.hpp [deleted file]
inference-engine/include/builders/ie_eltwise_layer.hpp [deleted file]
inference-engine/include/builders/ie_elu_layer.hpp [deleted file]
inference-engine/include/builders/ie_fully_connected_layer.hpp [deleted file]
inference-engine/include/builders/ie_grn_layer.hpp [deleted file]
inference-engine/include/builders/ie_gru_sequence_layer.hpp [deleted file]
inference-engine/include/builders/ie_input_layer.hpp [deleted file]
inference-engine/include/builders/ie_layer_builder.hpp [deleted file]
inference-engine/include/builders/ie_layer_decorator.hpp [deleted file]
inference-engine/include/builders/ie_lrn_layer.hpp [deleted file]
inference-engine/include/builders/ie_lstm_sequence_layer.hpp [deleted file]
inference-engine/include/builders/ie_memory_layer.hpp [deleted file]
inference-engine/include/builders/ie_mvn_layer.hpp [deleted file]
inference-engine/include/builders/ie_network_builder.hpp [deleted file]
inference-engine/include/builders/ie_norm_layer.hpp [deleted file]
inference-engine/include/builders/ie_normalize_layer.hpp [deleted file]
inference-engine/include/builders/ie_output_layer.hpp [deleted file]
inference-engine/include/builders/ie_permute_layer.hpp [deleted file]
inference-engine/include/builders/ie_pooling_layer.hpp [deleted file]
inference-engine/include/builders/ie_power_layer.hpp [deleted file]
inference-engine/include/builders/ie_prelu_layer.hpp [deleted file]
inference-engine/include/builders/ie_prior_box_clustered_layer.hpp [deleted file]
inference-engine/include/builders/ie_prior_box_layer.hpp [deleted file]
inference-engine/include/builders/ie_proposal_layer.hpp [deleted file]
inference-engine/include/builders/ie_psroi_pooling_layer.hpp [deleted file]
inference-engine/include/builders/ie_region_yolo_layer.hpp [deleted file]
inference-engine/include/builders/ie_relu6_layer.hpp [deleted file]
inference-engine/include/builders/ie_relu_layer.hpp [deleted file]
inference-engine/include/builders/ie_reorg_yolo_layer.hpp [deleted file]
inference-engine/include/builders/ie_resample_layer.hpp [deleted file]
inference-engine/include/builders/ie_reshape_layer.hpp [deleted file]
inference-engine/include/builders/ie_rnn_sequence_layer.hpp [deleted file]
inference-engine/include/builders/ie_roi_pooling_layer.hpp [deleted file]
inference-engine/include/builders/ie_scale_shift_layer.hpp [deleted file]
inference-engine/include/builders/ie_sigmoid_layer.hpp [deleted file]
inference-engine/include/builders/ie_simpler_nms_layer.hpp [deleted file]
inference-engine/include/builders/ie_softmax_layer.hpp [deleted file]
inference-engine/include/builders/ie_split_layer.hpp [deleted file]
inference-engine/include/builders/ie_tanh_layer.hpp [deleted file]
inference-engine/include/builders/ie_tile_layer.hpp [deleted file]
inference-engine/include/cpp/ie_cnn_net_reader.h
inference-engine/include/cpp/ie_executable_network.hpp
inference-engine/include/cpp/ie_infer_request.hpp
inference-engine/include/cpp/ie_memory_state.hpp
inference-engine/include/cpp/ie_plugin_cpp.hpp
inference-engine/include/details/ie_inetwork_iterator.hpp [deleted file]
inference-engine/include/details/ie_so_pointer.hpp
inference-engine/include/details/os/lin_shared_object_loader.h
inference-engine/include/details/os/win_shared_object_loader.h
inference-engine/include/ie_api.h
inference-engine/include/ie_builders.hpp [deleted file]
inference-engine/include/ie_context.hpp [deleted file]
inference-engine/include/ie_core.hpp
inference-engine/include/ie_extension.h
inference-engine/include/ie_icnn_net_reader.h
inference-engine/include/ie_icnn_network.hpp
inference-engine/include/ie_icnn_network_stats.hpp
inference-engine/include/ie_iexecutable_network.hpp
inference-engine/include/ie_iextension.h
inference-engine/include/ie_layers.h
inference-engine/include/ie_layers_property.hpp
inference-engine/include/ie_network.hpp [deleted file]
inference-engine/include/ie_parameter.hpp
inference-engine/include/ie_plugin.hpp
inference-engine/include/ie_plugin_config.hpp
inference-engine/include/ie_remote_context.hpp
inference-engine/include/inference_engine.hpp
inference-engine/samples/CMakeLists.txt
inference-engine/samples/benchmark_app/main.cpp
inference-engine/samples/classification_sample_async/main.cpp
inference-engine/samples/common/format_reader/CMakeLists.txt
inference-engine/samples/hello_reshape_ssd/main.cpp
inference-engine/samples/object_detection_sample_ssd/main.cpp
inference-engine/samples/style_transfer_sample/main.cpp
inference-engine/src/cldnn_engine/CMakeLists.txt
inference-engine/src/cldnn_engine/cldnn_config.cpp
inference-engine/src/cldnn_engine/cldnn_engine.cpp
inference-engine/src/cldnn_engine/cldnn_engine.h
inference-engine/src/cldnn_engine/cldnn_infer_request.cpp
inference-engine/src/cldnn_engine/cldnn_program.cpp
inference-engine/src/cldnn_engine/cldnn_program.h
inference-engine/src/gna_plugin/backend/am_intel_dnn.cpp
inference-engine/src/gna_plugin/gna2_model_export_helper.cpp
inference-engine/src/gna_plugin/gna2_model_export_helper.hpp
inference-engine/src/gna_plugin/gna2_model_helper.cpp
inference-engine/src/gna_plugin/gna2_model_helper.hpp
inference-engine/src/gna_plugin/gna_device.cpp
inference-engine/src/gna_plugin/gna_device.hpp
inference-engine/src/gna_plugin/gna_graph_compiler.cpp
inference-engine/src/gna_plugin/gna_graph_tools.hpp
inference-engine/src/gna_plugin/gna_model_serial.cpp
inference-engine/src/gna_plugin/gna_model_serial.hpp
inference-engine/src/gna_plugin/gna_plugin.cpp
inference-engine/src/gna_plugin/gna_plugin.hpp
inference-engine/src/hetero_plugin/hetero_executable_network.cpp
inference-engine/src/hetero_plugin/hetero_plugin.cpp
inference-engine/src/hetero_plugin/hetero_plugin.hpp
inference-engine/src/inference_engine/CMakeLists.txt
inference-engine/src/inference_engine/builders/ie_argmax_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_batch_normalization_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_clamp_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_concat_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_convolution_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_crop_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_ctc_greedy_decoder_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_deconvolution_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_deformable_convolution_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_detection_output_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_eltwise_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_elu_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_fully_connected_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_grn_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_gru_sequence_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_lrn_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_lstm_sequence_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_memory_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_mvn_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_network_builder_converter.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_norm_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_normalize_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_output_layer_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_permute_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_power_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_prelu_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_prior_box_clustered_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_prior_box_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_proposal_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_psroi_pooling_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_region_yolo_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_relu6_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_relu_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_reorg_yolo_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_resample_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_reshape_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_rnn_sequence_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_roi_pooling_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_scale_shift_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_sigmoid_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_simpler_nms_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_softmax_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_tanh_layer.cpp [deleted file]
inference-engine/src/inference_engine/builders/ie_tile_layer.cpp [deleted file]
inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp
inference-engine/src/inference_engine/ie_core.cpp
inference-engine/src/inference_engine/ie_plugin_dispatcher.cpp
inference-engine/src/inference_engine/ie_system_conf.cpp
inference-engine/src/inference_engine/threading/ie_cpu_streams_executor.cpp
inference-engine/src/ir_readers/ie_cnn_net_reader_impl.cpp
inference-engine/src/ir_readers/ie_format_parser.cpp
inference-engine/src/ir_readers/ie_ir_parser.cpp
inference-engine/src/ir_readers/ie_ir_parser.hpp
inference-engine/src/ir_readers/ie_ir_reader.hpp
inference-engine/src/legacy_api/CMakeLists.txt
inference-engine/src/legacy_api/include/layer_transform.hpp
inference-engine/src/legacy_api/src/builders/ie_const_layer.cpp [deleted file]
inference-engine/src/legacy_api/src/builders/ie_input_layer_layer.cpp [deleted file]
inference-engine/src/legacy_api/src/builders/ie_layer_builder.cpp [deleted file]
inference-engine/src/legacy_api/src/builders/ie_layer_decorator.cpp [deleted file]
inference-engine/src/legacy_api/src/builders/ie_network_builder.cpp [deleted file]
inference-engine/src/legacy_api/src/builders/ie_pooling_layer.cpp [deleted file]
inference-engine/src/legacy_api/src/builders/ie_split_layer.cpp [deleted file]
inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp
inference-engine/src/legacy_api/src/ie_cnn_layer_builder.cpp [deleted file]
inference-engine/src/legacy_api/src/ie_cnn_layer_builder.h
inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp
inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.h
inference-engine/src/legacy_api/src/ie_context.cpp [deleted file]
inference-engine/src/legacy_api/src/ie_layer_validators.cpp
inference-engine/src/legacy_api/src/ie_layer_validators.hpp
inference-engine/src/legacy_api/src/ie_layers.cpp
inference-engine/src/legacy_api/src/ie_network.cpp [deleted file]
inference-engine/src/legacy_api/src/ie_util_internal.cpp
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_built_in_holder.cpp
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_scatter_shape_infer.hpp
inference-engine/src/legacy_api/src/shape_infer/ie_reshaper.cpp
inference-engine/src/legacy_api/src/shape_infer/ie_reshaper.hpp
inference-engine/src/mkldnn_plugin/CMakeLists.txt
inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp
inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.cpp
inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.h
inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp
inference-engine/src/mkldnn_plugin/mkldnn_node.cpp
inference-engine/src/mkldnn_plugin/mkldnn_node.h
inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_batchnorm_node.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_conv_node.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_conv_node.h
inference-engine/src/mkldnn_plugin/nodes/mkldnn_deconv_node.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_fullyconnected_node.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.h
inference-engine/src/mkldnn_plugin/nodes/mkldnn_normalize_node.cpp [new file with mode: 0644]
inference-engine/src/mkldnn_plugin/nodes/mkldnn_normalize_node.h [new file with mode: 0644]
inference-engine/src/mkldnn_plugin/nodes/mkldnn_quantize_node.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_quantize_node.h
inference-engine/src/mkldnn_plugin/nodes/mkldnn_resample_node.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_reshape_node.cpp
inference-engine/src/mkldnn_plugin/nodes/normalize.cpp [deleted file]
inference-engine/src/mkldnn_plugin/nodes/select.cpp
inference-engine/src/plugin_api/cpp_interfaces/base/ie_inference_plugin_api.hpp
inference-engine/src/plugin_api/cpp_interfaces/impl/ie_executable_network_internal.hpp
inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp
inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iplugin_internal.hpp
inference-engine/src/plugin_api/ie_icore.hpp
inference-engine/src/plugin_api/ie_system_conf.h
inference-engine/src/transformations/include/transformations/common_optimizations/common_optimizations.hpp [new file with mode: 0644]
inference-engine/src/transformations/include/transformations/common_optimizations/common_optimizations_tbl.hpp [new file with mode: 0644]
inference-engine/src/transformations/include/transformations/convert_opset1_to_legacy/conv_bias_fusion.hpp
inference-engine/src/transformations/include/transformations/convert_opset1_to_legacy/convert_mul_or_add_finally.hpp
inference-engine/src/transformations/include/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy_tbl.hpp
inference-engine/src/transformations/include/transformations/convert_opset1_to_legacy/fc_bias_fusion.hpp
inference-engine/src/transformations/include/transformations/convert_opset1_to_legacy/reshape_fc_fusion.hpp
inference-engine/src/transformations/include/transformations/convert_opset2_to_opset1/convert_opset2_to_opset1_tbl.hpp [new file with mode: 0644]
inference-engine/src/transformations/include/transformations/convert_reduce_to_pooling.hpp
inference-engine/src/transformations/include/transformations/init_node_info.hpp [new file with mode: 0644]
inference-engine/src/transformations/include/transformations/mul_add_squence_fusion.hpp
inference-engine/src/transformations/include/transformations/rt_info/fused_names_attribute.hpp [new file with mode: 0644]
inference-engine/src/transformations/include/transformations/utils/pass_manager.hpp [deleted file]
inference-engine/src/transformations/src/transformations/batch_norm_decomposition.cpp
inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp [new file with mode: 0644]
inference-engine/src/transformations/src/transformations/convert_batch_to_space.cpp
inference-engine/src/transformations/src/transformations/convert_broadcast_to_tiles.cpp
inference-engine/src/transformations/src/transformations/convert_depth_to_space.cpp
inference-engine/src/transformations/src/transformations/convert_divide.cpp
inference-engine/src/transformations/src/transformations/convert_minimum_to_power_and_max.cpp
inference-engine/src/transformations/src/transformations/convert_mod.cpp
inference-engine/src/transformations/src/transformations/convert_negative.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_convolutions.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_gather_to_gather_ie.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_gathertree_to_gathertree_ie.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_gelu.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_hard_sigmoid_to_hard_sigmoid_ie.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_interpolate_to_interp_or_resample.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_lrn_to_lrn_ie.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_lstm_cell_to_lstm_cell_ie.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_matmul_to_fc_or_gemm.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_mul_add_to_scaleshift_or_power.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_nms_to_nms_ie.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_normalizel2_to_normalize_ie.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_one_hot_to_one_hot_ie.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_pad_to_pad_ie.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_power_to_power_ie.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_prelu_to_relu_ie.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_prior_to_ie_prior.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_proposal_to_proposal_ie.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_selu_to_selu_ie.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_sqrt_to_power_ie.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_strided_slice_to_crop.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_strided_slice_to_strided_slice_ie.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_tile_to_ie_tile.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_topk_to_topk_ie.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/reshape_1d_convolutions.cpp
inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/reshape_fully_connected.cpp
inference-engine/src/transformations/src/transformations/convert_opset2_to_opset1/convert_opset2_to_opset1.cpp
inference-engine/src/transformations/src/transformations/convert_space_to_batch.cpp
inference-engine/src/transformations/src/transformations/convert_space_to_depth.cpp
inference-engine/src/transformations/src/transformations/convert_subtract.cpp
inference-engine/src/transformations/src/transformations/init_node_info.cpp [new file with mode: 0644]
inference-engine/src/transformations/src/transformations/pull_transpose_through_fq.cpp
inference-engine/src/transformations/src/transformations/rt_info/fused_names_attribute.cpp [new file with mode: 0644]
inference-engine/src/transformations/src/transformations/utils/pass_manager.cpp [deleted file]
inference-engine/src/vpu/CMakeLists.txt
inference-engine/src/vpu/common/include/vpu/ngraph/operations/dynamic_shape_resolver.hpp
inference-engine/src/vpu/common/include/vpu/ngraph/operations/static_shape_nonzero.hpp
inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape.hpp
inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_binary_elementwise.hpp [new file with mode: 0644]
inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_non_max_suppression.hpp [new file with mode: 0644]
inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_nonzero.hpp
inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_roialign.hpp [new file with mode: 0644]
inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_squeeze.hpp [new file with mode: 0644]
inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_transpose.hpp [new file with mode: 0644]
inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_unary_elementwise.hpp [new file with mode: 0644]
inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_unsqueeze.hpp [new file with mode: 0644]
inference-engine/src/vpu/common/src/ngraph/operations/dynamic_shape_resolver.cpp
inference-engine/src/vpu/common/src/ngraph/operations/static_shape_nonzero.cpp
inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape.cpp
inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_binary_elementwise.cpp [new file with mode: 0644]
inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_non_max_suppression.cpp [new file with mode: 0644]
inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_nonzero.cpp
inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_roialign.cpp [new file with mode: 0644]
inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_squeeze.cpp [new file with mode: 0644]
inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_transpose.cpp [new file with mode: 0644]
inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_unary_elementwise.cpp [new file with mode: 0644]
inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_unsqueeze.cpp [new file with mode: 0644]
inference-engine/src/vpu/custom_kernels/grn.cl
inference-engine/src/vpu/custom_kernels/mvn.cl
inference-engine/src/vpu/graph_transformer/include/vpu/frontend/frontend.hpp
inference-engine/src/vpu/graph_transformer/include/vpu/middleend/allocator/allocator.hpp
inference-engine/src/vpu/graph_transformer/include/vpu/model/base.hpp
inference-engine/src/vpu/graph_transformer/include/vpu/model/data.hpp
inference-engine/src/vpu/graph_transformer/include/vpu/model/edges.hpp
inference-engine/src/vpu/graph_transformer/include/vpu/model/model.hpp
inference-engine/src/vpu/graph_transformer/include/vpu/model/stage.hpp
inference-engine/src/vpu/graph_transformer/include/vpu/stage_builder.hpp
inference-engine/src/vpu/graph_transformer/src/backend/dump_to_dot.cpp
inference-engine/src/vpu/graph_transformer/src/backend/serialize.cpp
inference-engine/src/vpu/graph_transformer/src/frontend/detect_network_batch.cpp
inference-engine/src/vpu/graph_transformer/src/frontend/frontend.cpp
inference-engine/src/vpu/graph_transformer/src/middleend/allocator/allocator.cpp
inference-engine/src/vpu/graph_transformer/src/middleend/passes/adjust_data_location.cpp
inference-engine/src/vpu/graph_transformer/src/middleend/passes/allocate_resources.cpp
inference-engine/src/vpu/graph_transformer/src/middleend/passes/eliminate_copy.cpp
inference-engine/src/vpu/graph_transformer/src/middleend/passes/final_check.cpp
inference-engine/src/vpu/graph_transformer/src/middleend/passes/sw_pooling_adaptation.cpp
inference-engine/src/vpu/graph_transformer/src/middleend/special_stage_processor.cpp
inference-engine/src/vpu/graph_transformer/src/model/data.cpp
inference-engine/src/vpu/graph_transformer/src/model/model.cpp
inference-engine/src/vpu/graph_transformer/src/stages/dynamic_shape_resolver.cpp [new file with mode: 0644]
inference-engine/src/vpu/graph_transformer/src/stages/eltwise.cpp
inference-engine/src/vpu/graph_transformer/src/stages/roi_align.cpp
inference-engine/src/vpu/graph_transformer/src/stages/scatter_elements_update.cpp [new file with mode: 0644]
inference-engine/src/vpu/myriad_plugin/myriad_infer_request.cpp
inference-engine/src/vpu/myriad_plugin/myriad_plugin.cpp
inference-engine/tests/CMakeLists.txt
inference-engine/tests/functional/inference_engine/CMakeLists.txt
inference-engine/tests/functional/inference_engine/blob_copy_test.cpp [moved from inference-engine/tests_deprecated/unit/inference_engine_tests/blob_copy_test.cpp with 84% similarity]
inference-engine/tests/functional/inference_engine/exception_test.cpp [moved from inference-engine/tests_deprecated/unit/inference_engine_tests/exception_test.cpp with 87% similarity]
inference-engine/tests/functional/inference_engine/net_reader_test.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/abs_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/acos_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/asin_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/atan_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/batch_norm_inference_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/broadcast_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/ceiling_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/clamp_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/concat_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/convolution_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/cos_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/cosh_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/deconvolution_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/detection_output_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/divide_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/elu_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/erf_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/exp_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/fake_quantize_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/floor_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/fusion_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/gather_tree_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/gelu_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/greater_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/grn_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/hard_sigmoid_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/interpolate_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/less_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/linear_ops_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/log_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/logical_and_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/logical_not_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/logical_or_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/logical_xor_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/matmul_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/maximum_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/mvn_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/neg_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/negative_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/non_max_suppression_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/one_hot_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/pad_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/pooling_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/pow_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/prelu_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/prior_box_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/proposal_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/range_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/reduce_logical_and_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/reduce_logical_or_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/reduce_to_pooling_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/relu_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/reshape_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/reverse_sequence_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/select_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/selu_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/shape_of_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/sigmoid_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/sign_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/sin_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/sinh_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/softmax_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/split_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/sqrt_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/squared_difference_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/squeeze_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/strided_slice_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/tan_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/tanh_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/tile_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/topK_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/transpose_tests.cpp
inference-engine/tests/functional/inference_engine/ngraph_reader/unsqueeze_tests.cpp
inference-engine/tests/functional/inference_engine/parameter_tests.cpp [moved from inference-engine/tests_deprecated/unit/inference_engine_tests/parameter_tests.cpp with 91% similarity]
inference-engine/tests/functional/inference_engine/tensor_desc_test.cpp [moved from inference-engine/tests_deprecated/unit/inference_engine_tests/tensor_desc_test.cpp with 90% similarity]
inference-engine/tests/functional/inference_engine/transformations/convert_matmul_test.cpp [moved from inference-engine/tests_deprecated/unit/inference_engine_tests/transformations/convert_matmul_test.cpp with 89% similarity]
inference-engine/tests/functional/inference_engine/transformations/convert_strided_slice_to_crop_test.cpp [moved from inference-engine/tests_deprecated/unit/inference_engine_tests/transformations/convert_strided_slice_to_crop_test.cpp with 93% similarity]
inference-engine/tests/functional/inference_engine/transformations/fc_bias_fusion_test.cpp [moved from inference-engine/tests_deprecated/unit/inference_engine_tests/transformations/fc_bias_fusion_test.cpp with 90% similarity]
inference-engine/tests/functional/inference_engine/transformations/ngraph_1d_convolution_reshape_test.cpp [moved from inference-engine/tests_deprecated/unit/inference_engine_tests/transformations/ngraph_1d_convolution_reshape_test.cpp with 88% similarity]
inference-engine/tests/functional/inference_engine/transformations/ngraph_depth_to_space_transform_test.cpp [moved from inference-engine/tests_deprecated/unit/inference_engine_tests/transformations/ngraph_depth_to_space_transform_test.cpp with 81% similarity]
inference-engine/tests/functional/inference_engine/transformations/ngraph_fq_transpose_test.cpp [moved from inference-engine/tests_deprecated/unit/inference_engine_tests/transformations/ngraph_fq_transpose_test.cpp with 87% similarity]
inference-engine/tests/functional/inference_engine/transformations/ngraph_mode_decomposition_test.cpp [moved from inference-engine/tests_deprecated/unit/inference_engine_tests/transformations/ngraph_mode_decomposition_test.cpp with 77% similarity]
inference-engine/tests/functional/inference_engine/transformations/ngraph_test_utils.cpp [moved from inference-engine/tests_deprecated/unit/inference_engine_tests/transformations/ngraph_test_utils.cpp with 75% similarity]
inference-engine/tests/functional/inference_engine/transformations/ngraph_test_utils.hpp [moved from inference-engine/tests_deprecated/unit/inference_engine_tests/transformations/ngraph_test_utils.hpp with 60% similarity]
inference-engine/tests/functional/inference_engine/transformations/reshape_fc_fusion_test.cpp [moved from inference-engine/tests_deprecated/unit/inference_engine_tests/transformations/reshape_fc_fusion_test.cpp with 84% similarity]
inference-engine/tests/functional/plugin/cpu/bfloat16/bfloat16_helpers.hpp
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/execution_graph_tests/unique_node_names.cpp
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/ngraph_conversion_tests/conv_bias_fusion.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/other_tests/add_output.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/add.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/batch_to_space.cpp
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/concat.cpp
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/convolution.cpp
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/group_convolution.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/lrn.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/multiply.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/mvn.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/pooling.cpp
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/reshape.cpp
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/select.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/softmax.cpp
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/space_to_batch.cpp
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/split.cpp
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/split_conv_concat.cpp
inference-engine/tests/functional/plugin/cpu/single_layer_tests/cpu_test_utils.hpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/cpu/single_layer_tests/group_convolution.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/gna/shared_tests_instances/other_tests/add_output.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/concat.cpp
inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/convolution.cpp
inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/multiply.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/pooling.cpp
inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/split.cpp
inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/split_conv_concat.cpp
inference-engine/tests/functional/plugin/gpu/remote_blob_tests/cldnn_remote_blob_tests.cpp
inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/activation.cpp
inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/concat.cpp
inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/convolution.cpp
inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/group_convolution.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/lrn.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/multiply.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/mvn.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/pooling.cpp
inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/reshape.cpp
inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/select.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/split.cpp
inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/strided_slice.cpp
inference-engine/tests/functional/plugin/gpu/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/gpu/shared_tests_instances/subgraph_tests/split_conv_concat.cpp
inference-engine/tests/functional/plugin/myriad/ngraph/conversions/dynamic_shape_resolver.cpp
inference-engine/tests/functional/plugin/myriad/ngraph/operations/dynamic_shape_resolver.cpp
inference-engine/tests/functional/plugin/myriad/ngraph/operations/static_shape_nonzero.cpp
inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_binary_elementwise.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_clamp.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_convert.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_non_max_suppression.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_nonzero.cpp
inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_roialign.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_scatter.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_squeeze.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_transpose.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_unary_elementwise.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_unsqueeze.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/ngraph/utils/ngraph_utils.h [deleted file]
inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/concat.cpp
inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/convolution.cpp
inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/maximum.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/multiply.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/nonzero.cpp
inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/pooling.cpp
inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/split.cpp
inference-engine/tests/functional/plugin/myriad/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/shared_tests_instances/subgraph_tests/split_conv_concat.cpp
inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_binary_elementwise.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_clamp.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_convert.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_non_max_suppression.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_roialign.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_scatter.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_squeeze.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_transpose.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_unary_elementwise.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_unsqueeze.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/include/execution_graph_tests/unique_node_names.hpp
inference-engine/tests/functional/plugin/shared/include/ngraph_conversion_tests/conv_bias_fusion.hpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/include/other/add_output.hpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/include/single_layer_tests/activation.hpp
inference-engine/tests/functional/plugin/shared/include/single_layer_tests/add.hpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/include/single_layer_tests/batch_to_space.hpp
inference-engine/tests/functional/plugin/shared/include/single_layer_tests/concat.hpp
inference-engine/tests/functional/plugin/shared/include/single_layer_tests/convolution.hpp
inference-engine/tests/functional/plugin/shared/include/single_layer_tests/group_convolution.hpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/include/single_layer_tests/lrn.hpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/include/single_layer_tests/maximum.hpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/include/single_layer_tests/multiply.hpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/include/single_layer_tests/mvn.hpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/include/single_layer_tests/nonzero.hpp
inference-engine/tests/functional/plugin/shared/include/single_layer_tests/pooling.hpp
inference-engine/tests/functional/plugin/shared/include/single_layer_tests/reshape.hpp
inference-engine/tests/functional/plugin/shared/include/single_layer_tests/select.hpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/include/single_layer_tests/softmax.hpp
inference-engine/tests/functional/plugin/shared/include/single_layer_tests/space_to_batch.hpp
inference-engine/tests/functional/plugin/shared/include/single_layer_tests/split.hpp
inference-engine/tests/functional/plugin/shared/include/single_layer_tests/strided_slice.hpp
inference-engine/tests/functional/plugin/shared/include/subgraph_tests/reshape_squeeze_reshape_relu.hpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/include/subgraph_tests/split_conv_concat.hpp
inference-engine/tests/functional/plugin/shared/src/execution_graph_tests/unique_node_names.cpp
inference-engine/tests/functional/plugin/shared/src/ngraph_conversion_tests/conv_bias_fusion.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/src/ngraph_conversion_tests/plugin_specific_ngraph_conversion.cpp
inference-engine/tests/functional/plugin/shared/src/other/add_output.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/src/single_layer_tests/add.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/src/single_layer_tests/batch_to_space.cpp
inference-engine/tests/functional/plugin/shared/src/single_layer_tests/concat.cpp
inference-engine/tests/functional/plugin/shared/src/single_layer_tests/convolution.cpp
inference-engine/tests/functional/plugin/shared/src/single_layer_tests/group_convolution.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/src/single_layer_tests/lrn.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/src/single_layer_tests/maximum.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/src/single_layer_tests/multiply.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/src/single_layer_tests/mvn.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/src/single_layer_tests/nonzero.cpp
inference-engine/tests/functional/plugin/shared/src/single_layer_tests/pooling.cpp
inference-engine/tests/functional/plugin/shared/src/single_layer_tests/reshape.cpp
inference-engine/tests/functional/plugin/shared/src/single_layer_tests/select.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/src/single_layer_tests/softmax.cpp
inference-engine/tests/functional/plugin/shared/src/single_layer_tests/space_to_batch.cpp
inference-engine/tests/functional/plugin/shared/src/single_layer_tests/split.cpp
inference-engine/tests/functional/plugin/shared/src/single_layer_tests/strided_slice.cpp
inference-engine/tests/functional/plugin/shared/src/subgraph_tests/reshape_squeeze_reshape_relu.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/src/subgraph_tests/split_conv_concat.cpp
inference-engine/tests/ie_test_utils/CMakeLists.txt
inference-engine/tests/ie_test_utils/common_test_utils/CMakeLists.txt
inference-engine/tests/ie_test_utils/common_test_utils/test_constants.hpp
inference-engine/tests/ie_test_utils/functional_test_utils/blob_utils.hpp
inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.cpp
inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.hpp
inference-engine/tests/ie_test_utils/functional_test_utils/network_utils.cpp
inference-engine/tests/ie_test_utils/functional_test_utils/plugin_cache.hpp
inference-engine/tests/ngraph_functions/include/ngraph_functions/builders.hpp
inference-engine/tests/ngraph_functions/include/ngraph_functions/select.hpp [new file with mode: 0644]
inference-engine/tests/ngraph_functions/include/ngraph_functions/subgraph_builders.hpp
inference-engine/tests/ngraph_functions/include/ngraph_functions/utils/ngraph_helpers.hpp
inference-engine/tests/ngraph_functions/src/activation.cpp
inference-engine/tests/ngraph_functions/src/group_convolution.cpp [new file with mode: 0644]
inference-engine/tests/ngraph_functions/src/mvn.cpp [new file with mode: 0644]
inference-engine/tests/ngraph_functions/src/select.cpp [new file with mode: 0644]
inference-engine/tests/ngraph_functions/src/squeeze.cpp [new file with mode: 0644]
inference-engine/tests/ngraph_functions/src/unsqueeze.cpp [new file with mode: 0644]
inference-engine/tests/ngraph_functions/src/utils/ngraph_helpers.cpp
inference-engine/tests/unit/CMakeLists.txt
inference-engine/tests/unit/vpu/CMakeLists.txt
inference-engine/tests/unit/vpu/base/graph_transformer_tests.cpp [new file with mode: 0644]
inference-engine/tests/unit/vpu/base/graph_transformer_tests.hpp [new file with mode: 0644]
inference-engine/tests/unit/vpu/frontend_tests/dsr_parsing_tests.cpp [new file with mode: 0644]
inference-engine/tests/unit/vpu/middleend_tests/edges_tests/data_to_shape_edge.cpp [new file with mode: 0644]
inference-engine/tests/unit/vpu/middleend_tests/edges_tests/stage_dependency_edge.cpp [new file with mode: 0644]
inference-engine/tests/unit/vpu/utils_tests/heap_test.cpp [moved from inference-engine/tests/unit/vpu/heap_test.cpp with 100% similarity]
inference-engine/tests_deprecated/CMakeLists.txt
inference-engine/tests_deprecated/behavior/CMakeLists.txt [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/cldnn/CMakeLists.txt [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/cpp_wrappers/holders_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_config.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_exec_graph_info.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_callback.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_layers.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_layout.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_perf_counters.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_set_preprocess.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_unsupported.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_version.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/cldnn_test_data.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/skip_tests_config.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/gna/CMakeLists.txt [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/cpp_wrappers/holders_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_config.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_exec_graph_info.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_callback.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_layers.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_layout.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_unsupported.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_version.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/gna_test_data.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/gna_test_data.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/skip_tests_config.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/mkldnn/CMakeLists.txt [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/cpp_wrappers/holders_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_config.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_exec_graph_info.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_callback.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_layers.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_layout.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_set_preprocess.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_unsupported.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_version.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/mkldnn_test_data.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/skip_tests_config.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/shared_tests/CMakeLists.txt [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/shared_tests/cpp_wrappers/holders_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin.h [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_config.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_exec_graph_info.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_callback.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_config.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_fixture.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_input.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_output.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_layers.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_layout.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_perf_counters.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_set_preprocess.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_unsupported.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_version.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugins.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/CMakeLists.txt [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/myriad_tests/aot_behavior_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/myriad_tests/helpers/myriad_devices.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/myriad_tests/helpers/myriad_devices.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/myriad_tests/helpers/myriad_load_network_case.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/myriad_tests/helpers/myriad_load_network_case.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/myriad_tests/helpers/myriad_protocol_case.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/myriad_tests/helpers/myriad_protocol_case.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/myriad_tests/vpu_boot_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/myriad_tests/vpu_get_metric_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/myriad_tests/vpu_load_network_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/myriad_tests/vpu_protocol_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/myriad_tests/vpu_watchdog_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/cpp_wrappers/holders_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_config.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_exec_graph_info.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_callback.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_layers.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_layout.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_set_preprocess.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_unsupported.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_version.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/vpu_test_data.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/skip_tests_config.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/fluid_preproc/CMakeLists.txt [new file with mode: 0644]
inference-engine/tests_deprecated/fluid_preproc/common/fluid_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/fluid_preproc/common/fluid_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/fluid_preproc/common/fluid_tests_common.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/fluid_preproc/cpu/fluid_tests_cpu.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/fluid_preproc/fluid_test_computations/CMakeLists.txt [new file with mode: 0644]
inference-engine/tests_deprecated/fluid_preproc/fluid_test_computations/fluid_test_computations.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/fluid_preproc/fluid_test_computations/fluid_test_computations.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/CMakeLists.txt [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/CMakeLists.txt [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/regression_tests/regression_reference.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/common_single_layer_tests/single_layer_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/ie_class/ie_class.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/inference_engine_regression_tests/common_dyn_batch_regression.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/input_tests/parser_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/io_blob_tests/cropResize_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/io_blob_tests/dims_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/io_blob_tests/layout_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/lstm/lstm_cell_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/lstm/lstm_ir_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/lstm/rnn_seq_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/activation_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/arg_max_min_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/bin_conv_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/conv_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/deformable_psroipooling_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/depth_to_space_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/eltwise_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/gather_ftests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/gemm_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/one_hot_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/pad_ftests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/permute_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/quantize_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/reduce_ftests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/resample_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/softmax_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/space_to_depth_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/ti_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/topk_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/transformations/low_precision_single_layers_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/convert_like_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/expand_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/gather_tree_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/power_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/priorbox_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/reverse_sequence_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/select_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/shuffle_channels_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/squeeze_unsqueeze_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/strided_slice_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/transpose_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/variadic_split_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/cldnn/test_model_repo.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/gna/CMakeLists.txt [new file with mode: 0644]
inference-engine/tests_deprecated/functional/gna/shared_tests_instance/ie_class/ie_class.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/gna/shared_tests_instance/input_tests/parser_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/gna/shared_tests_instance/lstm/lstm_cell_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/gna/shared_tests_instance/single_layer_tests/ti_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/gna/test_model_repo.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/CMakeLists.txt [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/include/base_matcher.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/include/classification_matcher.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/include/custom_matcher.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/include/ie_core_adapter.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/include/label_probability.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/include/net_model.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/include/object_detection_matcher.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/include/optimized_network_matcher.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/include/raw_matcher.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/include/regression_config.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/include/regression_reference.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/include/regression_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/include/segmentation_matcher.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/src/base_matcher.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/src/classification_matcher.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/src/custom_matcher.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/src/ie_core_adapter.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/src/net_model.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/src/object_detection_matcher.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/src/optimized_network_matcher.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/src/raw_matcher.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/ie_tests/src/segmentation_matcher.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/CMakeLists.txt [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/config_param_test/config_param_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/extensions_tests/extensions_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/network_tests/ngraph_network_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/regression_tests/regression_reference.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/common_single_layer_tests/single_layer_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/graph_tools/graph_tools_functional_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/ie_class/ie_class.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/inference_engine_regression_tests/common_dyn_batch_regression.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/input_tests/parser_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/io_blob_tests/cropResize_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/io_blob_tests/dims_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/io_blob_tests/layout_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/lstm/lstm_cell_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/lstm/lstm_ir_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/lstm/rnn_seq_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/network_tests/network_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/activation_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/arg_max_min_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/bin_conv_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/deformable_psroi_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/depth_to_space_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/eltwise_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/gather_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/gemm_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/pad_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/permute_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/quantize_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/reduce_ftests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/resample_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/softmax_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/ti_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/tile_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/transformations/low_precision_transformer_single_layer_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/argmax_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/concat_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/conv_int8_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/conv_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/conv_tests_int8.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/crop_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/detectionout_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/fullycon_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/gather_tree_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_batchnorm_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_deconv_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_logistic_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_power_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_roipooling_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_scaleshift_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_simplernms_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/network_stats.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/network_stats.h [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/norm_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/pooling_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/priorbox_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/region_yolo_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/snippet_test/multi_out_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/snippet_test/tripple_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/mkldnn/test_model_repo.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/CMakeLists.txt [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/conv_ref.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/conv_ref.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/deconv_ref.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/deconv_ref.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/def_conv_ref.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/def_conv_ref.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/pool_ref.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/pool_ref.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/single_layer_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/graph_tools/graph_tools_functional_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/ie_class/ie_class.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/inference_engine_regression_tests/common_dyn_batch_regression.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/input_tests/parser_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/io_blob_tests/cropResize_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/io_blob_tests/dims_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/io_blob_tests/layout_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/lstm/lstm_cell_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/lstm/lstm_ir_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/lstm/npy.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/lstm/plg_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_gen.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_gen.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_referee.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_referee.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_seq_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_util.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_util.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/network_tests/network_i8.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/activation_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/arg_max_min_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/bin_conv_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/conv_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/deformable_psroi_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/depth_to_space_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/eltwise_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/gather_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/gather_tree_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/gemm_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/one_hot_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/pad_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/permute_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/quantize_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/reduce_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/resample_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/select_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/softmax_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/space_to_depth_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/ti_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/tile_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/topk_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/variadic_split_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/common/low_precision_tests_utils.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/common/low_precision_tests_utils.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/common/validation.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/common/validation.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/concat_multi_branch_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/concat_multi_channels_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/concat_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_dequantization_scaleshift_and_quantize_on_activations_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_dequantization_scaleshifts_on_activations_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_pooling_and_quantize_on_activations_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_activations_and_weights_simple_base_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_activations_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_signed_activations_and_inverted_weights_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_signed_activations_and_weights_negative_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_signed_activations_and_weights_positive_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_unsigned_activations_and_weights_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_weights_with_multi_output_intervals_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_weights_without_const_transformation_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_base_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_depthwise_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_grouped_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/eltwise_broadcast_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/eltwise_fq_with_children_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/eltwise_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/eltwise_with_pooling_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/fake_quantize_and_activation_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/fake_quantize_and_activation_with_negative_scales_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/fake_quantize_and_activation_with_negative_slope_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/fake_quantize_and_scaleshift_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/fake_quantize_reshape_pooling_test_model_with_constants_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/fake_quantize_reshape_pooling_test_model_without_constants_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/fake_quantize_reshape_test_model_with_constants_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/fc_and_scaleshifts_on_activations_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/fq_as_output.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/fq_with_multioutputs.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/fq_with_two_scale_shifts_as_output.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/fully_connected_base_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/low_precision_transformer_single_layer_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/mvn_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/pooling_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/precision_selection_multibranch_not_preserved.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/precision_selection_multibranch_preserved.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/quantization_on_inverted_weights_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/quantization_on_weights_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/resample_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/scaleshift_and_fake_quantize_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/scaleshift_to_conv_after_concat_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/scaleshift_to_conv_after_fakequantize_ignore_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/scaleshift_to_conv_after_not_concat_ignore_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/single_layer_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/single_layer_transformations_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/update_biases_convolution_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/shared_tests/transformations/update_biases_fully_connected_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/CMakeLists.txt [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/bbox_util.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/bbox_util.h [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/blob_reader_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_CTCDecoder_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_CTCDecoder_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_batch_normalization_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_batch_normalization_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_bias_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_bias_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_blob_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_clamp_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_clamp_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_concat_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_concat_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_conv_nd_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_conv_nd_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convert_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convert_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution1x1.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution1x1.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution3x3.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution3x3.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_copy_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_copy_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_crop_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_crop_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_custom_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_custom_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_deconvolution_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_deconvolution_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_detection_output_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_eltwise_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_eltwise_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_elu_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_elu_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_erf_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_erf_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_detectionoutput_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_detectionoutput_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_generateproposals.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_generateproposals_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_priorgridgenerator_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_priorgridgenerator_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_topkrois_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_topkrois_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_flatten_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_flatten_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_floor_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_floor_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_fully_connected_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_fully_connected_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_gather_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_gather_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_gemm_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_gemm_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_grn_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_grn_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_interp_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_interp_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_log_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_log_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_lrn_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_lrn_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_lstm_cell.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_lstm_cell.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_mvn_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_mvn_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_nms_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_nms_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_nonzero_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_nonzero_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_normalize_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_normalize_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_oneHot_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_oneHot_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pad_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pad_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_permute_nd_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_permute_nd_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_permute_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_permute_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pool_nd_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pool_nd_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pooling_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pooling_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_power_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_power_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_prelu_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_prelu_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_prior_box_clustered_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_prior_box_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_proposal_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_psroipooling_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_psroipooling_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reduce_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reduce_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_region_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_region_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_relu_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_relu_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reorg_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reorg_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_resample_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_resample_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reshape_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reshape_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reverse_sequence_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reverse_sequence_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_rfcn_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_align_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_align_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_feature_extractor_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_feature_extractor_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_pooling_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_pooling_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_scale_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_scale_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_scatter_elements_update_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_scatter_elements_update_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_scatter_update_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_scatter_update_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_select_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_select_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_sigmoid_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_sigmoid_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_slice_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_slice_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_softmax_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_softmax_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_split_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_split_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_squeeze_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_squeeze_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_strided_slice_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_strided_slice_test.h [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_tanh_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_tanh_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_tensor_iterator_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_tile_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_tile_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_topk_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_topk_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_unsqueeze_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_unsqueeze_test.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/layers/weights_for_convolution_test.h [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/myriad_get_output_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/myriad_get_output_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/myriad_get_perf_count_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_conv_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_extra_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_fc_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_network_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_opt_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_opt_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_pool_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_tests_base.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/myriad_infer_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/myriad_merge_permute_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/myriad_merge_permute_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/myriad_xml_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/reference_regression.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_case_common.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_case_common.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_case_params.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_classification_case.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_classification_case.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_param_containers.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_param_containers.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_raw_results_case.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_raw_results_case.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/graph_transformer/gt_functional_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/graph_transformer/gt_functional_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/graph_transformer/merge_permute_and_reorder_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/myriad_tests/myriad_configs_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/myriad_tests/myriad_multiple_graph_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/myriad_tests/myriad_streams_configuration_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/myriad_tests/vpu_tests_config.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/shared_tests_instance/common_single_layer_tests/single_layer_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/shared_tests_instance/ie_class/ie_class.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/shared_tests_instance/input_tests/parser_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/shared_tests_instance/io_blob_tests/cropResize_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/shared_tests_instance/io_blob_tests/dims_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/shared_tests_instance/io_blob_tests/layout_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/test_data/test_model_repo.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/tester.py [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/vpu_base/myriad_layers_reference_functions.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/vpu_base/myriad_layers_reference_functions.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/vpu_base/myriad_layers_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/vpu_base/myriad_layers_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_ir_dumper.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_ir_dumper.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_layer_tests_utils.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_layer_tests_utils.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_layers_tests.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_layers_tests.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_test_common_definitions.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_test_net.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_test_net.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_tests_config.hpp [new file with mode: 0644]
inference-engine/tests_deprecated/helpers/single_layer_common.hpp
inference-engine/tests_deprecated/helpers/test_model_repo.hpp
inference-engine/tests_deprecated/helpers/tests_common.cpp
inference-engine/tests_deprecated/helpers/tests_common.hpp
inference-engine/tests_deprecated/helpers/tests_vpu_common.hpp
inference-engine/tests_deprecated/mock_engine/CMakeLists.txt
inference-engine/tests_deprecated/unit/CMakeLists.txt
inference-engine/tests_deprecated/unit/builders/argmax_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/batch_normalization_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/builder_test.hpp [deleted file]
inference-engine/tests_deprecated/unit/builders/clamp_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/concat_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/const_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/convolution_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/crop_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/ctc_greedy_decoder_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/deconvolution_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/detection_output_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/eltwise_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/elu_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/input_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/memory_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/mvn_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/network_builder_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/norm_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/normalize_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/output_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/relu6_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/relu_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/resample_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/split_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/builders/tanh_layer_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/cnn_network/cnn_layer_validation_tests.cpp
inference-engine/tests_deprecated/unit/cnn_network/cnn_net_reader_impl_test.cpp
inference-engine/tests_deprecated/unit/engines/gna/I8_quantisation_test.cpp
inference-engine/tests_deprecated/unit/engines/gna/gna_hardware_precision_test.cpp
inference-engine/tests_deprecated/unit/engines/gna/gna_matcher.cpp
inference-engine/tests_deprecated/unit/engines/gna/i16_quantisation_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/constant_propagation_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/dump_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/dumper_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/broadcast_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/bucketize_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/depth_to_space_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/fill_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/gather_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/graph_generic_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/interp_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/log_softmax_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/math_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/mvn_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/non_max_suppression_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/normalize_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/onehot_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/range_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/reduce_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/resample_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/reverse_sequence_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/scatter_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/select_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/shuffle_channels_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/sparse_fill_empty_rows_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/sparse_segment_reduce_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/sparse_to_dense_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/sparse_weighted_reduce_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/strided_slice_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/topk_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/unique_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_activation_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_batchnorm_scaleshift_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_batchnorm_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_concat_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_conv_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_crop_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_deconv_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_depthwise_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_eltwise_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_fullyconnected_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_gemm_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_input_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_leaks_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_lrn_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_permute_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_pooling_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_power_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_relu_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_reorder_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_reshape_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_roi_pooling_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_simplernms_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_softmax_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_split_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_tile_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_conv_concat_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_conv_depthwise_fusing_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_deconv_concat_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_dw_conv_fusing_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_optimization_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_structure_test.cpp
inference-engine/tests_deprecated/unit/engines/vpu/adjust_data_location_tests.cpp
inference-engine/tests_deprecated/unit/engines/vpu/eliminate_copy_tests.cpp
inference-engine/tests_deprecated/unit/engines/vpu/get_vpu_scale_from_ir_tests.cpp
inference-engine/tests_deprecated/unit/engines/vpu/graph_transformer_tests_constructs.cpp
inference-engine/tests_deprecated/unit/engines/vpu/mvnc/watchdog_tests.cpp
inference-engine/tests_deprecated/unit/graph_tools/graph_copy_tests.cpp
inference-engine/tests_deprecated/unit/inference_engine_tests/blob_test.cpp
inference-engine/tests_deprecated/unit/inference_engine_tests/cnn_network_test.cpp
inference-engine/tests_deprecated/unit/inference_engine_tests/cnn_ngraph_impl_tests.cpp
inference-engine/tests_deprecated/unit/inference_engine_tests/convert_ngraph_to_cnn_network_tests.cpp
inference-engine/tests_deprecated/unit/inference_engine_tests/inference_engine_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/inference_engine_tests/local_test.cpp
inference-engine/tests_deprecated/unit/inference_engine_tests/network_serializer_tests.cpp
inference-engine/tests_deprecated/unit/inference_engine_tests/normalization/latest_in_fuse_test.cpp
inference-engine/tests_deprecated/unit/inference_engine_tests/plugin_dispatcher_tests.cpp
inference-engine/tests_deprecated/unit/inference_engine_tests/so_pointer_tests.cpp
inference-engine/tests_deprecated/unit/shape_infer/adult_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/shape_infer/adult_test.hpp [deleted file]
inference-engine/tests_deprecated/unit/shape_infer/adult_test_utils.cpp [deleted file]
inference-engine/tests_deprecated/unit/shape_infer/adult_test_utils.hpp [deleted file]
inference-engine/tests_deprecated/unit/shape_infer/built_in_holder_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/shape_infer/built_in_shape_infer_batch_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/shape_infer/built_in_shape_infer_conv_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/shape_infer/built_in_shape_infer_fake_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/shape_infer/built_in_shape_infer_general_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/shape_infer/built_in_shape_infer_general_test.hpp [deleted file]
inference-engine/tests_deprecated/unit/shape_infer/built_in_shape_infer_pool_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/shape_infer/input_controller_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/shape_infer/input_reshape_launcher_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/shape_infer/models_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/shape_infer/output_controller_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/shape_infer/reshape_launcher_test.cpp [deleted file]
inference-engine/tests_deprecated/unit/shape_infer/reshaper_test.cpp [deleted file]
inference-engine/thirdparty/CMakeLists.txt
inference-engine/thirdparty/clDNN/api/activation.hpp
inference-engine/thirdparty/clDNN/api/select.hpp
inference-engine/thirdparty/clDNN/api/tensor.hpp
inference-engine/thirdparty/clDNN/kernel_selector/common/common_types.h
inference-engine/thirdparty/clDNN/kernel_selector/common/tensor_type.cpp
inference-engine/thirdparty/clDNN/kernel_selector/common/tensor_type.h
inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_b_fs_yx_fsv16.cpp
inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_base.cpp
inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_bfyx_to_b_fs_yx_fsv32.cpp [moved from inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_bfyx_b_fs_yx_fsv32.cpp with 62% similarity]
inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_bfyx_to_b_fs_yx_fsv32.h [moved from inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_mmad_bfyx_b_fs_yx_fsv32.h with 76% similarity]
inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_selector.cpp
inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_multiple_features.cpp
inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_multiple_features.h
inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_opt_b8.cpp
inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_opt_b8.h
inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_ref.cpp
inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_across_channel_ref.h
inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_base.cpp
inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_base.h
inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_ref.cpp
inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_ref.h
inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_byxf_opt.cpp
inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_byxf_opt.h
inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_ref.cpp
inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_ref.h
inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_ref_opt.cpp
inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/lrn/lrn_kernel_within_channel_ref_opt.h
inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/convolution_gpu_bfyx_f16.cl
inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/convolution_gpu_bfyx_f16_1x1.cl
inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/convolution_gpu_bfyx_f16_depthwise.cl
inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/convolution_gpu_bfyx_to_bfyx_f16.cl
inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/convolution_gpu_bfyx_to_fs_byx_fsv32.cl
inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/convolution_gpu_mmad_bfyx_b_fs_yx_fsv32.cl [deleted file]
inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/convolution_gpu_mmad_bfyx_to_b_fs_yx_fsv32.cl [new file with mode: 0644]
inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/gen9_common_conv_fwd_data_f16.cl
inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/gen9_common_conv_fwd_data_f32.cl
inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/include/fetch.cl
inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/lrn_gpu_across_channel_multiple_features.cl
inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/lrn_gpu_across_channel_ref.cl
inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/lrn_gpu_across_channel_yxfb_b8_opt.cl
inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/lrn_gpu_within_channel.cl
inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/lrn_gpu_within_channel_opt.cl
inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/lrn_ref.cl
inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/lrn_within_channel_byxf_opt.cl
inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/mvn_gpu_b_fs_yx_fsv16_imad.cl
inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/mvn_gpu_b_fs_yx_fsv16_imad_reduce.cl
inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/reorder_weights.cl
inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/reverse_sequence_ref.cl
inference-engine/thirdparty/clDNN/kernel_selector/core/common/jitter.cpp
inference-engine/thirdparty/clDNN/kernel_selector/core/kernel_selector_common.cpp
inference-engine/thirdparty/clDNN/src/gpu/lrn_gpu.cpp
inference-engine/thirdparty/clDNN/src/gpu/quantize_gpu.cpp
inference-engine/thirdparty/clDNN/src/graph_optimizer/prepare_primitive_fusing.cpp
inference-engine/thirdparty/clDNN/src/graph_optimizer/remove_redundant_reorders.cpp
inference-engine/thirdparty/clDNN/src/include/to_string_utils.h
inference-engine/thirdparty/clDNN/src/kernel_selector_helper.cpp
inference-engine/thirdparty/clDNN/src/layout_optimizer.cpp
inference-engine/thirdparty/clDNN/src/lrn.cpp
inference-engine/thirdparty/clDNN/src/select.cpp
inference-engine/thirdparty/clDNN/tests/test_cases/activation_simple_gpu_test.cpp
inference-engine/thirdparty/clDNN/tests/test_cases/convolution_gpu_test.cpp
inference-engine/thirdparty/clDNN/tests/test_cases/fusings_gpu_test.cpp
inference-engine/thirdparty/clDNN/tests/test_cases/reverse_sequence_gpu_test.cpp
inference-engine/thirdparty/clDNN/tests/test_cases/select_gpu_test.cpp
inference-engine/thirdparty/fluid/modules/gapi/src/backends/ie/giebackend.cpp
inference-engine/thirdparty/fluid/modules/gapi/test/infer/gapi_infer_ie_test.cpp
inference-engine/thirdparty/mkl-dnn/include/mkldnn_debug.h
inference-engine/thirdparty/mkl-dnn/src/common/mkldnn_debug.cpp
inference-engine/thirdparty/mkl-dnn/src/cpu/jit_uni_depthwise.cpp
inference-engine/thirdparty/mkl-dnn/src/cpu/jit_uni_depthwise.hpp
inference-engine/thirdparty/mkl-dnn/src/cpu/jit_uni_quantization.cpp
inference-engine/thirdparty/mkl-dnn/src/cpu/jit_uni_quantization.hpp
inference-engine/thirdparty/mkl-dnn/tests/benchdnn/mkldnn_debug.cpp
inference-engine/thirdparty/movidius/mvnc/src/watchdog/watchdog.cpp
inference-engine/thirdparty/movidius/vpualHost.patch [deleted file]
inference-engine/thirdparty/movidius/vpualHost_clone_repo.sh [deleted file]
inference-engine/tools/compile_tool/CMakeLists.txt
inference-engine/tools/vpu/common/vpu_tools_common.cpp
inference-engine/tools/vpu/vpu_compile/CMakeLists.txt
inference-engine/tools/vpu/vpu_perfcheck/CMakeLists.txt
inference-engine/tools/vpu/vpu_perfcheck/main.cpp
inference-engine/tools/vpu/vpu_profile/CMakeLists.txt
model-optimizer/.pylintrc
model-optimizer/README.md
model-optimizer/automation/package_BOM.txt
model-optimizer/extensions/back/op_versioning.py
model-optimizer/extensions/back/remove_last_softmax_pattern.py
model-optimizer/extensions/back/remove_last_softmax_test.py
model-optimizer/extensions/front/LogSoftmax.py
model-optimizer/extensions/front/LogSoftmax_test.py
model-optimizer/extensions/front/onnx/flattenONNX_to_reshape.py
model-optimizer/extensions/front/onnx/logsoftmaxONNX_to_logsoftmax.py [new file with mode: 0644]
model-optimizer/extensions/front/onnx/softmax_ext.py
model-optimizer/extensions/front/onnx/where_ext.py [new file with mode: 0644]
model-optimizer/extensions/front/tf/activation_ext.py
model-optimizer/extensions/middle/ConvertGroupedStridedSlice.py
model-optimizer/extensions/middle/ConvertGroupedStridedSlice_test.py
model-optimizer/extensions/middle/InsertLayoutPropagationTransposes.py
model-optimizer/extensions/middle/MarkSubgraphsWithCorrectLayout.py [new file with mode: 0644]
model-optimizer/extensions/middle/UnsqueezeTileReshapeBlockToInterpolate.py
model-optimizer/extensions/ops/one_hot.py
model-optimizer/extensions/ops/one_hot_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/select.py
model-optimizer/extensions/ops/select_test.py
model-optimizer/extensions/ops/upsample.py
model-optimizer/extensions/ops/upsample_test.py
model-optimizer/mo/front/common/partial_infer/utils.py
model-optimizer/mo/ops/softmax.py
model-optimizer/mo/ops/strided_slice.py
model-optimizer/mo/ops/strided_slice_test.py
ngraph
tests/fuzz/CMakeLists.txt [new file with mode: 0644]
tests/fuzz/README.md [new file with mode: 0644]
tests/fuzz/fuzz-testhelper/CMakeLists.txt [new file with mode: 0644]
tests/fuzz/fuzz-testhelper/main-testhelper.cc [new file with mode: 0644]
tests/fuzz/fuzz-testhelper/main.h [new file with mode: 0644]
tests/fuzz/src/CMakeLists.txt [new file with mode: 0644]
tests/fuzz/src/read_network-fuzzer.cc [new file with mode: 0644]
tests/stress_tests/.automation/memcheck_tests/nightly_configs/desktop_references_config.xml
tests/stress_tests/.automation/memcheck_tests/precommit_configs/desktop_env_config.xml [new file with mode: 0644]
tests/stress_tests/.automation/memcheck_tests/precommit_configs/desktop_references_config.xml [new file with mode: 0644]
tests/stress_tests/.automation/memcheck_tests/precommit_configs/desktop_test_config.xml [new file with mode: 0644]
tests/stress_tests/.automation/memcheck_tests/weekly_configs/desktop_references_config.xml
tests/stress_tests/.automation/memleaks_tests/precommit_configs/desktop_env_config.xml [new file with mode: 0644]
tests/stress_tests/.automation/memleaks_tests/precommit_configs/desktop_test_config.xml [new file with mode: 0644]
tests/stress_tests/.automation/memleaks_tests/weekly_configs/desktop_test_config.xml
tests/stress_tests/.automation/unittests/weekly_configs/desktop_test_config.xml
tests/stress_tests/.gitignore [new file with mode: 0644]
tests/stress_tests/common/ie_pipelines/pipelines.cpp
tests/stress_tests/common/ie_pipelines/pipelines.h
tests/stress_tests/common/managers/task_manager.h
tests/stress_tests/common/managers/thread_manager.h
tests/stress_tests/common/tests_utils.cpp
tests/stress_tests/common/tests_utils.h
tests/stress_tests/common/utils.cpp
tests/stress_tests/common/utils.h
tests/stress_tests/memcheck_tests/flags.h
tests/stress_tests/memcheck_tests/local_configs/references_config.xml
tests/stress_tests/memcheck_tests/main.cpp
tests/stress_tests/memcheck_tests/tests.cpp
tests/stress_tests/memcheck_tests/tests_pipelines/tests_pipelines.cpp
tests/stress_tests/memcheck_tests/tests_pipelines/tests_pipelines.h
tests/stress_tests/memcheck_tests/tests_utils.h
tests/stress_tests/memleaks_tests/flags.h
tests/stress_tests/memleaks_tests/main.cpp
tests/stress_tests/memleaks_tests/tests.cpp
tests/stress_tests/memleaks_tests/tests_pipelines/tests_pipelines.cpp
tests/stress_tests/memleaks_tests/tests_pipelines/tests_pipelines.h
tests/stress_tests/scripts/get_testdata.py
tests/stress_tests/unittests/flags.h
tests/stress_tests/unittests/main.cpp
tests/stress_tests/unittests/tests.cpp
tests/stress_tests/unittests/tests_pipelines/tests_pipelines.cpp
tests/stress_tests/unittests/tests_pipelines/tests_pipelines.h
tests/stress_tests/unittests/tests_pipelines/tests_pipelines_full_pipeline.cpp
tools/benchmark/utils/inputs_filling.py

index 5c03862..e2fa3b5 100644 (file)
@@ -4,14 +4,17 @@
 
 if(NOT TARGET ie_coverage_clean)
     add_custom_target(ie_coverage_clean)
+    set_target_properties(ie_coverage_clean PROPERTIES FOLDER coverage)
 endif()
 
 if(NOT TARGET ie_coverage_init)
     add_custom_target(ie_coverage_init)
+    set_target_properties(ie_coverage_init PROPERTIES FOLDER coverage)
 endif()
 
 if(NOT TARGET ie_coverage)
     add_custom_target(ie_coverage)
+    set_target_properties(ie_coverage PROPERTIES FOLDER coverage)
 endif()
 
 set(IE_COVERAGE_REPORTS "${CMAKE_BINARY_DIR}/coverage")
@@ -26,10 +29,10 @@ function(ie_coverage_clean)
     cmake_parse_arguments(IE_COVERAGE "" "REPOSITORY;DIRECTORY" "" ${ARGN})
 
     add_custom_target(ie_coverage_zerocounters_${IE_COVERAGE_REPOSITORY}
-                  COMMAND lcov --zerocounters --quiet
-                               --directory "${IE_COVERAGE_DIRECTORY}"
-                  COMMENT "Add zero counters for coverage for ${IE_COVERAGE_REPOSITORY}"
-                  VERBATIM)
+                      COMMAND lcov --zerocounters --quiet
+                                   --directory "${IE_COVERAGE_DIRECTORY}"
+                      COMMENT "Add zero counters for coverage for ${IE_COVERAGE_REPOSITORY}"
+                      VERBATIM)
 
     add_custom_target(ie_coverage_clean_${IE_COVERAGE_REPOSITORY}
                       COMMAND ${CMAKE_COMMAND}
@@ -42,6 +45,10 @@ function(ie_coverage_clean)
                       DEPENDS "${IE_COVERAGE_SCRIPT_DIR}/coverage_clean.cmake"
                       VERBATIM)
 
+    set_target_properties(ie_coverage_zerocounters_${IE_COVERAGE_REPOSITORY}
+                          ie_coverage_clean_${IE_COVERAGE_REPOSITORY}
+                          PROPERTIES FOLDER coverage)
+
     add_dependencies(ie_coverage_clean ie_coverage_zerocounters_${IE_COVERAGE_REPOSITORY}
                                        ie_coverage_clean_${IE_COVERAGE_REPOSITORY})
 endfunction()
@@ -87,6 +94,8 @@ function(ie_coverage_capture)
 
     add_custom_target(ie_coverage_${IE_COVERAGE_INFO_FILE}_info
                       DEPENDS ${output_file})
+    set_target_properties(ie_coverage_${IE_COVERAGE_INFO_FILE}_info
+                          PROPERTIES FOLDER coverage)
 endfunction()
 
 #
@@ -111,6 +120,8 @@ function(ie_coverage_extract)
                        VERBATIM)
     add_custom_target(ie_coverage_${IE_COVERAGE_OUTPUT}_info
                       DEPENDS ${output_file})
+    set_target_properties(ie_coverage_${IE_COVERAGE_OUTPUT}_info
+                          PROPERTIES FOLDER coverage)
 
     add_dependencies(ie_coverage_${IE_COVERAGE_OUTPUT}_info ie_coverage_${IE_COVERAGE_INPUT}_info)
 endfunction()
@@ -137,6 +148,8 @@ function(ie_coverage_remove)
                        VERBATIM)
     add_custom_target(ie_coverage_${IE_COVERAGE_OUTPUT}_info
                       DEPENDS ${output_file})
+    set_target_properties(ie_coverage_${IE_COVERAGE_OUTPUT}_info
+                          PROPERTIES FOLDER coverage)
 
     add_dependencies(ie_coverage_${IE_COVERAGE_OUTPUT}_info ie_coverage_${IE_COVERAGE_INPUT}_info)
 endfunction()
@@ -164,6 +177,8 @@ function(ie_coverage_merge)
                        VERBATIM)
     add_custom_target(ie_coverage_${IE_COVERAGE_OUTPUT}_info
                       DEPENDS ${output_file})
+    set_target_properties(ie_coverage_${IE_COVERAGE_OUTPUT}_info
+                          PROPERTIES FOLDER coverage)
 
     add_dependencies(ie_coverage_${IE_COVERAGE_OUTPUT}_info ${dependencies})
 endfunction()
@@ -188,6 +203,8 @@ function(ie_coverage_genhtml)
                        VERBATIM)
     add_custom_target(ie_coverage_${IE_COVERAGE_INFO_FILE}_genhtml
                       DEPENDS "${output_directory}/index.html")
+    set_target_properties(ie_coverage_${IE_COVERAGE_INFO_FILE}_genhtml
+                          PROPERTIES FOLDER coverage)
 
     add_dependencies(ie_coverage_${IE_COVERAGE_INFO_FILE}_genhtml ie_coverage_${IE_COVERAGE_INFO_FILE}_info)
     add_dependencies(ie_coverage ie_coverage_${IE_COVERAGE_INFO_FILE}_genhtml)
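
A hedged sketch of chaining these coverage helpers, using only the argument
names visible in the hunks above (the repository and info-file names here are
hypothetical, and any required arguments not shown in the hunks are omitted):

    ie_coverage_clean(REPOSITORY "myrepo" DIRECTORY "${CMAKE_BINARY_DIR}")
    ie_coverage_capture(INFO_FILE "myrepo")
    ie_coverage_extract(INPUT "myrepo" OUTPUT "myrepo_src")
    ie_coverage_genhtml(INFO_FILE "myrepo_src")

Each helper now also files its generated ie_coverage_* targets under the
"coverage" solution folder, which keeps IDE project trees tidy.
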
index a074ecd..c7a43b4 100644 (file)
@@ -36,9 +36,9 @@ function(ie_cpack_set_library_dir)
     endif()
 
     if(WIN32)
-        set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${CMAKE_BUILD_TYPE}/${ARCH} PARENT_SCOPE)
-        set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/bin/${CMAKE_BUILD_TYPE}/${ARCH} PARENT_SCOPE)
-        set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${CMAKE_BUILD_TYPE}/${ARCH} PARENT_SCOPE)
+        set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH}/${CMAKE_BUILD_TYPE} PARENT_SCOPE)
+        set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/bin/${ARCH}/${CMAKE_BUILD_TYPE} PARENT_SCOPE)
+        set(IE_CPACK_ARCHIVE_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH}/${CMAKE_BUILD_TYPE} PARENT_SCOPE)
     else()
         set(IE_CPACK_LIBRARY_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH} PARENT_SCOPE)
         set(IE_CPACK_RUNTIME_PATH ${IE_CPACK_IE_DIR}/lib/${ARCH} PARENT_SCOPE)
@@ -200,8 +200,8 @@ else()
 endif()
 
 if(APPLE)
-       set(CMAKE_MACOSX_RPATH 1)
-endif(APPLE)
+    set(CMAKE_MACOSX_RPATH ON)
+endif()
 
 # Use solution folders
 set_property(GLOBAL PROPERTY USE_FOLDERS ON)
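
The FOLDER properties added throughout this snapshot only take effect because
solution folders are enabled globally, as above. A minimal sketch of the
pattern (the target name is hypothetical):

    set_property(GLOBAL PROPERTY USE_FOLDERS ON)  # enable IDE solution folders

    add_custom_target(my_helper)                  # hypothetical utility target
    set_target_properties(my_helper PROPERTIES FOLDER coverage)

Without USE_FOLDERS, generators such as Visual Studio ignore the FOLDER
property and every target lands in one flat list.
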
index f275f68..437b2e2 100644 (file)
@@ -138,6 +138,14 @@ function (RESOLVE_DEPENDENCY NAME_OF_CMAKE_VAR)
 
 endfunction(RESOLVE_DEPENDENCY)
 
+function(resolve_model_dependency network archive network_model_path)
+    RESOLVE_DEPENDENCY(${network_model_path}
+        ARCHIVE "models_archives/${archive}"
+        TARGET_PATH "${MODELS_PATH}/${network}")
+    string(REPLACE ${MODELS_PATH} "" relative_path ${${network_model_path}})
+    set(${network_model_path} ".${relative_path}" PARENT_SCOPE)
+endfunction()
+
 function(reset_deps_cache)
     #
     # Reset the dependencies cache if it was set by dependency solver
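
A hypothetical call to the new helper (the network and archive names are
illustrative): it resolves the archive under ${MODELS_PATH} and rewrites the
output variable into a path relative to that models root:

    resolve_model_dependency("alexnet" "alexnet_models.zip" ALEXNET_MODEL_PATH)
    # ALEXNET_MODEL_PATH now starts with "./", relative to ${MODELS_PATH}
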
index f8f79df..2e16e73 100644 (file)
@@ -151,7 +151,9 @@ function (CheckOrDownloadAndExtract component RELATIVE_URL archive_name unpacked
   set (status "ON")
   set (on_master FALSE)
 
-  if(DEFINED ENV{IE_PATH_TO_DEPS})
+  if(DEFINED IE_PATH_TO_DEPS)
+    set(URL "${IE_PATH_TO_DEPS}/${RELATIVE_URL}")
+  elseif(DEFINED ENV{IE_PATH_TO_DEPS})
     set(URL "$ENV{IE_PATH_TO_DEPS}/${RELATIVE_URL}")
   else()
     set(URL "https://download.01.org/opencv/2020/openvinotoolkit/2020.2/inference_engine/${RELATIVE_URL}")
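
With this change a local dependency mirror can be selected from the CMake
cache as well as from the environment; a hypothetical override (the mirror
path is illustrative):

    # cmake -DIE_PATH_TO_DEPS=/srv/mirrors/openvino-deps <source-dir>
    # The cache variable now takes precedence over $ENV{IE_PATH_TO_DEPS},
    # which in turn takes precedence over the default download server.
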
index 6bcd332..a91c464 100644 (file)
@@ -223,12 +223,13 @@ if(WIN32)
         # 161 unrecognized pragma
         # 177 variable was declared but never referenced
         # 556 not matched type of assigned function pointer
+        # 1744: field of class type without a DLL interface used in a class with a DLL interface
         # 2586 decorated name length exceeded, name was truncated
         # 2651: attribute does not apply to any entity
         # 3180 unrecognized OpenMP pragma
         # 11075: To get full report use -Qopt-report:4 -Qopt-report-phase ipo
         # 15335 was not vectorized: vectorization possible but seems inefficient. Use vector always directive or /Qvec-threshold0 to override
-        ie_add_compiler_flags(/Qdiag-disable:161,177,556,2586,2651,3180,11075,15335)
+        ie_add_compiler_flags(/Qdiag-disable:161,177,556,1744,2586,2651,3180,11075,15335)
     endif()
 
     # Debug information flags
index 1ae339a..047f370 100644 (file)
@@ -14,6 +14,8 @@ if (ENABLE_SANITIZER)
     set(SANITIZER_LINKER_FLAGS "-fsanitize=address")
     if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
         set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fuse-ld=gold")
+    elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND NOT WIN32)
+        set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fuse-ld=lld")
     endif()
 
     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SANITIZER_COMPILER_FLAGS}")
index e7ea654..efdb263 100644 (file)
@@ -60,7 +60,7 @@ function(ie_developer_export)
             APPEND FILE "${CMAKE_BINARY_DIR}/targets_developer.cmake")
 
     # Custom target to build only Inference Engine Developer Package targets
-    add_custom_target(ie_dev_targets ALL DEPENDS ${IEDeveloperPackageTargets})
+    add_custom_target(ie_dev_targets ALL DEPENDS ${IEDeveloperPackageTargets} gflags)
 endfunction()
 
 add_subdirectory(thirdparty)
@@ -74,9 +74,24 @@ endif()
 
 add_subdirectory(tools)
 
+function(ie_build_samples)
+    # samples should be built with the same flags as the OpenVINO package,
+    # so unset all flags
+    foreach(var CMAKE_CXX_FLAGS CMAKE_C_FLAGS CMAKE_CXX_STANDARD
+                CMAKE_EXE_LINKER_FLAGS CMAKE_POLICY_DEFAULT_CMP0063
+                CMAKE_CXX_VISIBILITY_PRESET CMAKE_C_VISIBILITY_PRESET
+                CMAKE_VISIBILITY_INLINES_HIDDEN CMAKE_POSITION_INDEPENDENT_CODE
+                THREADS_PREFER_PTHREAD_FLAG X86_64 X86 ARM AARCH64 LINUX
+                MINGW64 CMAKE_BUILD_TYPE CMAKE_MACOSX_RPATH)
+        unset(${var})
+    endforeach()
+
+    add_subdirectory(samples)
+endfunction()
+
 # gflags and format_reader targets are kept inside the samples directory and
 # must be built even if the samples build is disabled (required for tests and tools).
-add_subdirectory(samples)
+ie_build_samples()
 
 file(GLOB_RECURSE SAMPLES_SOURCES samples/*.cpp samples/*.hpp samples/*.h)
 add_cpplint_target(sample_cpplint
@@ -109,7 +124,7 @@ if(UNIX)
             PATTERN *.bat EXCLUDE
             PATTERN speech_libs_and_demos EXCLUDE)
 elseif(WIN32)
-    install(DIRECTORY samples
+    install(DIRECTORY samples/
             DESTINATION ${IE_CPACK_IE_DIR}/samples/cpp
             COMPONENT cpp_samples
             USE_SOURCE_PERMISSIONS
index ded2740..d2ff778 100644 (file)
@@ -22,6 +22,8 @@ endif()
 if(ENABLE_CLANG_FORMAT)
     add_custom_target(clang_format_check_all)
     add_custom_target(clang_format_fix_all)
+    set_target_properties(clang_format_check_all clang_format_fix_all
+                          PROPERTIES FOLDER clang_format)
     set(CLANG_FORMAT_ALL_OUTPUT_FILES "" CACHE INTERNAL "All clang-format output files")
 endif()
 
@@ -108,6 +110,9 @@ function(add_clang_format_target TARGET_NAME)
         "[clang-format] ${TARGET_NAME}_fix"
         VERBATIM)
 
+    set_target_properties(${TARGET_NAME} ${TARGET_NAME}_fix
+                          PROPERTIES FOLDER clang_format)
+
     # if(CLANG_FORMAT_FOR_TARGETS)
     #     foreach(target IN LISTS CLANG_FORMAT_FOR_TARGETS)
     #         add_dependencies(${target} ${TARGET_NAME})
index a317ff5..42a32f9 100644 (file)
@@ -4,7 +4,7 @@
 
 if(DEFINED IE_MAIN_SOURCE_DIR AND TARGET inference_engine)
     set(InferenceEngine_LIBRARIES inference_engine_legacy inference_engine
-                                  inference_engine_c_api inference_engine_nn_builder)
+                                  inference_engine_c_api)
 else()
     include("${CMAKE_CURRENT_LIST_DIR}/targets.cmake")
     if(NOT WIN32)
@@ -30,5 +30,5 @@ else()
 
     get_target_property(InferenceEngine_INCLUDE_DIRS IE::inference_engine INTERFACE_INCLUDE_DIRECTORIES)
     set(InferenceEngine_LIBRARIES IE::inference_engine_legacy IE::inference_engine
-                                  IE::inference_engine_c_api IE::inference_engine_nn_builder)
+                                  IE::inference_engine_c_api)
 endif()
index 1a2533c..9e279ff 100644 (file)
@@ -13,6 +13,7 @@ endif()
 
 if(ENABLE_CPPLINT)
     add_custom_target(cpplint_all ALL)
+    set_target_properties(cpplint_all PROPERTIES FOLDER cpplint)
     set(CPPLINT_ALL_OUTPUT_FILES "" CACHE INTERNAL "All cpplint output files")
 endif()
 
@@ -93,6 +94,7 @@ function(add_cpplint_target TARGET_NAME)
     add_custom_target(${TARGET_NAME} ALL
         DEPENDS ${all_output_files}
         COMMENT "[cpplint] ${TARGET_NAME}")
+    set_target_properties(${TARGET_NAME} PROPERTIES FOLDER cpplint)
 
     if(CPPLINT_FOR_TARGETS)
         foreach(target IN LISTS CPPLINT_FOR_TARGETS)
@@ -168,4 +170,5 @@ function(add_cpplint_report_target)
     add_custom_target(cpplint_report
         DEPENDS "${html_output_file}"
         COMMENT "[cpplint] Generate report")
+    set_target_properties(cpplint_report PROPERTIES FOLDER cpplint)
 endfunction()
index 491a958..05c1d5d 100644 (file)
@@ -47,7 +47,7 @@ file(WRITE "${OUTPUT_FILE}" "${formatted_output}")
 
 if(NOT SKIP_RETURN_CODE)
     # Pass through the cpplint return code
-    if(NOT result EQUAL 0)
+    if(NOT result EQUAL "0")
         # Display the cpplint output to the console (to parse it from the IDE)
         message("${output}")
         message(FATAL_ERROR "[cpplint] Code style check failed for : ${INPUT_FILE}")
index 26fe61c..93a27cb 100644 (file)
@@ -33,7 +33,7 @@ include("${CMAKE_CURRENT_LIST_DIR}/targets_developer.cmake")
 set_property(TARGET IE::inference_engine PROPERTY IMPORTED_GLOBAL TRUE)
 
 get_target_property(InferenceEngine_INCLUDE_DIRS IE::inference_engine INTERFACE_INCLUDE_DIRECTORIES)
-set(InferenceEngine_LIBRARIES IE::inference_engine_legacy IE::inference_engine IE::inference_engine_nn_builder)
+set(InferenceEngine_LIBRARIES IE::inference_engine_legacy IE::inference_engine)
 
 #
 # Common cmake includes
index 9749c21..82f1a85 100644 (file)
@@ -106,7 +106,7 @@ ie_option (ENABLE_CPP_CCT "enables C++ version of Cross Check Tool" OFF)
 
 ie_option (ENABLE_C "enables ie c bridge build" ON)
 
-ie_dependent_option(ENABLE_CPPLINT "Enable cpplint checks during the build" OFF "OFF;UNIX;NOT APPLE;NOT ANDROID" OFF)
+ie_dependent_option(ENABLE_CPPLINT "Enable cpplint checks during the build" OFF "UNIX;NOT APPLE;NOT ANDROID" OFF)
 ie_dependent_option(ENABLE_CPPLINT_REPORT "Build cpplint report instead of failing the build" OFF "ENABLE_CPPLINT" OFF)
 
 ie_option(ENABLE_CLANG_FORMAT "Enable clang-format checks during the build" OFF)
index 75ecd3c..df874c6 100644 (file)
@@ -82,6 +82,9 @@ function(ie_add_plugin)
     if(TARGET inference_engine_preproc)
         add_dependencies(${IE_PLUGIN_NAME} inference_engine_preproc)
     endif()
+    if(TARGET inference_engine_ir_readers)
+        add_dependencies(${IE_PLUGIN_NAME} inference_engine_ir_readers)
+    endif()
 
     # install rules
 
index fc18913..ff27310 100644 (file)
@@ -18,7 +18,6 @@
 #   IE::inference_engine            - The Inference Engine library
 #   IE::inference_engine_legacy     - The Inference Engine library with legacy API for IR v7 and older.
 #   IE::inference_engine_c_api      - The Inference Engine C API library
-#   IE::inference_engine_nn_builder - The Inference Engine NN Builder library
 #
 
 macro(ext_message TRACE_LEVEL)
@@ -40,7 +39,7 @@ if(TARGET IE::inference_engine)
     set(InferenceEngine_FOUND TRUE)
     get_target_property(InferenceEngine_INCLUDE_DIRS IE::inference_engine INTERFACE_INCLUDE_DIRECTORIES)
     set(InferenceEngine_LIBRARIES IE::inference_engine_legacy IE::inference_engine
-                                  IE::inference_engine_c_api IE::inference_engine_nn_builder)
+                                  IE::inference_engine_c_api)
 else()
     if (WIN32)
         set(_ARCH intel64)
@@ -88,29 +87,26 @@ else()
         find_library(IE_RELEASE_LIBRARY inference_engine@IE_RELEASE_POSTFIX_WIN@ "${IE_LIB_REL_DIR}" NO_DEFAULT_PATH)
         find_library(IE_LEGACY_RELEASE_LIBRARY inference_engine_legacy@IE_RELEASE_POSTFIX_WIN@ "${IE_LIB_REL_DIR}" NO_DEFAULT_PATH)
         find_library(IE_C_API_RELEASE_LIBRARY inference_engine_c_api@IE_RELEASE_POSTFIX_WIN@ "${IE_LIB_REL_DIR}" NO_DEFAULT_PATH)
-        find_library(IE_NN_BUILDER_RELEASE_LIBRARY inference_engine_nn_builder@IE_RELEASE_POSTFIX_WIN@ "${IE_LIB_REL_DIR}" NO_DEFAULT_PATH)
     elseif(APPLE)
         find_library(IE_RELEASE_LIBRARY inference_engine@IE_RELEASE_POSTFIX_MAC@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
         find_library(IE_LEGACY_RELEASE_LIBRARY inference_engine_legacy@IE_RELEASE_POSTFIX_MAC@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
         find_library(IE_C_API_RELEASE_LIBRARY inference_engine_c_api@IE_RELEASE_POSTFIX_MAC@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
-        find_library(IE_NN_BUILDER_RELEASE_LIBRARY inference_engine_nn_builder@IE_RELEASE_POSTFIX_MAC@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
     else()
         find_library(IE_RELEASE_LIBRARY inference_engine@IE_RELEASE_POSTFIX_LIN@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
         find_library(IE_LEGACY_RELEASE_LIBRARY inference_engine_legacy@IE_RELEASE_POSTFIX_LIN@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
         find_library(IE_C_API_RELEASE_LIBRARY inference_engine_c_api@IE_RELEASE_POSTFIX_LIN@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
-        find_library(IE_NN_BUILDER_RELEASE_LIBRARY inference_engine_nn_builder@IE_RELEASE_POSTFIX_LIN@ "${IE_LIB_DIR}" NO_DEFAULT_PATH)
     endif()
 
     find_package_handle_standard_args(  InferenceEngine
                                         FOUND_VAR INFERENCEENGINE_FOUND
-                                        REQUIRED_VARS IE_RELEASE_LIBRARY IE_LEGACY_RELEASE_LIBRARY IE_C_API_RELEASE_LIBRARY IE_NN_BUILDER_RELEASE_LIBRARY IE_INCLUDE_DIR
+                                        REQUIRED_VARS IE_RELEASE_LIBRARY IE_LEGACY_RELEASE_LIBRARY IE_C_API_RELEASE_LIBRARY IE_INCLUDE_DIR
                                         FAIL_MESSAGE "Some of mandatory Inference Engine components are not found. Please consult InferenceEgnineConfig.cmake module's help page.")
 
     if(INFERENCEENGINE_FOUND)
         # to keep this line for successful execution in CMake 2.8
         set(InferenceEngine_FOUND TRUE)
 
-        foreach(ie_library_suffix "" "_legacy" "_c_api" "_nn_builder")
+        foreach(ie_library_suffix "" "_legacy" "_c_api")
             string(TOUPPER "${ie_library_suffix}" ie_library_usuffix)
             add_library(IE::inference_engine${ie_library_suffix} SHARED IMPORTED GLOBAL)
 
@@ -167,7 +163,7 @@ else()
 
         set(InferenceEngine_INCLUDE_DIRS ${IE_INCLUDE_DIR})
         set(InferenceEngine_LIBRARIES IE::inference_engine_legacy IE::inference_engine
-                                      IE::inference_engine_c_api IE::inference_engine_nn_builder)
+                                      IE::inference_engine_c_api)
 
         set(IE_EXTERNAL_DIR "${IE_ROOT_DIR}/external")
         include("${IE_ROOT_DIR}/share/ie_parallel.cmake")
index d0b5e20..1f22482 100644 (file)
@@ -20,6 +20,7 @@ set(VPU_SUPPORTED_FIRMWARES usb-ma2450 usb-ma2x8x pcie-ma248x)
 #
 
 set(FIRMWARE_PACKAGE_VERSION 1076)
+set(VPU_CLC_MA2X8X_VERSION "movi-cltools-20.02.0")
 
 #
 # CMake variables to override default firmware files
@@ -82,7 +83,7 @@ foreach(firmware_name IN LISTS VPU_SUPPORTED_FIRMWARES)
         VERBATIM)
 
     install(FILES ${${var_name}}
-        DESTINATION ${IE_CPACK_LIBRARY_PATH}
+        DESTINATION ${IE_CPACK_RUNTIME_PATH}
         COMPONENT myriad)
 endforeach()
 
@@ -105,3 +106,106 @@ if(ANDROID)
 
     log_rpath_from_dir(LIBUSB "${LIBUSB}/libs/${ANDROID_ABI}")
 endif()
+
+#
+# OpenCL compiler
+#
+
+if(LINUX AND LINUX_OS_NAME MATCHES "Ubuntu")
+    if(DEFINED ENV{VPU_OCL_COMPILER_PATH})
+        set(IE_PATH_TO_DEPS "$ENV{VPU_OCL_COMPILER_PATH}")
+    elseif(DEFINED VPU_OCL_COMPILER_PATH)
+        set(IE_PATH_TO_DEPS "${VPU_OCL_COMPILER_PATH}")
+    else()
+        message(WARNING "VPU_OCL_COMPILER is not found. Some tests will be skipped")
+    endif()
+
+    if(DEFINED IE_PATH_TO_DEPS)
+        message(STATUS "VPU_OCL_COMPILER_PATH=${IE_PATH_TO_DEPS}")
+
+        reset_deps_cache(VPU_CLC_MA2X8X_ROOT)
+        reset_deps_cache(VPU_CLC_MA2X8X_COMMAND)
+
+        RESOLVE_DEPENDENCY(VPU_CLC_MA2X8X
+            ARCHIVE_LIN "VPU_OCL_compiler/${VPU_CLC_MA2X8X_VERSION}.tar.gz"
+            TARGET_PATH "${TEMP}/vpu/clc/ma2x8x/${VPU_CLC_MA2X8X_VERSION}"
+            ENVIRONMENT "VPU_CLC_MA2X8X_COMMAND")
+        debug_message(STATUS "VPU_CLC_MA2X8X=" ${VPU_CLC_MA2X8X})
+
+        update_deps_cache(
+            VPU_CLC_MA2X8X_ROOT
+            "${VPU_CLC_MA2X8X}"
+            "[VPU] Root directory of OpenCL compiler")
+
+        update_deps_cache(
+            VPU_CLC_MA2X8X_COMMAND
+            "${VPU_CLC_MA2X8X}/bin/clc"
+            "[VPU] OpenCL compiler")
+
+        find_program(VPU_CLC_MA2X8X_COMMAND clc)
+        unset(IE_PATH_TO_DEPS)
+    endif()
+endif()
+
+#
+# `vpu_custom_kernels` CMake target
+#
+
+add_library(vpu_custom_kernels INTERFACE)
+
+function(add_vpu_compile_custom_kernels)
+    set(SRC_DIR "${IE_MAIN_SOURCE_DIR}/src/vpu/custom_kernels")
+    set(DST_DIR "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/vpu_custom_kernels")
+
+    file(MAKE_DIRECTORY "${DST_DIR}")
+
+    file(GLOB XML_FILES "${SRC_DIR}/*.xml")
+    file(GLOB CL_FILES "${SRC_DIR}/*.cl")
+
+    foreach(xml_file IN LISTS XML_FILES)
+        get_filename_component(xml_file_name ${xml_file} NAME)
+
+        set(out_file "${DST_DIR}/${xml_file_name}")
+        list(APPEND all_output_files ${out_file})
+
+        add_custom_command(
+            OUTPUT ${out_file}
+            COMMAND
+                ${CMAKE_COMMAND} -E copy ${xml_file} ${out_file}
+            MAIN_DEPENDENCY ${xml_file}
+            COMMENT "[VPU] Copy ${xml_file} to ${DST_DIR}"
+            VERBATIM)
+    endforeach()
+
+    foreach(cl_file IN LISTS CL_FILES)
+        get_filename_component(cl_file_name ${cl_file} NAME_WE)
+
+        set(out_file "${DST_DIR}/${cl_file_name}.bin")
+        list(APPEND all_output_files ${out_file})
+
+        add_custom_command(
+            OUTPUT ${out_file}
+            COMMAND
+                ${CMAKE_COMMAND} -E env
+                    "SHAVE_LDSCRIPT_DIR=${VPU_CLC_MA2X8X}/ldscripts/"
+                    "SHAVE_MA2X8XLIBS_DIR=${VPU_CLC_MA2X8X}/lib"
+                    "SHAVE_MOVIASM_DIR=${VPU_CLC_MA2X8X}/bin"
+                    "SHAVE_MYRIAD_LD_DIR=${VPU_CLC_MA2X8X}/bin"
+                ${VPU_CLC_MA2X8X_COMMAND} --strip-binary-header ${cl_file} -o ${out_file}
+            MAIN_DEPENDENCY ${cl_file}
+            DEPENDS ${VPU_CLC_MA2X8X_COMMAND}
+            COMMENT "[VPU] Compile ${cl_file}"
+            VERBATIM)
+    endforeach()
+
+    add_custom_target(vpu_compile_custom_kernels
+        DEPENDS ${all_output_files}
+        COMMENT "[VPU] Compile custom kernels")
+
+    add_dependencies(vpu_custom_kernels vpu_compile_custom_kernels)
+    target_compile_definitions(vpu_custom_kernels INTERFACE "VPU_HAS_CUSTOM_KERNELS")
+endfunction()
+
+if(VPU_CLC_MA2X8X_COMMAND)
+    add_vpu_compile_custom_kernels()
+endif()
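
Consumers attach to the compiled kernels through the INTERFACE target; a
minimal sketch with a hypothetical plugin target:

    # Linking vpu_custom_kernels propagates the VPU_HAS_CUSTOM_KERNELS
    # definition; the .bin kernels themselves are produced by the
    # vpu_compile_custom_kernels dependency registered above.
    target_link_libraries(my_vpu_plugin PRIVATE vpu_custom_kernels)
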
index 10d7254..c4feefd 100644 (file)
@@ -6,6 +6,10 @@ project(InferenceEngine_C_API)
 
 add_subdirectory(src)
 
+if(ENABLE_TESTS)
+    add_subdirectory(tests)
+endif()
+
 if(ENABLE_SAMPLES)
     add_subdirectory(samples)
 endif()
index f69ce1a..b8d5ddf 100644 (file)
@@ -24,6 +24,8 @@ target_link_libraries(${TARGET_NAME} PUBLIC ${OpenCV_LIBRARIES})
 
 target_include_directories(${TARGET_NAME} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}")
 
+set_target_properties(${TARGET_NAME} PROPERTIES FOLDER c_samples)
+
 if(COMMAND add_cpplint_target)
        add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
 endif()
index 9ca1c7d..d5b4fda 100644 (file)
@@ -2,14 +2,6 @@
 # SPDX-License-Identifier: Apache-2.0
 #
 
-set(TARGET_NAME "hello_classification_c")
-
-# create sample target
-
-add_executable(${TARGET_NAME} main.c)
-
-target_link_libraries(${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES} opencv_c_wraper)
-
-if(COMMAND add_cpplint_target)
-    add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
-endif()
+ie_add_sample(NAME hello_classification_c
+              SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.c"
+              DEPENDENCIES opencv_c_wraper)
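
The C-sample CMakeLists conversions in this snapshot all lean on the shared
ie_add_sample helper. Its rough shape, inferred from the boilerplate it
replaces (the real macro lives in the samples' common CMake and may differ):

    macro(ie_add_sample)
        cmake_parse_arguments(IE_SAMPLE "" "NAME" "SOURCES;HEADERS;DEPENDENCIES" ${ARGN})
        add_executable(${IE_SAMPLE_NAME} ${IE_SAMPLE_SOURCES} ${IE_SAMPLE_HEADERS})
        target_link_libraries(${IE_SAMPLE_NAME} PRIVATE ${InferenceEngine_LIBRARIES}
                              ${IE_SAMPLE_DEPENDENCIES})
        if(COMMAND add_cpplint_target)
            add_cpplint_target(${IE_SAMPLE_NAME}_cpplint FOR_TARGETS ${IE_SAMPLE_NAME})
        endif()
    endmacro()
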
index 1a4dffd..d090436 100644 (file)
@@ -2,14 +2,6 @@
 # SPDX-License-Identifier: Apache-2.0
 #
 
-set(TARGET_NAME "hello_nv12_input_classification_c")
-
-# create sample target
-
-add_executable(${TARGET_NAME} main.c)
-
-target_link_libraries(${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES})
-
-if(COMMAND add_cpplint_target)
-    add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
-endif()
+ie_add_sample(NAME hello_nv12_input_classification_c
+              SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.c"
+              DEPENDENCIES opencv_c_wraper)
index 2684412..e84a7b7 100644 (file)
@@ -167,7 +167,7 @@ int main(int argc, char **argv) {
     // set input resize algorithm to enable input autoresize
     status |= ie_network_set_input_resize_algorithm(network, input_name, RESIZE_BILINEAR);
     // set input color format to NV12 to enable automatic input color format pre-processing
-    status |= ie_network_set_color_format(network, input_name, NV12 );
+    status |= ie_network_set_color_format(network, input_name, NV12);
 
     if (status != OK)
         goto err;
index 6332bf8..fd6fc2e 100644 (file)
@@ -2,14 +2,8 @@
 # SPDX-License-Identifier: Apache-2.0
 #
 
-set (TARGET_NAME "object_detection_sample_ssd_c")
-
-# create sample target
-
-add_executable(${TARGET_NAME} main.c)
-
-target_link_libraries(${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES} opencv_c_wraper)
-
-if(COMMAND add_cpplint_target)
-    add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
-endif()
+ie_add_sample(NAME object_detection_sample_ssd_c
+              SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/main.c"
+              HEADERS "${CMAKE_CURRENT_SOURCE_DIR}/object_detection_sample_ssd.h"
+                         "${CMAKE_CURRENT_SOURCE_DIR}/c_w_dirent.h"
+              DEPENDENCIES opencv_c_wraper)
index bf599e6..f869989 100644 (file)
@@ -18,7 +18,7 @@
 
 #define MAX_IMAGE 20
 
-static const char *img_msg = NULL;                                                                                                                                                                        
+static const char *img_msg = NULL;
 static const char *input_model = NULL;
 static const char *device_name = "CPU";
 static const char *custom_cldnn_msg = NULL;
@@ -38,7 +38,7 @@ int ParseAndCheckCommandLine(int argc, char *argv[]) {
     printf("%sParsing input parameters\n", info);
 
     while ((opt = getopt(argc, argv, string)) != -1) {
-        switch(opt) {
+        switch (opt) {
             case 'h':
                 showUsage();
                 help = 1;
@@ -183,7 +183,7 @@ ie_config_t *parseConfig(const char *config_file, char comment) {
 
     ie_config_t *cfg = NULL;
     char key[256], value[256];
-    
+
     if (fscanf(file, "%s", key)!= EOF && fscanf(file, "%s", value) != EOF) {
         char *cfg_name = (char *)calloc(strlen(key) + 1, sizeof(char));
         char *cfg_value = (char *)calloc(strlen(value) + 1, sizeof(char));
@@ -213,7 +213,7 @@ ie_config_t *parseConfig(const char *config_file, char comment) {
             cfg_temp = cfg_temp->next;
         }
     }
-    
+
     return cfg;
 }
 
@@ -229,11 +229,11 @@ void config_free(ie_config_t *config) {
             free((char *)config->name);
             config->name = NULL;
         }
-        if(config->value) {
+        if (config->value) {
             free((char *)config->value);
             config->value = NULL;
         }
-        if(config->next) {
+        if (config->next) {
             config = config->next;
         }
 
@@ -388,7 +388,7 @@ int main(int argc, char **argv) {
             goto err;
 
         /** Working with first input tensor that stores image **/
-        if(input_dim.ranks == 4) {
+        if (input_dim.ranks == 4) {
             imageInputName = name;
             input_height = input_dim.dims[2];
             input_width = input_dim.dims[3];
@@ -399,9 +399,9 @@ int main(int argc, char **argv) {
                 goto err;
         } else if (input_dim.ranks == 2) {
             imInfoInputName = name;
-        
+
             status = ie_network_set_input_precision(network, name, FP32);
-            if(status !=OK || (input_dim.dims[1] != 3 && input_dim.dims[1] != 6)) {
+            if (status != OK || (input_dim.dims[1] != 3 && input_dim.dims[1] != 6)) {
                 printf("Invalid input info. Should be 3 or 6 values length\n");
                 goto err;
             }
@@ -590,7 +590,7 @@ int main(int argc, char **argv) {
 
         dimensions_t imInfoDim;
         status |= ie_blob_get_dims(input2, &imInfoDim);
-        //Fill input tensor with values 
+        //Fill input tensor with values
         ie_blob_buffer_t info_blob_buffer;
         status |= ie_blob_get_buffer(input2, &info_blob_buffer);
         if (status != OK) {
@@ -601,7 +601,7 @@ int main(int argc, char **argv) {
         for (image_id = 0; image_id < batchSize; ++image_id) {
             p[image_id * imInfoDim.dims[1] + 0] = (float)input_height;
             p[image_id * imInfoDim.dims[1] + 1] = (float)input_width;
-            
+
             for (k = 2; k < imInfoDim.dims[1]; k++) {
                 p[image_id * imInfoDim.dims[1] + k] = 1.0f;  // all scale factors are set to 1.0
             }
@@ -616,7 +616,7 @@ int main(int argc, char **argv) {
     status |= ie_infer_request_wait(infer_request, -1);
     if (status != OK)
         goto err;
-    // ----------------------------------------------------------------------------------------------------- 
+    // -----------------------------------------------------------------------------------------------------
 
     // --------------------------- 11. Process output -------------------------------------------------------
     printf("%sProcessing output blobs\n", info);
@@ -634,7 +634,7 @@ int main(int argc, char **argv) {
     int **classes = (int **)calloc(image_num, sizeof(int *));
     rectangle_t **boxes = (rectangle_t **)calloc(image_num, sizeof(rectangle_t *));
     int *object_num = (int *)calloc(image_num, sizeof(int));
-    for ( i = 0; i < image_num; ++i) {
+    for (i = 0; i < image_num; ++i) {
         classes[i] = (int *)calloc(maxProposalCount, sizeof(int));
         boxes[i] = (rectangle_t *)calloc(maxProposalCount, sizeof(rectangle_t));
         object_num[i] = 0;
@@ -678,7 +678,7 @@ int main(int argc, char **argv) {
         }
         const char *out = "out_";
         char str_num[16] = {0};
-        int2str(str_num, batch_id); 
+        int2str(str_num, batch_id);
         char *img_path = (char *)calloc(strlen(out) + strlen(str_num) + strlen(".bmp") + 1, sizeof(char));
         strcpy(img_path, out);
         strcat(img_path, str_num);
diff --git a/inference-engine/ie_bridges/c/tests/CMakeLists.txt b/inference-engine/ie_bridges/c/tests/CMakeLists.txt
new file mode 100644 (file)
index 0000000..8bd1169
--- /dev/null
@@ -0,0 +1,38 @@
+# Copyright (C) 2018-2020 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+set(TARGET_NAME "InferenceEngineCAPITests")
+
+# Find OpenCV components if available
+find_package(OpenCV QUIET COMPONENTS imgcodecs videoio imgproc)
+if(NOT OpenCV_FOUND)
+    message(WARNING "OpenCV is disabled or not found; ${TARGET_NAME} is built without OpenCV support")
+else()
+    add_definitions(-DUSE_OPENCV)
+endif()
+
+add_executable(${TARGET_NAME} ie_c_api_test.cpp test_model_repo.hpp)
+
+target_link_libraries(${TARGET_NAME}
+        PRIVATE
+        inference_engine
+        inference_engine_c_api
+        ${OpenCV_LIBRARIES}
+        commonTestUtils
+        )
+
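+# DATA_PATH and MODELS_PATH tell the test binary where test images and models live.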
+target_compile_definitions(${TARGET_NAME}
+        PUBLIC
+        DATA_PATH=\"${DATA_PATH}\"
+        MODELS_PATH=\"${MODELS_PATH}\")
+
+if(ENABLE_MKL_DNN)
+    add_dependencies(${TARGET_NAME} MKLDNNPlugin)
+endif()
+
+if(ENABLE_CLDNN)
+    add_dependencies(${TARGET_NAME} clDNNPlugin)
+endif()
+
+add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
diff --git a/inference-engine/ie_bridges/c/tests/ie_c_api_test.cpp b/inference-engine/ie_bridges/c/tests/ie_c_api_test.cpp
new file mode 100644 (file)
index 0000000..25c0972
--- /dev/null
@@ -0,0 +1,2020 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <opencv2/opencv.hpp>
+#include <condition_variable>
+#include <mutex>
+#include <c_api/ie_c_api.h>
+#include <inference_engine.hpp>
+#include "test_model_repo.hpp"
+
+std::string xml_std = TestDataHelpers::generate_model_path("test_model", "test_model_fp32.xml"),
+            bin_std = TestDataHelpers::generate_model_path("test_model", "test_model_fp32.bin"),
+            input_image_std = TestDataHelpers::generate_image_path("224x224", "dog.bmp"),
+            input_image_nv12_std = TestDataHelpers::generate_image_path("224x224", "dog6.yuv");
+
+const char* xml = xml_std.c_str();
+const char* bin = bin_std.c_str();
+const char* input_image = input_image_std.c_str();
+const char* input_image_nv12 = input_image_nv12_std.c_str();
+
+std::mutex m;
+bool ready = false;
+std::condition_variable condVar;
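+// Each platform ships its own plugin manifest; pick the one matching the build target.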
+#ifdef _WIN32
+    #ifdef __MINGW32__
+        std::string plugins_xml_std = TestDataHelpers::generate_ieclass_xml_path("plugins_mingw.xml");
+    #else
+        std::string plugins_xml_std = TestDataHelpers::generate_ieclass_xml_path("plugins_win.xml");
+    #endif
+#elif defined __APPLE__
+        std::string plugins_xml_std = TestDataHelpers::generate_ieclass_xml_path("plugins_apple.xml");
+#else
+        std::string plugins_xml_std = TestDataHelpers::generate_ieclass_xml_path("plugins.xml");
+#endif
+const char* plugins_xml = plugins_xml_std.c_str();
+
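+// gtest helpers for checking the IEStatusCode returned by C API calls.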
+#define IE_EXPECT_OK(...) EXPECT_EQ(IEStatusCode::OK, __VA_ARGS__)
+#define IE_ASSERT_OK(...) ASSERT_EQ(IEStatusCode::OK, __VA_ARGS__)
+#define IE_EXPECT_NOT_OK(...) EXPECT_NE(IEStatusCode::OK, __VA_ARGS__)
+
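+// Read "size" bytes of raw image data from img_path into img_data;
+// returns the number of bytes read, or 0 on failure.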
+size_t read_image_from_file(const char* img_path, unsigned char *img_data, size_t size) {
+    FILE *fp = fopen(img_path, "rb");
+    size_t read_size = 0;
+
+    if (fp) {
+        fseek(fp, 0, SEEK_END);
+        if (ftell(fp) >= (long)size) {
+            fseek(fp, 0, SEEK_SET);
+            read_size = fread(img_data, 1, size, fp);
+        }
+        fclose(fp);  // only close a successfully opened file
+    }
+    return read_size;
+}
+
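+// Resize a BGR cv::Mat to the blob's spatial dimensions and copy it into the
+// blob, converting interleaved HWC pixels to the blob's planar NCHW layout.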
+void Mat2Blob(const cv::Mat& img, ie_blob_t *blob) {
+    dimensions_t dimension;
+    IE_EXPECT_OK(ie_blob_get_dims(blob, &dimension));
+    size_t channels = dimension.dims[1];
+    size_t width = dimension.dims[3];
+    size_t height = dimension.dims[2];
+    ie_blob_buffer_t buffer;
+    IE_EXPECT_OK(ie_blob_get_buffer(blob, &buffer));
+    uint8_t *blob_data = (uint8_t *)(buffer.buffer);
+    cv::Mat resized_image;
+    cv::resize(img, resized_image, cv::Size(width, height));
+
+    for (size_t c = 0; c < channels; c++) {
+        for (size_t h = 0; h < height; h++) {
+            for (size_t w = 0; w < width; w++) {
+                blob_data[c * width * height + h * width + w] =
+                        resized_image.at<cv::Vec3b>(h, w)[c];
+            }
+        }
+    }
+}
+
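+// Return the index of the first available device whose name contains
+// device_name, or (size_t)-1 if none matches.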
+size_t find_device(ie_available_devices_t avai_devices, const char *device_name) {
+    for (size_t i = 0; i < avai_devices.num_devices; ++i) {
+        if (strstr(avai_devices.devices[i], device_name))
+            return i;
+    }
+
+    return (size_t)-1;
+}
+
+TEST(ie_c_api_version, apiVersion) {
+    ie_version_t version = ie_c_api_version();
+    auto ver = InferenceEngine::GetInferenceEngineVersion();
+    std::string ver_str = std::to_string(ver->apiVersion.major) + ".";
+    ver_str += std::to_string(ver->apiVersion.minor) + ".";
+    ver_str += ver->buildNumber;
+
+    EXPECT_EQ(strcmp(version.api_version, ver_str.c_str()), 0);
+    ie_version_free(&version);
+}
+
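+// Callback for the async infer test: validates the output blob, then signals
+// the waiting test thread through the condition variable.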
+void completion_callback(void *args) {
+    ie_infer_request_t *infer_request = (ie_infer_request_t *)args;
+    ie_blob_t *output_blob = nullptr;
+
+    printf("async infer callback...\n");
+    IE_EXPECT_OK(ie_infer_request_get_blob(infer_request, "fc_out", &output_blob));
+
+    ie_blob_buffer_t buffer;
+    IE_EXPECT_OK(ie_blob_get_buffer(output_blob, &buffer));
+    float *output_data = (float *)(buffer.buffer);
+    EXPECT_NEAR(output_data[9], 0.f, 1.e-5);
+
+    ie_blob_free(&output_blob);
+
+    std::lock_guard<std::mutex> lock(m);
+    ready = true;
+    condVar.notify_one();
+}
+
+TEST(ie_core_create, coreCreateWithConfig) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create(plugins_xml, &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_core_free(&core);
+}
+
+TEST(ie_core_create, coreCreateNoConfig) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_core_free(&core);
+}
+
+TEST(ie_core_get_available_devices, getAvailableDevices) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+
+    ie_available_devices_t avai_devices = {0};
+    IE_EXPECT_OK(ie_core_get_available_devices(core, &avai_devices));
+
+    ie_core_available_devices_free(&avai_devices);
+    ie_core_free(&core);
+}
+
+TEST(ie_core_register_plugin, registerPlugin) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    const char *plugin_name = "MKLDNNPlugin";
+    const char *device_name = "BLA";
+    IE_EXPECT_OK(ie_core_register_plugin(core, plugin_name, device_name));
+
+    ie_config_t config = {nullptr, nullptr, nullptr};
+    ie_executable_network_t *exe_network = nullptr;
+    IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network));
+    EXPECT_NE(nullptr, exe_network);
+
+    ie_exec_network_free(&exe_network);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_core_register_plugins, registerPlugins) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    IE_EXPECT_OK(ie_core_register_plugins(core, plugins_xml));
+
+    ie_config_t config = {nullptr, nullptr, nullptr};
+    const char *device_name = "CUSTOM";
+    ie_executable_network_t *exe_network = nullptr;
+    IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network));
+    EXPECT_NE(nullptr, exe_network);
+
+    ie_exec_network_free(&exe_network);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_core_unregister_plugin, unregisterPlugin) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create(plugins_xml, &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    ie_config_t config = {nullptr, nullptr, nullptr};
+    const char *device_name = "CUSTOM";
+    ie_executable_network_t *exe_network = nullptr;
+    IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network));
+    EXPECT_NE(nullptr, exe_network);
+
+    ie_exec_network_free(&exe_network);
+    ie_network_free(&network);
+
+    IE_EXPECT_OK(ie_core_unregister_plugin(core, device_name));
+
+    ie_core_free(&core);
+}
+
+TEST(ie_core_set_config, setConfig) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    const char *device_name = "CPU";
+    ie_config_t config = {"CPU_THREADS_NUM", "3", nullptr};
+    IE_EXPECT_OK(ie_core_set_config(core, &config, device_name));
+
+    ie_core_free(&core);
+}
+
+TEST(ie_core_get_metric, getMetric) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    const char *device_name = "CPU";
+    const char *metric_name = "SUPPORTED_CONFIG_KEYS";
+    ie_param_t param;
+    IE_EXPECT_OK(ie_core_get_metric(core, device_name, metric_name, &param));
+
+    ie_param_free(&param);
+    ie_core_free(&core);
+}
+
+TEST(ie_core_get_config, getConfig) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    const char *device_name = "CPU";
+    const char *config_name = "CPU_THREADS_NUM";
+    ie_param_t param;
+    IE_EXPECT_OK(ie_core_get_config(core, device_name, config_name, &param));
+    EXPECT_STREQ(param.params, "0");
+
+    ie_param_free(&param);
+    ie_core_free(&core);
+}
+
+TEST(ie_core_get_versions, getVersions) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_core_versions_t versions = {0};
+    IE_EXPECT_OK(ie_core_get_versions(core, "CPU", &versions));
+    EXPECT_EQ(versions.num_vers, 1);
+
+    ie_core_versions_free(&versions);
+    ie_core_free(&core);
+}
+
+TEST(ie_core_read_network, networkRead) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_core_load_network, loadNetwork) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    IE_EXPECT_OK(ie_network_set_input_layout(network, "data", layout_e::NHWC));
+    IE_EXPECT_OK(ie_network_set_input_precision(network, "data", precision_e::U8));
+
+    ie_config_t config = {"CPU_THREADS_NUM", "3", nullptr};
+    ie_executable_network_t *exe_network = nullptr;
+    IE_EXPECT_OK(ie_core_load_network(core, network, "CPU", &config, &exe_network));
+    EXPECT_NE(nullptr, exe_network);
+
+    ie_exec_network_free(&exe_network);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_core_load_network, loadNetworkNoConfig) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    ie_config_t config = {nullptr, nullptr, nullptr};
+    ie_executable_network_t *exe_network = nullptr;
+    IE_EXPECT_OK(ie_core_load_network(core, network, "CPU", &config, &exe_network));
+    EXPECT_NE(nullptr, exe_network);
+
+    ie_exec_network_free(&exe_network);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_get_name, networkName) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    char *network_name = nullptr;
+    IE_EXPECT_OK(ie_network_get_name(network, &network_name));
+
+    EXPECT_STREQ(network_name, "test_model");
+
+    ie_network_name_free(&network_name);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_get_inputs_number, inputNumber) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    size_t size;
+    IE_EXPECT_OK(ie_network_get_inputs_number(network, &size));
+    EXPECT_EQ(size, 1);
+
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_get_input_name, inputName) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    char *input_name = nullptr;
+    IE_EXPECT_OK(ie_network_get_input_name(network, 0, &input_name));
+
+    EXPECT_STREQ(input_name, "data");
+
+    ie_network_name_free(&input_name);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_get_input_precision, getPrecision) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    const char *name = "data";
+    precision_e p;
+    IE_EXPECT_OK(ie_network_get_input_precision(network, name, &p));
+    EXPECT_EQ(p, precision_e::FP32);
+
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_get_input_precision, incorrectName) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    const char *name = "model";
+    precision_e p;
+    EXPECT_EQ(IEStatusCode::NOT_FOUND, ie_network_get_input_precision(network, name, &p));
+
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_set_input_precision, setPrecision) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    const char *name = "data";
+    const precision_e p = precision_e::FP16;
+    IE_EXPECT_OK(ie_network_set_input_precision(network, name, p));
+    precision_e p2;
+    IE_EXPECT_OK(ie_network_get_input_precision(network, name, &p2));
+    EXPECT_EQ(p, p2);
+
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_get_input_layout, getLayout) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    const char *name = "data";
+    layout_e l;
+    IE_EXPECT_OK(ie_network_get_input_layout(network, name, &l));
+    EXPECT_EQ(l, layout_e::NCHW);
+
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_set_input_layout, setLayout) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    const char *name = "data";
+    const layout_e l = layout_e::NHWC;
+    IE_EXPECT_OK(ie_network_set_input_layout(network, name, l));
+    layout_e l2;
+    IE_EXPECT_OK(ie_network_get_input_layout(network, name, &l2));
+    EXPECT_EQ(l, l2);
+
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_get_input_dims, getDims) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    const char *name = "data";
+    dimensions_t dims_res;
+    IE_EXPECT_OK(ie_network_get_input_dims(network, name, &dims_res));
+    EXPECT_EQ(dims_res.dims[0], 1);
+    EXPECT_EQ(dims_res.dims[1], 3);
+    EXPECT_EQ(dims_res.dims[2], 32);
+    EXPECT_EQ(dims_res.dims[3], 32);
+
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_get_input_resize_algorithm, getResizeAlgo) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    const char *name = "data";
+    resize_alg_e resizeAlg;
+    IE_EXPECT_OK(ie_network_get_input_resize_algorithm(network, name, &resizeAlg));
+    EXPECT_EQ(resizeAlg, resize_alg_e::NO_RESIZE);
+
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_set_input_resize_algorithm, setResizeAlgo) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    const char *name = "data";
+    resize_alg_e resizeAlg = resize_alg_e::RESIZE_BILINEAR;
+    IE_EXPECT_OK(ie_network_set_input_resize_algorithm(network, name, resizeAlg));
+
+    resize_alg_e resizeAlg2;
+    IE_EXPECT_OK(ie_network_get_input_resize_algorithm(network, name, &resizeAlg2));
+    EXPECT_EQ(resizeAlg, resizeAlg2);
+
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_get_color_format, getColorFormat) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    const char *name = "data";
+    colorformat_e color;
+    IE_EXPECT_OK(ie_network_get_color_format(network, name, &color));
+    EXPECT_EQ(color, colorformat_e::RAW);
+
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_set_color_format, setColorFormat) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    const char *name = "data";
+    const colorformat_e color = colorformat_e::BGR;
+    IE_EXPECT_OK(ie_network_set_color_format(network, name, color));
+
+    colorformat_e color2;
+    IE_EXPECT_OK(ie_network_get_color_format(network, name, &color2));
+    EXPECT_EQ(color2, colorformat_e::BGR);
+
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_get_input_shapes, getInputShapes) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    input_shapes_t shapes;
+    IE_EXPECT_OK(ie_network_get_input_shapes(network, &shapes));
+    EXPECT_EQ(shapes.shape_num, 1);
+
+    ie_network_input_shapes_free(&shapes);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_reshape, reshape) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    input_shapes_t inputShapes;
+    IE_EXPECT_OK(ie_network_get_input_shapes(network, &inputShapes));
+
+    inputShapes.shapes[0].shape.dims[0] = 2;
+
+    IE_EXPECT_OK(ie_network_reshape(network, inputShapes));
+
+    input_shapes_t inputShapes2;
+    IE_EXPECT_OK(ie_network_get_input_shapes(network, &inputShapes2));
+    EXPECT_EQ(inputShapes2.shapes[0].shape.dims[0], 2);
+
+    ie_network_input_shapes_free(&inputShapes2);
+    ie_network_input_shapes_free(&inputShapes);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_get_outputs_number, getNumber) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    size_t size;
+    IE_EXPECT_OK(ie_network_get_outputs_number(network, &size));
+    EXPECT_EQ(size, 1);
+
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_get_output_name, getName) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    char *output_name = nullptr;
+    IE_EXPECT_OK(ie_network_get_output_name(network, 0, &output_name));
+    EXPECT_STREQ(output_name, "fc_out");
+
+    ie_network_name_free(&output_name);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_get_output_name, incorrectNumber) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    char *output_name = nullptr;
+    EXPECT_EQ(IEStatusCode::OUT_OF_BOUNDS, ie_network_get_output_name(network, 3, &output_name));
+
+    ie_network_name_free(&output_name);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_get_output_precision, getPrecision) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    const char *name = "fc_out";
+    precision_e p;
+    IE_EXPECT_OK(ie_network_get_output_precision(network, name, &p));
+    EXPECT_EQ(p, precision_e::FP32);
+
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_set_output_precision, setPrecision) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    const char *name = "fc_out";
+    precision_e p = precision_e::FP16;
+    IE_EXPECT_OK(ie_network_set_output_precision(network, name, p));
+
+    precision_e precision_res;
+    IE_EXPECT_OK(ie_network_get_output_precision(network, name, &precision_res));
+    EXPECT_EQ(p, precision_res);
+
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_get_output_layout, getLayout) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    const char *name = "fc_out";
+    layout_e l;
+    IE_EXPECT_OK(ie_network_get_output_layout(network, name, &l));
+    EXPECT_EQ(l, layout_e::NC);
+
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_set_output_layout, setLayout) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    const char *name = "fc_out";
+    layout_e l = layout_e::CN;
+    IE_EXPECT_OK(ie_network_set_output_layout(network, name, l));
+    layout_e l_res;
+    IE_EXPECT_OK(ie_network_get_output_layout(network, name, &l_res));
+    EXPECT_EQ(l, l_res);
+
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_network_get_output_dims, getDims) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    const char *name = "fc_out";
+    dimensions_t dims_res;
+    IE_EXPECT_OK(ie_network_get_output_dims(network, name, &dims_res));
+    EXPECT_EQ(dims_res.dims[0], 1);
+    EXPECT_EQ(dims_res.dims[1], 10);
+
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_exec_network_create_infer_request, createInferRequest) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    const char *device_name = "CPU";
+    ie_config_t config = {nullptr, nullptr, nullptr};
+    ie_executable_network_t *exe_network = nullptr;
+    IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network));
+    EXPECT_NE(nullptr, exe_network);
+
+    ie_infer_request_t *infer_request = nullptr;
+    IE_EXPECT_OK(ie_exec_network_create_infer_request(exe_network, &infer_request));
+    EXPECT_NE(nullptr, infer_request);
+
+    ie_infer_request_free(&infer_request);
+    ie_exec_network_free(&exe_network);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_exec_network_get_config, getConfig) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    const char *device_name = "CPU";
+    ie_config_t config = {nullptr, nullptr, nullptr};
+    ie_executable_network_t *exe_network = nullptr;
+    IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network));
+    EXPECT_NE(nullptr, exe_network);
+
+    ie_param_t param;
+    IE_EXPECT_OK(ie_exec_network_get_config(exe_network, "CPU_THREADS_NUM", &param));
+
+    ie_param_free(&param);
+    ie_exec_network_free(&exe_network);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_exec_network_set_config, setConfig) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_core_versions_t ie_core_versions_multi;
+    ie_param_t param;
+    if (ie_core_get_versions(core, "MULTI", &ie_core_versions_multi) != IEStatusCode::OK ||
+        ie_core_get_metric(core, "GPU", "AVAILABLE_DEVICES", &param) != IEStatusCode::OK) {
+        ie_core_free(&core);
+        GTEST_SKIP();
+    }
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    const char *device_name = "MULTI:GPU,CPU";
+    ie_config_t config = {nullptr, nullptr, nullptr};
+    ie_executable_network_t *exe_network = nullptr;
+    IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network));
+    EXPECT_NE(nullptr, exe_network);
+
+    ie_config_t config_param = {"MULTI_DEVICE_PRIORITIES", "GPU,CPU", nullptr};
+    IE_EXPECT_OK(ie_exec_network_set_config(exe_network, &config_param));
+
+    ie_core_versions_free(&ie_core_versions_multi);
+    ie_param_free(&param);
+    ie_exec_network_free(&exe_network);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_exec_network_get_metric, getMetric) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    const char *device_name = "CPU";
+    ie_config_t config = {nullptr, nullptr, nullptr};
+    ie_executable_network_t *exe_network = nullptr;
+    IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network));
+    EXPECT_NE(nullptr, exe_network);
+
+    ie_param_t param;
+    IE_EXPECT_OK(ie_exec_network_get_metric(exe_network, "SUPPORTED_CONFIG_KEYS", &param));
+
+    ie_param_free(&param);
+    ie_exec_network_free(&exe_network);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_infer_request_get_blob, getBlob) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    char *input_name = nullptr;
+    IE_EXPECT_OK(ie_network_get_input_name(network, 0, &input_name));
+    const char *device_name = "CPU";
+    ie_config_t config = {nullptr, nullptr, nullptr};
+    ie_executable_network_t *exe_network = nullptr;
+    IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network));
+    EXPECT_NE(nullptr, exe_network);
+
+    ie_infer_request_t *infer_request = nullptr;
+    IE_EXPECT_OK(ie_exec_network_create_infer_request(exe_network, &infer_request));
+    EXPECT_NE(nullptr, infer_request);
+
+    ie_blob_t *blob = nullptr;
+    IE_EXPECT_OK(ie_infer_request_get_blob(infer_request, input_name, &blob));
+
+    ie_blob_free(&blob);
+    ie_infer_request_free(&infer_request);
+    ie_exec_network_free(&exe_network);
+    ie_network_name_free(&input_name);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_infer_request_set_blob, setBlob) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    dimensions_t dim_t;
+    precision_e p = precision_e::U8;
+    layout_e l = layout_e::NCHW;
+    IE_EXPECT_OK(ie_network_get_input_dims(network, "data", &dim_t));
+    IE_EXPECT_OK(ie_network_set_input_layout(network, "data", l));
+    IE_EXPECT_OK(ie_network_set_input_precision(network, "data", p));
+
+    const char *device_name = "CPU";
+    ie_config_t config = {nullptr, nullptr, nullptr};
+    ie_executable_network_t *exe_network = nullptr;
+    IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network));
+    EXPECT_NE(nullptr, exe_network);
+
+    ie_infer_request_t *infer_request = nullptr;
+    IE_EXPECT_OK(ie_exec_network_create_infer_request(exe_network, &infer_request));
+    EXPECT_NE(nullptr, infer_request);
+
+    tensor_desc tensor;
+    tensor.dims = dim_t;
+    tensor.precision = p;
+    tensor.layout = l;
+    ie_blob_t *blob = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor, &blob));
+
+    IE_EXPECT_OK(ie_infer_request_set_blob(infer_request, "data", blob));
+
+    ie_blob_deallocate(&blob);
+    ie_infer_request_free(&infer_request);
+    ie_exec_network_free(&exe_network);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_infer_request_infer, infer) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    IE_EXPECT_OK(ie_network_set_input_precision(network, "data", precision_e::U8));
+
+    const char *device_name = "CPU";
+    ie_config_t config = {nullptr, nullptr, nullptr};
+    ie_executable_network_t *exe_network = nullptr;
+    IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network));
+    EXPECT_NE(nullptr, exe_network);
+
+    ie_infer_request_t *infer_request = nullptr;
+    IE_EXPECT_OK(ie_exec_network_create_infer_request(exe_network, &infer_request));
+    EXPECT_NE(nullptr, infer_request);
+
+    ie_blob_t *blob = nullptr;
+    IE_EXPECT_OK(ie_infer_request_get_blob(infer_request, "data", &blob));
+
+    cv::Mat image = cv::imread(input_image);
+    Mat2Blob(image, blob);
+
+    IE_EXPECT_OK(ie_infer_request_infer(infer_request));
+
+    ie_blob_t *output_blob = nullptr;
+    IE_EXPECT_OK(ie_infer_request_get_blob(infer_request, "fc_out", &output_blob));
+    dimensions_t dim_res;
+    IE_EXPECT_OK(ie_blob_get_dims(output_blob, &dim_res));
+    EXPECT_EQ(dim_res.ranks, 2);
+    EXPECT_EQ(dim_res.dims[1], 10);
+
+    ie_blob_buffer_t buffer;
+    IE_EXPECT_OK(ie_blob_get_buffer(output_blob, &buffer));
+    float *output_data = (float *)(buffer.buffer);
+    EXPECT_NEAR(output_data[9], 0.f, 1.e-5);
+
+    ie_blob_free(&output_blob);
+    ie_blob_free(&blob);
+    ie_infer_request_free(&infer_request);
+    ie_exec_network_free(&exe_network);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_infer_request_infer_async, inferAsyncWaitFinish) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    IE_EXPECT_OK(ie_network_set_input_precision(network, "data", precision_e::U8));
+
+    const char *device_name = "CPU";
+    ie_config_t config = {nullptr, nullptr, nullptr};
+    ie_executable_network_t *exe_network = nullptr;
+    IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network));
+    EXPECT_NE(nullptr, exe_network);
+
+    ie_infer_request_t *infer_request = nullptr;
+    IE_EXPECT_OK(ie_exec_network_create_infer_request(exe_network, &infer_request));
+    EXPECT_NE(nullptr, infer_request);
+
+    ie_blob_t *blob = nullptr;
+    IE_EXPECT_OK(ie_infer_request_get_blob(infer_request, "data", &blob));
+
+    cv::Mat image = cv::imread(input_image);
+    Mat2Blob(image, blob);
+
+    IE_EXPECT_OK(ie_infer_request_infer_async(infer_request));
+
+    ie_blob_t *output_blob = nullptr;
+    if (!HasFatalFailure()) {
+        IE_EXPECT_OK(ie_infer_request_wait(infer_request, -1));
+
+        IE_EXPECT_OK(ie_infer_request_get_blob(infer_request, "fc_out", &output_blob));
+        EXPECT_NE(nullptr, output_blob);
+
+        ie_blob_buffer_t buffer;
+        IE_EXPECT_OK(ie_blob_get_buffer(output_blob, &buffer));
+        float *output_data = (float *)(buffer.buffer);
+        EXPECT_NEAR(output_data[9], 0.f, 1.e-5);
+    }
+
+    ie_blob_free(&output_blob);
+    ie_blob_free(&blob);
+    ie_infer_request_free(&infer_request);
+    ie_exec_network_free(&exe_network);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_infer_request_infer_async, inferAsyncWaitTime) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    IE_EXPECT_OK(ie_network_set_input_precision(network, "data", precision_e::U8));
+
+    const char *device_name = "CPU";
+    ie_config_t config = {nullptr, nullptr, nullptr};
+    ie_executable_network_t *exe_network = nullptr;
+    IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network));
+    EXPECT_NE(nullptr, exe_network);
+
+    ie_infer_request_t *infer_request = nullptr;
+    IE_EXPECT_OK(ie_exec_network_create_infer_request(exe_network, &infer_request));
+    EXPECT_NE(nullptr, infer_request);
+
+    ie_blob_t *blob = nullptr;
+    IE_EXPECT_OK(ie_infer_request_get_blob(infer_request, "data", &blob));
+    EXPECT_NE(nullptr, blob);
+
+    cv::Mat image = cv::imread(input_image);
+    Mat2Blob(image, blob);
+
+    IE_EXPECT_OK(ie_infer_request_infer_async(infer_request));
+
+    ie_blob_t *output_blob = nullptr;
+    if (!HasFatalFailure()) {
+        auto waitStatus = ie_infer_request_wait(infer_request, 10);
+        EXPECT_TRUE((IEStatusCode::OK == waitStatus) || (IEStatusCode::RESULT_NOT_READY == waitStatus));
+        if (IEStatusCode::RESULT_NOT_READY == waitStatus) {
+            IE_EXPECT_OK(ie_infer_request_wait(infer_request, -1));
+        }
+
+        IE_EXPECT_OK(ie_infer_request_get_blob(infer_request, "fc_out", &output_blob));
+
+        ie_blob_buffer_t buffer;
+        IE_EXPECT_OK(ie_blob_get_buffer(output_blob, &buffer));
+        float *output_data = (float *)(buffer.buffer);
+        EXPECT_NEAR(output_data[9], 0.f, 1.e-5);
+    }
+
+    ie_blob_free(&output_blob);
+    ie_blob_free(&blob);
+    ie_infer_request_free(&infer_request);
+    ie_exec_network_free(&exe_network);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_infer_request_set_batch, setBatch) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    const char *device_name = "CPU";
+    ie_config_t config = {"DYN_BATCH_ENABLED", "YES", nullptr};
+    IE_EXPECT_OK(ie_core_set_config(core, &config, device_name));
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    ie_executable_network_t *exe_network = nullptr;
+    IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network));
+    EXPECT_NE(nullptr, exe_network);
+
+    ie_infer_request_t *infer_request = nullptr;
+    IE_EXPECT_OK(ie_exec_network_create_infer_request(exe_network, &infer_request));
+    EXPECT_NE(nullptr, infer_request);
+
+    IE_EXPECT_OK(ie_infer_request_set_batch(infer_request, 1));
+
+    ie_infer_request_free(&infer_request);
+    ie_exec_network_free(&exe_network);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_infer_request_set_batch, setZeroBatch) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    const char *device_name = "CPU";
+    ie_config_t config = {"DYN_BATCH_ENABLED", "YES", nullptr};
+    IE_EXPECT_OK(ie_core_set_config(core, &config, device_name));
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    ie_executable_network_t *exe_network = nullptr;
+    IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network));
+    EXPECT_NE(nullptr, exe_network);
+
+    ie_infer_request_t *infer_request = nullptr;
+    IE_EXPECT_OK(ie_exec_network_create_infer_request(exe_network, &infer_request));
+    EXPECT_NE(nullptr, infer_request);
+
+    EXPECT_EQ(IEStatusCode::GENERAL_ERROR, ie_infer_request_set_batch(infer_request, 0));
+
+    ie_infer_request_free(&infer_request);
+    ie_exec_network_free(&exe_network);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_infer_request_set_batch, setNegativeBatch) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    const char *device_name = "CPU";
+    ie_config_t config = {"DYN_BATCH_ENABLED", "YES", nullptr};
+    IE_EXPECT_OK(ie_core_set_config(core, &config, device_name));
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    ie_executable_network_t *exe_network = nullptr;
+    IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network));
+    EXPECT_NE(nullptr, exe_network);
+
+    ie_infer_request_t *infer_request = nullptr;
+    IE_EXPECT_OK(ie_exec_network_create_infer_request(exe_network, &infer_request));
+    EXPECT_NE(nullptr, infer_request);
+
+    EXPECT_EQ(IEStatusCode::GENERAL_ERROR, ie_infer_request_set_batch(infer_request, -1));
+
+    ie_infer_request_free(&infer_request);
+    ie_exec_network_free(&exe_network);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
+TEST(ie_blob_make_memory, makeMemory) {
+    dimensions_t dim_t;
+    dim_t.ranks = 4;
+    dim_t.dims[0] = 1, dim_t.dims[1] = 3, dim_t.dims[2] = 4, dim_t.dims[3] = 4;
+    tensor_desc tensor;
+    tensor.dims = dim_t;
+    tensor.precision = precision_e::U8;
+    tensor.layout = layout_e::NCHW;
+
+    ie_blob_t *blob = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor, &blob));
+    EXPECT_NE(nullptr, blob);
+
+    ie_blob_deallocate(&blob);
+}
+
+TEST(ie_blob_make_memory_from_preallocated, makeMemoryFromPreallocated) {
+    dimensions_t dim_t;
+    dim_t.ranks = 4;
+    dim_t.dims[0] = 1, dim_t.dims[1] = 3, dim_t.dims[2] = 4, dim_t.dims[3] = 4;
+    tensor_desc tensor;
+    tensor.dims = dim_t;
+    tensor.precision = precision_e::U8;
+    tensor.layout = layout_e::NCHW;
+    uint8_t array[1][3][4][4]= {0};
+
+    size_t size = 48;
+    ie_blob_t *blob = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory_from_preallocated(&tensor, &array, size, &blob));
+    EXPECT_NE(nullptr, blob);
+
+    ie_blob_free(&blob);
+}
+
+TEST(ie_blob_make_memory_with_roi, makeMemoryWithROI) {
+    dimensions_t dim_t;
+    dim_t.ranks = 4;
+    dim_t.dims[0] = 1, dim_t.dims[1] = 3, dim_t.dims[2] = 4, dim_t.dims[3] = 4;
+    tensor_desc tensor;
+    tensor.dims = dim_t;
+    tensor.precision = precision_e::U8;
+    tensor.layout = layout_e::NCHW;
+
+    ie_blob_t *input_blob = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor, &input_blob));
+    EXPECT_NE(nullptr, input_blob);
+
+    roi_t roi = {0, 0, 0, 1, 1};
+    ie_blob_t *blob = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory_with_roi(input_blob, &roi, &blob));
+    EXPECT_NE(nullptr, blob);
+
+    ie_blob_deallocate(&blob);
+    ie_blob_free(&input_blob);
+}
+
+TEST(ie_blob_deallocate, blobDeallocate) {
+    dimensions_t dim_t;
+    dim_t.ranks = 4;
+    dim_t.dims[0] = 1, dim_t.dims[1] = 3, dim_t.dims[2] = 4, dim_t.dims[3] = 4;
+    tensor_desc tensor;
+    tensor.dims = dim_t;
+    tensor.precision = precision_e::U8;
+    tensor.layout = layout_e::NCHW;
+
+    ie_blob_t *blob = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor, &blob));
+    EXPECT_NE(nullptr, blob);
+
+    ie_blob_deallocate(&blob);
+}
+
+TEST(ie_blob_get_dims, getDims) {
+    dimensions_t dim_t;
+    dim_t.ranks = 4;
+    dim_t.dims[0] = 1, dim_t.dims[1] = 3, dim_t.dims[2] = 4, dim_t.dims[3] = 4;
+    tensor_desc tensor;
+    tensor.dims = dim_t;
+    tensor.precision = precision_e::U8;
+    tensor.layout = layout_e::NCHW;
+
+    ie_blob_t *blob = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor, &blob));
+    EXPECT_NE(nullptr, blob);
+
+    dimensions_t dim_res;
+    IE_EXPECT_OK(ie_blob_get_dims(blob, &dim_res));
+    EXPECT_EQ(dim_t.ranks, dim_res.ranks);
+
+    ie_blob_deallocate(&blob);
+}
+
+TEST(ie_blob_get_layout, getLayout) {
+    dimensions_t dim_t;
+    dim_t.ranks = 4;
+    dim_t.dims[0] = 1, dim_t.dims[1] = 3, dim_t.dims[2] = 4, dim_t.dims[3] = 4;
+    tensor_desc tensor;
+    tensor.dims = dim_t;
+    tensor.precision = precision_e::U8;
+    tensor.layout = layout_e::NCHW;
+
+    ie_blob_t *blob = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor, &blob));
+    EXPECT_NE(nullptr, blob);
+
+    layout_e l;
+    IE_EXPECT_OK(ie_blob_get_layout(blob, &l));
+    EXPECT_EQ(tensor.layout, l);
+
+    ie_blob_deallocate(&blob);
+}
+
+TEST(ie_blob_get_precision, getPrecision) {
+    dimensions_t dim_t;
+    dim_t.ranks = 4;
+    dim_t.dims[0] = 1, dim_t.dims[1] = 3, dim_t.dims[2] = 4, dim_t.dims[3] = 4;
+    tensor_desc tensor;
+    tensor.dims = dim_t;
+    tensor.precision = precision_e::U8;
+    tensor.layout = layout_e::NCHW;
+
+    ie_blob_t *blob = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor, &blob));
+    EXPECT_NE(nullptr, blob);
+
+    precision_e p;
+    IE_EXPECT_OK(ie_blob_get_precision(blob, &p));
+    EXPECT_EQ(tensor.precision, p);
+
+    ie_blob_deallocate(&blob);
+}
+
+TEST(ie_blob_size, getSize) {
+    dimensions_t dim_t;
+    dim_t.ranks = 4;
+    dim_t.dims[0] = 1, dim_t.dims[1] = 3, dim_t.dims[2] = 4, dim_t.dims[3] = 4;
+    tensor_desc tensor;
+    tensor.dims = dim_t;
+    tensor.precision = precision_e::I16;
+    tensor.layout = layout_e::NCHW;
+
+    ie_blob_t *blob = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor, &blob));
+    EXPECT_NE(nullptr, blob);
+
+    int size_res;
+    IE_EXPECT_OK(ie_blob_size(blob, &size_res));
+    EXPECT_EQ(size_res, 48);
+
+    ie_blob_deallocate(&blob);
+}
+
+TEST(ie_blob_byte_size, getByteSize) {
+    dimensions_t dim_t;
+    dim_t.ranks = 4;
+    dim_t.dims[0] = 1, dim_t.dims[1] = 3, dim_t.dims[2] = 4, dim_t.dims[3] = 4;
+    tensor_desc tensor;
+    tensor.dims = dim_t;
+    tensor.precision = precision_e::I16;
+    tensor.layout = layout_e::NCHW;
+
+    ie_blob_t *blob = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor, &blob));
+    EXPECT_NE(nullptr, blob);
+
+    int size_res;
+    IE_EXPECT_OK(ie_blob_byte_size(blob, &size_res));
+    EXPECT_EQ(size_res, 96);
+
+    ie_blob_deallocate(&blob);
+}
+
+TEST(ie_blob_get_buffer, getBuffer) {
+    dimensions_t dim_t;
+    dim_t.ranks = 4;
+    dim_t.dims[0] = 1, dim_t.dims[1] = 3, dim_t.dims[2] = 4, dim_t.dims[3] = 4;
+    tensor_desc tensor;
+    tensor.dims = dim_t;
+    tensor.precision = precision_e::U8;
+    tensor.layout = layout_e::NCHW;
+
+    ie_blob_t *blob = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor, &blob));
+    EXPECT_NE(nullptr, blob);
+
+    ie_blob_buffer_t blob_buffer;
+    IE_EXPECT_OK(ie_blob_get_buffer(blob, &blob_buffer));
+    EXPECT_NE(nullptr, blob_buffer.buffer);
+
+    ie_blob_deallocate(&blob);
+}
+
+TEST(ie_blob_get_cbuffer, getCBuffer) {
+    dimensions_t dim_t;
+    dim_t.ranks = 4;
+    dim_t.dims[0] = 1, dim_t.dims[1] = 3, dim_t.dims[2] = 4, dim_t.dims[3] = 4;
+    tensor_desc tensor;
+    tensor.dims = dim_t;
+    tensor.precision = precision_e::U8;
+    tensor.layout = layout_e::NCHW;
+
+    ie_blob_t *blob = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor, &blob));
+    EXPECT_NE(nullptr, blob);
+
+    ie_blob_buffer_t blob_cbuffer;
+    IE_EXPECT_OK(ie_blob_get_cbuffer(blob, &blob_cbuffer));
+    EXPECT_NE(nullptr, blob_cbuffer.cbuffer);
+
+    ie_blob_deallocate(&blob);
+}
+
+TEST(ie_infer_set_completion_callback, setCallback) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    IE_EXPECT_OK(ie_network_set_input_precision(network, "data", precision_e::U8));
+
+    const char *device_name = "CPU";
+    ie_config_t config = {nullptr, nullptr, nullptr};
+    ie_executable_network_t *exe_network = nullptr;
+    IE_EXPECT_OK(ie_core_load_network(core, network, device_name, &config, &exe_network));
+    EXPECT_NE(nullptr, exe_network);
+
+    ie_infer_request_t *infer_request = nullptr;
+    IE_EXPECT_OK(ie_exec_network_create_infer_request(exe_network, &infer_request));
+    EXPECT_NE(nullptr, infer_request);
+
+    ie_blob_t *blob = nullptr;
+    IE_EXPECT_OK(ie_infer_request_get_blob(infer_request, "data", &blob));
+
+    cv::Mat image = cv::imread(input_image);
+    Mat2Blob(image, blob);
+
+    ie_complete_call_back_t callback;
+    callback.completeCallBackFunc = completion_callback;
+    callback.args = infer_request;
+
+    IE_EXPECT_OK(ie_infer_set_completion_callback(infer_request, &callback));
+
+    IE_EXPECT_OK(ie_infer_request_infer_async(infer_request));
+
+    if (!HasFatalFailure()) {
+        std::unique_lock<std::mutex> lock(m);
+        condVar.wait(lock, []{ return ready; });
+    }
+
+    ie_blob_free(&blob);
+    ie_infer_request_free(&infer_request);
+    ie_exec_network_free(&exe_network);
+    ie_network_free(&network);
+    ie_core_free(&core);
+}
+
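+// NV12 blobs are built from two U8 NHWC planes: Y (one channel) and
+// interleaved UV (two channels) at half the Y plane's height and width.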
+TEST(ie_blob_make_memory_nv12, makeNV12Blob) {
+    dimensions_t dim_y = {4, {1, 1, 8, 12}}, dim_uv = {4, {1, 2, 4, 6}};
+    tensor_desc tensor_y, tensor_uv;
+    tensor_y.dims = dim_y;
+    tensor_uv.dims = dim_uv;
+    tensor_y.precision = tensor_uv.precision = precision_e::U8;
+    tensor_y.layout = tensor_uv.layout = layout_e::NHWC;
+
+    ie_blob_t *blob_y = nullptr, *blob_uv = nullptr, *blob_nv12 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_y, &blob_y));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_uv, &blob_uv));
+    IE_EXPECT_OK(ie_blob_make_memory_nv12(blob_y, blob_uv, &blob_nv12));
+
+    ie_blob_free(&blob_nv12);
+    ie_blob_deallocate(&blob_y);
+    ie_blob_deallocate(&blob_uv);
+}
+
+TEST(ie_blob_make_memory_nv12, cannotMakeNV12BlobFromNullptrBlobs) {
+    dimensions_t dim_y = {4, {1, 1, 8, 12}}, dim_uv = {4, {1, 2, 4, 6}};
+    tensor_desc tensor_y, tensor_uv;
+    tensor_y.dims = dim_y;
+    tensor_uv.dims = dim_uv;
+    tensor_y.precision = tensor_uv.precision = precision_e::U8;
+    tensor_y.layout = tensor_uv.layout = layout_e::NHWC;
+
+    ie_blob_t *blob_y = nullptr, *blob_uv = nullptr, *blob_nv12 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_y, &blob_y));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_uv, &blob_uv));
+    ie_blob_deallocate(&blob_y);
+    ie_blob_deallocate(&blob_uv);
+
+    IE_EXPECT_NOT_OK(ie_blob_make_memory_nv12(blob_y, blob_uv, &blob_nv12));
+}
+
+TEST(ie_blob_make_memory_nv12, cannotMakeNV12BlobFromPlanesWithDifferentElementSize) {
+    dimensions_t dim_y = {4, {1, 1, 8, 12}}, dim_uv = {4, {1, 2, 4, 6}};
+    tensor_desc tensor_y, tensor_uv;
+    tensor_y.dims = dim_y;
+    tensor_uv.dims = dim_uv;
+    tensor_y.precision = precision_e::U8;
+    tensor_uv.precision = precision_e::FP32;
+    tensor_y.layout = tensor_uv.layout = layout_e::NHWC;
+
+    ie_blob_t *blob_y = nullptr, *blob_uv = nullptr, *blob_nv12 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_y, &blob_y));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_uv, &blob_uv));
+
+    IE_EXPECT_NOT_OK(ie_blob_make_memory_nv12(blob_y, blob_uv, &blob_nv12));
+
+    ie_blob_deallocate(&blob_y);
+    ie_blob_deallocate(&blob_uv);
+}
+
+TEST(ie_blob_make_memory_nv12, cannotMakeNV12BlobFromPlanesWithNonU8Precision) {
+    dimensions_t dim_y = {4, {1, 1, 8, 12}}, dim_uv = {4, {1, 2, 4, 6}};
+    tensor_desc tensor_y, tensor_uv;
+    tensor_y.dims = dim_y;
+    tensor_uv.dims = dim_uv;
+    tensor_y.precision = tensor_uv.precision = precision_e::FP32;
+    tensor_y.layout = tensor_uv.layout = layout_e::NHWC;
+
+    ie_blob_t *blob_y = nullptr, *blob_uv = nullptr, *blob_nv12 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_y, &blob_y));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_uv, &blob_uv));
+
+    IE_EXPECT_NOT_OK(ie_blob_make_memory_nv12(blob_y, blob_uv, &blob_nv12));
+
+    ie_blob_deallocate(&blob_y);
+    ie_blob_deallocate(&blob_uv);
+}
+
+TEST(ie_blob_make_memory_nv12, cannotMakeNV12BlobWithInconsistentBatchSize) {
+    dimensions_t dim_y = {4, {1, 1, 8, 12}}, dim_uv = {4, {2, 2, 4, 6}};
+    tensor_desc tensor_y, tensor_uv;
+    tensor_y.dims = dim_y;
+    tensor_uv.dims = dim_uv;
+    tensor_y.precision = tensor_uv.precision = precision_e::U8;
+    tensor_y.layout = tensor_uv.layout = layout_e::NHWC;
+
+    ie_blob_t *blob_y = nullptr, *blob_uv = nullptr, *blob_nv12 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_y, &blob_y));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_uv, &blob_uv));
+
+    IE_EXPECT_NOT_OK(ie_blob_make_memory_nv12(blob_y, blob_uv, &blob_nv12));
+
+    ie_blob_deallocate(&blob_y);
+    ie_blob_deallocate(&blob_uv);
+}
+
+TEST(ie_blob_make_memory_nv12, cannotMakeNV12BlobWithWrongChannelNumber) {
+    dimensions_t dim_y = {4, {1, 1, 8, 12}}, dim_uv = {4, {1, 2, 4, 6}};
+    tensor_desc tensor_y, tensor_uv;
+    tensor_y.dims = dim_y;
+    tensor_uv.dims = dim_uv;
+    tensor_y.precision = tensor_uv.precision = precision_e::U8;
+    tensor_y.layout = tensor_uv.layout = layout_e::NHWC;
+
+    ie_blob_t *blob_y = nullptr, *blob_uv = nullptr, *blob_nv12 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_y, &blob_y));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_uv, &blob_uv));
+
+    IE_EXPECT_NOT_OK(ie_blob_make_memory_nv12(blob_y, blob_y, &blob_nv12));
+    IE_EXPECT_NOT_OK(ie_blob_make_memory_nv12(blob_uv, blob_uv, &blob_nv12));
+    IE_EXPECT_NOT_OK(ie_blob_make_memory_nv12(blob_uv, blob_y, &blob_nv12));
+
+    ie_blob_deallocate(&blob_y);
+    ie_blob_deallocate(&blob_uv);
+}
+
+TEST(ie_blob_make_memory_nv12, cannotMakeNV12BlobWithWrongHeightRatio) {
+    dimensions_t dim_y = {4, {1, 1, 8, 12}}, dim_uv = {4, {1, 2, 2, 6}};
+    tensor_desc tensor_y, tensor_uv;
+    tensor_y.dims = dim_y;
+    tensor_uv.dims = dim_uv;
+    tensor_y.precision = tensor_uv.precision = precision_e::U8;
+    tensor_y.layout = tensor_uv.layout = layout_e::NHWC;
+
+    ie_blob_t *blob_y = nullptr, *blob_uv = nullptr, *blob_nv12 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_y, &blob_y));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_uv, &blob_uv));
+
+    IE_EXPECT_NOT_OK(ie_blob_make_memory_nv12(blob_y, blob_uv, &blob_nv12));
+
+    ie_blob_deallocate(&blob_y);
+    ie_blob_deallocate(&blob_uv);
+}
+
+TEST(ie_blob_make_memory_nv12, cannotMakeNV12BlobWithWrongWidthRatio) {
+    dimensions_t dim_y = {4, {1, 1, 8, 12}}, dim_uv = {4, {1, 2, 4, 4}};
+    tensor_desc tensor_y, tensor_uv;
+    tensor_y.dims = dim_y;
+    tensor_uv.dims = dim_uv;
+    tensor_y.precision = tensor_uv.precision = precision_e::U8;
+    tensor_y.layout = tensor_uv.layout = layout_e::NHWC;
+
+    ie_blob_t *blob_y = nullptr, *blob_uv = nullptr, *blob_nv12 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_y, &blob_y));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_uv, &blob_uv));
+
+    IE_EXPECT_NOT_OK(ie_blob_make_memory_nv12(blob_y, blob_uv, &blob_nv12));
+
+    ie_blob_deallocate(&blob_y);
+    ie_blob_deallocate(&blob_uv);
+}
+
+TEST(ie_blob_make_memory_nv12, NV12BlobInvalidAfterDeallocateYPlane) {
+    dimensions_t dim_y = {4, {1, 1, 8, 12}}, dim_uv = {4, {1, 2, 4, 6}};
+    tensor_desc tensor_y, tensor_uv;
+    tensor_y.dims = dim_y;
+    tensor_uv.dims = dim_uv;
+    tensor_y.precision = tensor_uv.precision = precision_e::U8;
+    tensor_y.layout = tensor_uv.layout = layout_e::NHWC;
+
+    ie_blob_t *blob_y = nullptr, *blob_uv = nullptr, *blob_nv12 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_y, &blob_y));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_uv, &blob_uv));
+    IE_EXPECT_OK(ie_blob_make_memory_nv12(blob_y, blob_uv, &blob_nv12));
+
+    ie_blob_deallocate(&blob_y);
+    ie_blob_buffer_t buffer;
+    IE_EXPECT_OK(ie_blob_get_buffer(blob_nv12, &buffer));
+    EXPECT_EQ(nullptr, buffer.buffer);
+
+    ie_blob_deallocate(&blob_uv);
+    ie_blob_free(&blob_nv12);
+}
+
+TEST(ie_blob_make_memory_nv12, NV12BlobInvalidAfterDeallocateUVPlane) {
+    dimensions_t dim_y = {4, {1, 1, 8, 12}}, dim_uv = {4, {1, 2, 4, 6}};
+    tensor_desc tensor_y, tensor_uv;
+    tensor_y.dims = dim_y;
+    tensor_uv.dims = dim_uv;
+    tensor_y.precision = tensor_uv.precision = precision_e::U8;
+    tensor_y.layout = tensor_uv.layout = layout_e::NHWC;
+
+    ie_blob_t *blob_y = nullptr, *blob_uv = nullptr, *blob_nv12 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_y, &blob_y));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_uv, &blob_uv));
+    IE_EXPECT_OK(ie_blob_make_memory_nv12(blob_y, blob_uv, &blob_nv12));
+
+    ie_blob_deallocate(&blob_uv);
+    ie_blob_buffer_t buffer;
+    IE_EXPECT_OK(ie_blob_get_buffer(blob_nv12, &buffer));
+    EXPECT_EQ(nullptr, buffer.buffer);
+
+    ie_blob_deallocate(&blob_y);
+    ie_blob_free(&blob_nv12);
+}
+
+TEST(ie_blob_make_memory_nv12, inferRequestWithNV12Blob) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    IE_EXPECT_OK(ie_network_set_input_precision(network, "data", precision_e::U8));
+    IE_EXPECT_OK(ie_network_set_input_layout(network, "data", layout_e::NCHW));
+    IE_EXPECT_OK(ie_network_set_input_resize_algorithm(network, "data", resize_alg_e::RESIZE_BILINEAR));
+    IE_EXPECT_OK(ie_network_set_color_format(network, "data", colorformat_e::NV12));
+    IE_EXPECT_OK(ie_network_set_output_precision(network, "fc_out", precision_e::FP32));
+
+    ie_config_t config = {nullptr, nullptr, nullptr};
+    ie_executable_network_t *exe_network = nullptr;
+    IE_EXPECT_OK(ie_core_load_network(core, network, "CPU", &config, &exe_network));
+    EXPECT_NE(nullptr, exe_network);
+
+    ie_infer_request_t *infer_request = nullptr;
+    IE_EXPECT_OK(ie_exec_network_create_infer_request(exe_network, &infer_request));
+    EXPECT_NE(nullptr, infer_request);
+
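+    // NV12 layout: a full-resolution Y plane followed by an interleaved
+    // half-resolution UV plane, hence width * height * 3 / 2 bytes in total.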
+    size_t img_width = 224, img_height = 224;
+    size_t img_size = img_width * (img_height * 3 / 2);
+    unsigned char *img_data = (unsigned char *)calloc(img_size, sizeof(unsigned char));
+    EXPECT_NE(nullptr, img_data);
+    EXPECT_EQ(img_size, read_image_from_file(input_image_nv12, img_data, img_size));
+
+    dimensions_t dim_y = {4, {1, 1, img_height, img_width}};
+    dimensions_t dim_uv = {4, {1, 2, img_height / 2, img_width / 2}};
+    tensor_desc_t tensor_y, tensor_uv;
+    tensor_y.dims = dim_y;
+    tensor_uv.dims = dim_uv;
+    tensor_y.precision = tensor_uv.precision = precision_e::U8;
+    tensor_y.layout = tensor_uv.layout = layout_e::NHWC;
+    const size_t offset = img_width * img_height;
+
+    ie_blob_t *blob_y = nullptr, *blob_uv = nullptr, *blob_nv12 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory_from_preallocated(&tensor_y, img_data, img_width * img_height, &blob_y));
+    IE_EXPECT_OK(ie_blob_make_memory_from_preallocated(&tensor_uv, img_data + offset, img_width * (img_height / 2), &blob_uv));
+    IE_EXPECT_OK(ie_blob_make_memory_nv12(blob_y, blob_uv, &blob_nv12));
+
+    IE_EXPECT_OK(ie_infer_request_set_blob(infer_request, "data", blob_nv12));
+    IE_EXPECT_OK(ie_infer_request_infer(infer_request));
+
+    ie_blob_t *output_blob = nullptr;
+    IE_EXPECT_OK(ie_infer_request_get_blob(infer_request, "fc_out", &output_blob));
+
+    ie_blob_buffer_t buffer;
+    IE_EXPECT_OK(ie_blob_get_buffer(output_blob, &buffer));
+    float *output_data = (float *)(buffer.buffer);
+    EXPECT_NEAR(output_data[1], 0.f, 1.e-5);
+
+    ie_blob_free(&output_blob);
+    ie_blob_free(&blob_nv12);
+    ie_blob_free(&blob_uv);
+    ie_blob_free(&blob_y);
+    ie_infer_request_free(&infer_request);
+    ie_exec_network_free(&exe_network);
+    ie_network_free(&network);
+    ie_core_free(&core);
+    free(img_data);
+}
+
+TEST(ie_blob_make_memory_i420, makeI420Blob) {
+    dimensions_t dim_y = {4, {1, 1, 8, 12}}, dim_u = {4, {1, 1, 4, 6}}, dim_v = {4, {1, 1, 4, 6}};
+    tensor_desc tensor_y, tensor_u, tensor_v;
+    tensor_y.dims = dim_y;
+    tensor_u.dims = dim_u;
+    tensor_v.dims = dim_v;
+    tensor_y.precision = tensor_u.precision = tensor_v.precision = precision_e::U8;
+    tensor_y.layout = tensor_u.layout = tensor_v.layout = layout_e::NHWC;
+
+    ie_blob_t *blob_y = nullptr, *blob_u = nullptr, *blob_v = nullptr, *blob_i420 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_y, &blob_y));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_u, &blob_u));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_v, &blob_v));
+    IE_EXPECT_OK(ie_blob_make_memory_i420(blob_y, blob_u, blob_v, &blob_i420));
+
+    ie_blob_free(&blob_i420);
+    ie_blob_deallocate(&blob_y);
+    ie_blob_deallocate(&blob_u);
+    ie_blob_deallocate(&blob_v);
+}
+
+TEST(ie_blob_make_memory_i420, cannotMakeI420BlobFromNullptrBlobs) {
+    dimensions_t dim = {4, {1, 1, 8, 12}};
+    tensor_desc tensor;
+    tensor.dims = dim;
+    tensor.precision = precision_e::U8;
+    tensor.layout = layout_e::NHWC;
+
+    ie_blob_t *blob = nullptr, *blob_i420 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor, &blob));
+    IE_EXPECT_NOT_OK(ie_blob_make_memory_i420(blob, nullptr, nullptr, &blob_i420));
+    IE_EXPECT_NOT_OK(ie_blob_make_memory_i420(nullptr, blob, nullptr, &blob_i420));
+
+    ie_blob_deallocate(&blob);
+}
+
+TEST(ie_blob_make_memory_i420, cannotMakeI420BlobFromPlanesWithDifferentElementSize) {
+    dimensions_t dim_y = {4, {1, 1, 8, 12}}, dim_u = {4, {1, 1, 4, 6}}, dim_v = {4, {1, 1, 4, 6}};
+    tensor_desc tensor_y, tensor_u, tensor_v;
+    tensor_y.dims = dim_y;
+    tensor_u.dims = dim_u;
+    tensor_v.dims = dim_v;
+    tensor_y.precision = precision_e::U8;
+    tensor_u.precision = tensor_v.precision = precision_e::FP32;
+    tensor_y.layout = tensor_u.layout = tensor_v.layout = layout_e::NHWC;
+
+    ie_blob_t *blob_y = nullptr, *blob_u = nullptr, *blob_v = nullptr, *blob_i420 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_y, &blob_y));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_u, &blob_u));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_v, &blob_v));
+    IE_EXPECT_NOT_OK(ie_blob_make_memory_i420(blob_y, blob_u, blob_v, &blob_i420));
+
+    ie_blob_deallocate(&blob_y);
+    ie_blob_deallocate(&blob_u);
+    ie_blob_deallocate(&blob_v);
+}
+
+TEST(ie_blob_make_memory_i420, cannotMakeI420BlobFromPlanesWithNonU8Precision) {
+    dimensions_t dim_y = {4, {1, 1, 8, 12}}, dim_u = {4, {1, 1, 4, 6}}, dim_v = {4, {1, 1, 4, 6}};
+    tensor_desc tensor_y, tensor_u, tensor_v;
+    tensor_y.dims = dim_y;
+    tensor_u.dims = dim_u;
+    tensor_v.dims = dim_v;
+    tensor_y.precision = tensor_u.precision = tensor_v.precision = precision_e::FP32;
+    tensor_y.layout = tensor_u.layout = tensor_v.layout = layout_e::NHWC;
+
+    ie_blob_t *blob_y = nullptr, *blob_u = nullptr, *blob_v = nullptr, *blob_i420 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_y, &blob_y));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_u, &blob_u));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_v, &blob_v));
+    IE_EXPECT_NOT_OK(ie_blob_make_memory_i420(blob_y, blob_u, blob_v, &blob_i420));
+
+    ie_blob_deallocate(&blob_y);
+    ie_blob_deallocate(&blob_u);
+    ie_blob_deallocate(&blob_v);
+}
+
+TEST(ie_blob_make_memory_i420, cannotMakeI420BlobFromPlanesWithInconsistentBatchSize) {
+    dimensions_t dim_y = {4, {1, 1, 8, 12}}, dim_u = {4, {2, 1, 4, 6}}, dim_v = {4, {1, 1, 4, 6}};
+    tensor_desc tensor_y, tensor_u, tensor_v;
+    tensor_y.dims = dim_y;
+    tensor_u.dims = dim_u;
+    tensor_v.dims = dim_v;
+    tensor_y.precision = tensor_u.precision = tensor_v.precision = precision_e::U8;
+    tensor_y.layout = tensor_u.layout = tensor_v.layout = layout_e::NHWC;
+
+    ie_blob_t *blob_y = nullptr, *blob_u = nullptr, *blob_v = nullptr, *blob_i420 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_y, &blob_y));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_u, &blob_u));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_v, &blob_v));
+    IE_EXPECT_NOT_OK(ie_blob_make_memory_i420(blob_y, blob_u, blob_v, &blob_i420));
+
+    ie_blob_deallocate(&blob_y);
+    ie_blob_deallocate(&blob_u);
+    ie_blob_deallocate(&blob_v);
+}
+
+TEST(ie_blob_make_memory_i420, cannotMakeI420BlobFromPlanesWithWrongChannelNumber) {
+    dimensions_t dim_y = {4, {1, 1, 8, 12}}, dim_u = {4, {1, 2, 4, 6}}, dim_v = {4, {1, 1, 4, 6}};
+    tensor_desc tensor_y, tensor_u, tensor_v;
+    tensor_y.dims = dim_y;
+    tensor_u.dims = dim_u;
+    tensor_v.dims = dim_v;
+    tensor_y.precision = tensor_u.precision = tensor_v.precision = precision_e::U8;
+    tensor_y.layout = tensor_u.layout = tensor_v.layout = layout_e::NHWC;
+
+    ie_blob_t *blob_y = nullptr, *blob_u = nullptr, *blob_v = nullptr, *blob_i420 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_y, &blob_y));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_u, &blob_u));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_v, &blob_v));
+    IE_EXPECT_NOT_OK(ie_blob_make_memory_i420(blob_y, blob_u, blob_v, &blob_i420));
+
+    ie_blob_deallocate(&blob_y);
+    ie_blob_deallocate(&blob_u);
+    ie_blob_deallocate(&blob_v);
+}
+
+TEST(ie_blob_make_memory_i420, cannotMakeI420BlobFromPlanesWithWrongWidthRatio) {
+    dimensions_t dim_y = {4, {1, 1, 8, 12}}, dim_u = {4, {1, 1, 4, 4}}, dim_v = {4, {1, 1, 4, 6}};
+    tensor_desc tensor_y, tensor_u, tensor_v;
+    tensor_y.dims = dim_y;
+    tensor_u.dims = dim_u;
+    tensor_v.dims = dim_v;
+    tensor_y.precision = tensor_u.precision = tensor_v.precision = precision_e::U8;
+    tensor_y.layout = tensor_u.layout = tensor_v.layout = layout_e::NHWC;
+
+    ie_blob_t *blob_y = nullptr, *blob_u = nullptr, *blob_v = nullptr, *blob_i420 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_y, &blob_y));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_u, &blob_u));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_v, &blob_v));
+    IE_EXPECT_NOT_OK(ie_blob_make_memory_i420(blob_y, blob_u, blob_v, &blob_i420));
+
+    ie_blob_deallocate(&blob_y);
+    ie_blob_deallocate(&blob_u);
+    ie_blob_deallocate(&blob_v);
+}
+
+TEST(ie_blob_make_memory_i420, cannotMakeI420BlobFromPlanesWithWrongHeightRatio) {
+    dimensions_t dim_y = {4, {1, 1, 8, 12}}, dim_u = {4, {1, 1, 2, 6}}, dim_v = {4, {1, 1, 4, 6}};
+    tensor_desc tensor_y, tensor_u, tensor_v;
+    tensor_y.dims = dim_y;
+    tensor_u.dims = dim_u;
+    tensor_v.dims = dim_v;
+    tensor_y.precision = tensor_u.precision = tensor_v.precision = precision_e::U8;
+    tensor_y.layout = tensor_u.layout = tensor_v.layout = layout_e::NHWC;
+
+    ie_blob_t *blob_y = nullptr, *blob_u = nullptr, *blob_v = nullptr, *blob_i420 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_y, &blob_y));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_u, &blob_u));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_v, &blob_v));
+    IE_EXPECT_NOT_OK(ie_blob_make_memory_i420(blob_y, blob_u, blob_v, &blob_i420));
+
+    ie_blob_deallocate(&blob_y);
+    ie_blob_deallocate(&blob_u);
+    ie_blob_deallocate(&blob_v);
+}
+
+TEST(ie_blob_make_memory_i420, I420BlobInvalidAfterDeallocateYPlane) {
+    dimensions_t dim_y = {4, {1, 1, 8, 12}}, dim_u = {4, {1, 1, 4, 6}}, dim_v = {4, {1, 1, 4, 6}};
+    tensor_desc tensor_y, tensor_u, tensor_v;
+    tensor_y.dims = dim_y;
+    tensor_u.dims = dim_u;
+    tensor_v.dims = dim_v;
+    tensor_y.precision = tensor_u.precision = tensor_v.precision = precision_e::U8;
+    tensor_y.layout = tensor_u.layout = tensor_v.layout = layout_e::NHWC;
+
+    ie_blob_t *blob_y = nullptr, *blob_u = nullptr, *blob_v = nullptr, *blob_i420 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_y, &blob_y));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_u, &blob_u));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_v, &blob_v));
+    IE_EXPECT_OK(ie_blob_make_memory_i420(blob_y, blob_u, blob_v, &blob_i420));
+
+    ie_blob_deallocate(&blob_y);
+    ie_blob_buffer_t i420_buffer;
+    IE_EXPECT_OK(ie_blob_get_buffer(blob_i420, &i420_buffer));
+    EXPECT_EQ(nullptr, i420_buffer.buffer);
+
+    ie_blob_deallocate(&blob_u);
+    ie_blob_deallocate(&blob_v);
+    ie_blob_free(&blob_i420);
+}
+
+TEST(ie_blob_make_memory_i420, I420BlobInvalidAfterDeallocateUPlane) {
+    dimensions_t dim_y = {4, {1, 1, 8, 12}}, dim_u = {4, {1, 1, 4, 6}}, dim_v = {4, {1, 1, 4, 6}};
+    tensor_desc tensor_y, tensor_u, tensor_v;
+    tensor_y.dims = dim_y;
+    tensor_u.dims = dim_u;
+    tensor_v.dims = dim_v;
+    tensor_y.precision = tensor_u.precision = tensor_v.precision = precision_e::U8;
+    tensor_y.layout = tensor_u.layout = tensor_v.layout = layout_e::NHWC;
+
+    ie_blob_t *blob_y = nullptr, *blob_u = nullptr, *blob_v = nullptr, *blob_i420 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_y, &blob_y));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_u, &blob_u));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_v, &blob_v));
+    IE_EXPECT_OK(ie_blob_make_memory_i420(blob_y, blob_u, blob_v, &blob_i420));
+
+    ie_blob_deallocate(&blob_u);
+    ie_blob_buffer_t i420_buffer;
+    IE_EXPECT_OK(ie_blob_get_buffer(blob_i420, &i420_buffer));
+    EXPECT_EQ(nullptr, i420_buffer.buffer);
+
+    ie_blob_deallocate(&blob_y);
+    ie_blob_deallocate(&blob_v);
+    ie_blob_free(&blob_i420);
+}
+
+TEST(ie_blob_make_memory_i420, I420BlobInvalidAfterDeallocateVPlane) {
+    dimensions_t dim_y = {4, {1, 1, 8, 12}}, dim_u = {4, {1, 1, 4, 6}}, dim_v = {4, {1, 1, 4, 6}};
+    tensor_desc tensor_y, tensor_u, tensor_v;
+    tensor_y.dims = dim_y;
+    tensor_u.dims = dim_u;
+    tensor_v.dims = dim_v;
+    tensor_y.precision = tensor_u.precision = tensor_v.precision = precision_e::U8;
+    tensor_y.layout = tensor_u.layout = tensor_v.layout = layout_e::NHWC;
+
+    ie_blob_t *blob_y = nullptr, *blob_u = nullptr, *blob_v = nullptr, *blob_i420 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_y, &blob_y));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_u, &blob_u));
+    IE_EXPECT_OK(ie_blob_make_memory(&tensor_v, &blob_v));
+    IE_EXPECT_OK(ie_blob_make_memory_i420(blob_y, blob_u, blob_v, &blob_i420));
+
+    ie_blob_deallocate(&blob_v);
+    ie_blob_buffer_t i420_buffer;
+    IE_EXPECT_OK(ie_blob_get_buffer(blob_i420, &i420_buffer));
+    EXPECT_EQ(nullptr, i420_buffer.buffer);
+
+    ie_blob_deallocate(&blob_y);
+    ie_blob_deallocate(&blob_u);
+    ie_blob_free(&blob_i420);
+}
+
+TEST(ie_blob_make_memory_i420, inferRequestWithI420) {
+    ie_core_t *core = nullptr;
+    IE_ASSERT_OK(ie_core_create("", &core));
+    ASSERT_NE(nullptr, core);
+
+    ie_network_t *network = nullptr;
+    IE_EXPECT_OK(ie_core_read_network(core, xml, bin, &network));
+    EXPECT_NE(nullptr, network);
+
+    IE_EXPECT_OK(ie_network_set_input_precision(network, "data", precision_e::U8));
+    IE_EXPECT_OK(ie_network_set_input_layout(network, "data", layout_e::NCHW));
+    IE_EXPECT_OK(ie_network_set_input_resize_algorithm(network, "data", resize_alg_e::RESIZE_BILINEAR));
+    IE_EXPECT_OK(ie_network_set_color_format(network, "data", colorformat_e::I420));
+    IE_EXPECT_OK(ie_network_set_output_precision(network, "fc_out", precision_e::FP32));
+
+    ie_config_t config = {nullptr, nullptr, nullptr};
+    ie_executable_network_t *exe_network = nullptr;
+    IE_EXPECT_OK(ie_core_load_network(core, network, "CPU", &config, &exe_network));
+    EXPECT_NE(nullptr, exe_network);
+
+    ie_infer_request_t *infer_request = nullptr;
+    IE_EXPECT_OK(ie_exec_network_create_infer_request(exe_network, &infer_request));
+    EXPECT_NE(nullptr, infer_request);
+
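+    // I420 layout: a full-resolution Y plane followed by separate quarter-size
+    // U and V planes, so the total size is likewise width * height * 3 / 2 bytes.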
+    size_t img_width = 224, img_height = 224;
+    size_t img_size = img_width * (img_height * 3 / 2);
+    unsigned char *img_data = (unsigned char *)calloc(img_size, sizeof(unsigned char));
+    EXPECT_NE(nullptr, img_data);
+    EXPECT_EQ(img_size, read_image_from_file(input_image_nv12, img_data, img_size));
+
+    dimensions_t dim_y = {4, {1, 1, img_height, img_width}};
+    dimensions_t dim_u = {4, {1, 1, img_height / 2, img_width / 2}};
+    dimensions_t dim_v = {4, {1, 1, img_height / 2, img_width / 2}};
+    tensor_desc_t tensor_y, tensor_u, tensor_v;
+    tensor_y.dims = dim_y;
+    tensor_u.dims = dim_u;
+    tensor_v.dims = dim_v;
+    tensor_y.precision = tensor_u.precision = tensor_v.precision = precision_e::U8;
+    tensor_y.layout = tensor_u.layout = tensor_v.layout = layout_e::NHWC;
+    const size_t offset = img_width * img_height;
+
+    ie_blob_t *blob_y = nullptr, *blob_u = nullptr, *blob_v = nullptr, *blob_i420 = nullptr;
+    IE_EXPECT_OK(ie_blob_make_memory_from_preallocated(&tensor_y, img_data, img_width * img_height, &blob_y));
+    IE_EXPECT_OK(ie_blob_make_memory_from_preallocated(&tensor_u, img_data + offset, img_width * (img_height / 4), &blob_u));
+    IE_EXPECT_OK(ie_blob_make_memory_from_preallocated(&tensor_v, img_data + offset * 5 / 4, img_width * (img_height / 4), &blob_v));
+    IE_EXPECT_OK(ie_blob_make_memory_i420(blob_y, blob_u, blob_v, &blob_i420));
+
+    IE_EXPECT_OK(ie_infer_request_set_blob(infer_request, "data", blob_i420));
+    IE_EXPECT_OK(ie_infer_request_infer(infer_request));
+
+    ie_blob_t *output_blob = nullptr;
+    IE_EXPECT_OK(ie_infer_request_get_blob(infer_request, "fc_out", &output_blob));
+
+    ie_blob_buffer_t buffer;
+    IE_EXPECT_OK(ie_blob_get_buffer(output_blob, &buffer));
+    float *output_data = (float *)(buffer.buffer);
+    EXPECT_NEAR(output_data[1], 0.f, 1.e-5);
+
+    ie_blob_free(&output_blob);
+    ie_blob_free(&blob_i420);
+    ie_blob_free(&blob_v);
+    ie_blob_free(&blob_u);
+    ie_blob_free(&blob_y);
+    ie_infer_request_free(&infer_request);
+    ie_exec_network_free(&exe_network);
+    ie_network_free(&network);
+    ie_core_free(&core);
+    free(img_data);
+}
+
+int main(int argc, char *argv[]) {
+    ::testing::InitGoogleTest(&argc, argv);
+
+    return RUN_ALL_TESTS();
+}
diff --git a/inference-engine/ie_bridges/c/tests/test_model_repo.hpp b/inference-engine/ie_bridges/c/tests/test_model_repo.hpp
new file mode 100644 (file)
index 0000000..62bac0b
--- /dev/null
@@ -0,0 +1,53 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <cstdlib>  // std::getenv
+#include <string>
+namespace TestDataHelpers {
+
+static const char kPathSeparator =
+#if defined _WIN32 || defined __CYGWIN__
+        '\\';
+#else
+        '/';
+#endif
+
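+// Resolves the model repository root: the MODELS_PATH environment variable
+// takes precedence over a MODELS_PATH compile-time definition; an empty
+// string is returned when neither is set.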
+std::string getModelPathNonFatal() noexcept {
+    if (const auto envVar = std::getenv("MODELS_PATH")) {
+        return envVar;
+    }
+
+#ifdef MODELS_PATH
+    return MODELS_PATH;
+#else
+    return "";
+#endif
+}
+
+std::string get_models_path() {
+    return getModelPathNonFatal() + kPathSeparator + std::string("models");
+}
+
+std::string get_data_path() {
+    if (const auto envVar = std::getenv("DATA_PATH")) {
+        return envVar;
+    }
+
+#ifdef DATA_PATH
+    return DATA_PATH;
+#else
+    return "";
+#endif
+}
+
+std::string generate_model_path(std::string dir, std::string filename) {
+    return get_models_path() + kPathSeparator + dir + kPathSeparator + filename;
+}
+
+std::string generate_image_path(std::string dir, std::string filename) {
+    return get_data_path() + kPathSeparator + "validation_set" + kPathSeparator + dir + kPathSeparator + filename;
+}
+
+std::string generate_ieclass_xml_path(std::string filename) {
+    return getModelPathNonFatal() + kPathSeparator + "ie_class" + kPathSeparator + filename;
+}
+} // namespace TestDataHelpers
\ No newline at end of file
index cbb6444..afb3c08 100644 (file)
@@ -36,4 +36,4 @@ If you want to use certain version of Python\*, set the environment variable `PY
 after running the environment configuration script.
 
 ## API Reference
-For the complete API Reference, see  [Inference Engine Python* API Reference](ie_python_api.html)
+For the complete API Reference, see  [Inference Engine Python* API Reference](ie_python_api/annotated.html)
index d964221..a58f35d 100644 (file)
@@ -152,7 +152,7 @@ def main():
 
     # Processing output blob
     log.info("Processing output blob")
-    res = infer_request.outputs[out_blob]
+    res = infer_request.output_blobs[out_blob]
     log.info("Top {} results: ".format(args.number_top))
     if args.labels:
         with open(args.labels, 'r') as f:
@@ -161,7 +161,7 @@ def main():
         labels_map = None
     classid_str = "classid"
     probability_str = "probability"
-    for i, probs in enumerate(res):
+    for i, probs in enumerate(res.buffer):
         probs = np.squeeze(probs)
         top_ind = np.argsort(probs)[-args.number_top:][::-1]
         print("Image {}\n".format(args.input[i]))
index 042091d..2ad199f 100644 (file)
@@ -6,7 +6,7 @@ set(TARGET_NAME "ie_api")
 set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/inference_engine)
 
 file(GLOB SOURCE
-        ${CMAKE_CURRENT_SOURCE_DIR}/*.pyx
+        ${CMAKE_CURRENT_SOURCE_DIR}/ie_api.pyx
         ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
 
 set_source_files_properties(${SOURCE} PROPERTIES CYTHON_IS_CXX ON)
@@ -15,6 +15,18 @@ set_source_files_properties(${SOURCE} PROPERTIES CYTHON_IS_CXX ON)
 
 cython_add_module(${TARGET_NAME} ${SOURCE})
 
+file(GLOB OTHER_SOURCES
+        ${CMAKE_CURRENT_SOURCE_DIR}/*.pyx)
+list(REMOVE_ITEM OTHER_SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/ie_api.pyx")
+
+foreach(PYX_FILE ${OTHER_SOURCES})
+    get_filename_component(PYX_NAME "${PYX_FILE}" NAME_WE)
+    set_source_files_properties(${PYX_FILE} PROPERTIES CYTHON_IS_CXX ON)
+    cython_add_module(${PYX_NAME} ${PYX_FILE})
+    target_include_directories(${PYX_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}")
+    target_link_libraries(${PYX_NAME} PRIVATE ${InferenceEngine_LIBRARIES})
+endforeach()
+
 function(python_disable_deprecated_warnings)
     disable_deprecated_warnings()
     set(pyx_file "${CMAKE_CURRENT_BINARY_DIR}/ie_api.cxx")
index fe127f4..e00ed92 100644 (file)
@@ -1,4 +1,4 @@
 from .ie_api import *
-__all__ = ['IENetwork', "IEPlugin", "IECore", "get_version"]
+__all__ = ['IENetwork', "IETensorDesc", "IECore", "IEBlob", "get_version"]
 __version__ = get_version()
 
diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/constants.pyx b/inference-engine/ie_bridges/python/src/openvino/inference_engine/constants.pyx
new file mode 100644 (file)
index 0000000..de6c2bc
--- /dev/null
@@ -0,0 +1,74 @@
+"""
+ Copyright (C) 2018-2020 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the 'License');
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an 'AS IS' BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+from .cimport ie_api_impl_defs as C
+
+import numpy as np
+
+supported_precisions = ["FP32", "FP16", "I64", "U64", "I32", "I16", "I8", "U16", "U8"]
+
+known_plugins = ['CPU', 'GPU', 'FPGA', 'MYRIAD', 'HETERO', 'HDDL', 'MULTI']
+
+layout_int_to_str_map = {0: "ANY", 1: "NCHW", 2: "NHWC", 3: "NCDHW", 4: "NDHWC", 64: "OIHW", 95: "SCALAR", 96: "C",
+                         128: "CHW", 192: "HW", 193: "NC", 194: "CN", 200: "BLOCKED"}
+
+format_map = {
+      'FP32' : np.float32,
+      'I32'  : np.int32,
+      'FP16' : np.float16,
+      'I16'  : np.int16,
+      'U16'  : np.uint16,
+      'I8'   : np.int8,
+      'U8'   : np.uint8,
+      'I64'  : np.int64
+    }
+
+layout_str_to_enum = {'ANY': C.Layout.ANY,
+                      "NHWC": C.Layout.NHWC,
+                      "NCHW": C.Layout.NCHW,
+                      "NCDHW": C.Layout.NCDHW,
+                      "NDHWC": C.Layout.NDHWC,
+                      "OIHW": C.Layout.OIHW,
+                      "GOIHW": C.Layout.GOIHW,
+                      "OIDHW": C.Layout.OIDHW,
+                      "GOIDHW": C.Layout.GOIDHW,
+                      "SCALAR": C.Layout.SCALAR,
+                      "C": C.Layout.C,
+                      "CHW": C.Layout.CHW,
+                      "HW": C.Layout.HW,
+                      "NC": C.Layout.NC,
+                      "CN": C.Layout.CN,
+                      "BLOCKED": C.Layout.BLOCKED
+                      }
+
+
+cpdef enum StatusCode:
+    OK = 0
+    GENERAL_ERROR = -1
+    NOT_IMPLEMENTED = -2
+    NETWORK_NOT_LOADED = -3
+    PARAMETER_MISMATCH = -4
+    NOT_FOUND = -5
+    OUT_OF_BOUNDS = -6
+    UNEXPECTED = -7
+    REQUEST_BUSY = -8
+    RESULT_NOT_READY = -9
+    NOT_ALLOCATED = -10
+    INFER_NOT_STARTED = -11
+    NETWORK_NOT_READ = -12
+
+cpdef enum WaitMode:
+    RESULT_READY = -1
+    STATUS_ONLY = 0
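+
+# Usage sketch (assuming `exec_net` is an ExecutableNetwork with async requests):
+#   status = exec_net.requests[0].wait(WaitMode.STATUS_ONLY)
+#   if status == StatusCode.OK:
+#       results = exec_net.requests[0].output_blobs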
index 990cfa5..2944345 100644 (file)
@@ -1,17 +1,24 @@
 from .cimport ie_api_impl_defs as C
 from .ie_api_impl_defs cimport Blob, TensorDesc
 
+from pathlib import Path
+
 from libcpp.string cimport string
 from libcpp.vector cimport vector
 from libcpp cimport bool
 from libcpp.memory cimport unique_ptr, shared_ptr
 
+cdef class IEBlob:
+    cdef Blob.Ptr _ptr
+    cdef public object _array_data
+    cdef public object _initial_shape
+
 cdef class BlobBuffer:
     cdef Blob.Ptr ptr
     cdef char*format
     cdef vector[Py_ssize_t] shape
     cdef vector[Py_ssize_t] strides
-    cdef reset(self, Blob.Ptr &)
+    cdef reset(self, Blob.Ptr &, vector[size_t] representation_shape = ?)
     cdef char*_get_blob_format(self, const TensorDesc & desc)
 
     cdef public:
@@ -28,7 +35,7 @@ cdef class InferRequest:
     cpdef get_perf_counts(self)
     cdef void user_callback(self, int status) with gil
     cdef public:
-        _inputs_list, _outputs_list, _py_callback, _py_data, _py_callback_used, _py_callback_called
+        _inputs_list, _outputs_list, _py_callback, _py_data, _py_callback_used, _py_callback_called, _user_blobs
 
 cdef class IENetwork:
     cdef C.IENetwork impl
@@ -55,7 +62,7 @@ cdef class LayersStatsMap(dict):
 
 cdef class IECore:
     cdef C.IECore impl
-    cpdef IENetwork read_network(self, model : [str, bytes], weights : [str, bytes] = ?, bool init_from_buffer = ?)
+    cpdef IENetwork read_network(self, model : [str, bytes, Path], weights : [str, bytes, Path] = ?, bool init_from_buffer = ?)
     cpdef ExecutableNetwork load_network(self, IENetwork network, str device_name, config = ?, int num_requests = ?)
     cpdef ExecutableNetwork import_network(self, str model_file, str device_name, config = ?, int num_requests = ?)
 
@@ -68,3 +75,6 @@ cdef class CDataPtr:
 
 cdef class IENetLayer:
     cdef C.CNNLayerPtr _ptr
+
+cdef class IETensorDesc:
+    cdef C.TensorDesc impl
index 648fa6a..f60fd43 100644 (file)
@@ -1,23 +1,28 @@
 #distutils: language=c++
 from cython.operator cimport dereference as deref
-from .cimport ie_api_impl_defs as C
-from .ie_api_impl_defs cimport Blob, TensorDesc, SizeVector, Precision
 from libcpp.string cimport string
 from libcpp.vector cimport vector
 from libcpp cimport bool
 from libcpp.pair cimport pair
 from libcpp.map cimport map
-from libcpp.memory cimport unique_ptr, shared_ptr
+from libcpp.memory cimport unique_ptr
 from libc.stdlib cimport malloc, free
-from libc.stdint cimport int64_t, uint8_t
+from libc.stdint cimport int64_t, uint8_t, int8_t, int32_t, uint16_t, int16_t
 from libc.string cimport memcpy
+
 import os
-import numpy as np
-from copy import deepcopy
+from pathlib import Path
+import threading
 import warnings
+from copy import deepcopy
 from collections import OrderedDict, namedtuple
-from collections import OrderedDict
-import threading
+
+from .cimport ie_api_impl_defs as C
+from .ie_api_impl_defs cimport SizeVector, Precision
+from .constants import supported_precisions, known_plugins, layout_int_to_str_map, \
+                       format_map, layout_str_to_enum, StatusCode, WaitMode
+
+import numpy as np
 
 cdef extern from "<utility>" namespace "std" nogil:
     cdef unique_ptr[C.IEExecNetwork] move(unique_ptr[C.IEExecNetwork])
@@ -42,53 +47,168 @@ cdef c_map_to_dict(map[string, string] c_map):
         py_dict[v.first.decode()] = v.second.decode()
     return py_dict
 
-supported_precisions = ["FP32", "FP16", "I64", "U64", "I32", "I16", "I8", "U16", "U8"]
-
-layout_int_to_str_map = {0: "ANY", 1: "NCHW", 2: "NHWC", 3: "NCDHW", 4: "NDHWC", 64: "OIHW", 95: "SCALAR", 96: "C",
-                         128: "CHW", 192: "HW", 193: "NC", 194: "CN", 200: "BLOCKED"}
-layout_str_to_enum = {'ANY': C.Layout.ANY,
-                      "NHWC": C.Layout.NHWC,
-                      "NCHW": C.Layout.NCHW,
-                      "NCDHW": C.Layout.NCDHW,
-                      "NDHWC": C.Layout.NDHWC,
-                      "OIHW": C.Layout.OIHW,
-                      "GOIHW": C.Layout.GOIHW,
-                      "OIDHW": C.Layout.OIDHW,
-                      "GOIDHW": C.Layout.GOIDHW,
-                      "SCALAR": C.Layout.SCALAR,
-                      "C": C.Layout.C,
-                      "CHW": C.Layout.CHW,
-                      "HW": C.Layout.HW,
-                      "NC": C.Layout.NC,
-                      "CN": C.Layout.CN,
-                      "BLOCKED": C.Layout.BLOCKED
-
-                      }
-
-known_plugins = ['CPU', 'GPU', 'FPGA', 'MYRIAD', 'HETERO', 'HDDL', 'MULTI']
-
-cpdef enum StatusCode:
-    OK = 0
-    GENERAL_ERROR = -1
-    NOT_IMPLEMENTED = -2
-    NETWORK_NOT_LOADED = -3
-    PARAMETER_MISMATCH = -4
-    NOT_FOUND = -5
-    OUT_OF_BOUNDS = -6
-    UNEXPECTED = -7
-    REQUEST_BUSY = -8
-    RESULT_NOT_READY = -9
-    NOT_ALLOCATED = -10
-    INFER_NOT_STARTED = -11
-    NETWORK_NOT_READ = -12
-
-cpdef enum WaitMode:
-    RESULT_READY = -1
-    STATUS_ONLY = 0
 
 def get_version():
     return C.get_version().decode()
 
+## This class defines Tensor description
+cdef class IETensorDesc:
+    def __eq__(self, other : IETensorDesc):
+        return self.layout == other.layout and self.precision == other.precision and self.dims == other.dims
+    def __ne__(self, other : IETensorDesc):
+        return self.layout != other.layout or self.precision != other.precision or self.dims != other.dims
+    def __deepcopy__(self, memodict={}):
+        return IETensorDesc(deepcopy(self.precision, memodict), deepcopy(self.dims, memodict), deepcopy(self.layout, memodict))
+    ## Class constructor
+    # @param precision: target memory precision
+    # @param dims: target memory dimensions
+    # @param layout: target memory layout
+    # @return Instance of the IETensorDesc class
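+    #
+    #  Usage example (a minimal sketch; the shape and layout values are illustrative):\n
+    #  ```python
+    #  td = IETensorDesc(precision="FP32", dims=(1, 3, 224, 224), layout="NCHW")
+    #  ```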
+    def __cinit__(self, precision : str, dims : [list, tuple], layout : str):
+        if precision not in supported_precisions:
+            raise ValueError("Unsupported precision {}! List of supported precisions: {}".format(precision,
+                                                                                                 supported_precisions))
+        self.impl = C.TensorDesc(C.Precision.FromStr(precision.encode()), dims, layout_str_to_enum[layout])
+    ## Shape (dimensions) of the IETensorDesc object
+    @property
+    def dims(self):
+        return self.impl.getDims()
+    @dims.setter
+    def dims(self, dims_array : [list, tuple]):
+        self.impl.setDims(dims_array)
+    ## Precision of the IETensorDesc object
+    @property
+    def precision(self):
+        return self.impl.getPrecision().name().decode()
+    @precision.setter
+    def precision(self, precision : str):
+        if precision not in supported_precisions:
+            raise ValueError("Unsupported precision {}! List of supported precisions: {}".format(precision,
+                                                                                                 supported_precisions))
+        self.impl.setPrecision(C.Precision.FromStr(precision.encode()))
+    ## Layout of the IETensorDesc object
+    @property
+    def layout(self):
+        return layout_int_to_str_map[self.impl.getLayout()]
+    @layout.setter
+    def layout(self, layout : str):
+        if layout not in layout_str_to_enum.keys():
+            raise ValueError("Unsupported layout {}! "
+                             "List of supported layouts: {}".format(layout, list(layout_str_to_enum.keys())))
+        self.impl.setLayout(layout_str_to_enum[layout])
+
+## This class represents Blob
+cdef class IEBlob:
+    ## Class constructor
+    # @param tensor_desc: IETensorDesc object describing the IEBlob object to create.
+    # @param array: numpy.ndarray with data to fill the blob memory. The array must contain the same number
+    #               of elements as specified in the tensor_desc.dims attribute, with an element type matching
+    #               tensor_desc.precision. If the array isn't provided, an empty blob is allocated according
+    #               to the parameters of tensor_desc.
+    # @return Instance of IEBlob class
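+    #
+    #  Usage example (a minimal sketch; the shape is illustrative):\n
+    #  ```python
+    #  td = IETensorDesc("FP32", (1, 3, 224, 224), "NCHW")
+    #  blob = IEBlob(td, np.ones(shape=(1, 3, 224, 224), dtype=np.float32))
+    #  ```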
+    def __cinit__(self, IETensorDesc tensor_desc = None, array : np.ndarray = None):
+        cdef TensorDesc c_tensor_desc
+        cdef float[::1] fp32_array_memview
+        cdef int16_t[::1] I16_array_memview
+        cdef uint16_t[::1] U16_array_memview
+        cdef uint8_t[::1] U8_array_memview
+        cdef int8_t[::1] I8_array_memview
+        cdef int32_t[::1] I32_array_memview
+        cdef int64_t[::1] I64_array_memview
+
+        self._array_data = array
+        self._initial_shape = array.shape if array is not None else None
+
+        if self._array_data is not None:
+            if np.isfortran(self._array_data):
+                self._array_data = self._array_data.ravel(order="F")
+            else:
+                self._array_data = self._array_data.ravel(order="C")
+        if self._array_data is None and tensor_desc is not None:
+            c_tensor_desc = tensor_desc.impl
+            precision = tensor_desc.precision
+            if precision == "FP32":
+                self._ptr = C.make_shared_blob[float](c_tensor_desc)
+            elif precision == "FP16" or precision == "I16":
+                self._ptr = C.make_shared_blob[int16_t](c_tensor_desc)
+            elif precision == "Q78" or precision == "U16":
+                self._ptr = C.make_shared_blob[uint16_t](c_tensor_desc)
+            elif precision == "U8" or precision == "BOOL":
+                self._ptr = C.make_shared_blob[uint8_t](c_tensor_desc)
+            elif precision == "I8" or precision == "BIN":
+                self._ptr = C.make_shared_blob[int8_t](c_tensor_desc)
+            elif precision == "I32":
+                self._ptr = C.make_shared_blob[int32_t](c_tensor_desc)
+            elif precision == "I64":
+                self._ptr = C.make_shared_blob[int64_t](c_tensor_desc)
+            else:
+                raise AttributeError("Unsupported precision {} for blob".format(precision))
+            deref(self._ptr).allocate()
+        elif tensor_desc is not None and self._array_data is not None:
+            c_tensor_desc = tensor_desc.impl
+            precision = tensor_desc.precision
+            size_arr = np.prod(array.shape)
+            size_td = np.prod(tensor_desc.dims)
+            if size_arr != size_td:
+                raise AttributeError("Number of elements in provided numpy array {} and "
+                                     "required by TensorDesc {} are not equal".format(size_arr, size_td))
+            if self._array_data.dtype != format_map[precision]:
+                raise ValueError("Data type {} of provided numpy array "
+                                 "doesn't match to TensorDesc precision {}".format(self._array_data.dtype, precision))
+            if not self._array_data.flags['C_CONTIGUOUS']:
+                self._array_data = np.ascontiguousarray(self._array_data)
+            if precision == "FP32":
+                fp32_array_memview = self._array_data
+                self._ptr = C.make_shared_blob[float](c_tensor_desc, &fp32_array_memview[0], fp32_array_memview.shape[0])
+            elif precision == "FP16":
+                raise RuntimeError("Currently, it's impossible to set_blob with FP16 precision")
+            elif precision == "I16":
+                I16_array_memview = self._array_data
+                self._ptr = C.make_shared_blob[int16_t](c_tensor_desc, &I16_array_memview[0], I16_array_memview.shape[0])
+            elif precision == "Q78" or precision == "U16":
+                U16_array_memview = self._array_data
+                self._ptr = C.make_shared_blob[uint16_t](c_tensor_desc, &U16_array_memview[0], U16_array_memview.shape[0])
+            elif precision == "U8" or precision == "BOOL":
+                U8_array_memview = self._array_data
+                self._ptr = C.make_shared_blob[uint8_t](c_tensor_desc, &U8_array_memview[0], U8_array_memview.shape[0])
+            elif precision == "I8" or precision == "BIN":
+                I8_array_memview = self._array_data
+                self._ptr = C.make_shared_blob[int8_t](c_tensor_desc, &I8_array_memview[0], I8_array_memview.shape[0])
+            elif precision == "I32":
+                I32_array_memview = self._array_data
+                self._ptr = C.make_shared_blob[int32_t](c_tensor_desc, &I32_array_memview[0], I32_array_memview.shape[0])
+            elif precision == "I64":
+                I64_array_memview = self._array_data
+                self._ptr = C.make_shared_blob[int64_t](c_tensor_desc, &I64_array_memview[0], I64_array_memview.shape[0])
+            else:
+                raise AttributeError("Unsupported precision {} for blob".format(precision))
+
+    def __deepcopy__(self, memodict):
+        res = IEBlob(deepcopy(self.tensor_desc, memodict), deepcopy(self._array_data, memodict))
+        res.buffer[:] = deepcopy(self.buffer[:], memodict)
+        return res
+
+    ## IEBlob's memory as numpy.ndarray representation
+    @property
+    def buffer(self):
+        representation_shape = self._initial_shape if self._initial_shape is not None else []
+        cdef BlobBuffer buffer = BlobBuffer()
+        buffer.reset(self._ptr, representation_shape)
+        return buffer.to_numpy()
+
+    ## IETensorDesc of created IEBlob
+    @property
+    def tensor_desc(self):
+        cdef TensorDesc c_tensor_desc = deref(self._ptr).getTensorDesc()
+        precision = c_tensor_desc.getPrecision().name().decode()
+        layout = c_tensor_desc.getLayout()
+        dims = c_tensor_desc.getDims()
+        tensor_desc = IETensorDesc(precision, dims, layout_int_to_str_map[layout])
+        return tensor_desc
+
 ## This class represents an Inference Engine entity and allows you to manipulate with plugins using unified interfaces.
 cdef class IECore:
     ## Class constructor
@@ -133,7 +253,7 @@ cdef class IECore:
     #  ie = IECore()
     #  net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
     #  ```
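+    #
+    #  `model` and `weights` may also be passed as `pathlib.Path` objects
+    #  (a sketch, assuming the files exist):\n
+    #  ```python
+    #  from pathlib import Path
+    #  net = ie.read_network(model=Path(path_to_xml_file), weights=Path(path_to_bin_file))
+    #  ```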
-    cpdef IENetwork read_network(self, model: [str, bytes], weights: [str, bytes] = "", init_from_buffer: bool = False):
+    cpdef IENetwork read_network(self, model: [str, bytes, Path], weights: [str, bytes, Path] = "", init_from_buffer: bool = False):
         cdef char*xml_buffer
         cdef uint8_t*bin_buffer
         cdef string weights_
@@ -148,12 +268,20 @@ cdef class IECore:
             net.impl = self.impl.readNetwork(xml_buffer, bin_buffer, len(weights))
             free(xml_buffer)
         else:
-            if not os.path.isfile(model):
-                raise Exception("Path to the model {} doesn't exists or it's a directory".format(model))
-            if not os.path.isfile(weights):
-                raise Exception("Path to the weights {} doesn't exists or it's a directory".format(weights))
-            model_ = model.encode()
-            weights_ = weights.encode()
+            if isinstance(model, Path) and isinstance(weights, Path):
+                if not model.is_file():
+                    raise Exception("Path to the model {} doesn't exist or it's a directory".format(model))
+                if not weights.is_file():
+                    raise Exception("Path to the weights {} doesn't exist or it's a directory".format(weights))
+                model_ = bytes(model)
+                weights_ = bytes(weights)
+            else:
+                if not os.path.isfile(model):
+                    raise Exception("Path to the model {} doesn't exist or it's a directory".format(model))
+                if not os.path.isfile(weights):
+                    raise Exception("Path to the weights {} doesn't exist or it's a directory".format(weights))
+                model_ = model.encode()
+                weights_ = weights.encode()
             net.impl =  self.impl.readNetwork(model_, weights_)
         return net
 
@@ -249,7 +377,7 @@ cdef class IECore:
     #  ```python
     #  ie = IECore()
     #  net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
-    #  ie.set_config({"DYN_BATCH_ENABLED": "YES"})
+    #  ie.set_config(config={"DYN_BATCH_ENABLED": "YES"}, device_name="CPU")
     #  ```
     def set_config(self, config: dict, device_name: str):
         cdef map[string, string] c_config = dict_to_c_map(config)
@@ -463,7 +591,7 @@ cdef class ExecutableNetwork:
     #  ```python
     #  ie_core = IECore()
     #  net = ie_core.read_network(model=path_to_xml_file, weights=path_to_bin_file)
-    #  exec_net = ie_core.load_network(net, device, num_requests=2)
+    #  exec_net = ie_core.load_network(network=net, device_name="CPU", num_requests=2)
     #  res = exec_net.infer({'data': img})
     #  res
     #  {'prob': array([[[[2.83426580e-08]],
@@ -476,7 +604,11 @@ cdef class ExecutableNetwork:
     def infer(self, inputs=None):
         current_request = self.requests[0]
         current_request.infer(inputs)
-        return deepcopy(current_request.outputs)
+        res = {}
+        for out in current_request._outputs_list:
+            res[out] = deepcopy(current_request.output_blobs[out].buffer)
+        return res
+
 
     ## Starts asynchronous inference for specified infer request.
     #  Wraps `async_infer()` method of the `InferRequest` class.
@@ -489,7 +621,7 @@ cdef class ExecutableNetwork:
     #  ```python
     #  infer_request_handle = exec_net.start_async(request_id=0, inputs={input_blob: image})
     #  infer_status = infer_request_handle.wait()
-    #  res = infer_request_handle.outputs[out_blob_name]
+    #  res = infer_request_handle.output_blobs[out_blob_name]
     #  ```
     def start_async(self, request_id, inputs=None):
         if request_id not in list(range(len(self.requests))):
@@ -542,7 +674,7 @@ cdef class ExecutableNetwork:
     #  ```python
     #  ie_core = IECore()
     #  net = ie_core.read_network(model=path_to_xml_file, weights=path_to_bin_file)
-    #  exec_net = ie_core.load_network(net, device, num_requsts=2)
+    #  exec_net = ie_core.load_network(net, device, num_requests=2)
     #  exec_graph = exec_net.get_exec_graph_info()
     #  ```
     def get_exec_graph_info(self):
@@ -575,7 +707,7 @@ cdef class ExecutableNetwork:
     #  ie = IECore()
     #  net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
     #  exec_net = ie.load_network(net, "CPU")
-    #  exec_net.get_metric("DEVICE_ID")
+    #  config = exec_net.get_config("CPU_BIND_THREAD")
     #  ```
     def get_config(self, config_name: str):
         return deref(self.impl).getConfig(config_name.encode())
@@ -620,6 +752,7 @@ cdef class InferRequest:
     #  method of the `IECore` class with specified number of requests to get `ExecutableNetwork` instance
     #  which stores infer requests.
     def __init__(self):
+        self._user_blobs = {}
         self._inputs_list = []
         self._outputs_list = []
         self._py_callback = lambda *args, **kwargs: None
@@ -629,8 +762,9 @@ cdef class InferRequest:
 
     cdef void user_callback(self, int status) with gil:
         if self._py_callback:
-            self._py_callback(status, self._py_data)
+            # Set the flag first, since the user may call wait() from inside the callback
             self._py_callback_called.set()
+            self._py_callback(status, self._py_data)
 
     ## Description: Sets a callback function that is called on success or failure of an asynchronous request
     #
@@ -663,6 +797,48 @@ cdef class InferRequest:
         buffer.reset(blob_ptr)
         return buffer
 
+    ## Dictionary that maps input layer names to corresponding IEBlobs
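+    #
+    #  Usage example (a sketch; the "data" input name is assumed):\n
+    #  ```python
+    #  request = exec_net.requests[0]
+    #  request.input_blobs["data"].buffer[:] = image
+    #  ```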
+    @property
+    def input_blobs(self):
+        input_blobs = {}
+        for input in self._inputs_list:
+            # TODO: will not work for setting data via .inputs['data'][:]
+            if input in self._user_blobs:
+                input_blobs[input] = self._user_blobs[input]
+            else:
+                blob = IEBlob()
+                deref(self.impl).getBlobPtr(input.encode(), blob._ptr)
+                input_blobs[input] = blob
+        return input_blobs
+
+    ## Dictionary that maps output layer names to corresponding IEBlobs
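+    #
+    #  Usage example (a sketch; the "prob" output name is assumed):\n
+    #  ```python
+    #  request = exec_net.requests[0]
+    #  request.infer({input_blob: image})
+    #  res = request.output_blobs["prob"].buffer
+    #  ```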
+    @property
+    def output_blobs(self):
+        output_blobs = {}
+        for output in self._outputs_list:
+            blob = IEBlob()
+            deref(self.impl).getBlobPtr(output.encode(), blob._ptr)
+            output_blobs[output] = deepcopy(blob)
+        return output_blobs
+
+    ## Sets user defined IEBlob for the infer request
+    #  @param blob_name: A name of input blob
+    #  @param blob: IEBlob object to set for the infer request
+    #  @return None
+    #
+    #  Usage example:\n
+    #  ```python
+    #  ie = IECore()
+    #  net = ie.read_network(model="./model.xml", weights="./model.bin")
+    #  exec_net = ie.load_network(net, "CPU", num_requests=2)
+    #  td = IETensorDesc("FP32", (1, 3, 224, 224), "NCHW")
+    #  blob_data = np.ones(shape=(1, 3, 224, 224), dtype=np.float32)
+    #  blob = IEBlob(td, blob_data)
+    #  exec_net.requests[0].set_blob(blob_name="input_blob_name", blob=blob)
+    #  ```
+    def set_blob(self, blob_name : str, blob : IEBlob):
+        deref(self.impl).setBlob(blob_name.encode(), blob._ptr)
+        self._user_blobs[blob_name] = blob
+
     ## Starts synchronous inference of the infer request and fill outputs array
     #
     #  @param inputs: A dictionary that maps input layer names to `numpy.ndarray` objects of proper shape with
@@ -671,9 +847,9 @@ cdef class InferRequest:
     #
     #  Usage example:\n
     #  ```python
-    #  exec_net = ie_core.load_network(network=net, num_requests=2)
+    #  exec_net = ie_core.load_network(network=net, device_name="CPU", num_requests=2)
     #  exec_net.requests[0].infer({input_blob: image})
-    #  res = exec_net.requests[0].outputs['prob']
+    #  res = exec_net.requests[0].output_blobs['prob']
     #  np.flip(np.sort(np.squeeze(res)),0)
     #  array([4.85416055e-01, 1.70385033e-01, 1.21873841e-01, 1.18894853e-01,
     #         5.45198545e-02, 2.44456064e-02, 5.41366823e-03, 3.42589128e-03,
@@ -692,10 +868,10 @@ cdef class InferRequest:
     #
     #  Usage example:\n
     #  ```python
-    #  exec_net = ie_core.load_network(network=net, num_requests=2)
+    #  exec_net = ie_core.load_network(network=net, device_name="CPU", num_requests=2)
     #  exec_net.requests[0].async_infer({input_blob: image})
     #  request_status = exec_net.requests[0].wait()
-    #  res = exec_net.requests[0].outputs['prob']
+    #  res = exec_net.requests[0].output_blobs['prob']
     #  ```
     cpdef async_infer(self, inputs=None):
         if inputs is not None:
@@ -724,6 +900,11 @@ cdef class InferRequest:
             if status != StatusCode.RESULT_NOT_READY:
                 return status
             if not self._py_callback_called.is_set():
+                if timeout == WaitMode.RESULT_READY:
+                    timeout = None
+                if timeout is not None:
+                    # Convert milliseconds to seconds
+                    timeout = float(timeout)/1000
                 if not self._py_callback_called.wait(timeout):
                     return StatusCode.REQUEST_BUSY
             return StatusCode.OK
@@ -741,7 +922,7 @@ cdef class InferRequest:
     #
     #  Usage example:
     #  ```python
-    #  exec_net = ie_core.load_network(network=net, num_requests=2)
+    #  exec_net = ie_core.load_network(network=net, device_name="CPU", num_requests=2)
     #  exec_net.requests[0].infer({input_blob: image})
     #  exec_net.requests[0].get_perf_counts()
     #  {'Conv2D': {'exec_type': 'jit_avx2_1x1',
@@ -772,6 +953,9 @@ cdef class InferRequest:
     #  objects of proper shape with input data for the layer
     @property
     def inputs(self):
+        warnings.filterwarnings("always", category=DeprecationWarning)
+        warnings.warn("'inputs' property of InferRequest is deprecated. Please use the 'input_blobs' property instead.",
+                      DeprecationWarning)
         inputs = {}
         for input in self._inputs_list:
             inputs[input] = self._get_blob_buffer(input.encode()).to_numpy()
@@ -780,6 +964,9 @@ cdef class InferRequest:
     ## A dictionary that maps output layer names to `numpy.ndarray` objects with output data of the layer
     @property
     def outputs(self):
+        warnings.filterwarnings("always", category=DeprecationWarning)
+        warnings.warn("'outputs' property of InferRequest is deprecated. Please use the 'output_blobs' property instead.",
+                      DeprecationWarning)
         outputs = {}
         for output in self._outputs_list:
             outputs[output] = self._get_blob_buffer(output.encode()).to_numpy()
@@ -804,8 +991,8 @@ cdef class InferRequest:
     #  net = ie.read_network(model=path_to_xml_file, weights=path_to_bin_file)
     #  # Set max batch size
     #  net.batch = 10
-    #  ie.set_config({"DYN_BATCH_ENABLED": "YES"})
-    #  exec_net = ie.load_network(network=net)
+    #  ie.set_config(config={"DYN_BATCH_ENABLED": "YES"}, device_name=device)
+    #  exec_net = ie.load_network(network=net, device_name=device)
     #  # Set batch size for certain network.
     #  # NOTE: Input data shape will not be changed, but will be used partially in inference which increases performance
     #  exec_net.requests[0].set_batch(2)
@@ -818,7 +1005,7 @@ cdef class InferRequest:
     def _fill_inputs(self, inputs):
         for k, v in inputs.items():
             assert k in self._inputs_list, "No input with name {} found in network".format(k)
-            self.inputs[k][:] = v
+            self.input_blobs[k].buffer[:] = v
 
 
 ## Layer calibration statistic container.
@@ -1085,9 +1272,9 @@ cdef class IENetwork:
                           "Please, use IECore.read_network() method instead",
                           DeprecationWarning)
                 if not os.path.isfile(model):
-                    raise Exception("Path to the model {} doesn't exists or it's a directory".format(model))
+                    raise Exception("Path to the model {} doesn't exist or it's a directory".format(model))
                 if not os.path.isfile(weights):
-                    raise Exception("Path to the weights {} doesn't exists or it's a directory".format(weights))
+                    raise Exception("Path to the weights {} doesn't exist or it's a directory".format(weights))
                 model_ = model.encode()
                 weights_ = weights.encode()
                 self.impl = C.IENetwork(model_, weights_)
@@ -1320,8 +1507,8 @@ cdef class IEPlugin:
     #  Usage example:\n
     #  ```python
     #  net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
-    #  plugin = IEPlugin(device="CPU")
-    #  exec_net = plugin.load(network=net, num_requsts=2)
+    #  ie = IECore()
+    #  exec_net = ie.load_network(network=net, device_name="CPU", num_requests=2)
     #  ```
     cpdef ExecutableNetwork load(self, IENetwork network, int num_requests=1, config=None):
         cdef ExecutableNetwork exec_net = ExecutableNetwork()
@@ -1389,10 +1576,14 @@ cdef class IEPlugin:
 cdef class BlobBuffer:
     """Copy-less accessor for Inference Engine Blob"""
 
-    cdef reset(self, Blob.Ptr & ptr):
+    cdef reset(self, Blob.Ptr & ptr, vector[size_t] representation_shape = []):
         self.ptr = ptr
         cdef TensorDesc desc = deref(ptr).getTensorDesc()
-        cdef SizeVector shape = desc.getDims()
+        cdef SizeVector shape
+        if len(representation_shape) == 0:
+            shape = desc.getDims()
+        else:
+            shape = representation_shape
         cdef Py_ssize_t itemsize = deref(ptr).element_size()
         self.strides.resize(shape.size())
         self.shape.resize(shape.size())
@@ -1437,7 +1628,6 @@ cdef class BlobBuffer:
             'I64': 'q',  # signed long int
             'U64': 'Q',  # unsigned long int
         }
-
         if name not in precision_to_format:
             raise ValueError("Unknown Blob precision: {}".format(name))
 
index 4ccfdcf..9fb786d 100644 (file)
@@ -437,10 +437,10 @@ PyObject *InferenceEnginePython::IEExecNetwork::getMetric(const std::string &met
     return parse_parameter(parameter);
 }
 
-PyObject *InferenceEnginePython::IEExecNetwork::getConfig(const std::string &metric_name) {
+PyObject *InferenceEnginePython::IEExecNetwork::getConfig(const std::string &name) {
     InferenceEngine::Parameter parameter;
     InferenceEngine::ResponseDesc response;
-    IE_CHECK_CALL(actual->GetMetric(metric_name, parameter, &response));
+    IE_CHECK_CALL(actual->GetConfig(name, parameter, &response));
     return parse_parameter(parameter);
 }
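This hunk fixes `getConfig` to call `GetConfig` instead of `GetMetric`, so the two Python accessors now query different things, as the new tests expect. A short sketch against a loaded `exec_net`:

```python
# get_config returns load-time configuration, get_metric read-only properties
perf_count = exec_net.get_config("PERF_COUNT")  # "NO" unless profiling was enabled
net_name = exec_net.get_metric("NETWORK_NAME")  # model name reported by the plugin
```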
 
@@ -464,11 +464,17 @@ std::map <std::string, InferenceEngine::CDataPtr> InferenceEnginePython::IEExecN
     InferenceEngine::ConstOutputsDataMap outputsDataMap;
     InferenceEngine::ResponseDesc response;
     IE_CHECK_CALL(actual->GetOutputsInfo(outputsDataMap, &response));
-    std::map <std::string, InferenceEngine::CDataPtr> pyInputs;
+    std::map <std::string, InferenceEngine::CDataPtr> pyOutputs;
     for (const auto &item : outputsDataMap) {
-        pyInputs[item.first] = item.second;
+        pyOutputs[item.first] = item.second;
     }
-    return pyInputs;
+    return pyOutputs;
+}
+
+void InferenceEnginePython::InferRequestWrap::setBlob(const std::string &blob_name,
+                                                      const InferenceEngine::Blob::Ptr &blob_ptr) {
+    InferenceEngine::ResponseDesc response;
+    IE_CHECK_CALL(request_ptr->SetBlob(blob_name.c_str(), blob_ptr, &response));
 }
 
 void InferenceEnginePython::InferRequestWrap::getBlobPtr(const std::string &blob_name,
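The new `setBlob` wrapper lets Python hand a preallocated Blob to a request instead of copying into the request's own buffer. Assuming it is surfaced as `InferRequest.set_blob` (the Cython declaration below mirrors this signature), usage might look like:

```python
import numpy as np
from openvino.inference_engine import IETensorDesc, IEBlob

tensor_desc = IETensorDesc("FP32", [1, 3, 32, 32], "NCHW")
blob = IEBlob(tensor_desc, np.zeros((1, 3, 32, 32), dtype=np.float32))
exec_net.requests[0].set_blob('data', blob)  # 'set_blob' binding name is assumed
exec_net.requests[0].infer()
```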
index 3c3def0..ebebc79 100644 (file)
@@ -113,6 +113,8 @@ struct InferRequestWrap {
 
     void getBlobPtr(const std::string &blob_name, InferenceEngine::Blob::Ptr &blob_ptr);
 
+    void setBlob(const std::string &blob_name, const InferenceEngine::Blob::Ptr &blob_ptr);
+
     void setBatch(int size);
 
     std::map<std::string, InferenceEnginePython::ProfileInfo> getPerformanceCounts();
@@ -136,7 +138,7 @@ struct IEExecNetwork {
     std::map<std::string, InferenceEngine::CDataPtr> getOutputs();
 
     PyObject* getMetric(const std::string & metric_name);
-    PyObject* getConfig(const std::string & metric_name);
+    PyObject* getConfig(const std::string & name);
 
     int wait(int num_requests, int64_t timeout);
     int getIdleRequestId();
index 180d54e..e2e4e99 100644 (file)
@@ -12,30 +12,45 @@ from libc.stdint cimport int64_t, uint8_t
 cdef extern from "<inference_engine.hpp>" namespace "InferenceEngine":
     ctypedef vector[size_t] SizeVector
 
+    cdef cppclass TBlob[T]:
+        ctypedef shared_ptr[TBlob[T]] Ptr
+
+    cdef cppclass Blob:
+        ctypedef shared_ptr[Blob] Ptr
+        const TensorDesc& getTensorDesc()  except +
+        size_t element_size()  except +
+        void allocate()
+
+    cdef TBlob[Type].Ptr make_shared_blob[Type](const TensorDesc& tensorDesc)
+
+    cdef TBlob[Type].Ptr make_shared_blob[Type](const TensorDesc& tensorDesc, Type* ptr, size_t size)
 
     cdef cppclass TensorDesc:
-        SizeVector& getDims()
-        const Precision& getPrecision() const
+        TensorDesc() except +
+        TensorDesc(const Precision& precision, SizeVector dims, Layout layout) except +
+        SizeVector& getDims() except +
+        void setDims(const SizeVector& dims) except +
+        Layout getLayout() except +
+        void setLayout(Layout l) except +
+        const Precision& getPrecision() except +
+        void setPrecision(const Precision& p) except +
+
 
     cdef cppclass Data:
         const Precision getPrecision() const
         void setPrecision(const Precision& precision) const
-        const SizeVector getDims()
-        const string& getName() const
-        const Layout getLayout() const
-        void setLayout(Layout layout) const
-        const bool isInitialized() const
-        weak_ptr[CNNLayer] & getCreatorLayer()
-        map[string, shared_ptr[CNNLayer]] & getInputTo()
+        const SizeVector getDims() except +
+        const string& getName() except +
+        const Layout getLayout() except +
+        void setLayout(Layout layout) except +
+        const bool isInitialized() except +
+        weak_ptr[CNNLayer] & getCreatorLayer() except +
+        map[string, shared_ptr[CNNLayer]] & getInputTo() except +
 
     ctypedef shared_ptr[Data] DataPtr
     ctypedef weak_ptr[Data] DataWeakPtr
     ctypedef shared_ptr[const Data] CDataPtr
 
-    cdef cppclass Blob:
-        ctypedef shared_ptr[Blob] Ptr
-        const TensorDesc& getTensorDesc() const
-        size_t element_size()  const
 
     cdef cppclass Precision:
         const char*name() const
@@ -85,17 +100,6 @@ cdef extern from "<inference_engine.hpp>" namespace "InferenceEngine":
 
 cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
 
-#    cdef cppclass IENetLayer:
-#        string layout
-#        vector[string] children
-#        vector[string] parents
-#        void setAffinity(const string & target_affinity) except +
-#        void setParams(const map[string, string] & params_map) except +
-#        map[string, Blob.Ptr] getWeights() except +
-#        void setPrecision(string precision) except +
-#        vector[DataPtr] getOutData() except +
-
-
     cdef cppclass ProfileInfo:
         string status
         string exec_type
@@ -112,11 +116,11 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
     cdef cppclass IEExecNetwork:
         vector[InferRequestWrap] infer_requests
         IENetwork GetExecGraphInfo() except +
-        map[string, DataPtr] getInputs()
-        map[string, CDataPtr] getOutputs()
+        map[string, DataPtr] getInputs() except +
+        map[string, CDataPtr] getOutputs() except +
         void exportNetwork(const string & model_file) except +
-        object getMetric(const string & metric_name)
-        object getConfig(const string & metric_name)
+        object getMetric(const string & metric_name) except +
+        object getConfig(const string & metric_name) except +
         int wait(int num_requests, int64_t timeout)
         int getIdleRequestId()
 
@@ -158,6 +162,7 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
         double exec_time;
         int index;
         void getBlobPtr(const string & blob_name, Blob.Ptr & blob_ptr) except +
+        void setBlob(const string & blob_name, const Blob.Ptr & blob_ptr) except +
         map[string, ProfileInfo] getPerformanceCounts() except +
         void infer() except +
         void infer_async() except +
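Most of these declarations simply gain `except +`, so C++ failures inside the engine now propagate as Python exceptions instead of terminating the interpreter. For example, loading onto an unregistered device surfaces as a `RuntimeError` (model paths as resolved by the test conftest below):

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
try:
    ie.load_network(net, "BLA")  # unregistered device name
except RuntimeError as e:
    print(e)  # Device with "BLA" name is not registered in the InferenceEngine
```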
diff --git a/inference-engine/ie_bridges/python/tests/conftest.py b/inference-engine/ie_bridges/python/tests/conftest.py
new file mode 100644 (file)
index 0000000..81910f9
--- /dev/null
@@ -0,0 +1,38 @@
+import os
+import pytest
+
+
+def model_path(is_myriad=False):
+    if os.environ.get("MODELS_PATH"):
+        path_to_repo = os.environ.get("MODELS_PATH")
+    else:
+        raise EnvironmentError("MODELS_PATH variable isn't set")
+    if not is_myriad:
+        test_xml = os.path.join(path_to_repo, "models", "test_model", 'test_model_fp32.xml')
+        test_bin = os.path.join(path_to_repo, "models", "test_model", 'test_model_fp32.bin')
+    else:
+        test_xml = os.path.join(path_to_repo, "models", "test_model", 'test_model_fp16.xml')
+        test_bin = os.path.join(path_to_repo, "models", "test_model", 'test_model_fp16.bin')
+    return (test_xml, test_bin)
+
+def image_path():
+    if os.environ.get("DATA_PATH"):
+        path_to_repo = os.environ.get("DATA_PATH")
+    else:
+        raise EnvironmentError("DATA_PATH variable isn't set")
+    path_to_img = os.path.join(path_to_repo, 'validation_set', '224x224', 'dog.bmp')
+    return path_to_img
+
+def plugins_path():
+    if os.environ.get("DATA_PATH"):
+        path_to_repo = os.environ.get("DATA_PATH")
+    else:
+        raise EnvironmentError("DATA_PATH variable isn't set")
+    plugins_xml = os.path.join(path_to_repo, 'ie_class', 'plugins.xml')
+    plugins_win_xml = os.path.join(path_to_repo, 'ie_class', 'plugins_mingw.xml')
+    plugins_osx_xml = os.path.join(path_to_repo, 'ie_class', 'plugins_apple.xml')
+    return (plugins_xml, plugins_win_xml, plugins_osx_xml)
+
+@pytest.fixture(scope='session')
+def device():
+    return os.environ.get("TEST_DEVICE") if os.environ.get("TEST_DEVICE") else "CPU"
diff --git a/inference-engine/ie_bridges/python/tests/test_CDataPtr.py b/inference-engine/ie_bridges/python/tests/test_CDataPtr.py
new file mode 100644 (file)
index 0000000..a5df7b6
--- /dev/null
@@ -0,0 +1,54 @@
+import pytest
+
+from openvino.inference_engine import CDataPtr, IECore
+from conftest import model_path
+
+
+test_net_xml, test_net_bin = model_path()
+
+def test_name(device):
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie.load_network(net, device, num_requests=5)
+    assert isinstance(exec_net.outputs['fc_out'], CDataPtr)
+    assert exec_net.outputs['fc_out'].name == "fc_out", "Incorrect name for layer 'fc_out'"
+
+
+def test_precision(device):
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie.load_network(net, device, num_requests=5)
+    assert isinstance(exec_net.outputs['fc_out'], CDataPtr)
+    assert exec_net.outputs['fc_out'].precision == "FP32", "Incorrect precision for layer 'fc_out'"
+
+
+def test_no_precision_setter(device):
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie.load_network(net, device, num_requests=5)
+    with pytest.raises(AttributeError) as e:
+        exec_net.outputs['fc_out'].precision = "I8"
+    assert "attribute 'precision' of 'openvino.inference_engine.ie_api.CDataPtr' objects is not writable" in str(e.value)
+
+
+def test_layout(device):
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie.load_network(net, device, num_requests=5)
+    assert exec_net.outputs['fc_out'].layout == "NC", "Incorrect layout for layer 'fc_out'"
+
+
+def test_no_layout_setter(device):
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie.load_network(net, device, num_requests=5)
+    with pytest.raises(AttributeError) as e:
+        exec_net.outputs['fc_out'].layout = "CN"
+    assert "attribute 'layout' of 'openvino.inference_engine.ie_api.CDataPtr' objects is not writable" in str(e.value)
+
+
+def test_initialized(device):
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie.load_network(net, device, num_requests=5)
+    assert exec_net.outputs['fc_out'].initialized, "Incorrect value for initialized property for layer 'fc_out'"
diff --git a/inference-engine/ie_bridges/python/tests/test_DataPtr.py b/inference-engine/ie_bridges/python/tests/test_DataPtr.py
new file mode 100644 (file)
index 0000000..2cb141d
--- /dev/null
@@ -0,0 +1,41 @@
+import pytest
+
+from openvino.inference_engine import IECore, IENetLayer, DataPtr
+from conftest import model_path
+
+
+test_net_xml, test_net_bin = model_path()
+
+def layer_out_data():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    return net.layers['19/Fused_Add_'].out_data[0]
+
+
+def test_name():
+    assert layer_out_data().name == "19/Fused_Add_", "Incorrect name for layer '19/Fused_Add_'"
+
+
+def test_precision():
+    assert layer_out_data().precision == "FP32", "Incorrect precision for layer '19/Fused_Add_'"
+
+
+def test_precision_setter():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    net.layers['19/Fused_Add_'].out_data[0].precision = "I8"
+    assert net.layers['19/Fused_Add_'].out_data[0].precision == "I8", "Incorrect precision for layer '19/Fused_Add_'"
+
+
+def test_incorrect_precision_setter():
+    with pytest.raises(ValueError) as e:
+        layer_out_data().precision = "123"
+    assert "Unsupported precision 123! List of supported precisions:" in str(e.value)
+
+
+def test_layout():
+    assert layer_out_data().layout == "NCHW", "Incorrect layout for layer '19/Fused_Add_'"
+
+
+def test_initialized():
+    assert layer_out_data().initialized, "Incorrect value for initialized property for layer '19/Fused_Add_'"
diff --git a/inference-engine/ie_bridges/python/tests/test_ExecutableNetwork.py b/inference-engine/ie_bridges/python/tests/test_ExecutableNetwork.py
new file mode 100644 (file)
index 0000000..3fb1690
--- /dev/null
@@ -0,0 +1,277 @@
+import numpy as np
+import os
+import pytest
+
+from openvino.inference_engine import ie_api as ie
+from conftest import model_path, image_path
+
+
+is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD"
+path_to_image = image_path()
+test_net_xml, test_net_bin = model_path(is_myriad)
+
+
+def read_image():
+    import cv2
+    n, c, h, w = (1, 3, 32, 32)
+    image = cv2.imread(path_to_image)
+    if image is None:
+        raise FileNotFoundError("Input image not found")
+
+    image = cv2.resize(image, (h, w)) / 255
+    image = image.transpose((2, 0, 1))
+    image = image.reshape((n, c, h, w))
+    return image
+
+
+def test_infer(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie_core.load_network(net, device)
+    img = read_image()
+    res = exec_net.infer({'data': img})
+    assert np.argmax(res['fc_out'][0]) == 2
+    del exec_net
+    del ie_core
+
+
+def test_infer_net_from_buffer(device):
+    ie_core = ie.IECore()
+    with open(test_net_bin, 'rb') as f:
+        bin = f.read()
+    with open(test_net_xml, 'rb') as f:
+        xml = f.read()
+    net = ie_core.read_network(model=xml, weights=bin, init_from_buffer=True)
+    net2 = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie_core.load_network(net, device)
+    exec_net2 = ie_core.load_network(net2, device)
+    img = read_image()
+    res = exec_net.infer({'data': img})
+    res2 = exec_net2.infer({'data': img})
+    del exec_net
+    del exec_net2
+    del ie_core
+    assert np.allclose(res['fc_out'], res2['fc_out'], atol=1E-4, rtol=1E-4)
+
+
+def test_infer_wrong_input_name(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie_core.load_network(net, device)
+    img = read_image()
+    with pytest.raises(AssertionError) as e:
+        exec_net.infer({'_data_': img})
+    assert "No input with name _data_ found in network" in str(e.value)
+    del exec_net
+    del ie_core
+
+
+def test_inputs(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie_core.load_network(net, device, num_requests=5)
+    assert len(exec_net.inputs) == 1
+    assert "data" in exec_net.inputs
+    assert isinstance(exec_net.inputs['data'], ie.DataPtr)
+    del exec_net
+    del ie_core
+
+
+def test_outputs(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie_core.load_network(net, device, num_requests=5)
+    assert len(exec_net.outputs) == 1
+    assert "fc_out" in exec_net.outputs
+    assert isinstance(exec_net.outputs['fc_out'], ie.CDataPtr)
+    del exec_net
+    del ie_core
+
+
+def test_access_requests(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie_core.load_network(net, device, num_requests=5)
+    assert len(exec_net.requests) == 5
+    assert isinstance(exec_net.requests[0], ie.InferRequest)
+    del exec_net
+    del ie_core
+
+
+def test_async_infer_one_req(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie_core.load_network(net, device, num_requests=1)
+    img = read_image()
+    request_handler = exec_net.start_async(request_id=0, inputs={'data': img})
+    request_handler.wait()
+    res = request_handler.output_blobs['fc_out'].buffer
+    assert np.argmax(res) == 2
+    del exec_net
+    del ie_core
+
+
+def test_async_infer_many_req(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie_core.load_network(net, device, num_requests=5)
+    img = read_image()
+    for id in range(5):
+        request_handler = exec_net.start_async(request_id=id, inputs={'data': img})
+        request_handler.wait()
+        res = request_handler.output_blobs['fc_out'].buffer
+        assert np.argmax(res) == 2
+    del exec_net
+    del ie_core
+
+
+def test_async_infer_many_req_get_idle(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
+    num_requests = 5
+    exec_net = ie_core.load_network(net, device, num_requests=num_requests)
+    img = read_image()
+    check_id = set()
+    for id in range(2*num_requests):
+        request_id = exec_net.get_idle_request_id()
+        if request_id == -1:
+            status = exec_net.wait(num_requests=1, timeout=ie.WaitMode.RESULT_READY)
+            assert status == ie.StatusCode.OK
+        request_id = exec_net.get_idle_request_id()
+        assert request_id >= 0
+        request_handler = exec_net.start_async(request_id=request_id, inputs={'data': img})
+        check_id.add(request_id)
+    status = exec_net.wait(timeout=ie.WaitMode.RESULT_READY)
+    assert status == ie.StatusCode.OK
+    for id in range(num_requests):
+        if id in check_id:
+            assert np.argmax(exec_net.requests[id].output_blobs['fc_out'].buffer) == 2
+    del exec_net
+    del ie_core
+
+
+def test_wait_before_start(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
+    num_requests = 5
+    exec_net = ie_core.load_network(net, device, num_requests=num_requests)
+    img = read_image()
+    requests = exec_net.requests
+    for id in range(num_requests):
+        status = requests[id].wait()
+        assert status == ie.StatusCode.INFER_NOT_STARTED
+        request_handler = exec_net.start_async(request_id=id, inputs={'data': img})
+        status = requests[id].wait()
+        assert status == ie.StatusCode.OK
+        assert np.argmax(request_handler.output_blobs['fc_out'].buffer) == 2
+    del exec_net
+    del ie_core
+
+
+def test_wrong_request_id(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie_core.load_network(net, device, num_requests=1)
+    img = read_image()
+    with pytest.raises(ValueError) as e:
+        exec_net.start_async(request_id=20, inputs={'data': img})
+    assert "Incorrect request_id specified!" in str(e.value)
+    del exec_net
+    del ie_core
+
+
+def test_wrong_num_requests(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
+    with pytest.raises(ValueError) as e:
+        ie_core.load_network(net, device, num_requests=-1)
+    assert "Incorrect number of requests specified: -1. Expected positive integer number or zero for auto detection" \
+           in str(e.value)
+    del ie_core
+
+def test_wrong_num_requests_core(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
+    with pytest.raises(ValueError) as e:
+        ie_core.load_network(net, device, num_requests=-1)
+    assert "Incorrect number of requests specified: -1. Expected positive integer number or zero for auto detection" \
+           in str(e.value)
+    del ie_core
+
+def test_plugin_accessible_after_deletion(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie_core.load_network(net, device)
+    del ie_core
+    img = read_image()
+    res = exec_net.infer({'data': img})
+    assert np.argmax(res['fc_out'][0]) == 2
+    del exec_net
+
+
+def test_exec_graph(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie_core.load_network(net, device)
+    img = read_image()
+    res = exec_net.infer({'data': img})
+    exec_graph = exec_net.get_exec_graph_info()
+    exec_graph_file = 'exec_graph.xml'
+    exec_graph.serialize(exec_graph_file)
+    assert os.path.exists(exec_graph_file)
+    os.remove(exec_graph_file)
+    del exec_net
+    del exec_graph
+    del ie_core
+
+
+@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "MYRIAD", reason="Device specific test. "
+                                                                             "Only MYRIAD plugin implements network export")
+def test_export_import():
+    ie_core = ie.IECore()
+    net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie_core.load_network(net, "MYRIAD")
+    exported_net_file = 'exported_model.bin'
+    exec_net.export(exported_net_file)
+    assert os.path.exists(exported_net_file)
+    exec_net = ie_core.import_network(exported_net_file, "MYRIAD")
+    os.remove(exported_net_file)
+    img = read_image()
+    res = exec_net.infer({'data': img})
+    assert np.argmax(res['fc_out'][0]) == 3
+    del exec_net
+    del ie_core
+
+
+def test_multi_out_data(device):
+    # Regression test CVS-23965
+    # Check that CDataPtr objects for all output layers are not copied between output map items
+    ie_core = ie.IECore()
+    net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
+    net.add_outputs(['28/Reshape'])
+    exec_net = ie_core.load_network(net, device)
+    assert "fc_out" in exec_net.outputs and "28/Reshape" in exec_net.outputs
+    assert isinstance(exec_net.outputs["fc_out"], ie.CDataPtr)
+    assert isinstance(exec_net.outputs["28/Reshape"], ie.CDataPtr)
+    assert exec_net.outputs["fc_out"].name == "fc_out" and exec_net.outputs["fc_out"].shape == [1, 10]
+    assert exec_net.outputs["28/Reshape"].name == "28/Reshape" and exec_net.outputs["28/Reshape"].shape == [1, 5184]
+    del ie_core
+
+# TODO: re-enable when cvs-29487 is fixed
+@pytest.mark.skip(reason="get_metric('NETWORK_NAME') returns wrong name, problem somewhere in ngraph")
+def test_get_metric(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie_core.load_network(net, "CPU")
+    network_name = exec_net.get_metric("NETWORK_NAME")
+    assert network_name == "test_model"
+
+
+@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test")
+def test_get_config(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie_core.load_network(net, device)
+    config = exec_net.get_config("PERF_COUNT")
+    assert config == "NO"
diff --git a/inference-engine/ie_bridges/python/tests/test_IEBlob.py b/inference-engine/ie_bridges/python/tests/test_IEBlob.py
new file mode 100644 (file)
index 0000000..eb4ff34
--- /dev/null
@@ -0,0 +1,125 @@
+import pytest
+
+import numpy as np
+
+from openvino.inference_engine import IETensorDesc, IEBlob
+from conftest import image_path
+
+
+path_to_image = image_path()
+
+def test_init_with_tensor_desc():
+    tensor_desc = IETensorDesc("FP32", [1, 3, 127, 127], "NHWC")
+    blob = IEBlob(tensor_desc)
+    assert isinstance(blob.buffer, np.ndarray)
+    assert blob.tensor_desc == tensor_desc
+
+
+def test_init_with_numpy():
+    tensor_desc = IETensorDesc("FP32", [1, 3, 127, 127], "NCHW")
+    array = np.ones(shape=(1, 3, 127, 127), dtype=np.float32)
+    blob = IEBlob(tensor_desc, array)
+    assert isinstance(blob.buffer, np.ndarray)
+    assert blob.tensor_desc == tensor_desc
+
+
+def test_get_tensor_desc():
+    tensor_desc = IETensorDesc("FP32", [1, 127, 127, 3], "NHWC")
+    blob = IEBlob(tensor_desc)
+    assert blob.tensor_desc == tensor_desc
+
+
+def test_get_buffer():
+    tensor_desc = IETensorDesc("FP32", [1, 3, 127, 127], "NCHW")
+    array = np.ones(shape=(1, 3, 127, 127), dtype=np.float32)
+    blob = IEBlob(tensor_desc, array)
+    assert np.array_equal(blob.buffer, array)
+
+def test_write_to_buffer_fp32():
+    tensor_desc = IETensorDesc("FP32", [1, 3, 127, 127], "NCHW")
+    array = np.zeros(shape=(1, 3, 127, 127), dtype=np.float32)
+    blob = IEBlob(tensor_desc, array)
+    ones_arr = np.ones(shape=(1, 3, 127, 127), dtype=np.float32)
+    blob.buffer[:] = ones_arr
+    assert np.array_equal(blob.buffer, ones_arr)
+
+@pytest.mark.skip(reason="Need to figure out how to implement right conversion")
+def test_write_to_buffer_fp16():
+    tensor_desc = IETensorDesc("FP16", [1, 3, 127, 127], "NCHW")
+    array = np.zeros(shape=(1, 3, 127, 127), dtype=np.float16)
+    blob = IEBlob(tensor_desc, array)
+    ones_arr = np.ones(shape=(1, 3, 127, 127), dtype=np.float16)
+    blob.buffer[:] = ones_arr
+    assert np.array_equal(blob.buffer, ones_arr)
+
+def test_write_to_buffer_int8():
+    tensor_desc = IETensorDesc("I8", [1, 3, 127, 127], "NCHW")
+    array = np.zeros(shape=(1, 3, 127, 127), dtype=np.int8)
+    blob = IEBlob(tensor_desc, array)
+    ones_arr = np.ones(shape=(1, 3, 127, 127), dtype=np.int8)
+    blob.buffer[:] = ones_arr
+    assert np.array_equal(blob.buffer, ones_arr)
+
+def test_write_to_buffer_uint8():
+    tensor_desc = IETensorDesc("U8", [1, 3, 127, 127], "NCHW")
+    array = np.zeros(shape=(1, 3, 127, 127), dtype=np.uint8)
+    blob = IEBlob(tensor_desc, array)
+    ones_arr = np.ones(shape=(1, 3, 127, 127), dtype=np.uint8)
+    blob.buffer[:] = ones_arr
+    assert np.array_equal(blob.buffer, ones_arr)
+
+def test_write_to_buffer_int32():
+    tensor_desc = IETensorDesc("I32", [1, 3, 127, 127], "NCHW")
+    array = np.zeros(shape=(1, 3, 127, 127), dtype=np.int32)
+    blob = IEBlob(tensor_desc, array)
+    ones_arr = np.ones(shape=(1, 3, 127, 127), dtype=np.int32)
+    blob.buffer[:] = ones_arr
+    assert np.array_equal(blob.buffer, ones_arr)
+
+def test_write_to_buffer_int16():
+    tensor_desc = IETensorDesc("I16", [1, 3, 127, 127], "NCHW")
+    array = np.zeros(shape=(1, 3, 127, 127), dtype=np.int16)
+    blob = IEBlob(tensor_desc, array)
+    ones_arr = np.ones(shape=(1, 3, 127, 127), dtype=np.int16)
+    blob.buffer[:] = ones_arr
+    assert np.array_equal(blob.buffer, ones_arr)
+
+def test_write_to_buffer_uint16():
+    tensor_desc = IETensorDesc("U16", [1, 3, 127, 127], "NCHW")
+    array = np.zeros(shape=(1, 3, 127, 127), dtype=np.uint16)
+    blob = IEBlob(tensor_desc, array)
+    ones_arr = np.ones(shape=(1, 3, 127, 127), dtype=np.uint16)
+    blob.buffer[:] = ones_arr
+    assert np.array_equal(blob.buffer, ones_arr)
+
+def test_write_to_buffer_int64():
+    tensor_desc = IETensorDesc("I64", [1, 3, 127, 127], "NCHW")
+    array = np.zeros(shape=(1, 3, 127, 127), dtype=np.int64)
+    blob = IEBlob(tensor_desc, array)
+    ones_arr = np.ones(shape=(1, 3, 127, 127), dtype=np.int64)
+    blob.buffer[:] = ones_arr
+    assert np.array_equal(blob.buffer, ones_arr)
+
+def test_incompatible_array_and_td():
+    tensor_desc = IETensorDesc("FP32", [1, 3, 127, 127], "NCHW")
+    array = np.zeros(shape=(1, 2, 3, 4), dtype=np.float32)
+    with pytest.raises(AttributeError) as e:
+        IEBlob(tensor_desc, array)
+    assert "Number of elements in provided numpy array 24 and " \
+           "required by TensorDesc 48387 are not equal" in str(e.value)
+
+def test_incompatible_input_precision():
+    import cv2
+    n, c, h, w = (1, 3, 32, 32)
+    image = cv2.imread(path_to_image)
+    if image is None:
+        raise FileNotFoundError("Input image not found")
+
+    image = cv2.resize(image, (h, w)) / 255
+    image = image.transpose((2, 0, 1))
+    image = image.reshape((n, c, h, w))
+    tensor_desc = IETensorDesc("FP32", [1, 3, 32, 32], "NCHW")
+    with pytest.raises(ValueError) as e:
+        IEBlob(tensor_desc, image)
+    assert "Data type float64 of provided numpy array " \
+           "doesn't match to TensorDesc precision FP32" in str(e.value)
diff --git a/inference-engine/ie_bridges/python/tests/test_IECore.py b/inference-engine/ie_bridges/python/tests/test_IECore.py
new file mode 100644 (file)
index 0000000..9eabe34
--- /dev/null
@@ -0,0 +1,188 @@
+import os
+import pytest
+from sys import platform
+import numpy as np
+from pathlib import Path
+
+from openvino.inference_engine import IENetwork, IECore, ExecutableNetwork
+from conftest import model_path, plugins_path
+
+
+test_net_xml, test_net_bin = model_path()
+plugins_xml, plugins_win_xml, plugins_osx_xml = plugins_path()
+
+
+def test_init_ie_core_no_cfg():
+    ie = IECore()
+    assert isinstance(ie, IECore)
+
+
+def test_init_ie_core_with_cfg():
+    ie = IECore(plugins_xml)
+    assert isinstance(ie, IECore)
+
+
+def test_get_version(device):
+    ie = IECore()
+    version = ie.get_versions(device)
+    assert isinstance(version, dict), "Returned version must be a dictionary"
+    assert device in version, "{} plugin version wasn't found in versions"
+    assert hasattr(version[device], "major"), "Returned version has no field 'major'"
+    assert hasattr(version[device], "minor"), "Returned version has no field 'minor'"
+    assert hasattr(version[device], "description"), "Returned version has no field 'description'"
+    assert hasattr(version[device], "build_number"), "Returned version has no field 'build_number'"
+
+
+def test_load_network(device):
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie.load_network(net, device)
+    assert isinstance(exec_net, ExecutableNetwork)
+
+
+@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test")
+def test_load_network_wrong_device():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    with pytest.raises(RuntimeError) as e:
+        ie.load_network(net, "BLA")
+    assert 'Device with "BLA" name is not registered in the InferenceEngine' in str(e.value)
+
+
+def test_query_network(device):
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    query_res = ie.query_network(net, device)
+    assert net.layers.keys() == query_res.keys(), "Not all network layers present in query_network results"
+    assert next(iter(set(query_res.values()))) == device, "Wrong device for some layers"
+
+
+@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test")
+def test_register_plugin():
+    ie = IECore()
+    ie.register_plugin("MKLDNNPlugin", "BLA")
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie.load_network(net, "BLA")
+    assert isinstance(exec_net, ExecutableNetwork), "Cannot load the network to the registered plugin with name 'BLA'"
+
+
+@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test")
+def test_register_plugins():
+    ie = IECore()
+    if platform == "linux" or platform == "linux2":
+        ie.register_plugins(plugins_xml)
+    elif platform == "darwin":
+        ie.register_plugins(plugins_osx_xml)
+    elif platform == "win32":
+        ie.register_plugins(plugins_win_xml)
+
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    exec_net = ie.load_network(net, "CUSTOM")
+    assert isinstance(exec_net,
+                      ExecutableNetwork), "Cannot load the network to the registered plugin with name 'CUSTOM' " \
+                                          "registered in the XML file"
+
+
+@pytest.mark.skip(reason="Need to figure out if it's expected behaviour (fails with C++ API as well")
+def test_unregister_plugin(device):
+    ie = IECore()
+    ie.unregister_plugin(device)
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    with pytest.raises(RuntimeError) as e:
+        ie.load_network(net, device)
+    assert 'Device with "{}" name is not registered in the InferenceEngine'.format(device) in str(e.value)
+
+
+@pytest.mark.skip(reason="Need to figure out segmentation fault cause.")
+def test_available_devices(device):
+    ie = IECore()
+    devices = ie.available_devices
+    assert device in devices, "Current device '{}' is not listed in available devices '{}'".format(device,
+                                                                                                   ', '.join(devices))
+
+
+@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
+                    reason="Cannot run test on device {}," "Plugin specific test".format(os.environ.get("TEST_DEVICE")))
+def test_get_metric_list_of_str():
+    ie = IECore()
+    param = ie.get_metric("CPU", "OPTIMIZATION_CAPABILITIES")
+    assert isinstance(param, list), "Parameter value for 'OPTIMIZATION_CAPABILITIES' " \
+                                    "metric must be a list but {} is returned".format(type(param))
+    assert all(isinstance(v, str) for v in param), "Not all of the parameter values for 'OPTIMIZATION_CAPABILITIES' " \
+                                                   "metric are strings!"
+
+
+@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
+                    reason="Cannot run test on device {}," "Plugin specific test".format(os.environ.get("TEST_DEVICE")))
+def test_get_metric_tuple_of_two_ints():
+    ie = IECore()
+    param = ie.get_metric("CPU", "RANGE_FOR_STREAMS")
+    assert isinstance(param, tuple), "Parameter value for 'RANGE_FOR_STREAMS' " \
+                                     "metric must be tuple but {} is returned".format(type(param))
+    assert all(isinstance(v, int) for v in param), "Not all of the parameter values for 'RANGE_FOR_STREAMS' " \
+                                                   "metric are integers!"
+
+
+@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
+                    reason="Cannot run test on device {}," "Plugin specific test".format(os.environ.get("TEST_DEVICE")))
+def test_get_metric_tuple_of_three_ints():
+    ie = IECore()
+    param = ie.get_metric("CPU", "RANGE_FOR_ASYNC_INFER_REQUESTS")
+    assert isinstance(param, tuple), "Parameter value for 'RANGE_FOR_ASYNC_INFER_REQUESTS' " \
+                                     "metric must be tuple but {} is returned".format(type(param))
+    assert all(isinstance(v, int) for v in param), "Not all of the parameter values for " \
+                                                   "'RANGE_FOR_ASYNC_INFER_REQUESTS' metric are integers!"
+
+
+@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
+                    reason="Cannot run test on device {}," "Plugin specific test".format(os.environ.get("TEST_DEVICE")))
+def test_get_metric_str():
+    ie = IECore()
+    param = ie.get_metric("CPU", "FULL_DEVICE_NAME")
+    assert isinstance(param, str), "Parameter value for 'FULL_DEVICE_NAME' " \
+                                   "metric must be string but {} is returned".format(type(param))
+
+def test_read_network_from_xml():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    assert isinstance(net, IENetwork)
+
+def test_read_network_as_path():
+    ie = IECore()
+    net = ie.read_network(model=Path(model_path()[0]), weights=Path(test_net_bin))
+    assert isinstance(net, IENetwork)
+
+def test_incorrect_xml():
+    ie = IECore()
+    with pytest.raises(Exception) as e:
+        ie.read_network(model="./model.xml", weights=Path(test_net_bin))
+    assert "Path to the model ./model.xml doesn't exist or it's a directory" in str(e.value)
+
+def test_incorrect_bin():
+    ie = IECore()
+    with pytest.raises(Exception) as e:
+        ie.read_network(model=test_net_xml, weights="./model.bin")
+    assert "Path to the weights ./model.bin doesn't exist or it's a directory" in str(e.value)
+
+def test_read_net_from_buffer():
+    ie = IECore()
+    with open(test_net_bin, 'rb') as f:
+        bin = f.read()
+    with open(model_path()[0], 'rb') as f:
+        xml = f.read()
+    net = ie.read_network(model=xml, weights=bin, init_from_buffer=True)
+    assert isinstance(net, IENetwork)
+
+def test_net_from_buffer_valid():
+    ie = IECore()
+    with open(test_net_bin, 'rb') as f:
+        bin = f.read()
+    with open(model_path()[0], 'rb') as f:
+        xml = f.read()
+    net = ie.read_network(model=xml, weights=bin, init_from_buffer=True)
+    net2 = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    for name, layer in net.layers.items():
+        for blob, data in layer.blobs.items():
+            assert np.allclose(data, net2.layers[name].blobs[blob]), \
+                "Incorrect weights for layer {} and blob {}".format(name, blob)
diff --git a/inference-engine/ie_bridges/python/tests/test_IENetLayer.py b/inference-engine/ie_bridges/python/tests/test_IENetLayer.py
new file mode 100644 (file)
index 0000000..e67ed30
--- /dev/null
@@ -0,0 +1,130 @@
+import warnings
+import numpy
+
+from openvino.inference_engine import DataPtr, IECore
+from conftest import model_path
+
+
+test_net_xml, test_net_bin = model_path()
+
+def test_name():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    assert net.layers['27'].name == "27"
+
+
+def test_type():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    assert net.layers['27'].type == "Pooling"
+
+
+def test_precision_getter(recwarn):
+    warnings.simplefilter("always")
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    assert net.layers['27'].precision == "FP32"
+    assert len(recwarn) == 1
+    assert recwarn.pop(DeprecationWarning)
+
+def test_precision_setter(recwarn):
+    warnings.simplefilter("always")
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    net.layers['27'].precision = "I8"
+    assert net.layers['27'].precision == "I8"
+    assert len(recwarn) == 1
+    assert recwarn.pop(DeprecationWarning)
+
+def test_affinity_getter():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    assert net.layers['27'].affinity == ""
+
+
+def test_affinity_setter():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    net.layers['27'].affinity = "CPU"
+    assert net.layers['27'].affinity == "CPU"
+
+
+def test_blobs():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    assert isinstance(net.layers['19/Fused_Add_'].blobs["biases"], numpy.ndarray)
+    assert isinstance(net.layers['19/Fused_Add_'].blobs["weights"], numpy.ndarray)
+    assert net.layers['19/Fused_Add_'].blobs["biases"].size != 0
+    assert net.layers['19/Fused_Add_'].blobs["weights"].size != 0
+
+def test_weights(recwarn):
+    warnings.simplefilter("always")
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    assert isinstance(net.layers['19/Fused_Add_'].weights["biases"], numpy.ndarray)
+    assert isinstance(net.layers['19/Fused_Add_'].weights["weights"], numpy.ndarray)
+    assert net.layers['19/Fused_Add_'].weights["biases"].size != 0
+    assert net.layers['19/Fused_Add_'].weights["weights"].size != 0
+    assert len(recwarn) == 4
+    assert recwarn.pop(DeprecationWarning)
+
+
+def test_params_getter():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    assert net.layers['27'].params == {"kernel" : "2,2", "pads_begin" : "0,0",
+                                       "pads_end" : "0,0", "rounding_type" : "floor",
+                                       "strides" : "2,2", "pool-method" : "max",
+                                       "originalLayersNames" : "27"}
+
+
+def test_params_setter():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    params = net.layers['27'].params
+    params.update({'PrimitivesPriority': 'cpu:ref_any'})
+    net.layers['27'].params = params
+    assert net.layers['27'].params == {"kernel" : "2,2", "pads_begin" : "0,0",
+                                       "pads_end" : "0,0", "rounding_type" : "floor",
+                                       "strides" : "2,2", "pool-method" : "max",
+                                       "originalLayersNames" : "27", 'PrimitivesPriority': 'cpu:ref_any'}
+
+
+def test_layer_parents():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    assert net.layers['27'].parents == ['26']
+
+
+def test_layer_children():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    assert net.layers['27'].children == ['29']
+
+
+def test_layout(recwarn):
+    warnings.simplefilter("always")
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    assert net.layers['27'].layout == 'NCHW'
+    assert len(recwarn) == 1
+    assert recwarn.pop(DeprecationWarning)
+
+
+def test_shape(recwarn):
+    warnings.simplefilter("always")
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    assert net.layers['27'].shape == [1, 64, 9, 9]
+    assert len(recwarn) == 1
+
+
+def test_out_data():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    assert isinstance(net.layers['27'].out_data[0], DataPtr)
+
+def test_in_data():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    assert isinstance(net.layers['27'].in_data[0], DataPtr)
diff --git a/inference-engine/ie_bridges/python/tests/test_IENetwork.py b/inference-engine/ie_bridges/python/tests/test_IENetwork.py
new file mode 100644 (file)
index 0000000..a3841c9
--- /dev/null
@@ -0,0 +1,280 @@
+import os
+import pytest
+import warnings
+import numpy as np
+
+from openvino.inference_engine import IENetwork, IENetLayer, DataPtr, LayersStatsMap, LayerStats, IECore
+from conftest import model_path
+
+test_net_xml, test_net_bin = model_path()
+
+def test_create_ie_network_deprecated():
+    with warnings.catch_warnings(record=True) as w:
+        net = IENetwork(model=test_net_xml, weights=test_net_bin)
+        assert isinstance(net, IENetwork)
+        assert len(w) == 1
+        assert issubclass(w[-1].category, DeprecationWarning)
+        assert "Reading network using constructor is deprecated. " \
+               "Please, use IECore.read_network() method instead" in str(w[0].message)
+
+
+def test_incorrect_xml_deprecated():
+    with warnings.catch_warnings(record=True) as w:
+        with pytest.raises(Exception) as e:
+            IENetwork(model="./model.xml", weights=test_net_bin)
+        assert "Path to the model ./model.xml doesn't exist or it's a directory" in str(e.value)
+        assert len(w) == 1
+        assert issubclass(w[-1].category, DeprecationWarning)
+        assert "Reading network using constructor is deprecated. " \
+               "Please, use IECore.read_network() method instead" in str(w[0].message)
+
+
+def test_incorrect_bin_deprecated():
+    with warnings.catch_warnings(record=True) as w:
+        with pytest.raises(Exception) as e:
+            IENetwork(model=test_net_xml, weights="./model.bin")
+        assert "Path to the weights ./model.bin doesn't exist or it's a directory" in str(e.value)
+        assert len(w) == 1
+        assert issubclass(w[-1].category, DeprecationWarning)
+        assert "Reading network using constructor is deprecated. " \
+               "Please, use IECore.read_network() method instead" in str(w[0].message)
+
+
+@pytest.mark.skip(reason="name) returns wrong name, problem somewhere in ngraph")
+def test_name():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    assert net.name == "model"
+
+
+def test_inputs():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    assert isinstance(net.inputs['data'], DataPtr)
+    assert net.inputs['data'].layout == "NCHW"
+    assert net.inputs['data'].precision == "FP32"
+    assert net.inputs['data'].shape == [1, 3, 32, 32]
+
+
+def test_input_layout_setter():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    assert net.inputs['data'].layout == "NCHW"
+    net.inputs['data'].layout = "NHWC"
+    assert net.inputs['data'].layout == "NHWC"
+
+
+def test_input_precision_setter():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    assert net.inputs['data'].precision == "FP32"
+    net.inputs['data'].precision = "I8"
+    assert net.inputs['data'].precision == "I8"
+
+
+def test_input_unsupported_precision_setter():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    with pytest.raises(ValueError) as e:
+        net.inputs['data'].precision = "BLA"
+    assert "Unsupported precision BLA! List of supported precisions: " in str(e.value)
+
+
+def test_input_unsupported_layout_setter():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    with pytest.raises(ValueError) as e:
+        net.inputs['data'].layout = "BLA"
+    assert "Unsupported layout BLA! List of supported layouts: " in str(e.value)
+
+
+def test_outputs():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    assert isinstance(net.outputs['fc_out'], DataPtr)
+    assert net.outputs['fc_out'].layout == "NC"
+    assert net.outputs['fc_out'].precision == "FP32"
+    assert net.outputs['fc_out'].shape == [1, 10]
+
+
+def test_output_precision_setter():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    assert net.outputs['fc_out'].precision == "FP32"
+    net.outputs['fc_out'].precision = "I8"
+    assert net.outputs['fc_out'].precision == "I8"
+
+
+def test_output_unsupported_precision_setter():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    with pytest.raises(ValueError) as e:
+        net.outputs['fc_out'].precision = "BLA"
+    assert "Unsupported precision BLA! List of supported precisions: " in str(e.value)
+
+
+def test_add_outputs():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    net.add_outputs('28/Reshape')
+    net.add_outputs(['29/WithoutBiases'])
+    assert sorted(net.outputs) == ['28/Reshape', '29/WithoutBiases', 'fc_out']
+
+
+def test_add_outputs_with_port():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    net.add_outputs(('28/Reshape', 0))
+    net.add_outputs([('29/WithoutBiases', 0)])
+    assert sorted(net.outputs) == ['28/Reshape', '29/WithoutBiases', 'fc_out']
+
+
+def test_add_outputs_with_and_without_port():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    net.add_outputs('28/Reshape')
+    net.add_outputs([('29/WithoutBiases', 0)])
+    assert sorted(net.outputs) == ['28/Reshape', '29/WithoutBiases', 'fc_out']
+
+
+def test_batch_size_getter():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    assert net.batch_size == 1
+
+
+def test_batch_size_setter():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    net.batch_size = 4
+    assert net.batch_size == 4
+    assert net.inputs['data'].shape == [4, 3, 32, 32]
+
+def test_batch_size_after_reshape():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    net.reshape({'data' : [4, 3, 32, 32]})
+    assert net.batch_size == 4
+    assert net.inputs['data'].shape == [4, 3, 32, 32]
+    net.reshape({'data' : [8, 3, 32, 32]})
+    assert net.batch_size == 8
+    assert net.inputs['data'].shape == [8, 3, 32, 32]
+
+def test_layers():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    layers_name = [key for key in net.layers]
+    assert sorted(layers_name) == ['19/Fused_Add_', '21', '22', '23', '24/Fused_Add_', '26', '27', '29', 'data', 'fc_out']
+    assert isinstance(net.layers['19/Fused_Add_'], IENetLayer)
+
+
+def test_get_stats_deprecated():
+    with warnings.catch_warnings(record=True) as w:
+        ie = IECore()
+        net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+        stats = net.stats
+        assert isinstance(stats, LayersStatsMap)
+        assert len(w) == 1
+        assert issubclass(w[-1].category, DeprecationWarning)
+        assert "stats property of IENetwork is deprecated." in str(w[-1].message)
+
+
+@pytest.mark.skip(reason="Test is failed due-to ngraph conversion")
+def test_set_new_stats_deprecated():
+    with warnings.catch_warnings(record=True) as w:
+        ie = IECore()
+        net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+        new_stats = LayerStats(min=(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0),
+                               max=(10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0))
+        stats = net.stats
+        stats.update({"fc_out": new_stats})
+        assert net.stats["fc_out"].min == new_stats.min
+        assert net.stats["fc_out"].max == new_stats.max
+        assert len(w) == 3
+        for warns in w:
+            assert issubclass(warns.category, DeprecationWarning)
+            assert "stats property of IENetwork is deprecated." in str(warns.message)
+
+
+@pytest.mark.skip(reason="Test is failed due-to ngraph conversion")
+def test_update_stats_deprecated():
+    with warnings.catch_warnings(record=True) as w:
+        ie = IECore()
+        net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+        initial_stats = LayerStats(min=(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0),
+                                   max=(10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0))
+        stats = net.stats
+        stats.update({"fc_out": initial_stats})
+        new_stats = LayerStats(min=(10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0),
+                               max=(10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0))
+        stats.update({"fc_out": new_stats})
+        assert net.stats["fc_out"].min == new_stats.min
+        assert net.stats["fc_out"].max == new_stats.max
+        assert len(w) == 3
+        for warns in w:
+            assert issubclass(warns.category, DeprecationWarning)
+            assert "stats property of IENetwork is deprecated." in str(warns.message)
+
+
+@pytest.mark.skip(reason="Test is failed due-to ngraph conversion")
+def test_serialize():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    net.serialize("./serialized_net.xml", "./serialized_net.bin")
+    serialized_net = ie.read_network(model="./serialized_net.xml", weights="./serialized_net.bin")
+    assert net.layers.keys() == serialized_net.layers.keys()
+    os.remove("./serialized_net.xml")
+    os.remove("./serialized_net.bin")
+
+
+def test_reshape():
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    net.reshape({"data": (2, 3, 32, 32)})
+
+
+def test_read_net_from_buffer_deprecated():
+    with warnings.catch_warnings(record=True) as w:
+        with open(test_net_bin, 'rb') as f:
+            bin = f.read()
+        with open(test_net_xml, 'rb') as f:
+            xml = f.read()
+        net = IENetwork(model=xml, weights=bin, init_from_buffer=True)
+        assert isinstance(net, IENetwork)
+        assert len(w) == 1
+        assert issubclass(w[-1].category, DeprecationWarning)
+        assert "Reading network using constructor is deprecated. " \
+               "Please, use IECore.read_network() method instead" in str(w[0].message)
+
+
+def test_net_from_buffer_valid_deprecated():
+    with warnings.catch_warnings(record=True) as w:
+        with open(test_net_bin, 'rb') as f:
+            bin = f.read()
+        with open(test_net_xml, 'rb') as f:
+            xml = f.read()
+        net = IENetwork(model=xml, weights=bin, init_from_buffer=True)
+        net2 = IENetwork(model=test_net_xml, weights=test_net_bin)
+        for name, l in net.layers.items():
+            for blob, data in l.blobs.items():
+                assert np.allclose(data, net2.layers[name].blobs[blob]), \
+                    "Incorrect weights for layer {} and blob {}".format(name, blob)
+        assert len(w) == 2
+        for warns in w:
+            assert issubclass(warns.category, DeprecationWarning)
+            assert "Reading network using constructor is deprecated. " \
+                   "Please, use IECore.read_network() method instead" in str(warns.message)
+
+
+def test_multi_out_data():
+    # Regression test CVS-23965
+    # Check that DataPtr objects for all output layers are not copied between output map items
+    ie = IECore()
+    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+    net.add_outputs(['28/Reshape'])
+    assert "28/Reshape" in net.outputs and "fc_out" in net.outputs
+    assert isinstance(net.outputs["28/Reshape"], DataPtr)
+    assert isinstance(net.outputs["fc_out"], DataPtr)
+    assert net.outputs["28/Reshape"].name == "28/Reshape" and net.outputs["28/Reshape"].shape == [1, 5184]
+    assert net.outputs["fc_out"].name == "fc_out" and net.outputs["fc_out"].shape == [1, 10]
diff --git a/inference-engine/ie_bridges/python/tests/test_IEPlugin.py b/inference-engine/ie_bridges/python/tests/test_IEPlugin.py
new file mode 100644 (file)
index 0000000..20d8c58
--- /dev/null
@@ -0,0 +1,135 @@
+import warnings
+import pytest
+
+
+from openvino.inference_engine import IENetwork, IEPlugin, ExecutableNetwork
+from conftest import model_path
+
+test_net_xml, test_net_bin = model_path()
+
+def test_init_plugin(device):
+    with warnings.catch_warnings(record=True) as w:
+        plugin = IEPlugin(device, None)
+        assert isinstance(plugin, IEPlugin)
+    assert len(w) == 1
+    assert "IEPlugin class is deprecated. " \
+                "Please use IECore class instead." in str(w[0].message)
+
+
+def test_device_attr(device):
+    with warnings.catch_warnings(record=True) as w:
+        plugin = IEPlugin(device, None)
+        assert plugin.device == device
+    assert len(w) == 1
+    assert "IEPlugin class is deprecated. " \
+               "Please use IECore class instead." in str(w[0].message)
+
+
+def test_get_version(device):
+    with warnings.catch_warnings(record=True) as w:
+        plugin = IEPlugin(device, None)
+        assert len(plugin.version) != 0
+    assert len(w) == 1
+    assert "IEPlugin class is deprecated. " \
+               "Please use IECore class instead." in str(w[0].message)
+
+
+def test_load_network(device):
+    with warnings.catch_warnings(record=True) as w:
+        plugin = IEPlugin(device, None)
+        net = IENetwork(model=test_net_xml, weights=test_net_bin)
+        exec_net = plugin.load(net)
+        assert isinstance(exec_net, ExecutableNetwork)
+    assert len(w) == 2
+    assert "IEPlugin class is deprecated. " \
+               "Please use IECore class instead." in str(w[0].message)
+    assert "Reading network using constructor is deprecated. " \
+            "Please, use IECore.read_network() method instead"  in str(w[1].message)
+
+
+def test_load_network_many_requests(device):
+    with warnings.catch_warnings(record=True) as w:
+        plugin = IEPlugin(device)
+        net = IENetwork(model=test_net_xml, weights=test_net_bin)
+        exec_net = plugin.load(net, num_requests=5)
+        assert len(exec_net.requests) == 5
+    assert len(w) == 2
+    assert "IEPlugin class is deprecated. " \
+               "Please use IECore class instead." in str(w[0].message)
+    assert "Reading network using constructor is deprecated. " \
+            "Please, use IECore.read_network() method instead"  in str(w[1].message)
+
+
+def test_get_supported_layers(device):
+    with warnings.catch_warnings(record=True) as w:
+        plugin = IEPlugin(device)
+        net = IENetwork(model=test_net_xml, weights=test_net_bin)
+        supported = plugin.get_supported_layers(net)
+        layers = ['19/Fused_Add_', '21', '22', '23', '24/Fused_Add_', '26', '27', '29', 'data', 'fc_out']
+        if device == "GPU":
+            layers.remove("data")
+        assert sorted(supported) == layers
+    assert len(w) == 2
+    assert "IEPlugin class is deprecated. " \
+               "Please use IECore class instead." in str(w[0].message)
+    assert "Reading network using constructor is deprecated. " \
+            "Please, use IECore.read_network() method instead"  in str(w[1].message)
+
+
+@pytest.mark.skip(reason="Plugiin specific test.")
+def test_set_config(device):
+    with warnings.catch_warnings(record=True) as w:
+        plugin = IEPlugin("HETERO:CPU")
+        plugin.set_config({"TARGET_FALLBACK": "CPU,GPU"})
+    assert len(w) == 1
+    assert "IEPlugin class is deprecated. " \
+               "Please use IECore class instead." in str(w[0].message)
+
+
+@pytest.mark.skip(reason="Sporadically fail in CI, not reproducible locally")
+def test_set_initial_affinity():
+    with warnings.catch_warnings(record=True) as w:
+        plugin = IEPlugin("HETERO:CPU", None)
+        net = IENetwork(model=test_net_xml, weights=test_net_bin)
+        plugin.set_initial_affinity(net)
+        for l, params in net.layers.items():
+            assert params.affinity == "CPU", "Incorrect affinity for {}".format(l)
+    assert len(w) == 1
+    assert "IEPlugin class is deprecated. " \
+               "Please use IECore class instead." in str(w[0].message)
+
+
+def test_set_initial_affinity_wrong_device(device):
+    with pytest.raises(RuntimeError) as e:
+        with warnings.catch_warnings(record=True) as w:
+            plugin = IEPlugin("CPU", None)
+            net = IENetwork(model=test_net_xml, weights=test_net_bin)
+            plugin.set_initial_affinity(net)
+        assert len(w) == 1
+        assert "IEPlugin class is deprecated. " \
+               "Please use IECore class instead." in str(w[0].message)
+    assert "set_initial_affinity method applicable only for HETERO device" in str(e.value)
+
+
+def test_add_cpu_extension_wrong_device():
+    with pytest.raises(RuntimeError) as e:
+        with warnings.catch_warnings(record=True) as w:
+            plugin = IEPlugin("GPU", None)
+            plugin.add_cpu_extension("./")
+        assert len(w) == 1
+        assert "IEPlugin class is deprecated. " \
+               "Please use IECore class instead." in str(w[0].message)
+    if "Cannot find plugin to use" in str(e.value):
+        pytest.skip("No GPU found. Skipping test")
+    else:
+        assert "add_cpu_extension method applicable only for CPU or HETERO devices" in str(e.value)
+
+
+def test_unknown_plugin():
+    with pytest.raises(ValueError) as e:
+        with warnings.catch_warnings(record=True) as w:
+            IEPlugin("BLA")
+        assert len(w) == 1
+        assert "IEPlugin class is deprecated. " \
+               "Please use IECore class instead." in str(w[0].message)
+    assert "Unknown plugin: BLA, expected one of:" in str(e.value)
diff --git a/inference-engine/ie_bridges/python/tests/test_IETensorDesk.py b/inference-engine/ie_bridges/python/tests/test_IETensorDesk.py
new file mode 100644 (file)
index 0000000..a0201b2
--- /dev/null
@@ -0,0 +1,55 @@
+import pytest
+
+from openvino.inference_engine import IETensorDesc
+
+
+def test_init():
+    tensor_desc = IETensorDesc("FP32", [1, 127, 127, 3], "NHWC")
+    assert isinstance(tensor_desc, IETensorDesc)
+
+
+def test_precision():
+    tensor_desc = IETensorDesc("FP32", [1, 127, 127, 3], "NHWC")
+    assert tensor_desc.precision == "FP32"
+
+
+def test_layout():
+    tensor_desc = IETensorDesc("FP32", [1, 127, 127, 3], "NHWC")
+    assert tensor_desc.layout == "NHWC"
+
+
+def test_dims():
+    tensor_desc = IETensorDesc("FP32", [1, 127, 127, 3], "NHWC")
+    assert tensor_desc.dims == [1, 127, 127, 3]
+
+
+def test_incorrect_precision_setter():
+    tensor_desc = IETensorDesc("FP32", [1, 127, 127, 3], "NHWC")
+    with pytest.raises(ValueError) as e:
+        tensor_desc.precision = "123"
+    assert "Unsupported precision 123! List of supported precisions:" in str(e.value)
+
+
+def test_incorrect_layout_setter():
+    tensor_desc = IETensorDesc("FP32", [1, 127, 127, 3], "NHWC")
+    with pytest.raises(ValueError) as e:
+        tensor_desc.layout = "123"
+    assert "Unsupported layout 123! List of supported layouts: " in str(e.value)
+
+
+def test_init_incorrect_precision():
+    with pytest.raises(ValueError) as e:
+        IETensorDesc("123", [1, 127, 127, 3], "NHWC")
+    assert "Unsupported precision 123! List of supported precisions: " in str(e.value)
+
+
+def test_eq_operator():
+    tensor_desc = IETensorDesc("FP32", [1, 3, 127, 127], "NHWC")
+    tensor_desc_2 = IETensorDesc("FP32", [1, 3, 127, 127], "NHWC")
+    assert tensor_desc == tensor_desc_2
+
+
+def test_ne_operator():
+    tensor_desc = IETensorDesc("FP32", [1, 3, 127, 127], "NHWC")
+    tensor_desc_2 = IETensorDesc("FP32", [1, 3, 127, 127], "NCHW")
+    assert tensor_desc != tensor_desc_2
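
A descriptor mostly matters as blob metadata. A minimal sketch pairing IETensorDesc with the IEBlob constructor used by test_blob_setter later in this patch (the direct IEBlob import is an assumption; that test reaches it through the ie_api module):

    import numpy as np
    from openvino.inference_engine import IETensorDesc, IEBlob

    tensor_desc = IETensorDesc("FP32", [1, 3, 32, 32], "NCHW")
    data = np.zeros((1, 3, 32, 32), dtype=np.float32)
    blob = IEBlob(tensor_desc, data)  # attaches precision/shape/layout metadata to the array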
diff --git a/inference-engine/ie_bridges/python/tests/test_InferRequest.py b/inference-engine/ie_bridges/python/tests/test_InferRequest.py
new file mode 100644 (file)
index 0000000..b68b837
--- /dev/null
@@ -0,0 +1,430 @@
+import numpy as np
+import os
+import pytest
+import warnings
+
+from openvino.inference_engine import ie_api as ie
+from conftest import model_path, image_path
+
+is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD"
+test_net_xml, test_net_bin = model_path(is_myriad)
+path_to_img = image_path()
+
+def read_image():
+    import cv2
+    n, c, h, w = (1, 3, 32, 32)
+    image = cv2.imread(path_to_img)
+    if image is None:
+        raise FileNotFoundError("Input image not found")
+
+    image = image / 255
+    image = cv2.resize(image, (w, h))
+    image = image.transpose((2, 0, 1)).astype(np.float32)
+    image = image.reshape((n, c, h, w))
+    return image
+
+
+def load_sample_model(device, num_requests=1):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    executable_network = ie_core.load_network(net, device, num_requests=num_requests)
+    return executable_network
+
+
+def test_input_blobs(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    executable_network = ie_core.load_network(net, device, num_requests=2)
+    td = ie.IETensorDesc("FP32", (1, 3, 32, 32), "NCHW")
+    assert executable_network.requests[0].input_blobs['data'].tensor_desc == td
+
+
+def test_output_blobs(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    executable_network = ie_core.load_network(net, device, num_requests=2)
+    td = ie.IETensorDesc("FP32", (1, 10), "NC")
+    assert executable_network.requests[0].output_blobs['fc_out'].tensor_desc == td
+
+
+def test_inputs_deprecated(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    executable_network = ie_core.load_network(net, device, num_requests=2)
+    with warnings.catch_warnings(record=True) as w:
+        inputs = executable_network.requests[0].inputs
+    assert "'inputs' property of InferRequest is deprecated. Please instead use 'input_blobs' property." in str(
+        w[-1].message)
+    del executable_network
+    del ie_core
+    del net
+
+
+def test_outputs_deprecated(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    executable_network = ie_core.load_network(net, device, num_requests=2)
+    with warnings.catch_warnings(record=True) as w:
+        outputs = executable_network.requests[0].outputs
+    assert "'outputs' property of InferRequest is deprecated. Please instead use 'output_blobs' property." in str(
+        w[-1].message)
+    del executable_network
+    del ie_core
+    del net
+
+
+def test_inputs_list(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    executable_network = ie_core.load_network(net, device, num_requests=2)
+
+    for req in executable_network.requests:
+        assert len(req._inputs_list) == 1
+        assert "data" in req._inputs_list
+    del ie_core
+
+
+def test_outputs_list(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    executable_network = ie_core.load_network(net, device, num_requests=2)
+
+    for req in executable_network.requests:
+        assert len(req._outputs_list) == 1
+        assert "fc_out" in req._outputs_list
+    del ie_core
+
+
+def test_access_input_buffer(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    executable_network = ie_core.load_network(net, device, num_requests=1)
+    buffer = executable_network.requests[0]._get_blob_buffer("data".encode()).to_numpy()
+    assert buffer.shape == (1, 3, 32, 32)
+    assert buffer.strides == (12288, 4096, 128, 4)
+    assert buffer.dtype == np.float32
+    del executable_network
+    del ie_core
+    del net
+
+
+def test_access_output_buffer(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    executable_network = ie_core.load_network(net, device, num_requests=1)
+    buffer = executable_network.requests[0]._get_blob_buffer("fc_out".encode()).to_numpy()
+    assert buffer.shape == (1, 10)
+    assert buffer.strides == (40, 4)
+    assert buffer.dtype == np.float32
+    del executable_network
+    del ie_core
+    del net
+
+
+def test_write_to_input_blobs_directly(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    executable_network = ie_core.load_network(net, device, num_requests=1)
+    img = read_image()
+    request = executable_network.requests[0]
+    input_data = request.input_blobs["data"]
+    input_data.buffer[:] = img
+    assert np.array_equal(executable_network.requests[0].input_blobs["data"].buffer, img)
+    del executable_network
+    del ie_core
+    del net
+
+
+def test_write_to_input_blobs_copy(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    executable_network = ie_core.load_network(net, device, num_requests=1)
+    img = read_image()
+    request = executable_network.requests[0]
+    request.input_blobs["data"].buffer[:] = img
+    assert np.allclose(executable_network.requests[0].input_blobs["data"].buffer, img)
+    del executable_network
+    del ie_core
+    del net
+
+
+def test_infer(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    exec_net = ie_core.load_network(net, device, num_requests=1)
+    img = read_image()
+    request = exec_net.requests[0]
+    request.infer({'data': img})
+    res = request.output_blobs['fc_out'].buffer
+    assert np.argmax(res) == 2
+    del exec_net
+    del ie_core
+    del net
+
+
+def test_async_infer_default_timeout(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    exec_net = ie_core.load_network(net, device, num_requests=1)
+    img = read_image()
+    request = exec_net.requests[0]
+    request.async_infer({'data': img})
+    request.wait()
+    res = request.output_blobs['fc_out'].buffer
+    assert np.argmax(res) == 2
+    del exec_net
+    del ie_core
+    del net
+
+
+def test_async_infer_wait_finish(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    exec_net = ie_core.load_network(net, device, num_requests=1)
+    img = read_image()
+    request = exec_net.requests[0]
+    request.async_infer({'data': img})
+    request.wait(ie.WaitMode.RESULT_READY)
+    res = request.output_blobs['fc_out'].buffer
+    assert np.argmax(res) == 2
+    del exec_net
+    del ie_core
+    del net
+
+
+def test_async_infer_wait_time(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    exec_net = ie_core.load_network(net, device, num_requests=1)
+    img = read_image()
+    request = exec_net.requests[0]
+    request.async_infer({'data': img})
+    request.wait(100)
+    res = request.output_blobs['fc_out'].buffer
+    assert np.argmax(res) == 2
+    del exec_net
+    del ie_core
+    del net
+
+
+def test_async_infer_wait_status(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    exec_net = ie_core.load_network(net, device, num_requests=1)
+    img = read_image()
+    request = exec_net.requests[0]
+    request.async_infer({'data': img})
+    request.wait(ie.WaitMode.RESULT_READY)
+    res = request.output_blobs['fc_out'].buffer
+    assert np.argmax(res) == 2
+    status = request.wait(ie.WaitMode.STATUS_ONLY)
+    assert status == ie.StatusCode.OK
+    del exec_net
+    del ie_core
+    del net
+
+
+def test_async_infer_fill_inputs(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    exec_net = ie_core.load_network(net, device, num_requests=1)
+    img = read_image()
+    request = exec_net.requests[0]
+    request.input_blobs['data'].buffer[:] = img
+    request.async_infer()
+    status_end = request.wait()
+    assert status_end == ie.StatusCode.OK
+    res = request.output_blobs['fc_out'].buffer
+    assert np.argmax(res[0]) == 2
+    del exec_net
+    del ie_core
+    del net
+
+
+def test_infer_modify_outputs(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    exec_net = ie_core.load_network(net, device, num_requests=1)
+    img = read_image()
+    request = exec_net.requests[0]
+    outputs0 = exec_net.infer({'data': img})
+    status_end = request.wait()
+    assert status_end == ie.StatusCode.OK
+    assert np.argmax(outputs0['fc_out']) == 2
+    outputs0['fc_out'][:] = np.zeros(shape=(1, 10), dtype=np.float32)
+    outputs1 = request.output_blobs
+    assert np.argmax(outputs1['fc_out'].buffer) == 2
+    outputs1['fc_out'].buffer[:] = np.ones(shape=(1, 10), dtype=np.float32)
+    outputs2 = request.output_blobs
+    assert np.argmax(outputs2['fc_out'].buffer) == 2
+    del exec_net
+    del ie_core
+    del net
+
+
+def test_async_infer_callback(device):
+    def static_vars(**kwargs):
+        def decorate(func):
+            for k in kwargs:
+                setattr(func, k, kwargs[k])
+            return func
+
+        return decorate
+
+    @static_vars(callback_called=0)
+    def callback(self, status):
+        callback.callback_called = 1
+
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    exec_net = ie_core.load_network(net, device, num_requests=1)
+    img = read_image()
+    request = exec_net.requests[0]
+    request.set_completion_callback(callback)
+    request.async_infer({'data': img})
+    status = request.wait()
+    assert status == ie.StatusCode.OK
+    res = request.output_blobs['fc_out'].buffer
+    assert np.argmax(res) == 2
+    assert callback.callback_called == 1
+    del exec_net
+    del ie_core
+
+
+def test_async_infer_callback_wait_before_start(device):
+    def static_vars(**kwargs):
+        def decorate(func):
+            for k in kwargs:
+                setattr(func, k, kwargs[k])
+            return func
+        return decorate
+
+    @static_vars(callback_called=0)
+    def callback(self, status):
+        callback.callback_called = 1
+
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    exec_net = ie_core.load_network(net, device, num_requests=1)
+    img = read_image()
+    request = exec_net.requests[0]
+    request.set_completion_callback(callback)
+    status = request.wait()
+    assert status == ie.StatusCode.INFER_NOT_STARTED
+    request.async_infer({'data': img})
+    status = request.wait()
+    assert status == ie.StatusCode.OK
+    res = request.output_blobs['fc_out'].buffer
+    assert np.argmax(res) == 2
+    assert callback.callback_called == 1
+    del exec_net
+    del ie_core
+
+
+def test_async_infer_callback_wait_in_callback(device):
+    class InferReqWrap:
+        def __init__(self, request):
+            self.request = request
+            self.request.set_completion_callback(self.callback)
+            self.status_code = self.request.wait(ie.WaitMode.STATUS_ONLY)
+            assert self.status_code == ie.StatusCode.INFER_NOT_STARTED
+
+        def callback(self, statusCode, userdata):
+            self.status_code = self.request.wait(ie.WaitMode.STATUS_ONLY)
+
+        def execute(self, input_data):
+            self.request.async_infer(input_data)
+            status = self.request.wait(ie.WaitMode.RESULT_READY)
+            assert status == ie.StatusCode.OK
+            assert self.status_code == ie.StatusCode.OK
+
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    exec_net = ie_core.load_network(net, device, num_requests=1)
+    img = read_image()
+    request_wrap = InferReqWrap(exec_net.requests[0])
+    request_wrap.execute({'data': img})
+    del exec_net
+    del ie_core
+
+
+def test_get_perf_counts(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    ie_core.set_config({"PERF_COUNT": "YES"}, device)
+    exec_net = ie_core.load_network(net, device)
+    img = read_image()
+    request = exec_net.requests[0]
+    request.infer({'data': img})
+    pc = request.get_perf_counts()
+    assert pc['29']["status"] == "EXECUTED"
+    assert pc['29']["layer_type"] == "FullyConnected"
+    del exec_net
+    del ie_core
+    del net
+
+
+@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
+                    reason="Can't run test on device {}, dynamic batch is fully supported only on CPU".format(
+                        os.environ.get("TEST_DEVICE", "CPU")))
+def test_set_batch_size(device):
+    ie_core = ie.IECore()
+    ie_core.set_config({"DYN_BATCH_ENABLED": "YES"}, device)
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    net.batch_size = 10
+    data = np.zeros(shape=net.inputs['data'].shape)
+    exec_net = ie_core.load_network(net, device)
+    data[0] = read_image()[0]
+    request = exec_net.requests[0]
+    request.set_batch(1)
+    request.infer({'data': data})
+    assert np.allclose(int(round(request.output_blobs['fc_out'].buffer[0][2])), 1), "Incorrect data for 1st batch"
+    del exec_net
+    del ie_core
+    del net
+
+
+def test_set_zero_batch_size(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    exec_net = ie_core.load_network(net, device, num_requests=1)
+    request = exec_net.requests[0]
+    with pytest.raises(ValueError) as e:
+        request.set_batch(0)
+    assert "Batch size should be positive integer number but 0 specified" in str(e.value)
+    del exec_net
+    del ie_core
+    del net
+
+
+def test_set_negative_batch_size(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    exec_net = ie_core.load_network(net, device, num_requests=1)
+    request = exec_net.requests[0]
+    with pytest.raises(ValueError) as e:
+        request.set_batch(-1)
+    assert "Batch size should be positive integer number but -1 specified" in str(e.value)
+    del exec_net
+    del ie_core
+    del net
+
+
+def test_blob_setter(device):
+    ie_core = ie.IECore()
+    net = ie_core.read_network(test_net_xml, test_net_bin)
+    exec_net_1 = ie_core.load_network(network=net, device_name=device, num_requests=1)
+
+    net.inputs['data'].layout = "NHWC"
+    exec_net_2 = ie_core.load_network(network=net, device_name=device, num_requests=1)
+
+    img = read_image()
+    res_1 = np.sort(exec_net_1.infer({"data": img})['fc_out'])
+
+    img = np.transpose(img, axes=(0, 2, 3, 1)).astype(np.float32)
+    tensor_desc = ie.IETensorDesc("FP32", [1, 3, 32, 32], "NHWC")
+    img_blob = ie.IEBlob(tensor_desc, img)
+    request = exec_net_2.requests[0]
+    request.set_blob('data', img_blob)
+    request.infer()
+    res_2 = np.sort(request.output_blobs['fc_out'].buffer)
+    assert np.allclose(res_1, res_2, atol=1e-2, rtol=1e-2)
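
The async tests above all reduce to one pattern: start the request, then block or poll through wait. A condensed sketch of that flow, restricted to calls exercised in this file:

    from openvino.inference_engine import ie_api as ie

    ie_core = ie.IECore()
    net = ie_core.read_network(test_net_xml, test_net_bin)
    exec_net = ie_core.load_network(net, "CPU", num_requests=1)
    request = exec_net.requests[0]

    request.async_infer({'data': read_image()})       # returns immediately
    status = request.wait(ie.WaitMode.RESULT_READY)   # block until inference finishes
    assert status == ie.StatusCode.OK
    res = request.output_blobs['fc_out'].buffer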
diff --git a/inference-engine/ie_bridges/python/tests/test_NGraph.py b/inference-engine/ie_bridges/python/tests/test_NGraph.py
new file mode 100644 (file)
index 0000000..490db12
--- /dev/null
@@ -0,0 +1,39 @@
+from openvino.inference_engine import IENetwork
+try:
+    from ngraph.impl.op import Parameter, Relu
+    from ngraph.impl import Function, Shape, Type
+    ngraph_available = True
+except ImportError:
+    ngraph_available = False
+
+import numpy as np
+import pytest
+
+if not ngraph_available:
+    pytest.skip("NGraph is not installed, skip", allow_module_level=True)
+
+@pytest.mark.skip(reason="nGraph python API has been removed in 2020.2 LTS release")
+def test_CreateIENetworkFromNGraph():
+    element_type = Type.f32
+    param = Parameter(element_type, Shape([1, 3, 22, 22]))
+    relu = Relu(param)
+    func = Function([relu], [param], 'test')
+    caps = Function.to_capsule(func)
+    cnnNetwork = IENetwork(caps)
+    assert cnnNetwork is not None
+    assert cnnNetwork.get_function() is not None
+    assert len(cnnNetwork.layers) == 2
+
+@pytest.mark.skip(reason="nGraph python API has been removed in 2020.2 LTS release")
+def test_GetIENetworkFromNGraph():
+    element_type = Type.f32
+    param = Parameter(element_type, Shape([1, 3, 22, 22]))
+    relu = Relu(param)
+    func = Function([relu], [param], 'test')
+    caps = Function.to_capsule(func)
+    cnnNetwork = IENetwork(caps)
+    assert cnnNetwork is not None
+    assert cnnNetwork.get_function() is not None
+    caps2 = cnnNetwork.get_function()
+    func2 = Function.from_capsule(caps2)
+    assert func2 is not None
diff --git a/inference-engine/include/builders/ie_argmax_layer.hpp b/inference-engine/include/builders/ie_argmax_layer.hpp
deleted file mode 100644 (file)
index a692bc1..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file ie_argmax_layer.hpp
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for ArgMax layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(ArgMaxLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit ArgMaxLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit ArgMaxLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit ArgMaxLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    ArgMaxLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns port with shapes for the layer
-     * @return Port with shapes
-     */
-    const Port& getPort() const;
-    /**
-     * @brief Sets port shapes for the layer
-     * @param port Port with shapes
-     * @return reference to layer builder
-     */
-    ArgMaxLayer& setPort(const Port& port);
-    /**
-     * @brief Returns axis
-     * @return Axis
-     */
-    int getAxis() const;
-    /**
-     * @brief Sets axis
-     * @param axis Axis
-     * @return reference to layer builder
-     */
-    ArgMaxLayer& setAxis(int axis);
-    /**
-     * @brief Returns top K
-     * @return Top K
-     */
-    size_t getTopK() const;
-    /**
-     * @brief Sets top K
-     * @param topK Top K
-     * @return reference to layer builder
-     */
-    ArgMaxLayer& setTopK(size_t topK);
-    /**
-     * @brief Returns output maximum value
-     * @return Output maximum value
-     */
-    size_t getOutMaxVal() const;
-    /**
-     * @brief Sets output maximum value
-     * @param size Maximum value
-     * @return reference to layer builder
-     */
-    ArgMaxLayer& setOutMaxVal(size_t size);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_batch_normalization_layer.hpp b/inference-engine/include/builders/ie_batch_normalization_layer.hpp
deleted file mode 100644 (file)
index 27f2e1e..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for BatchNormalization layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(BatchNormalizationLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit BatchNormalizationLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit BatchNormalizationLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit BatchNormalizationLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    BatchNormalizationLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns port with shapes for the layer
-     * @return Port with shapes
-     */
-    const Port& getPort() const;
-    /**
-     * @brief Sets port shapes for the layer
-     * @param port Port with shapes
-     * @return reference to layer builder
-     */
-    BatchNormalizationLayer& setPort(const Port& port);
-
-    /**
-     * @brief Returns epsilon
-     * @return Epsilon
-     */
-    float getEpsilon() const;
-    /**
-     * @brief Sets epsilon
-     * @param eps Epsilon
-     * @return reference to layer builder
-     */
-    BatchNormalizationLayer& setEpsilon(float eps);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_clamp_layer.hpp b/inference-engine/include/builders/ie_clamp_layer.hpp
deleted file mode 100644 (file)
index 471e2e1..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Clamp layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(ClampLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit ClampLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit ClampLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit ClampLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    ClampLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns port with shapes for the layer
-     * @return Port with shapes
-     */
-    const Port& getPort() const;
-    /**
-     * @brief Sets port shapes for the layer
-     * @param port Port with shapes
-     * @return reference to layer builder
-     */
-    ClampLayer& setPort(const Port& port);
-    /**
-     * @brief Returns minimum value
-     * @return minimum value
-     */
-    float getMinValue() const;
-    /**
-     * @brief Sets minimum value
-     * @param minValue Minimum value
-     * @return reference to layer builder
-     */
-    ClampLayer& setMinValue(float minValue);
-    /**
-     * @brief Returns maximum value
-     * @return Maximum value
-     */
-    float getMaxValue() const;
-    /**
-     * @brief Sets maximum value
-     * @param maxValue Maximum value
-     * @return reference to layer builder
-     */
-    ClampLayer& setMaxValue(float maxValue);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_concat_layer.hpp b/inference-engine/include/builders/ie_concat_layer.hpp
deleted file mode 100644 (file)
index 3fc9229..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Concat layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(ConcatLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit ConcatLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit ConcatLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit ConcatLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    ConcatLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns vector with input ports
-     * @return vector with ports
-     */
-    const std::vector<Port>& getInputPorts() const;
-    /**
-     * @brief Sets input ports
-     * @param ports Vector of input ports
-     * @return reference to layer builder
-     */
-    ConcatLayer& setInputPorts(const std::vector<Port>& ports);
-    /**
-     * @brief Returns output port
-     * @return Output port
-     */
-    const Port& getOutputPort() const;
-    /**
-     * @brief Sets output port
-     * @param port Output port
-     * @return reference to layer builder
-     */
-    ConcatLayer& setOutputPort(const Port& port);
-    /**
-     * @brief Returns axis
-     * @return Axis
-     */
-    size_t getAxis() const;
-    /**
-     * @brief Sets axis
-     * @param axis Axis
-     * @return reference to layer builder
-     */
-    ConcatLayer& setAxis(size_t axis);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_const_layer.hpp b/inference-engine/include/builders/ie_const_layer.hpp
deleted file mode 100644 (file)
index 9b31bc8..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Const layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(ConstLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit ConstLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit ConstLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit ConstLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    ConstLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns port with shapes for the layer
-     * @return Port with shapes
-     */
-    const Port& getPort() const;
-    /**
-     * @brief Sets port shapes for the layer
-     * @param port Port with shapes
-     * @return reference to layer builder
-     */
-    ConstLayer& setPort(const Port& port);
-
-    /**
-     * @brief Sets constant data
-     * @param data constant blob with data
-     * @return reference to layer builder
-     */
-    ConstLayer& setData(const Blob::CPtr& data);
-
-    /**
-     * @brief Returns constant data
-     * @return constant blob with data
-     */
-    const Blob::CPtr& getData() const;
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_convolution_layer.hpp b/inference-engine/include/builders/ie_convolution_layer.hpp
deleted file mode 100644 (file)
index 04b6512..0000000
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Convolution layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(ConvolutionLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit ConvolutionLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit ConvolutionLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit ConvolutionLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    ConvolutionLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns input port
-     * @return Input port
-     */
-    const Port& getInputPort() const;
-    /**
-     * @brief Sets input port
-     * @param port Input port
-     * @return reference to layer builder
-     */
-    ConvolutionLayer& setInputPort(const Port& port);
-    /**
-     * @brief Returns output port
-     * @return Output port
-     */
-    const Port& getOutputPort() const;
-    /**
-     * @brief Sets output port
-     * @param port Output port
-     * @return reference to layer builder
-     */
-    ConvolutionLayer& setOutputPort(const Port& port);
-    /**
-     * @brief Returns kernel size
-     * @return Kernel size
-     */
-    const std::vector<size_t> getKernel() const;
-    /**
-     * @brief Sets kernel size
-     * @param kernel Kernel size
-     * @return reference to layer builder
-     */
-    ConvolutionLayer& setKernel(const std::vector<size_t>& kernel);
-    /**
-     * @brief Returns vector of strides
-     * @return vector of strides
-     */
-    const std::vector<size_t> getStrides() const;
-    /**
-     * @brief Sets strides
-     * @param strides vector of strides
-     * @return reference to layer builder
-     */
-    ConvolutionLayer& setStrides(const std::vector<size_t>& strides);
-    /**
-     * @brief Returns dilations
-     * @return vector of dilations
-     */
-    const std::vector<size_t> getDilation() const;
-    /**
-     * @brief Sets dilations
-     * @param dilation Vector of dilations
-     * @return reference to layer builder
-     */
-    ConvolutionLayer& setDilation(const std::vector<size_t>& dilation);
-    /**
-     * @brief Returns begin paddings
-     * @return vector of paddings
-     */
-    const std::vector<size_t> getPaddingsBegin() const;
-    /**
-     * @brief Sets begin paddings
-     * @param paddings Vector of paddings
-     * @return reference to layer builder
-     */
-    ConvolutionLayer& setPaddingsBegin(const std::vector<size_t>& paddings);
-    /**
-     * @brief Return end paddings
-     * @return Vector of paddings
-     */
-    const std::vector<size_t> getPaddingsEnd() const;
-    /**
-     * @brief Sets end paddings
-     * @param paddings Vector of paddings
-     * @return reference to layer builder
-     */
-    ConvolutionLayer& setPaddingsEnd(const std::vector<size_t>& paddings);
-    /**
-     * @brief Returns group
-     * @return Group
-     */
-    size_t getGroup() const;
-    /**
-     * @brief Sets group
-     * @param group Group
-     * @return reference to layer builder
-     */
-    ConvolutionLayer& setGroup(size_t group);
-    /**
-     * @brief Return output depth
-     * @return Output depth
-     */
-    size_t getOutDepth() const;
-    /**
-     * @brief Sets output depth
-     * @param outDepth Output depth
-     * @return reference to layer builder
-     */
-    ConvolutionLayer& setOutDepth(size_t outDepth);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_crop_layer.hpp b/inference-engine/include/builders/ie_crop_layer.hpp
deleted file mode 100644 (file)
index ff75c15..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Crop layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(CropLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit CropLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit CropLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit CropLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    CropLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns input ports
-     * @return Vector of input ports
-     */
-    const std::vector<Port>& getInputPorts() const;
-    /**
-     * @brief Sets input ports
-     * @param ports Vector of input ports
-     * @return reference to layer builder
-     */
-    CropLayer& setInputPorts(const std::vector<Port>& ports);
-    /**
-     * @brief Return output port
-     * @return Output port
-     */
-    const Port& getOutputPort() const;
-    /**
-     * @brief Sets output port
-     * @param port Output port
-     * @return reference to layer builder
-     */
-    CropLayer& setOutputPort(const Port& port);
-    /**
-     * @brief Returns axis
-     * @return Vector of axis
-     */
-    const std::vector<size_t> getAxis() const;
-    /**
-     * @brief Sets axis
-     * @param axis Vector of axis
-     * @return reference to layer builder
-     */
-    CropLayer& setAxis(const std::vector<size_t>& axis);
-    /**
-     * @brief Returns offsets
-     * @return Vector of offsets
-     */
-    const std::vector<size_t> getOffset() const;
-    /**
-     * @brief Sets offsets
-     * @param offsets Vector of offsets
-     * @return reference to layer builder
-     */
-    CropLayer& setOffset(const std::vector<size_t>& offsets);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_ctc_greedy_decoder_layer.hpp b/inference-engine/include/builders/ie_ctc_greedy_decoder_layer.hpp
deleted file mode 100644 (file)
index 92c3f43..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for CTCGreedyDecoder layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(CTCGreedyDecoderLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit CTCGreedyDecoderLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit CTCGreedyDecoderLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit CTCGreedyDecoderLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    CTCGreedyDecoderLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns input ports
-     * @return Vector of input ports
-     */
-    const std::vector<Port>& getInputPorts() const;
-    /**
-     * @brief Sets input ports
-     * @param ports Vector of input ports
-     * @return reference to layer builder
-     */
-    CTCGreedyDecoderLayer& setInputPorts(const std::vector<Port>& ports);
-    /**
-     * @brief Returns output port
-     * @return Output port
-     */
-    const Port& getOutputPort() const;
-    /**
-     * @brief Sets output port
-     * @param port Output port
-     * @return reference to layer builder
-     */
-    CTCGreedyDecoderLayer& setOutputPort(const Port& port);
-    /**
-     * @brief Returns CTCMergeRepeated
-     * @return true if merge repeated
-     */
-    bool getCTCMergeRepeated() const;
-    /**
-     * @brief Sets CTCMergeRepeated
-     * @param flag bool value
-     * @return reference to layer builder
-     */
-    CTCGreedyDecoderLayer& setCTCMergeRepeated(bool flag);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_deconvolution_layer.hpp b/inference-engine/include/builders/ie_deconvolution_layer.hpp
deleted file mode 100644 (file)
index 6a1b387..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_convolution_layer.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Deconvolution layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(DeconvolutionLayer): public ConvolutionLayer {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit DeconvolutionLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit DeconvolutionLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit DeconvolutionLayer(const Layer::CPtr& layer);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_deformable_convolution_layer.hpp b/inference-engine/include/builders/ie_deformable_convolution_layer.hpp
deleted file mode 100644 (file)
index e60e321..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_convolution_layer.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Deconvolution layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(DeformableConvolutionLayer): public ConvolutionLayer {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit DeformableConvolutionLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit DeformableConvolutionLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit DeformableConvolutionLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Return deformable_group size
-     * @return Deformable group size
-     */
-    size_t getDeformableGroup() const;
-    /**
-     * @brief Sets deformable group size
-     * @param deformableGroup Deformable group
-     * @return reference to layer builder
-     */
-    Builder::DeformableConvolutionLayer& setDeformableGroup(size_t deformableGroup);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_detection_output_layer.hpp b/inference-engine/include/builders/ie_detection_output_layer.hpp
deleted file mode 100644 (file)
index 404b625..0000000
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Detection Output layer
- */
-
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(DetectionOutputLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit DetectionOutputLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit DetectionOutputLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit DetectionOutputLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    DetectionOutputLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns output port
-     * @return Output port
-     */
-    const Port& getOutputPort() const;
-    /**
-     * @brief Sets output port
-     * @param port Output port
-     * @return reference to layer builder
-     */
-    DetectionOutputLayer& setOutputPort(const Port& port);
-    /**
-     * @brief Returns input ports
-     * @return Vector of input ports
-     */
-    const std::vector<Port>& getInputPorts() const;
-    /**
-     * @brief Sets input ports
-     * @param ports Vector of input ports
-     * @return reference to layer builder
-     */
-    DetectionOutputLayer& setInputPorts(const std::vector<Port>& ports);
-    /**
-     * @brief Returns number of classes
-     * @return Number of classes
-     */
-    size_t getNumClasses() const;
-    /**
-     * @brief Sets number of classes to be predict
-     * @param num Number of classes
-     * @return reference to layer builder
-     */
-    DetectionOutputLayer& setNumClasses(size_t num);
-    /**
-     * @brief Returns background label ID
-     * @return Background ID
-     */
-    int getBackgroudLabelId() const;
-    /**
-     * @brief Sets background label ID
-     * @param labelId Background ID if there is no background class, set it to -1.
-     * @return reference to layer builder
-     */
-    DetectionOutputLayer& setBackgroudLabelId(int labelId);
-    /**
-     * @brief Returns maximum number of results to be kept on NMS stage
-     * @return Top K
-     */
-    int getTopK() const;
-    /**
-     * @brief Sets maximum number of results to be kept on NMS stage
-     * @param topK Top K
-     * @return reference to layer builder
-     */
-    DetectionOutputLayer& setTopK(int topK);
-    /**
-     * @brief Returns number of total boxes to be kept per image after NMS step
-     * @return Keep top K
-     */
-    int getKeepTopK() const;
-    /**
-     * @brief Sets number of total boxes to be kept per image after NMS step
-     * @param topK Keep top K
-     * @return reference to layer builder
-     */
-    DetectionOutputLayer& setKeepTopK(int topK);
-    /**
-     * @brief Returns number of oriented classes
-     * @return Number of oriented classes
-     */
-    int getNumOrientClasses() const;
-    /**
-     * @brief Sets number of oriented classes
-     * @param numClasses Number of classes
-     * @return reference to layer builder
-     */
-    DetectionOutputLayer& setNumOrientClasses(int numClasses);
-    /**
-     * @brief Returns type of coding method for bounding boxes
-     * @return String with code type
-     */
-    std::string getCodeType() const;
-    /**
-     * @brief Sets type of coding method for bounding boxes
-     * @param type Type
-     * @return reference to layer builder
-     */
-    DetectionOutputLayer& setCodeType(std::string type);
-    /**
-     * @brief Returns interpolate orientation
-     * @return Interpolate orientation
-     */
-    int getInterpolateOrientation() const;
-    /**
-     * @brief Sets interpolate orientation
-     * @param orient Orientation
-     * @return reference to layer builder
-     */
-    DetectionOutputLayer& setInterpolateOrientation(int orient);
-    /**
-     * @brief Returns threshold to be used in NMS stage
-     * @return Threshold
-     */
-    float getNMSThreshold() const;
-    /**
-     * @brief Sets threshold to be used in NMS stage
-     * @param threshold NMS threshold
-     * @return reference to layer builder
-     */
-    DetectionOutputLayer& setNMSThreshold(float threshold);
-    /**
-     * @brief Returns confidence threshold
-     * @return Threshold
-     */
-    float getConfidenceThreshold() const;
-    /**
-     * @brief Sets confidence threshold
-     * @param threshold Threshold
-     * @return reference to layer builder
-     */
-    DetectionOutputLayer& setConfidenceThreshold(float threshold);
-    /**
-     * @brief Returns share location
-     * @return true if bounding boxes are shared among different classes
-     */
-    bool getShareLocation() const;
-    /**
-     * @brief Sets share location
-     * @param flag true if bounding boxes are shared among different classes
-     * @return reference to layer builder
-     */
-    DetectionOutputLayer& setShareLocation(bool flag);
-    /**
-     * @brief Returns encoded settings
-     * @return true if variance is encoded in target
-     */
-    bool getVariantEncodedInTarget() const;
-    /**
-     * @brief Sets encoded settings
-     * @param flag true if variance is encoded in target
-     * @return reference to layer builder
-     */
-    DetectionOutputLayer& setVariantEncodedInTarget(bool flag);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_eltwise_layer.hpp b/inference-engine/include/builders/ie_eltwise_layer.hpp
deleted file mode 100644 (file)
index 9ee7fd1..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Eltwise layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(EltwiseLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The enum defines all Eltwise types
-     */
-    enum EltwiseType { SUM = 1, MAX, MUL, SUB, DIV, MIN, SQUARED_DIFF };
-
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit EltwiseLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit EltwiseLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit EltwiseLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    EltwiseLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns input ports
-     * @return Vector of input ports
-     */
-    const std::vector<Port>& getInputPorts() const;
-    /**
-     * @brief Sets input ports
-     * @param ports Vector of input ports
-     * @return reference to layer builder
-     */
-    EltwiseLayer& setInputPorts(const std::vector<Port>& ports);
-    /**
-     * @brief Returns output port
-     * @return Output port
-     */
-    const Port& getOutputPort() const;
-    /**
-     * @brief Sets output port
-     * @param port Output port
-     * @return reference to layer builder
-     */
-    EltwiseLayer& setOutputPort(const Port& port);
-    /**
-     * @brief Returns eltwise type
-     * @return Eltwise type
-     */
-    EltwiseType getEltwiseType() const;
-    /**
-     * @brief Sets eltwise type
-     * @param type Eltwise type
-     * @return reference to layer builder
-     */
-    EltwiseLayer& setEltwiseType(EltwiseType type);
-    /**
-     * @brief Returns eltwise scales
-     * @return Vector of scales
-     */
-    const std::vector<float> getScales() const;
-    /**
-     * @brief Sets eltwise scales
-     * @param scales Vector of scales
-     * @return reference to layer builder
-     */
-    EltwiseLayer& setScales(const std::vector<float>& scales);
-
-private:
-    EltwiseType type = SUM;
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
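For reference, a minimal usage sketch of this removed builder (shapes are hypothetical; it assumes a pre-removal release that still ships this header, and that Port is constructible from a shape vector as declared in ie_network.hpp):

    #include <builders/ie_eltwise_layer.hpp>
    #include <ie_network.hpp>

    using namespace InferenceEngine;

    void buildEltwiseSum() {
        IE_SUPPRESS_DEPRECATED_START
        Builder::EltwiseLayer sum("sum1");
        // Two inputs of identical shape are combined element-wise;
        // each setter returns the builder, so calls can be chained.
        sum.setEltwiseType(Builder::EltwiseLayer::SUM)
           .setInputPorts({Port({1, 3, 224, 224}), Port({1, 3, 224, 224})})
           .setOutputPort(Port({1, 3, 224, 224}));
        IE_SUPPRESS_DEPRECATED_END
    }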
diff --git a/inference-engine/include/builders/ie_elu_layer.hpp b/inference-engine/include/builders/ie_elu_layer.hpp
deleted file mode 100644 (file)
index 84feec8..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for ELU layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(ELULayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit ELULayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit ELULayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit ELULayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    ELULayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns port with shapes for the layer
-     * @return Port with shapes
-     */
-    const Port& getPort() const;
-    /**
-     * @brief Sets port shapes for the layer
-     * @param port Port with shapes
-     * @return reference to layer builder
-     */
-    ELULayer& setPort(const Port& port);
-
-    /**
-     * @brief Returns alpha
-     * @return alpha
-     */
-    float getAlpha() const;
-    /**
-     * @brief Sets alpha
-     * @param alpha Alpha
-     * @return reference to layer builder
-     */
-    ELULayer& setAlpha(float alpha);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_fully_connected_layer.hpp b/inference-engine/include/builders/ie_fully_connected_layer.hpp
deleted file mode 100644 (file)
index 982845f..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for FullyConnected layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(FullyConnectedLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit FullyConnectedLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit FullyConnectedLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit FullyConnectedLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    FullyConnectedLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns input port
-     * @return Input port
-     */
-    const Port& getInputPort() const;
-    /**
-     * @brief Sets input port
-     * @param port Input port
-     * @return reference to layer builder
-     */
-    FullyConnectedLayer& setInputPort(const Port& port);
-    /**
-     * @brief Returns output port
-     * @return Output port
-     */
-    const Port& getOutputPort() const;
-    /**
-     * @brief Sets output port
-     * @param port Output port
-     * @return reference to layer builder
-     */
-    FullyConnectedLayer& setOutputPort(const Port& port);
-    /**
-     * @brief Returns output size
-     * @return Output size
-     */
-    size_t getOutputNum() const;
-    /**
-     * @brief Sets output size
-     * @param outNum Output size
-     * @return reference to layer builder
-     */
-    FullyConnectedLayer& setOutputNum(size_t outNum);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
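The same fluent pattern applied to the FullyConnected builder; a sketch under the same assumptions (shipped header, Port constructible from a shape vector, illustrative shapes):

    #include <builders/ie_fully_connected_layer.hpp>
    #include <ie_network.hpp>

    using namespace InferenceEngine;

    void buildFullyConnected() {
        IE_SUPPRESS_DEPRECATED_START
        Builder::FullyConnectedLayer fc("fc1");
        fc.setInputPort(Port({1, 512}))   // flattened input features
          .setOutputNum(10)               // 10 output neurons
          .setOutputPort(Port({1, 10}));
        IE_SUPPRESS_DEPRECATED_END
    }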
diff --git a/inference-engine/include/builders/ie_grn_layer.hpp b/inference-engine/include/builders/ie_grn_layer.hpp
deleted file mode 100644 (file)
index 63a56fb..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for GRN layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(GRNLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit GRNLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit GRNLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit GRNLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    GRNLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns port with shapes for the layer
-     * @return Port with shapes
-     */
-    const Port& getPort() const;
-    /**
-     * @brief Sets port shapes for the layer
-     * @param port Port with shapes
-     * @return reference to layer builder
-     */
-    GRNLayer& setPort(const Port& port);
-    /**
-     * @brief Returns beta
-     * @return Beta
-     */
-    float getBeta() const;
-    /**
-     * @brief Sets beta
-     * @param beta Beta
-     * @return reference to layer builder
-     */
-    GRNLayer& setBeta(float beta);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_gru_sequence_layer.hpp b/inference-engine/include/builders/ie_gru_sequence_layer.hpp
deleted file mode 100644 (file)
index a2d435b..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for GRUSequence layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(GRUSequenceLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit GRUSequenceLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit GRUSequenceLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit GRUSequenceLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    GRUSequenceLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns input ports with shapes for the layer
-     * @return Vector of ports
-     */
-    const std::vector<Port>& getInputPorts() const;
-    /**
-     * @brief Sets input ports for the layer
-     * @param ports vector of input ports
-     * @return reference to layer builder
-     */
-    GRUSequenceLayer& setInputPorts(const std::vector<Port>& ports);
-
-    /**
-     * @brief Returns output ports with shapes for the layer
-     * @return Vector of ports
-     */
-    const std::vector<Port>& getOutputPorts() const;
-    /**
-     * @brief Sets output ports for the layer
-     * @param ports vector of output ports
-     * @return reference to layer builder
-     */
-    GRUSequenceLayer& setOutputPorts(const std::vector<Port>& ports);
-
-    int getHiddenSize() const;
-    GRUSequenceLayer& setHiddenSize(int size);
-    bool getSequenceDim() const;
-    GRUSequenceLayer& setSqquenceDim(bool flag);
-    const std::vector<std::string>& getActivations() const;
-    GRUSequenceLayer& setActivations(const std::vector<std::string>& activations);
-    const std::vector<float>& getActivationsAlpha() const;
-    GRUSequenceLayer& setActivationsAlpha(const std::vector<float>& activations);
-    const std::vector<float>& getActivationsBeta() const;
-    GRUSequenceLayer& setActivationsBeta(const std::vector<float>& activations);
-    float getClip() const;
-    GRUSequenceLayer& setClip(float clip);
-    bool getLinearBeforeReset() const;
-    GRUSequenceLayer& setLinearBeforeReset(bool flag);
-    const std::string& getDirection() const;
-    GRUSequenceLayer& setDirection(const std::string& direction);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_input_layer.hpp b/inference-engine/include/builders/ie_input_layer.hpp
deleted file mode 100644 (file)
index 8417e02..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Input layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(InputLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit InputLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit InputLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit InputLayer(const Layer::CPtr& layer);
-
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    InputLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns port with shapes for the layer
-     * @return Port with shapes
-     */
-    const Port& getPort() const;
-    /**
-     * @brief Sets port shapes for the layer
-     * @param port Port with shapes
-     * @return reference to layer builder
-     */
-    InputLayer& setPort(const Port& port);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_layer_builder.hpp b/inference-engine/include/builders/ie_layer_builder.hpp
deleted file mode 100644 (file)
index 4b9885b..0000000
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <ie_blob.h>
-
-#include <details/caseless.hpp>
-#include <ie_network.hpp>
-#include <ie_parameter.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-class Layer;
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief This structure implements a holder for validators
- */
-struct ValidatorsHolder {
-    /**
-     * @brief Caseless map connects type with validator
-     */
-    details::caseless_map<std::string, std::function<void(const std::shared_ptr<const Layer>&, bool)>> validators;
-};
-
-IE_SUPPRESS_DEPRECATED_START
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief This class implements a builder for IE Layer
- */
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(Layer): public ILayer, public std::enable_shared_from_this<Layer> {
-public:
-    /**
-     * @brief A shared pointer to the Layer builder
-     */
-    using Ptr = std::shared_ptr<Layer>;
-    /**
-     * @brief A shared pointer to the constant Layer builder
-     */
-    using CPtr = std::shared_ptr<const Layer>;
-
-    /**
-     * @brief The constructor creates a Layer builder with layer type and layer name
-     * @param type Layer type
-     * @param name Layer name
-     */
-    explicit Layer(const std::string& type, const std::string& name = "");
-
-    /**
-     * @brief The constructor creates a Layer builder from shared pointer to constant ILayer
-     * @param layer shared pointer to constant ILayer
-     */
-    explicit Layer(const ILayer::CPtr& layer);
-
-    /**
-     * @brief The constructor creates a Layer builder with layer ID and layer builder
-     * @param id Layer ID
-     * @param layer layer builder
-     */
-    Layer(idx_t id, const Layer& layer);
-
-    /**
-     * @brief Compares the given Layer builder with the current one
-     * @param rhs Layer builder to compare with
-     * @return true if the given Layer builder is equal to the current one, false otherwise
-     */
-    bool operator==(const Layer& rhs) const {
-        return params == rhs.params;
-    }
-
-    /**
-     * @brief Returns layer ID
-     * @return Layer ID
-     */
-    idx_t getId() const noexcept override;
-
-    /**
-     * @brief Returns a constant reference to layer name
-     * @return Layer name
-     */
-    const std::string& getName() const noexcept override;
-
-    /**
-     * @brief Sets layer name
-     * @param name Layer name
-     * @return Reference to Layer builder
-     */
-    Layer& setName(const std::string& name);
-
-    /**
-     * @brief Returns a constant reference to layer type
-     * @return Layer type
-     */
-    const std::string& getType() const noexcept override;
-
-    /**
-     * @brief Sets layer type
-     * @param type Layer type
-     * @return Reference to Layer builder
-     */
-    Layer& setType(const std::string& type);
-
-    /**
-     * @brief Returns map of parameters
-     * @return map of parameters
-     */
-    const std::map<std::string, Parameter>& getParameters() const noexcept override;
-    /**
-     * @brief Returns map of parameters
-     * @return map of parameters
-     */
-    std::map<std::string, Parameter>& getParameters();
-
-    /**
-     * @brief Sets parameters for layer
-     * @param params constant map of parameters
-     * @return Reference to Layer builder
-     */
-    Layer& setParameters(const std::map<std::string, Parameter>& params);
-
-    /**
-     * @brief Returns vector of input ports
-     * @return Vector of input ports
-     */
-    const std::vector<Port>& getInputPorts() const noexcept override;
-
-    /**
-     * @brief Returns vector of input ports
-     * @return Vector of input ports
-     */
-    std::vector<Port>& getInputPorts();
-
-    /**
-     * @brief Sets input ports
-     * @param ports vector of ports
-     * @return Reference to Layer builder
-     */
-    Layer& setInputPorts(const std::vector<Port>& ports);
-
-    /**
-     * @brief Returns vector of output ports
-     * @return Vector of output ports
-     */
-
-    const std::vector<Port>& getOutputPorts() const noexcept override;
-    /**
-     * @brief Returns vector of output ports
-     * @return Vector of output ports
-     */
-    std::vector<Port>& getOutputPorts();
-
-    /**
-     * @brief Sets output ports
-     * @param ports vector of ports
-     * @return Reference to Layer builder
-     */
-    Layer& setOutputPorts(const std::vector<Port>& ports);
-
-    /**
-     * @brief Validates the current builder and generates ILayer object
-     * @return constant shared pointer to ILayer
-     */
-    const ILayer::CPtr build() const;
-
-    /**
-     * @brief Validates layer builder
-     */
-    void validate(bool partial = false) const;
-
-    /**
-     * @brief Registers a new validator for type
-     * @param type Layer type
-     * @param validator Layer validator
-     */
-    static void addValidator(const std::string& type, const std::function<void(const Layer::CPtr&, bool)>& validator);
-
-private:
-    idx_t id;
-    std::string type;
-    std::string name;
-    std::vector<Port> inPorts;
-    std::vector<Port> outPorts;
-    std::map<std::string, Parameter> params;
-    static std::shared_ptr<ValidatorsHolder> getValidatorsHolder();
-};
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief This class registers layer validators
- */
-class ValidatorRegisterBase {
-public:
-    /**
-     * @brief The constructor registers new layer validator
-     * @param type Layer type
-     * @param validator Layer validator
-     */
-    explicit ValidatorRegisterBase(const std::string& type,
-                                   const std::function<void(const Layer::CPtr&, bool)>& validator) {
-        InferenceEngine::Builder::Layer::addValidator(type, validator);
-    }
-};
-
-IE_SUPPRESS_DEPRECATED_END
-
-#define REG_VALIDATOR_FOR(__type, __validator) \
-    static InferenceEngine::Builder::ValidatorRegisterBase _reg_##__type(#__type, __validator)
-
-}  // namespace Builder
-}  // namespace InferenceEngine
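A sketch of how REG_VALIDATOR_FOR was meant to attach a per-type check that Layer::validate() then dispatches to. The type name MyCustom is hypothetical, and THROW_IE_EXCEPTION from ie_common.h is assumed to be available in the same release:

    #include <builders/ie_layer_builder.hpp>
    #include <ie_common.h>

    using namespace InferenceEngine;

    IE_SUPPRESS_DEPRECATED_START
    // The validator runs inside Layer::validate(); partial == true
    // means an incomplete layer (e.g. ports not connected yet) may pass.
    REG_VALIDATOR_FOR(MyCustom, [](const Builder::Layer::CPtr& layer, bool partial) {
        if (!partial && layer->getInputPorts().empty())
            THROW_IE_EXCEPTION << "MyCustom layer requires at least one input port";
    });
    IE_SUPPRESS_DEPRECATED_END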
diff --git a/inference-engine/include/builders/ie_layer_decorator.hpp b/inference-engine/include/builders/ie_layer_decorator.hpp
deleted file mode 100644 (file)
index c089b76..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_builder.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-
-/**
- * @brief Neural network builder API
- */
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief This class defines the basic functionality for layer builders
- */
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(LayerDecorator) {
-public:
-    /**
-     * @brief The constructor creates layer builders with layer type and layer name
-     * @param type Layer type
-     * @param name Layer name
-     */
-    LayerDecorator(const std::string& type, const std::string& name);
-    /**
-     * @brief The constructor creates layer builders from reference to generic layer builder
-     * @param layer pointer to generic layer builder
-     */
-
-    IE_SUPPRESS_DEPRECATED_START
-
-    explicit LayerDecorator(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates layer builders from reference to generic layer builder
-     * @param layer constant pointer to generic layer builder
-     */
-    explicit LayerDecorator(const Layer::CPtr& layer);
-
-    /**
-     * @brief The copy constructor
-     * @param rval Source builder
-     */
-    LayerDecorator(const LayerDecorator& rval);
-
-    /**
-     * @brief Copy operator for LayerDecorator
-     * @param rval Source builder to copy from
-     * @return Layer builder
-     */
-    LayerDecorator& operator=(const LayerDecorator& rval);
-
-    /**
-     * @brief Virtual destructor
-     */
-    virtual ~LayerDecorator() = default;
-
-    /**
-     * @brief The operator creates generic builder
-     * @return Generic builder
-     */
-    virtual operator Layer() const;
-
-    /**
-     * @brief The operator creates generic builder
-     * @return Pointer to generic builder
-     */
-    virtual operator Layer::Ptr();
-
-    /**
-     * @brief The operator creates generic builder
-     * @return Constant pointer to generic builder
-     */
-    virtual operator Layer::CPtr() const;
-
-    IE_SUPPRESS_DEPRECATED_END
-
-    /**
-     * @brief Returns layer type
-     * @return Layer type
-     */
-    const std::string& getType() const;
-    /**
-     * @brief Returns layer name
-     * @return Layer name
-     */
-    const std::string& getName() const;
-
-protected:
-    IE_SUPPRESS_DEPRECATED_START
-
-    Layer::Ptr& getLayer();
-    const Layer::CPtr getLayer() const;
-    void checkType(const std::string& type) const;
-
-    Layer::CPtr cLayer;
-
-private:
-    Layer::Ptr layer;
-
-    IE_SUPPRESS_DEPRECATED_END
-};
-
-}  // namespace Builder
-
-}  // namespace InferenceEngine
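Because every typed builder derives from LayerDecorator, it converts to the generic Layer through the conversion operators above. A sketch, reusing the ELU builder from earlier in this diff (the reported type string is an assumption):

    #include <builders/ie_elu_layer.hpp>

    using namespace InferenceEngine;

    void decoratorConversion() {
        IE_SUPPRESS_DEPRECATED_START
        Builder::ELULayer elu("act1");
        elu.setAlpha(1.0f);
        Builder::Layer generic = elu;                 // operator Layer() const
        // The decorator forwards type/name to the wrapped generic builder.
        const std::string& type = generic.getType();  // presumably "ELU"
        (void)type;
        IE_SUPPRESS_DEPRECATED_END
    }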
diff --git a/inference-engine/include/builders/ie_lrn_layer.hpp b/inference-engine/include/builders/ie_lrn_layer.hpp
deleted file mode 100644 (file)
index cab2cae..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for LRN layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(LRNLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit LRNLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit LRNLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit LRNLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    LRNLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns port with shapes for the layer
-     * @return Port with shapes
-     */
-    const Port& getPort() const;
-    /**
-     * @brief Sets port shapes for the layer
-     * @param port Port with shapes
-     * @return reference to layer builder
-     */
-    LRNLayer& setPort(const Port& port);
-    /**
-     * @brief Returns side length of the region
-     * @return Size
-     */
-    size_t getSize() const;
-    /**
-     * @brief Sets side length of the region
-     * @param size Size
-     * @return reference to layer builder
-     */
-    LRNLayer& setSize(size_t size);
-    /**
-     * @brief Returns scaling parameter for the normalizing sum
-     * @return Scaling parameter
-     */
-    float getAlpha() const;
-    /**
-     * @brief Sets scaling parameter for the normalizing sum
-     * @param alpha Scaling parameter
-     * @return reference to layer builder
-     */
-    LRNLayer& setAlpha(float alpha);
-    /**
-     * @brief Returns exponent for the normalizing sum
-     * @return Exponent
-     */
-    float getBeta() const;
-    /**
-     * @brief Sets exponent for the normalizing sum
-     * @param beta Exponent
-     * @return reference to layer builder
-     */
-    LRNLayer& setBeta(float beta);
-    /**
-     * @brief Returns bias for the normalizing sum
-     * @return Bias
-     */
-    float getBias() const;
-    /**
-     * @brief Sets bias for the normalizing sum
-     * @param bias Bias
-     * @return reference to layer builder
-     */
-    LRNLayer& setBias(float bias);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_lstm_sequence_layer.hpp b/inference-engine/include/builders/ie_lstm_sequence_layer.hpp
deleted file mode 100644 (file)
index 457c56e..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for LSTMSequence layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(LSTMSequenceLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit LSTMSequenceLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit LSTMSequenceLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit LSTMSequenceLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    LSTMSequenceLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns input ports with shapes for the layer
-     * @return Vector of ports
-     */
-    const std::vector<Port>& getInputPorts() const;
-    /**
-     * @brief Sets input ports for the layer
-     * @param ports vector of input ports
-     * @return reference to layer builder
-     */
-    LSTMSequenceLayer& setInputPorts(const std::vector<Port>& ports);
-
-    /**
-     * @brief Returns output ports with shapes for the layer
-     * @return Vector of ports
-     */
-    const std::vector<Port>& getOutputPorts() const;
-    /**
-     * @brief Sets output ports for the layer
-     * @param ports vector of output ports
-     * @return reference to layer builder
-     */
-    LSTMSequenceLayer& setOutputPorts(const std::vector<Port>& ports);
-
-    int getHiddenSize() const;
-    LSTMSequenceLayer& setHiddenSize(int size);
-    bool getSequenceDim() const;
-    LSTMSequenceLayer& setSqquenceDim(bool flag);
-    const std::vector<std::string>& getActivations() const;
-    LSTMSequenceLayer& setActivations(const std::vector<std::string>& activations);
-    const std::vector<float>& getActivationsAlpha() const;
-    LSTMSequenceLayer& setActivationsAlpha(const std::vector<float>& activations);
-    const std::vector<float>& getActivationsBeta() const;
-    LSTMSequenceLayer& setActivationsBeta(const std::vector<float>& activations);
-    float getClip() const;
-    LSTMSequenceLayer& setClip(float clip);
-    bool getInputForget() const;
-    LSTMSequenceLayer& setInputForget(bool flag);
-    const std::string& getDirection() const;
-    LSTMSequenceLayer& setDirection(const std::string& direction);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
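The sequence builders expose their attributes through plain getter/setter pairs without per-method documentation. A configuration sketch; the activation names, clip convention, and direction string are assumptions, not confirmed by this header:

    #include <builders/ie_lstm_sequence_layer.hpp>

    using namespace InferenceEngine;

    void configureLstm() {
        IE_SUPPRESS_DEPRECATED_START
        Builder::LSTMSequenceLayer lstm("lstm1");
        lstm.setHiddenSize(256);
        lstm.setActivations({"sigmoid", "tanh", "tanh"});  // assumed gate activations
        lstm.setClip(0.0f);                                // assumed: 0 disables clipping
        lstm.setDirection("forward");                      // assumed direction string
        IE_SUPPRESS_DEPRECATED_END
    }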
diff --git a/inference-engine/include/builders/ie_memory_layer.hpp b/inference-engine/include/builders/ie_memory_layer.hpp
deleted file mode 100644 (file)
index 03e4524..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Memory layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(MemoryLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit MemoryLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit MemoryLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit MemoryLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    MemoryLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns output port
-     * @return Output port
-     */
-    const Port& getOutputPort() const;
-    /**
-     * @brief Sets output port
-     * @param port Output port
-     * @return reference to layer builder
-     */
-    MemoryLayer& setOutputPort(const Port& port);
-    /**
-     * @brief Returns input port
-     * @return Input port
-     */
-    const Port& getInputPort() const;
-    /**
-     * @brief Sets input port
-     * @param port Input port
-     * @return reference to layer builder
-     */
-    MemoryLayer& setInputPort(const Port& port);
-    /**
-     * @brief Returns memory ID
-     * @return String with memory ID
-     */
-    const std::string getId() const;
-    /**
-     * @brief Sets memory ID
-     * @param id Memory ID
-     * @return reference to layer builder
-     */
-    MemoryLayer& setId(const std::string& id);
-    /**
-     * @brief Returns the index of memory layer
-     * @return Index
-     */
-    size_t getIndex() const;
-    /**
-     * @brief Sets the index of memory layer
-     * @param index Index; a value of 0 means this layer is the output one.
-     * @return reference to layer builder
-     */
-    MemoryLayer& setIndex(size_t index);
-    /**
-     * @brief Returns size of the group
-     * @return Size of the group
-     */
-    size_t getSize() const;
-    /**
-     * @brief Sets size of the group
-     * @param size Size of the group; only a size of 2 (a pair) is supported.
-     * @return reference to layer builder
-     */
-    MemoryLayer& setSize(size_t size);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
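Per the index semantics documented above (index 0 marks the output half of the pair), a memory cell is expressed as two builders sharing one ID. A sketch with an illustrative state shape, under the same Port assumption as before:

    #include <builders/ie_memory_layer.hpp>
    #include <ie_network.hpp>

    using namespace InferenceEngine;

    void buildMemoryPair() {
        IE_SUPPRESS_DEPRECATED_START
        Builder::MemoryLayer readState("mem_out"), writeState("mem_in");
        // index 0 = output half of the pair, index 1 = input half;
        // size is 2 because only paired groups are supported.
        readState.setId("state0").setIndex(0).setSize(2)
                 .setOutputPort(Port({1, 128}));
        writeState.setId("state0").setIndex(1).setSize(2)
                  .setInputPort(Port({1, 128}));
        IE_SUPPRESS_DEPRECATED_END
    }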
diff --git a/inference-engine/include/builders/ie_mvn_layer.hpp b/inference-engine/include/builders/ie_mvn_layer.hpp
deleted file mode 100644 (file)
index d4757da..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for MVN layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(MVNLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit MVNLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit MVNLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit MVNLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    MVNLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns port with shapes for the layer
-     * @return Port with shapes
-     */
-    const Port& getPort() const;
-    /**
-     * @brief Sets port shapes for the layer
-     * @param port Port with shapes
-     * @return reference to layer builder
-     */
-    MVNLayer& setPort(const Port& port);
-    /**
-     * @brief Returns across channels value
-     * @return true if mean values are shared across channels
-     */
-    bool getAcrossChannels() const;
-    /**
-     * @brief Sets across channels
-     * @param flag true if mean values are shared across channels
-     * @return reference to layer builder
-     */
-    MVNLayer& setAcrossChannels(bool flag);
-    /**
-     * @brief Returns normalize variance
-     * @return true if variance normalization is performed
-     */
-    bool getNormalize() const;
-    /**
-     * @brief Sets normalize variance
-     * @param flag true if variance normalization is performed
-     * @return reference to layer builder
-     */
-    MVNLayer& setNormalize(bool flag);
-    /**
-     * @brief Returns epsilon
-     * @return Epsilon
-     */
-    float getEpsilon() const;
-    /**
-     * @brief Sets epsilon
-     * @param eps Epsilon
-     * @return reference to layer builder
-     */
-    MVNLayer& setEpsilon(float eps);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_network_builder.hpp b/inference-engine/include/builders/ie_network_builder.hpp
deleted file mode 100644 (file)
index 471dbe4..0000000
+++ /dev/null
@@ -1,264 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <cpp/ie_cnn_network.h>
-#include <ie_blob.h>
-#include <ie_common.h>
-
-#include <builders/ie_layer_builder.hpp>
-#include <ie_context.hpp>
-#include <ie_icnn_network.hpp>
-#include <ie_network.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <utility>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief This class implements a builder for IE Network
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(Network): public INetwork {
-public:
-    /**
-     * @brief A shared pointer to the Network builder
-     */
-    using Ptr = std::shared_ptr<Network>;
-    /**
-     * @brief An iterator for Network builder definition
-     */
-    using iterator = details::INetworkIterator<Network, Layer>;
-
-    /**
-     * @brief Begin network iterator
-     * @return Network iterator
-     */
-    iterator begin();
-    /**
-     * @brief Begin network iterator
-     * @return const INetwork iterator
-     */
-    const_iterator begin() const noexcept override;
-
-    /**
-     * @brief End network iterator
-     * @return Network iterator
-     */
-    iterator end();
-    /**
-     * @brief End network iterator
-     * @return const INetwork iterator
-     */
-    const_iterator end() const noexcept override;
-
-    /**
-     * @brief Returns the number of layers in the network.
-     * @return Layers count
-     */
-    size_t size() const noexcept override;
-
-    /**
-     * @brief The constructor creates a builder based on ICNNNetwork
-     *
-     * @param network constant reference to ICNNNetwork object
-     */
-    explicit Network(const ICNNNetwork& network);
-    /**
-     * @brief The constructor creates an empty builder with network name
-     *
-     * @param name Network name
-     */
-    explicit Network(const std::string& name);
-    /**
-     * @brief The constructor creates a builder based on INetwork
-     *
-     * @param network constant reference to INetwork object
-     */
-    explicit Network(const INetwork& network);
-
-    /**
-     * @brief The constructor creates a builder based on ICNNNetwork with custom Context
-     *
-     * @param ieContext constant reference to Context object
-     * @param network constant reference to ICNNNetwork object
-     */
-    Network(const Context& ieContext, const ICNNNetwork& network);
-    /**
-     * @brief The constructor creates an empty builder with network name and custom Context
-     *
-     * @param ieContext constant reference to Context object
-     * @param name Network name
-     */
-    Network(const Context& ieContext, const std::string& name);
-    /**
-     * @brief The constructor creates a builder based on INetwork with custom Context
-     *
-     * @param ieContext constant reference to Context object
-     * @param network constant reference to INetwork object
-     */
-    Network(const Context& ieContext, const INetwork& network);
-
-    /**
-     * @brief Adds new layer and connects it with previous layers
-     *
-     * @param inputs Vector with PortInfo objects from previous layers
-     * @param layer Layer builder for new layer
-     *
-     * @return Id of new builder for the current network
-     */
-    idx_t addLayer(const std::vector<PortInfo>& inputs, const Layer& layer);
-    /**
-     * @brief Adds new layer
-     *
-     * @param layer Layer builder for new layer
-     *
-     * @return Id of new builder for the current network
-     */
-    idx_t addLayer(const Layer& layer);
-    /**
-     * @brief Removes a layer by ID
-     *
-     * @param layerId Layer ID
-     */
-    void removeLayer(idx_t layerId);
-
-    /**
-     * @brief Connects two layers
-     *
-     * @param input PortInfo object from previous layer
-     * @param output PortInfo object from next layer
-     */
-    void connect(const PortInfo& input, const PortInfo& output);
-    /**
-     * @brief Removes connection from the network
-     *
-     * @param connection Connection
-     */
-    void disconnect(const Connection& connection);
-
-    /**
-     * @brief Returns vector of layer builders
-     *
-     * @return Vector of layer builders
-     */
-    std::vector<Layer::Ptr>& getLayers();
-    /**
-     * @brief Returns constant vector of layer builders
-     *
-     * @return constant vector of layer builders
-     */
-    const std::vector<Layer::Ptr>& getLayers() const;
-
-    /**
-     * @brief Returns a constant smart pointer to a Layer interface.
-     * If the layer is missing, returns nullptr.
-     * @param id Id of the Layer
-     * @return Layer interface smart pointer
-     */
-    const ILayer::CPtr getLayer(idx_t id) const noexcept override;
-    Layer::Ptr getLayer(idx_t layerId);
-
-    /**
-     * @brief Returns a constant vector of input layers.
-     * @return Vector of input layers
-     */
-    const std::vector<ILayer::CPtr> getInputs() const noexcept override;
-    /**
-     * @brief Returns a vector of input layers.
-     * @return Vector of input layers
-     */
-    std::vector<Layer::Ptr> getInputs();
-
-    /**
-     * @brief Returns a constant vector of output layers.
-     * @return Vector of output layers
-     */
-    const std::vector<ILayer::CPtr> getOutputs() const noexcept override;
-    /**
-     * @brief Returns a vector of output layers.
-     * @return Vector of output layers
-     */
-    std::vector<Layer::Ptr> getOutputs();
-
-    /**
-     * @brief Returns a constant vector of connections for specific layer.
-     * If the layer is missing, returns an empty vector.
-     * @param layerId layer index
-     * @return Vector of connections
-     */
-    const std::vector<Connection> getLayerConnections(idx_t layerId) const noexcept override;
-
-    /**
-     * @brief Returns a constant vector of all connections.
-     * @return Vector of connections
-     */
-    const std::vector<Connection>& getConnections() const;
-
-    /**
-     * @brief Returns a network name.
-     * @return Network name
-     */
-    const std::string& getName() const noexcept override;
-
-    /**
-     * @brief Returns a network context
-     * @return const reference to Context
-     */
-    const Context& getContext() const noexcept override;
-    /**
-     * @brief Returns a network context
-     * @return reference to Context
-     */
-    Context& getContext() noexcept;
-
-    /**
-     * @brief Builds and validate network
-     *
-     * @return const shared pointer to INetwork
-     */
-    const INetwork::CPtr build();
-
-    /**
-     * @brief Validates network
-     *
-     */
-    void validate();
-
-    /**
-     * @brief The operator builds network
-     *
-     * @return const shared pointer to INetwork
-     */
-    explicit operator const INetwork::CPtr();
-
-private:
-    std::map<std::string, Parameter> parameters;
-};
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief This function converts INetwork to ICNNNetwork
- *
- * @param network constant shared pointer to INetwork object
- * @return constant shared pointer to ICNNNetwork
- */
-INFERENCE_ENGINE_NN_BUILDER_DEPRECATED
-INFERENCE_ENGINE_API_CPP(const std::shared_ptr<ICNNNetwork>) convertToICNNNetwork(const INetwork::CPtr& network);
-
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-
-}  // namespace InferenceEngine
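A minimal end-to-end sketch of the builder flow this header enabled: add layers, wire them by layer ID, build, and convert to ICNNNetwork. It assumes Port is constructible from a shape vector and PortInfo from a layer ID, per ie_network.hpp; the layer types reuse builders removed elsewhere in this same diff:

    #include <builders/ie_network_builder.hpp>
    #include <builders/ie_input_layer.hpp>
    #include <builders/ie_elu_layer.hpp>
    #include <builders/ie_output_layer.hpp>

    using namespace InferenceEngine;

    std::shared_ptr<ICNNNetwork> makeTinyNet() {
        IE_SUPPRESS_DEPRECATED_START
        Builder::Network net("tiny");
        // addLayer returns the new layer's id; ids feed the next layer's inputs.
        idx_t in  = net.addLayer(Builder::InputLayer("data").setPort(Port({1, 3, 32, 32})));
        idx_t act = net.addLayer({{in}}, Builder::ELULayer("act").setAlpha(1.0f));
        net.addLayer({{act}}, Builder::OutputLayer("out"));
        return Builder::convertToICNNNetwork(net.build());  // build() also validates
        IE_SUPPRESS_DEPRECATED_END
    }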
diff --git a/inference-engine/include/builders/ie_norm_layer.hpp b/inference-engine/include/builders/ie_norm_layer.hpp
deleted file mode 100644 (file)
index 08fa913..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Norm layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(NormLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The enum defines all Norm types
-     */
-    enum NormType { WITHIN_CHANNEL = 0, ACROSS_CHANNELS = 1 };
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit NormLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit NormLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit NormLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    NormLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns port with shapes for the layer
-     * @return Port with shapes
-     */
-    const Port& getPort() const;
-    /**
-     * @brief Sets port shapes for the layer
-     * @param port Port with shapes
-     * @return reference to layer builder
-     */
-    NormLayer& setPort(const Port& port);
-    /**
-     * @brief Returns side length of the region
-     * @return Size
-     */
-    size_t getSize() const;
-    /**
-     * @brief Sets side length of the region
-     * @param size Size
-     * @return reference to layer builder
-     */
-    NormLayer& setSize(size_t size);
-    /**
-     * @brief Returns scaling parameter for the normalizing sum
-     * @return Scaling parameter
-     */
-    float getAlpha() const;
-    /**
-     * @brief Sets scaling parameter for the normalizing sum
-     * @param alpha Scaling parameter
-     * @return reference to layer builder
-     */
-    NormLayer& setAlpha(float alpha);
-    /**
-     * @brief Returns exponent for the normalizing sum
-     * @return Exponent
-     */
-    float getBeta() const;
-    /**
-     * @brief Sets exponent for the normalizing sum
-     * @param beta Exponent
-     * @return reference to layer builder
-     */
-    NormLayer& setBeta(float beta);
-    /**
-     * @brief Returns the across-maps flag
-     * @return true if normalizing sum is performed over adjacent channels
-     */
-    bool getAcrossMaps() const;
-    /**
-     * @brief Sets region type
-     * @param acrossMap true if normalizing sum is performed over adjacent channels
-     * @return reference to layer builder
-     */
-    NormLayer& setAcrossMaps(bool acrossMap);
-    /**
-     * @brief Returns region type
-     * @return Norm type
-     */
-    NormType getRegion() const;
-    /**
-     * @brief Sets region type
-     * @param type region type
-     * @return reference to layer builder
-     */
-    NormLayer& setRegion(NormType type);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_normalize_layer.hpp b/inference-engine/include/builders/ie_normalize_layer.hpp
deleted file mode 100644 (file)
index 1cdf0fe..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Normalize layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(NormalizeLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit NormalizeLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit NormalizeLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit NormalizeLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    NormalizeLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns port with shapes for the layer
-     * @return Port with shapes
-     */
-    const Port& getPort() const;
-    /**
-     * @brief Sets port shapes for the layer
-     * @param port Port with shapes
-     * @return reference to layer builder
-     */
-    NormalizeLayer& setPort(const Port& port);
-
-    /**
-     * @brief Returns channel shared flag
-     * @return true if scale parameters are shared across channels
-     */
-    bool getChannelShared() const;
-    /**
-     * @brief Sets channel shared flag
-     * @param acrossMap true if scale parameters are shared across channels
-     * @return reference to layer builder
-     */
-    NormalizeLayer& setChannelShared(bool acrossMap);
-    /**
-     * @brief Returns across maps
-     * @return true if normalization is shared across channels
-     */
-    bool getAcrossMaps() const;
-    /**
-     * @brief Sets across map
-     * @param acrossMap true if normalization is shared across channels
-     * @return reference to layer builder
-     */
-    NormalizeLayer& setAcrossMaps(bool acrossMap);
-
-    /**
-     * @brief Returns epsilon
-     * @return Epsilon
-     */
-    float getEpsilon() const;
-    /**
-     * @brief Sets epsilon
-     * @param eps Epsilon
-     * @return reference to layer builder
-     */
-    NormalizeLayer& setEpsilon(float eps);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_output_layer.hpp b/inference-engine/include/builders/ie_output_layer.hpp
deleted file mode 100644 (file)
index 84d7aae..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Output layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(OutputLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit OutputLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit OutputLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit OutputLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    OutputLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns port with shapes for the layer
-     * @return Port with shapes
-     */
-    const Port& getPort() const;
-    /**
-     * @brief Sets port shapes for the layer
-     * @param port Port with shapes
-     * @return reference to layer builder
-     */
-    OutputLayer& setPort(const Port& port);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_permute_layer.hpp b/inference-engine/include/builders/ie_permute_layer.hpp
deleted file mode 100644 (file)
index 2283f3f..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Permute layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(PermuteLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit PermuteLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit PermuteLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit PermuteLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    PermuteLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns input port
-     * @return Input port
-     */
-    const Port& getInputPort() const;
-    /**
-     * @brief Sets input port
-     * @param port Input port
-     * @return reference to layer builder
-     */
-    PermuteLayer& setInputPort(const Port& port);
-    /**
-     * @brief Returns output port
-     * @return Output port
-     */
-    const Port& getOutputPort() const;
-    /**
-     * @brief Sets output port
-     * @param port Output port
-     * @return reference to layer builder
-     */
-    PermuteLayer& setOutputPort(const Port& port);
-    /**
-     * @brief Returns the order of dimension indexes for the output blob
-     * @return Order of dimensions for output blob
-     */
-    const std::vector<size_t> getOrder() const;
-    /**
-     * @brief Sets the order of dimensions for output blob
-     * @param order dimensions indexes for output blob
-     * @return reference to layer builder
-     */
-    PermuteLayer& setOrder(const std::vector<size_t>& order);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
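A sketch of the order parameter in use, transposing NCHW to NHWC (illustrative shapes; same Port assumption as above):

    #include <builders/ie_permute_layer.hpp>
    #include <ie_network.hpp>

    using namespace InferenceEngine;

    void buildPermute() {
        IE_SUPPRESS_DEPRECATED_START
        Builder::PermuteLayer perm("nchw_to_nhwc");
        perm.setInputPort(Port({1, 3, 224, 224}))
            .setOrder({0, 2, 3, 1})                 // NCHW -> NHWC
            .setOutputPort(Port({1, 224, 224, 3}));
        IE_SUPPRESS_DEPRECATED_END
    }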
diff --git a/inference-engine/include/builders/ie_pooling_layer.hpp b/inference-engine/include/builders/ie_pooling_layer.hpp
deleted file mode 100644 (file)
index e415608..0000000
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Pooling layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(PoolingLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The enum defines available pooling types
-     */
-    enum PoolingType { MAX = 1, AVG = 2 };
-
-    /**
-     * @brief The enum defines available rounding types
-     */
-    enum RoundingType { CEIL = 1, FLOOR = 2 };
-
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit PoolingLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit PoolingLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit PoolingLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Operator creates generic layer builder
-     * @return Generic layer builder
-     */
-    operator Layer() const override;
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    PoolingLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns input port
-     * @return Input port
-     */
-    const Port& getInputPort() const;
-    /**
-     * @brief Sets input port
-     * @param port Input port
-     * @return reference to layer builder
-     */
-    PoolingLayer& setInputPort(const Port& port);
-    /**
-     * @brief Returns output port
-     * @return Output port
-     */
-    const Port& getOutputPort() const;
-    /**
-     * @brief Sets output port
-     * @param port Output port
-     * @return reference to layer builder
-     */
-    PoolingLayer& setOutputPort(const Port& port);
-    /**
-     * @brief Returns kernel size
-     * @return Kernel size
-     */
-    const std::vector<size_t> getKernel() const;
-    /**
-     * @brief Sets kernel size
-     * @param kernel Kernel size
-     * @return reference to layer builder
-     */
-    PoolingLayer& setKernel(const std::vector<size_t>& kernel);
-    /**
-     * @brief Returns vector of strides
-     * @return vector of strides
-     */
-    const std::vector<size_t> getStrides() const;
-    /**
-     * @brief Sets strides
-     * @param strides vector of strides
-     * @return reference to layer builder
-     */
-    PoolingLayer& setStrides(const std::vector<size_t>& strides);
-    /**
-     * @brief Returns begin paddings
-     * @return vector of paddings
-     */
-    const std::vector<size_t> getPaddingsBegin() const;
-    /**
-     * @brief Sets begin paddings
-     * @param paddings Vector of paddings
-     * @return reference to layer builder
-     */
-    PoolingLayer& setPaddingsBegin(const std::vector<size_t>& paddings);
-    /**
-     * @brief Returns end paddings
-     * @return Vector of paddings
-     */
-    const std::vector<size_t> getPaddingsEnd() const;
-    /**
-     * @brief Sets end paddings
-     * @param paddings Vector of paddings
-     * @return reference to layer builder
-     */
-    PoolingLayer& setPaddingsEnd(const std::vector<size_t>& paddings);
-    /**
-     * @brief Returns pooling type
-     * @return Pooling type
-     */
-    PoolingType getPoolingType() const;
-    /**
-     * @brief Sets pooling type
-     * @param type Pooling type
-     * @return reference to layer builder
-     */
-    PoolingLayer& setPoolingType(PoolingType type);
-    /**
-     * @brief Returns rounding type
-     * @return Rounding type
-     */
-    RoundingType getRoundingType() const;
-    /**
-     * @brief Sets rounding types
-     * @param type Rounding type
-     * @return reference to layer builder
-     */
-    PoolingLayer& setRoundingType(RoundingType type);
-    /**
-     * @brief Returns the exclude-pad flag
-     * @return true if zero-values in the padding are not used
-     */
-    bool getExcludePad() const;
-    /**
-     * @brief Sets a type of pooling strategy
-     * @param exclude zero-values in the padding are not used if true
-     * @return reference to layer builder
-     */
-    PoolingLayer& setExcludePad(bool exclude);
-
-private:
-    PoolingType type = MAX;
-    RoundingType roundingType = CEIL;
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
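
The deprecation note above only says to use the ngraph API. As a hedged migration sketch (not part of this patch; opset3 names assumed, shapes and parameter values purely illustrative), the builder's MAX/CEIL defaults translate roughly to:

```cpp
#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset3.hpp>

// Sketch of the ngraph replacement for the deprecated PoolingLayer builder.
// MAX pooling with CEIL rounding mirrors the builder defaults above.
std::shared_ptr<ngraph::Function> make_max_pool() {
    using namespace ngraph;
    auto data = std::make_shared<opset3::Parameter>(element::f32, Shape{1, 3, 224, 224});
    auto pool = std::make_shared<opset3::MaxPool>(data,
                                                  Strides{2, 2},  // strides
                                                  Shape{0, 0},    // pads_begin
                                                  Shape{0, 0},    // pads_end
                                                  Shape{2, 2},    // kernel
                                                  op::RoundingType::CEIL);
    return std::make_shared<Function>(NodeVector{pool}, ParameterVector{data});
}
```

An AvgPool node additionally takes the exclude-pad flag that setExcludePad() used to control.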
diff --git a/inference-engine/include/builders/ie_power_layer.hpp b/inference-engine/include/builders/ie_power_layer.hpp
deleted file mode 100644 (file)
index 3741b4e..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Power layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(PowerLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit PowerLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit PowerLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit PowerLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    PowerLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns port with shapes for the layer
-     * @return Port with shapes
-     */
-    const Port& getPort() const;
-    /**
-     * @brief Sets port shapes for the layer
-     * @param port Port with shapes
-     * @return reference to layer builder
-     */
-    PowerLayer& setPort(const Port& port);
-    /**
-     * @brief Returns power
-     * @return Power parameter
-     */
-    float getPower() const;
-    /**
-     * @brief Sets the power parameter
-     * @param power Power parameter
-     * @return reference to layer builder
-     */
-    PowerLayer& setPower(float power);
-    /**
-     * @brief Returns scaling parameter
-     * @return Scaling
-     */
-    float getScale() const;
-    /**
-     * @brief Sets scaling parameter
-     * @param scale Scaling parameter
-     * @return reference to layer builder
-     */
-    PowerLayer& setScale(float scale);
-    /**
-     * @brief Returns shifting parameter
-     * @return Shift
-     */
-    float getShift() const;
-    /**
-     * @brief Sets shift for the layer
-     * @param shift Shifting parameter
-     * @return reference to layer builder
-     */
-    PowerLayer& setShift(float shift);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
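
For the Power builder, the (shift + scale * x) ^ power semantics decompose into elementwise ngraph ops; a minimal sketch under the same opset3 assumption, with illustrative constants:

```cpp
#include <ngraph/opsets/opset3.hpp>

// Sketch: (shift + scale * x) ^ power composed from elementwise ngraph ops.
std::shared_ptr<ngraph::Node> make_power(const ngraph::Output<ngraph::Node>& x) {
    using namespace ngraph;
    auto scale = opset3::Constant::create(element::f32, Shape{}, {2.0f});
    auto shift = opset3::Constant::create(element::f32, Shape{}, {1.0f});
    auto power = opset3::Constant::create(element::f32, Shape{}, {0.5f});
    auto scaled  = std::make_shared<opset3::Multiply>(x, scale);  // scale * x
    auto shifted = std::make_shared<opset3::Add>(scaled, shift);  // + shift
    return std::make_shared<opset3::Power>(shifted, power);       // ^ power
}
```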
diff --git a/inference-engine/include/builders/ie_prelu_layer.hpp b/inference-engine/include/builders/ie_prelu_layer.hpp
deleted file mode 100644 (file)
index 20cb919..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for PReLU layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(PReLULayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit PReLULayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit PReLULayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit PReLULayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    PReLULayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns port with shapes for the layer
-     * @return Port with shapes
-     */
-    const Port& getPort() const;
-    /**
-     * @brief Sets port shapes for the layer
-     * @param port Port with shapes
-     * @return reference to layer builder
-     */
-    PReLULayer& setPort(const Port& port);
-    /**
-     * @brief Returns channel shared flag
-     * @return true if negative slope shared across channels
-     */
-    bool getChannelShared() const;
-    /**
-     * @brief Sets channel shared flag
-     * @param flag true if negative slope shared across channels
-     * @return reference to layer builder
-     */
-    PReLULayer& setChannelShared(bool flag);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
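
The PReLU builder maps onto the ngraph PRelu op; a hedged sketch where a scalar slope stands in for channelShared == true:

```cpp
#include <ngraph/opsets/opset3.hpp>

// Sketch: a scalar slope corresponds to channelShared == true above;
// a per-channel slope would use a Constant of shape {C} instead.
std::shared_ptr<ngraph::Node> make_prelu(const ngraph::Output<ngraph::Node>& data) {
    using namespace ngraph;
    auto slope = opset3::Constant::create(element::f32, Shape{}, {0.25f});
    return std::make_shared<opset3::PRelu>(data, slope);
}
```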
diff --git a/inference-engine/include/builders/ie_prior_box_clustered_layer.hpp b/inference-engine/include/builders/ie_prior_box_clustered_layer.hpp
deleted file mode 100644 (file)
index 66c54ea..0000000
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for PriorBoxClustered layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(PriorBoxClusteredLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit PriorBoxClusteredLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit PriorBoxClusteredLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit PriorBoxClusteredLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    PriorBoxClusteredLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns output port
-     * @return Output port
-     */
-    const Port& getOutputPort() const;
-    /**
-     * @brief Sets output port
-     * @param port Output port
-     * @return reference to layer builder
-     */
-    PriorBoxClusteredLayer& setOutputPort(const Port& port);
-    /**
-     * @brief Returns input ports
-     * @return Vector of input ports
-     */
-    const std::vector<Port>& getInputPorts() const;
-    /**
-     * @brief Sets input ports
-     * @param ports Vector of input ports
-     * @return reference to layer builder
-     */
-    PriorBoxClusteredLayer& setInputPorts(const std::vector<Port>& ports);
-    /**
-     * @brief Returns height and width of input image
-     * @return input image sizes
-     */
-    const std::vector<float> getImgSizes() const;
-    /**
-     * @brief Sets height and width sizes
-     * @param sizes Height and width sizes
-     * @return reference to layer builder
-     */
-    PriorBoxClusteredLayer& setImgSizes(const std::vector<float> sizes);
-    /**
-     * @brief Returns distances between box centers for height and width
-     * @return distances
-     */
-    const std::vector<float> getSteps() const;
-    /**
-     * @brief Sets distances between box centers for height and width
-     * @param steps Distances between box centers
-     * @return reference to layer builder
-     */
-    PriorBoxClusteredLayer& setSteps(const std::vector<float> steps);
-    /**
-     * @brief Returns a distance between box centers
-     * @return distance
-     */
-    float getStep() const;
-    /**
-     * @brief Sets a distance between box centers
-     * @param step A distance between box centers
-     * @return reference to layer builder
-     */
-    PriorBoxClusteredLayer& setStep(float step);
-    /**
-     * @brief Returns shift of box respectively to top left corner
-     * @return Shift
-     */
-    float getOffset() const;
-    /**
-     * @brief Sets shift of box respectively to top left corner
-     * @param offset Shift
-     * @return reference to layer builder
-     */
-    PriorBoxClusteredLayer& setOffset(float offset);
-    /**
-     * @brief Returns a variance of adjusting bounding boxes
-     * @return Variance
-     */
-    float getVariance() const;
-    /**
-     * @brief Sets a variance of adjusting bounding boxes
-     * @param variance Variance
-     * @return reference to layer builder
-     */
-    PriorBoxClusteredLayer& setVariance(float variance);
-    /**
-     * @brief Returns desired boxes width in pixels
-     * @return width of desired boxes
-     */
-    float getWidth() const;
-    /**
-     * @brief Sets desired boxes width in pixels
-     * @param width Width of desired boxes
-     * @return reference to layer builder
-     */
-    PriorBoxClusteredLayer& setWidth(float width);
-    /**
-     * @brief Returns desired boxes height in pixels
-     * @return height of desired boxes
-     */
-    float getHeight() const;
-    /**
-     * @brief Sets desired boxes height in pixels
-     * @param height Height of desired boxes
-     * @return reference to layer builder
-     */
-    PriorBoxClusteredLayer& setHeight(float height);
-    /**
-     * @brief Returns clip flag
-     * @return true if each value in the output blob is within [0,1]
-     */
-    bool getClip() const;
-    /**
-     * @brief Sets clip flag
-     * @param flag true if each value in the output blob is within [0,1]
-     * @return reference to layer builder
-     */
-    PriorBoxClusteredLayer& setClip(bool flag);
-    /**
-     * @brief Returns flip flag
-     * @return list of boxes is augmented with the flipped ones if true
-     */
-    bool getFlip() const;
-    /**
-     * @brief Sets flip flag
-     * @param flag true if list of boxes is augmented with the flipped ones
-     * @return reference to layer builder
-     */
-    PriorBoxClusteredLayer& setFlip(bool flag);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
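
PriorBoxClustered carries its knobs in an attrs struct on the ngraph side; a sketch with illustrative values (opset3 naming assumed):

```cpp
#include <ngraph/opsets/opset3.hpp>

// Sketch: the builder's widths/heights/steps/offset/variance/clip knobs map
// onto PriorBoxClusteredAttrs; all values below are illustrative.
std::shared_ptr<ngraph::Node> make_prior_box_clustered() {
    using namespace ngraph;
    op::PriorBoxClusteredAttrs attrs;
    attrs.widths       = {9.4f, 25.1f, 14.7f};   // desired box widths in pixels
    attrs.heights      = {15.0f, 39.6f, 25.5f};  // desired box heights in pixels
    attrs.step_widths  = 16.0f;                  // distance between box centers
    attrs.step_heights = 16.0f;
    attrs.offset       = 0.5f;                   // shift from the top-left corner
    attrs.clip         = false;                  // do not clamp outputs to [0, 1]
    attrs.variances    = {0.1f, 0.1f, 0.2f, 0.2f};
    auto layer_shape = opset3::Constant::create(element::i64, Shape{2}, {19, 19});
    auto image_shape = opset3::Constant::create(element::i64, Shape{2}, {300, 300});
    return std::make_shared<opset3::PriorBoxClustered>(layer_shape, image_shape, attrs);
}
```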
diff --git a/inference-engine/include/builders/ie_prior_box_layer.hpp b/inference-engine/include/builders/ie_prior_box_layer.hpp
deleted file mode 100644 (file)
index eb2977c..0000000
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for PriorBox layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(PriorBoxLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit PriorBoxLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit PriorBoxLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit PriorBoxLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    PriorBoxLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns output port
-     * @return Output port
-     */
-    const Port& getOutputPort() const;
-    /**
-     * @brief Sets output port
-     * @param port Output port
-     * @return reference to layer builder
-     */
-    PriorBoxLayer& setOutputPort(const Port& port);
-    /**
-     * @brief Returns input ports
-     * @return Vector of input ports
-     */
-    const std::vector<Port>& getInputPorts() const;
-    /**
-     * @brief Sets input ports
-     * @param ports Vector of input ports
-     * @return reference to layer builder
-     */
-    PriorBoxLayer& setInputPorts(const std::vector<Port>& ports);
-    /**
-     * @brief Returns the minimum box size in pixels
-     * @return Minimum box size
-     */
-    size_t getMinSize() const;
-    /**
-     * @brief Sets the minimum box size in pixels
-     * @param minSize Minimum size
-     * @return reference to layer builder
-     */
-    PriorBoxLayer& setMinSize(size_t minSize);
-    /**
-     * @brief Returns the maximum box size in pixels
-     * @return maximum size
-     */
-    size_t getMaxSize() const;
-    /**
-     * @brief Sets the maximum box size in pixels
-     * @param maxSize Maximum size
-     * @return reference to layer builder
-     */
-    PriorBoxLayer& setMaxSize(size_t maxSize);
-    /**
-     * @brief Returns a distance between box centers
-     * @return Distance
-     */
-    float getStep() const;
-    /**
-     * @brief Sets a distance between box centers
-     * @param step Distance
-     * @return reference to layer builder
-     */
-    PriorBoxLayer& setStep(float step);
-    /**
-     * @brief Returns a shift of box respectively to top left corner
-     * @return Shift
-     */
-    float getOffset() const;
-    /**
-     * @brief Sets a shift of box respectively to top left corner
-     * @param offset Shift
-     * @return reference to layer builder
-     */
-    PriorBoxLayer& setOffset(float offset);
-    /**
-     * @brief Returns a variance of adjusting bounding boxes
-     * @return Variance
-     */
-    float getVariance() const;
-    /**
-     * @brief Sets a variance of adjusting bounding boxes
-     * @param variance Variance
-     * @return reference to layer builder
-     */
-    PriorBoxLayer& setVariance(float variance);
-    /**
-     * @brief Returns a flag that denotes type of inference
-     * @return true if max_size is used
-     */
-    bool getScaleAllSizes() const;
-    /**
-     * @brief Sets a flag that denotes a type of inference
-     * @param flag max_size is used if true
-     * @return reference to layer builder
-     */
-    PriorBoxLayer& setScaleAllSizes(bool flag);
-    /**
-     * @brief Returns clip flag
-     * @return true if each value in the output blob is within [0,1]
-     */
-    bool getClip() const;
-    /**
-     * @brief Sets clip flag
-     * @param flag true if each value in the output blob is within [0,1]
-     * @return reference to layer builder
-     */
-    PriorBoxLayer& setClip(bool flag);
-    /**
-     * @brief Returns flip flag
-     * @return list of boxes is augmented with the flipped ones if true
-     */
-    bool getFlip() const;
-    /**
-     * @brief Sets flip flag
-     * @param flag true if list of boxes is augmented with the flipped ones
-     * @return reference to layer builder
-     */
-    PriorBoxLayer& setFlip(bool flag);
-    /**
-     * @brief Returns a variance of aspect ratios
-     * @return Vector of aspect ratios
-     */
-    const std::vector<size_t> getAspectRatio() const;
-    /**
-     * @brief Sets a variance of aspect ratios
-     * @param aspectRatio Vector of aspect ratios
-     * @return reference to layer builder
-     */
-    PriorBoxLayer& setAspectRatio(const std::vector<size_t>& aspectRatio);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
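
Likewise for PriorBox; a sketch with illustrative attribute values:

```cpp
#include <ngraph/opsets/opset3.hpp>

// Sketch: min/max size, aspect ratios, flip/clip and scale_all_sizes from the
// builder above map onto PriorBoxAttrs; values are illustrative.
std::shared_ptr<ngraph::Node> make_prior_box() {
    using namespace ngraph;
    op::PriorBoxAttrs attrs;
    attrs.min_size        = {32.0f};
    attrs.max_size        = {64.0f};
    attrs.aspect_ratio    = {2.0f};
    attrs.flip            = true;   // augment the box list with flipped ratios
    attrs.clip            = false;
    attrs.step            = 16.0f;  // distance between box centers
    attrs.offset          = 0.5f;
    attrs.variance        = {0.1f, 0.1f, 0.2f, 0.2f};
    attrs.scale_all_sizes = true;   // max_size takes part in scaling
    auto layer_shape = opset3::Constant::create(element::i64, Shape{2}, {19, 19});
    auto image_shape = opset3::Constant::create(element::i64, Shape{2}, {300, 300});
    return std::make_shared<opset3::PriorBox>(layer_shape, image_shape, attrs);
}
```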
diff --git a/inference-engine/include/builders/ie_proposal_layer.hpp b/inference-engine/include/builders/ie_proposal_layer.hpp
deleted file mode 100644 (file)
index 779a55d..0000000
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Proposal layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(ProposalLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit ProposalLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit ProposalLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit ProposalLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    ProposalLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns output port
-     * @return Output port
-     */
-    const Port& getOutputPort() const;
-    /**
-     * @brief Sets output port
-     * @param port Output port
-     * @return reference to layer builder
-     */
-    ProposalLayer& setOutputPort(const Port& port);
-    /**
-     * @brief Returns input ports
-     * @return Vector of input ports
-     */
-    const std::vector<Port>& getInputPorts() const;
-    /**
-     * @brief Sets input ports
-     * @param ports Vector of input ports
-     * @return reference to layer builder
-     */
-    ProposalLayer& setInputPorts(const std::vector<Port>& ports);
-    /**
-     * @brief Returns the quantity of bounding boxes after applying NMS
-     * @return Quantity of bounding boxes
-     */
-    size_t getPostNMSTopN() const;
-    /**
-     * @brief Sets the quantity of bounding boxes after applying NMS
-     * @param topN Quantity of bounding boxes
-     * @return reference to layer builder
-     */
-    ProposalLayer& setPostNMSTopN(size_t topN);
-    /**
-     * @brief Returns the quantity of bounding boxes before applying NMS
-     * @return Quantity of bounding boxes
-     */
-    size_t getPreNMSTopN() const;
-    /**
-     * @brief Sets the quantity of bounding boxes before applying NMS
-     * @param topN Quantity of bounding boxes
-     * @return reference to layer builder
-     */
-    ProposalLayer& setPreNMSTopN(size_t topN);
-    /**
-     * @brief Returns minimum value of the proposal to be taken into consideration
-     * @return Threshold
-     */
-    float getNMSThresh() const;
-    /**
-     * @brief Sets minimum value of the proposal to be taken into consideration
-     * @param thresh Threshold
-     * @return reference to layer builder
-     */
-    ProposalLayer& setNMSThresh(float thresh);
-    /**
-     * @brief Returns base size for anchor generation
-     * @return Base size
-     */
-    size_t getBaseSize() const;
-    /**
-     * @brief Sets base size for anchor generation
-     * @param baseSize Base size for anchor generation
-     * @return reference to layer builder
-     */
-    ProposalLayer& setBaseSize(size_t baseSize);
-    /**
-     * @brief Returns minimum size of box to be taken into consideration
-     * @return Minimum size
-     */
-    size_t getMinSize() const;
-    /**
-     * @brief Sets minimum size of box to be taken into consideration
-     * @param minSize Minimum size of the box
-     * @return reference to layer builder
-     */
-    ProposalLayer& setMinSize(size_t minSize);
-    /**
-     * @brief Returns step size to slide over boxes in pixels
-     * @return Step size
-     */
-    size_t getFeatStride() const;
-    /**
-     * @brief Sets step size to slide over boxes in pixels
-     * @param featStride Step size
-     * @return reference to layer builder
-     */
-    ProposalLayer& setFeatStride(size_t featStride);
-    /**
-     * @brief Returns scales for anchor generation
-     * @return Vector of scales
-     */
-    const std::vector<float> getScale() const;
-    /**
-     * @brief Sets scales for anchor generation
-     * @param scales Vector of scales
-     * @return reference to layer builder
-     */
-    ProposalLayer& setScale(const std::vector<float>& scales);
-    /**
-     * @brief Returns ratios for anchor generation
-     * @return Vector of ratios
-     */
-    const std::vector<float> getRatio() const;
-    /**
-     * @brief Sets ratios for anchor generation
-     * @param ratios Vector of ratios
-     * @return reference to layer builder
-     */
-    ProposalLayer& setRatio(const std::vector<float>& ratios);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
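
Proposal has a direct ngraph counterpart whose attrs mirror the getters above; a sketch with illustrative values and shapes sized for 9 anchors (3 ratios x 3 scales):

```cpp
#include <ngraph/opsets/opset3.hpp>

// Sketch: pre/post-NMS top-N, NMS threshold, base size, feat stride, scales
// and ratios map onto ProposalAttrs; all numbers are illustrative.
std::shared_ptr<ngraph::Node> make_proposal() {
    using namespace ngraph;
    op::ProposalAttrs attrs;
    attrs.base_size     = 16;    // base size for anchor generation
    attrs.pre_nms_topn  = 6000;  // boxes kept before NMS
    attrs.post_nms_topn = 300;   // boxes kept after NMS
    attrs.nms_thresh    = 0.7f;
    attrs.feat_stride   = 16;    // step to slide over boxes, in pixels
    attrs.min_size      = 16;
    attrs.ratio         = {0.5f, 1.0f, 2.0f};
    attrs.scale         = {8.0f, 16.0f, 32.0f};
    auto cls_probs   = std::make_shared<opset3::Parameter>(element::f32, Shape{1, 18, 14, 14});
    auto bbox_deltas = std::make_shared<opset3::Parameter>(element::f32, Shape{1, 36, 14, 14});
    auto image_shape = opset3::Constant::create(element::f32, Shape{3}, {224.0f, 224.0f, 1.0f});
    return std::make_shared<opset3::Proposal>(cls_probs, bbox_deltas, image_shape, attrs);
}
```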
diff --git a/inference-engine/include/builders/ie_psroi_pooling_layer.hpp b/inference-engine/include/builders/ie_psroi_pooling_layer.hpp
deleted file mode 100644 (file)
index 73b359a..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for PSROIPooling layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(PSROIPoolingLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit PSROIPoolingLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit PSROIPoolingLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit PSROIPoolingLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    PSROIPoolingLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns input ports
-     * @return Vector of input ports
-     */
-    const std::vector<Port>& getInputPorts() const;
-    /**
-     * @brief Sets input ports
-     * @param ports Vector of input ports
-     * @return reference to layer builder
-     */
-    PSROIPoolingLayer& setInputPorts(const std::vector<Port>& ports);
-    /**
-     * @brief Returns output port
-     * @return Output port
-     */
-    const Port& getOutputPort() const;
-    /**
-     * @brief Sets output port
-     * @param port Output port
-     * @return reference to layer builder
-     */
-    PSROIPoolingLayer& setOutputPort(const Port& port);
-    /**
-     * @brief Returns multiplicative spatial scale factor to translate ROI coordinates
-     * @return Spatial scale factor
-     */
-    float getSpatialScale() const;
-    /**
-     * @brief Sets multiplicative spatial scale factor to translate ROI coordinates
-     * @param spatialScale Spatial scale factor
-     * @return reference to layer builder
-     */
-    PSROIPoolingLayer& setSpatialScale(float spatialScale);
-    /**
-     * @brief Returns pooled output channel number
-     * @return Output channel number
-     */
-    size_t getOutputDim() const;
-    /**
-     * @brief Sets pooled output channel number
-     * @param outDim Output channel number
-     * @return reference to layer builder
-     */
-    PSROIPoolingLayer& setOutputDim(size_t outDim);
-    /**
-     * @brief Returns number of groups to encode position-sensitive score maps
-     * @return Number of groups
-     */
-    size_t getGroupSize() const;
-    /**
-     * @brief Sets number of groups to encode position-sensitive score maps
-     * @param size Number of groups
-     * @return reference to layer builder
-     */
-    PSROIPoolingLayer& setGroupSize(size_t size);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
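
PSROIPooling translates almost one-to-one; a sketch (the spatial-bin and mode arguments are shown with common defaults, values illustrative):

```cpp
#include <ngraph/opsets/opset3.hpp>

// Sketch: output_dim, group_size and spatial_scale carry over directly;
// input channels must equal output_dim * group_size^2 (8 * 49 = 392 here).
std::shared_ptr<ngraph::Node> make_psroi_pooling() {
    using namespace ngraph;
    auto feat = std::make_shared<opset3::Parameter>(element::f32, Shape{1, 392, 14, 14});
    auto rois = std::make_shared<opset3::Parameter>(element::f32, Shape{150, 5});
    return std::make_shared<opset3::PSROIPooling>(feat, rois,
                                                  8,        // pooled output channels
                                                  7,        // group size
                                                  0.0625f,  // spatial scale (1/16)
                                                  1, 1,     // spatial bins (x, y)
                                                  "average");
}
```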
diff --git a/inference-engine/include/builders/ie_region_yolo_layer.hpp b/inference-engine/include/builders/ie_region_yolo_layer.hpp
deleted file mode 100644 (file)
index d86cb7b..0000000
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for RegionYolo layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(RegionYoloLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit RegionYoloLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit RegionYoloLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit RegionYoloLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    RegionYoloLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns input port
-     * @return Input port
-     */
-    const Port& getInputPort() const;
-    /**
-     * @brief Sets input port
-     * @param port Input port
-     * @return reference to layer builder
-     */
-    RegionYoloLayer& setInputPort(const Port& port);
-    /**
-     * @brief Returns output port
-     * @return Output port
-     */
-    const Port& getOutputPort() const;
-    /**
-     * @brief Sets output port
-     * @param port Output port
-     * @return reference to layer builder
-     */
-    RegionYoloLayer& setOutputPort(const Port& port);
-    /**
-     * @brief Returns number of coordinates for each region
-     * @return Number of coordinates
-     */
-    int getCoords() const;
-    /**
-     * @brief Sets number of coordinates for each region
-     * @param coords Number of coordinates
-     * @return reference to layer builder
-     */
-    RegionYoloLayer& setCoords(int coords);
-    /**
-     * @brief Returns number of classes for each region
-     * @return Number of classes
-     */
-    int getClasses() const;
-    /**
-     * @brief Sets number of classes for each region
-     * @param classes number of classes
-     * @return reference to layer builder
-     */
-    RegionYoloLayer& setClasses(int classes);
-    /**
-     * @brief Returns number of regions
-     * @return Number of regions
-     */
-    int getNum() const;
-    /**
-     * @brief Sets number of regions
-     * @param num Number of regions
-     * @return reference to layer builder
-     */
-    RegionYoloLayer& setNum(int num);
-    /**
-     * @brief Returns a flag which specifies the inference method
-     * @return true if softmax is performed
-     */
-    bool getDoSoftMax() const;
-    /**
-     * @brief Sets a flag which specifies the inference method
-     * @param flag softmax is performed if true
-     * @return reference to layer builder
-     */
-    RegionYoloLayer& setDoSoftMax(bool flag);
-    /**
-     * @brief Returns anchors coordinates of regions
-     * @return anchors coordinates
-     */
-    float getAnchors() const;
-    /**
-     * @brief Sets anchors coordinates of regions
-     * @param anchors Anchors coordinates
-     * @return reference to layer builder
-     */
-    RegionYoloLayer& setAnchors(float anchors);
-    /**
-     * @brief Returns mask
-     * @return Mask
-     */
-    int getMask() const;
-    /**
-     * @brief Sets mask
-     * @param mask Specifies which anchors to use
-     * @return reference to layer builder
-     */
-    RegionYoloLayer& setMask(int mask);
-    /**
-     * @brief Returns the number of the dimension from which flattening is performed
-     * @return Axis
-     */
-    size_t getAxis() const;
-    /**
-     * @brief Sets the number of the dimension from which flattening is performed
-     * @param axis Axis
-     * @return reference to layer builder
-     */
-    RegionYoloLayer& setAxis(size_t axis);
-    /**
-     * @brief Returns the number of the dimension on which flattening is ended
-     * @return End axis
-     */
-    size_t getEndAxis() const;
-    /**
-     * @brief Sets the number of the dimension on which flattening is ended
-     * @param axis End axis
-     * @return reference to layer builder
-     */
-    RegionYoloLayer& setEndAxis(size_t axis);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
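
RegionYolo's parameters map directly onto the ngraph constructor; a sketch with YOLOv3-style illustrative numbers (255 channels = 3 regions * (80 classes + 4 coords + 1)):

```cpp
#include <ngraph/opsets/opset3.hpp>

// Sketch: coords/classes/num/mask/do_softmax and the flattening axes map onto
// the RegionYolo constructor arguments.
std::shared_ptr<ngraph::Node> make_region_yolo() {
    using namespace ngraph;
    auto data = std::make_shared<opset3::Parameter>(element::f32, Shape{1, 255, 13, 13});
    return std::make_shared<opset3::RegionYolo>(data,
                                                4,      // coords per region
                                                80,     // classes per region
                                                3,      // number of regions
                                                false,  // do_softmax
                                                std::vector<int64_t>{0, 1, 2},  // anchor mask
                                                1,      // axis flattening starts from
                                                3);     // axis flattening ends at
}
```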
diff --git a/inference-engine/include/builders/ie_relu6_layer.hpp b/inference-engine/include/builders/ie_relu6_layer.hpp
deleted file mode 100644 (file)
index 2575908..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for ReLU6 layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(ReLU6Layer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit ReLU6Layer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit ReLU6Layer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit ReLU6Layer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    ReLU6Layer& setName(const std::string& name);
-
-    /**
-     * @brief Returns port with shapes for the layer
-     * @return Port with shapes
-     */
-    const Port& getPort() const;
-    /**
-     * @brief Sets port shapes for the layer
-     * @param port Port with shapes
-     * @return reference to layer builder
-     */
-    ReLU6Layer& setPort(const Port& port);
-
-    /**
-     * @brief Returns N value
-     * @return N
-     */
-    float getN() const;
-    /**
-     * @brief Sets N value
-     * @param n N value (6 by default)
-     * @return reference to layer builder
-     */
-    ReLU6Layer& setN(float n);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
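
ReLU6 is a clamp to [0, n]; a one-op sketch of the ngraph counterpart:

```cpp
#include <ngraph/opsets/opset3.hpp>

// Sketch: ReLU6 == Clamp(x, 0, n) with the builder's default n = 6.
std::shared_ptr<ngraph::Node> make_relu6(const ngraph::Output<ngraph::Node>& data) {
    return std::make_shared<ngraph::opset3::Clamp>(data, 0.0, 6.0);
}
```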
diff --git a/inference-engine/include/builders/ie_relu_layer.hpp b/inference-engine/include/builders/ie_relu_layer.hpp
deleted file mode 100644 (file)
index 5c938a0..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for ReLU layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(ReLULayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit ReLULayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit ReLULayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit ReLULayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    ReLULayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns port with shapes for the layer
-     * @return Port with shapes
-     */
-    const Port& getPort() const;
-    /**
-     * @brief Sets port shapes for the layer
-     * @param port Port with shapes
-     * @return reference to layer builder
-     */
-    ReLULayer& setPort(const Port& port);
-
-    /**
-     * @brief Returns negative slope
-     * @return Negative slope
-     */
-    float getNegativeSlope() const;
-    /**
-     * @brief Sets negative slope
-     * @param negativeSlope Negative slope
-     * @return reference to layer builder
-     */
-    ReLULayer& setNegativeSlope(float negativeSlope);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
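
The ReLU builder splits into two ngraph forms depending on the negative slope; a hedged sketch:

```cpp
#include <ngraph/opsets/opset3.hpp>

// Sketch: plain Relu for slope == 0; a non-zero negative slope maps to PRelu
// with a scalar slope constant.
std::shared_ptr<ngraph::Node> make_relu(const ngraph::Output<ngraph::Node>& data,
                                        float negative_slope = 0.0f) {
    using namespace ngraph;
    if (negative_slope == 0.0f)
        return std::make_shared<opset3::Relu>(data);
    auto slope = opset3::Constant::create(element::f32, Shape{}, {negative_slope});
    return std::make_shared<opset3::PRelu>(data, slope);
}
```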
diff --git a/inference-engine/include/builders/ie_reorg_yolo_layer.hpp b/inference-engine/include/builders/ie_reorg_yolo_layer.hpp
deleted file mode 100644 (file)
index c94e2f4..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for ReorgYolo layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(ReorgYoloLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit ReorgYoloLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit ReorgYoloLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer const pointer to generic builder
-     */
-    explicit ReorgYoloLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    ReorgYoloLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns input port
-     * @return Input port
-     */
-    const Port& getInputPort() const;
-    /**
-     * @brief Sets input port
-     * @param port Input port
-     * @return reference to layer builder
-     */
-    ReorgYoloLayer& setInputPort(const Port& port);
-    /**
-     * @brief Returns output port
-     * @return Output port
-     */
-    const Port& getOutputPort() const;
-    /**
-     * @brief Sets output port
-     * @param port Output port
-     * @return reference to layer builder
-     */
-    ReorgYoloLayer& setOutputPort(const Port& port);
-    /**
-     * @brief Returns the stride (distance between cuts) in output blobs
-     * @return Stride
-     */
-    int getStride() const;
-    /**
-     * @brief Sets the stride (distance between cuts) in output blobs
-     * @param stride Stride
-     * @return reference to layer builder
-     */
-    ReorgYoloLayer& setStride(int stride);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
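
ReorgYolo keeps its name in ngraph; a minimal sketch where the builder's single stride becomes a Strides argument:

```cpp
#include <ngraph/opsets/opset3.hpp>

// Sketch: reorganize an NCHW blob with stride 2.
std::shared_ptr<ngraph::Node> make_reorg_yolo(const ngraph::Output<ngraph::Node>& data) {
    return std::make_shared<ngraph::opset3::ReorgYolo>(data, ngraph::Strides{2});
}
```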
diff --git a/inference-engine/include/builders/ie_resample_layer.hpp b/inference-engine/include/builders/ie_resample_layer.hpp
deleted file mode 100644 (file)
index 234f063..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Resample layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(ResampleLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit ResampleLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit ResampleLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer const pointer to generic builder
-     */
-    explicit ResampleLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    ResampleLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns input port
-     * @return Input port
-     */
-    const Port& getInputPort() const;
-    /**
-     * @brief Sets input port
-     * @param port Input port
-     * @return reference to layer builder
-     */
-    ResampleLayer& setInputPort(const Port& port);
-    /**
-     * @brief Returns output port
-     * @return Output port
-     */
-    const Port& getOutputPort() const;
-    /**
-     * @brief Sets output port
-     * @param port Output port
-     * @return reference to layer builder
-     */
-    ResampleLayer& setOutputPort(const Port& port);
-    /**
-     * @brief Returns resample type
-     * @return Type
-     */
-    const std::string& getResampleType() const;
-    /**
-     * @brief Sets resample type
-     * @param type Type
-     * @return reference to layer builder
-     */
-    ResampleLayer& setResampleType(const std::string& type);
-    /**
-     * @brief Returns flag that denotes whether to perform anti-aliasing
-     * @return true if anti-aliasing is performed
-     */
-    bool getAntialias() const;
-    /**
-     * @brief Sets flag that denotes whether to perform anti-aliasing
-     * @param antialias flag
-     * @return reference to layer builder
-     */
-    ResampleLayer& setAntialias(bool antialias);
-    /**
-     * @brief Returns resample factor
-     * @return Factor
-     */
-    float getFactor() const;
-    /**
-     * @brief Sets resample factor
-     * @param factor Factor
-     * @return reference to layer builder
-     */
-    ResampleLayer& setFactor(float factor);
-    /**
-     * @brief Returns width
-     * @return Width
-     */
-    size_t getWidth() const;
-    /**
-     * @brief Sets width
-     * @param width Width
-     * @return reference to layer builder
-     */
-    ResampleLayer& setWidth(size_t width);
-    /**
-     * @brief Returns height
-     * @return Height
-     */
-    size_t getHeight() const;
-    /**
-     * @brief Sets height
-     * @param height Height
-     * @return reference to layer builder
-     */
-    ResampleLayer& setHeight(size_t height);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
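
Resample has no same-name ngraph op; the usual replacement is Interpolate. A rough sketch assuming the v0 Interpolate attribute fields (the field names here are assumptions and may differ slightly in this snapshot):

```cpp
#include <ngraph/opsets/opset3.hpp>

// Rough sketch: Resample -> Interpolate; type/antialias/width/height map onto
// the attrs and the target-shape constant.
std::shared_ptr<ngraph::Node> make_resample(const ngraph::Output<ngraph::Node>& image) {
    using namespace ngraph;
    op::InterpolateAttrs attrs;
    attrs.axes      = AxisSet{2, 3};  // spatial axes of an NCHW input
    attrs.mode      = "nearest";      // resample type
    attrs.antialias = false;
    auto out_shape = opset3::Constant::create(element::i64, Shape{2}, {64, 64});  // target H, W
    return std::make_shared<opset3::Interpolate>(image, out_shape, attrs);
}
```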
diff --git a/inference-engine/include/builders/ie_reshape_layer.hpp b/inference-engine/include/builders/ie_reshape_layer.hpp
deleted file mode 100644 (file)
index a26f1e2..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Reshape layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(ReshapeLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit ReshapeLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit ReshapeLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit ReshapeLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    ReshapeLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns input port
-     * @return Input port
-     */
-    const Port& getInputPort() const;
-    /**
-     * @brief Sets input port
-     * @param port Input port
-     * @return reference to layer builder
-     */
-    ReshapeLayer& setInputPort(const Port& port);
-    /**
-     * @brief Returns output port
-     * @return Output port
-     */
-    const Port& getOutputPort() const;
-    /**
-     * @brief Sets output port
-     * @param port Output port
-     * @return reference to layer builder
-     */
-    ReshapeLayer& setOutputPort(const Port& port);
-    /**
-     * @brief Returns reshape dimensions
-     * @return Dimensions
-     */
-    const std::vector<int> getDims() const;
-    /**
-     * @brief Sets reshape dimensions
-     * @param dims Dimensions
-     * @return reference to layer builder
-     */
-    ReshapeLayer& setDims(const std::vector<int>& dims);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
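
Reshape's dims vector becomes an i64 pattern constant in ngraph; a minimal sketch:

```cpp
#include <ngraph/opsets/opset3.hpp>

// Sketch: flatten to [1, -1]; -1 asks Reshape to infer that dimension.
std::shared_ptr<ngraph::Node> make_reshape(const ngraph::Output<ngraph::Node>& data) {
    using namespace ngraph;
    auto pattern = opset3::Constant::create(element::i64, Shape{2}, {1, -1});
    return std::make_shared<opset3::Reshape>(data, pattern, false);  // special_zero = false
}
```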
diff --git a/inference-engine/include/builders/ie_rnn_sequence_layer.hpp b/inference-engine/include/builders/ie_rnn_sequence_layer.hpp
deleted file mode 100644 (file)
index 6283bc7..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for RNNSequence layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(RNNSequenceLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit RNNSequenceLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit RNNSequenceLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit RNNSequenceLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    RNNSequenceLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns input ports with shapes for the layer
-     * @return Vector of ports
-     */
-    const std::vector<Port>& getInputPorts() const;
-    /**
-     * @brief Sets input ports for the layer
-     * @param ports vector of input ports
-     * @return reference to layer builder
-     */
-    RNNSequenceLayer& setInputPorts(const std::vector<Port>& ports);
-
-    /**
-     * @brief Returns output ports with shapes for the layer
-     * @return Vector of ports
-     */
-    const std::vector<Port>& getOutputPorts() const;
-    /**
-     * @brief Sets output ports for the layer
-     * @param ports vector of output ports
-     * @return reference to layer builder
-     */
-    RNNSequenceLayer& setOutputPorts(const std::vector<Port>& ports);
-
-    int getHiddenSize() const;
-    RNNSequenceLayer& setHiddenSize(int size);
-    bool getSequenceDim() const;
-    RNNSequenceLayer& setSequenceDim(bool flag);
-    const std::vector<std::string>& getActivations() const;
-    RNNSequenceLayer& setActivations(const std::vector<std::string>& activations);
-    const std::vector<float>& getActivationsAlpha() const;
-    RNNSequenceLayer& setActivationsAlpha(const std::vector<float>& activations);
-    const std::vector<float>& getActivationsBeta() const;
-    RNNSequenceLayer& setActivationsBeta(const std::vector<float>& activations);
-    float getClip() const;
-    RNNSequenceLayer& setClip(float clip);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_roi_pooling_layer.hpp b/inference-engine/include/builders/ie_roi_pooling_layer.hpp
deleted file mode 100644 (file)
index 13a6f94..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for ROIPooling layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(ROIPoolingLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit ROIPoolingLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit ROIPoolingLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit ROIPoolingLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    ROIPoolingLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns input ports
-     * @return Vector of input ports
-     */
-    const std::vector<Port>& getInputPorts() const;
-    /**
-     * @brief Sets input ports
-     * @param ports Vector of input ports
-     * @return reference to layer builder
-     */
-    ROIPoolingLayer& setInputPorts(const std::vector<Port>& ports);
-    /**
-     * @brief Returns output port
-     * @return Output port
-     */
-    const Port& getOutputPort() const;
-    /**
-     * @brief Sets output port
-     * @param port Output port
-     * @return reference to layer builder
-     */
-    ROIPoolingLayer& setOutputPort(const Port& port);
-    /**
-     * @brief Returns a ratio of the input feature map over the input image size
-     * @return Spatial scale
-     */
-    float getSpatialScale() const;
-    /**
-     * @brief Sets a ratio of the input feature map over the input image size
-     * @param spatialScale Spatial scale
-     * @return reference to layer builder
-     */
-    ROIPoolingLayer& setSpatialScale(float spatialScale);
-    /**
-     * @brief Returns height and width of the ROI output feature map
-     * @return Vector containing height and width
-     */
-    const std::vector<int> getPooled() const;
-    /**
-     * @brief Sets height and width of the ROI output feature map
-     * @param pooled Vector with height and width
-     * @return reference to layer builder
-     */
-    ROIPoolingLayer& setPooled(const std::vector<int>& pooled);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
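
ROIPooling translates directly; a sketch with illustrative shapes, where rois is [N, 5] rows of (batch_index, x1, y1, x2, y2):

```cpp
#include <ngraph/opsets/opset3.hpp>

// Sketch: pooled H/W and the spatial scale carry over from the builder.
std::shared_ptr<ngraph::Node> make_roi_pooling() {
    using namespace ngraph;
    auto feat = std::make_shared<opset3::Parameter>(element::f32, Shape{1, 256, 14, 14});
    auto rois = std::make_shared<opset3::Parameter>(element::f32, Shape{100, 5});
    return std::make_shared<opset3::ROIPooling>(feat, rois,
                                                Shape{6, 6},  // pooled H, W
                                                0.0625f,      // spatial scale
                                                "max");
}
```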
diff --git a/inference-engine/include/builders/ie_scale_shift_layer.hpp b/inference-engine/include/builders/ie_scale_shift_layer.hpp
deleted file mode 100644 (file)
index 2e558de..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for ScaleShift layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(ScaleShiftLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit ScaleShiftLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit ScaleShiftLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit ScaleShiftLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    ScaleShiftLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns port with shapes for the layer
-     * @return Port with shapes
-     */
-    const Port& getPort() const;
-    /**
-     * @brief Sets port shapes for the layer
-     * @param port Port with shapes
-     * @return reference to layer builder
-     */
-    ScaleShiftLayer& setPort(const Port& port);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
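
ScaleShift has no dedicated ngraph op; it decomposes into broadcasting Multiply and Add. A hedged sketch with a hypothetical helper and illustrative constants:

```cpp
#include <ngraph/opsets/opset3.hpp>

// Sketch: per-channel scale and shift, shaped {1, C, 1, 1} for NCHW broadcasting.
std::shared_ptr<ngraph::Node> make_scale_shift(const ngraph::Output<ngraph::Node>& data,
                                               size_t channels) {
    using namespace ngraph;
    auto scale = opset3::Constant::create(element::f32, Shape{1, channels, 1, 1},
                                          std::vector<float>(channels, 1.5f));
    auto shift = opset3::Constant::create(element::f32, Shape{1, channels, 1, 1},
                                          std::vector<float>(channels, 0.5f));
    auto scaled = std::make_shared<opset3::Multiply>(data, scale);
    return std::make_shared<opset3::Add>(scaled, shift);
}
```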
diff --git a/inference-engine/include/builders/ie_sigmoid_layer.hpp b/inference-engine/include/builders/ie_sigmoid_layer.hpp
deleted file mode 100644 (file)
index af01674..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Sigmoid layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(SigmoidLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit SigmoidLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit SigmoidLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit SigmoidLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    SigmoidLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns port with shapes for the layer
-     * @return Port with shapes
-     */
-    const Port& getPort() const;
-    /**
-     * @brief Sets port shapes for the layer
-     * @param port Port with shapes
-     * @return reference to layer builder
-     */
-    SigmoidLayer& setPort(const Port& port);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
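
Sigmoid is a one-op replacement; a minimal sketch:

```cpp
#include <ngraph/opsets/opset3.hpp>

// Sketch: direct Sigmoid node over the input.
std::shared_ptr<ngraph::Node> make_sigmoid(const ngraph::Output<ngraph::Node>& data) {
    return std::make_shared<ngraph::opset3::Sigmoid>(data);
}
```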
diff --git a/inference-engine/include/builders/ie_simpler_nms_layer.hpp b/inference-engine/include/builders/ie_simpler_nms_layer.hpp
deleted file mode 100644 (file)
index c75d38b..0000000
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for SimplerNMS layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(SimplerNMSLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit SimplerNMSLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit SimplerNMSLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit SimplerNMSLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    SimplerNMSLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns input ports
-     * @return Vector of input ports
-     */
-    const std::vector<Port>& getInputPorts() const;
-    /**
-     * @brief Sets input ports
-     * @param ports Vector of input ports
-     * @return reference to layer builder
-     */
-    SimplerNMSLayer& setInputPorts(const std::vector<Port>& ports);
-    /**
-     * @brief Returns output port
-     * @return Output port
-     */
-    const Port& getOutputPort() const;
-    /**
-     * @brief Sets output port
-     * @param port Output port
-     * @return reference to layer builder
-     */
-    SimplerNMSLayer& setOutputPort(const Port& port);
-    /**
-     * @brief Returns the quantity of bounding boxes before applying NMS
-     * @return Quantity of bounding boxes
-     */
-    size_t getPreNMSTopN() const;
-    /**
-     * @brief Sets the quantity of bounding boxes before applying NMS
-     * @param topN Quantity of bounding boxes
-     * @return reference to layer builder
-     */
-    SimplerNMSLayer& setPreNMSTopN(size_t topN);
-    /**
-     * @brief Returns the quantity of bounding boxes after applying NMS
-     * @return Quantity of bounding boxes
-     */
-    size_t getPostNMSTopN() const;
-    /**
-     * @brief Sets the quantity of bounding boxes after applying NMS
-     * @param topN Quantity of bounding boxes
-     * @return reference to layer builder
-     */
-    SimplerNMSLayer& setPostNMSTopN(size_t topN);
-    /**
-     * @brief Returns the step size to slide over boxes in pixels
-     * @return Step size
-     */
-    size_t getFeatStride() const;
-    /**
-     * @brief Sets the step size to slide over boxes in pixels
-     * @param featStride Step size
-     * @return reference to layer builder
-     */
-    SimplerNMSLayer& setFeatStride(size_t featStride);
-    /**
-     * @brief Returns the minimum size of box to be taken into consideration
-     * @return Minimum size
-     */
-    size_t getMinBoxSize() const;
-    /**
-     * @brief Sets the minimum size of box to be taken into consideration
-     * @param minSize Minimum size
-     * @return reference to layer builder
-     */
-    SimplerNMSLayer& setMinBoxSize(size_t minSize);
-    /**
-     * @brief Returns scale for anchor box generation
-     * @return Scale for anchor boxes
-     */
-    size_t getScale() const;
-    /**
-     * @brief Sets scale for anchor box generation
-     * @param scale Scale for anchor boxes
-     * @return reference to layer builder
-     */
-    SimplerNMSLayer& setScale(size_t scale);
-
-    /**
-     * @brief Returns the minimum value of the proposal to be taken into consideration
-     * @return Threshold
-     */
-    float getCLSThreshold() const;
-    /**
-     * @brief Sets the minimum value of the proposal to be taken into consideration
-     * @param threshold Minimum value
-     * @return reference to layer builder
-     */
-    SimplerNMSLayer& setCLSThreshold(float threshold);
-    /**
-     * @brief Returns the minimum ratio of boxes overlapping to be taken into consideration
-     * @return Threshold
-     */
-    float getIOUThreshold() const;
-    /**
-     * @brief Sets the minimum ratio of boxes overlapping to be taken into consideration
-     * @param threshold Minimum value
-     * @return reference to layer builder
-     */
-    SimplerNMSLayer& setIOUThreshold(float threshold);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_softmax_layer.hpp b/inference-engine/include/builders/ie_softmax_layer.hpp
deleted file mode 100644 (file)
index f4681a9..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for SoftMax layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(SoftMaxLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit SoftMaxLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit SoftMaxLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit SoftMaxLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    SoftMaxLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns port with shapes for the layer
-     * @return Port with shapes
-     */
-    const Port& getPort() const;
-    /**
-     * @brief Sets port shapes for the layer
-     * @param port Port with shapes
-     * @return reference to layer builder
-     */
-    SoftMaxLayer& setPort(const Port& port);
-    /**
-     * @brief Returns axis
-     * @return Axis
-     */
-    size_t getAxis() const;
-    /**
-     * @brief Sets axis
-     * @param axis Axis
-     * @return reference to layer builder
-     */
-    SoftMaxLayer& setAxis(size_t axis);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_split_layer.hpp b/inference-engine/include/builders/ie_split_layer.hpp
deleted file mode 100644 (file)
index 1393e41..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Split layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(SplitLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit SplitLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit SplitLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit SplitLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    SplitLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns output ports
-     * @return Vector of output ports
-     */
-    const std::vector<Port>& getOutputPorts() const;
-    /**
-     * @brief Sets output ports
-     * @param ports Vector of output ports
-     * @return reference to layer builder
-     */
-    SplitLayer& setOutputPorts(const std::vector<Port>& ports);
-    /**
-     * @brief Returns input port
-     * @return Input port
-     */
-    const Port& getInputPort() const;
-    /**
-     * @brief Sets input port
-     * @param port Input port
-     * @return reference to layer builder
-     */
-    SplitLayer& setInputPort(const Port& port);
-    /**
-     * @brief Returns axis
-     * @return Axis
-     */
-    size_t getAxis() const;
-    /**
-     * @brief Sets axis
-     * @param axis Axis
-     * @return reference to layer builder
-     */
-    SplitLayer& setAxis(size_t axis);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_tanh_layer.hpp b/inference-engine/include/builders/ie_tanh_layer.hpp
deleted file mode 100644 (file)
index e6c7fe9..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for TanH layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(TanHLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit TanHLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit TanHLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit TanHLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    TanHLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns port with shapes for the layer
-     * @return Port with shapes
-     */
-    const Port& getPort() const;
-    /**
-     * @brief Sets port shapes for the layer
-     * @param port Port with shapes
-     * @return reference to layer builder
-     */
-    TanHLayer& setPort(const Port& port);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
diff --git a/inference-engine/include/builders/ie_tile_layer.hpp b/inference-engine/include/builders/ie_tile_layer.hpp
deleted file mode 100644 (file)
index 0b0e112..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @file
- */
-
-#pragma once
-
-#include <builders/ie_layer_decorator.hpp>
-#include <ie_network.hpp>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace Builder {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief The class represents a builder for Tile layer
- */
-IE_SUPPRESS_DEPRECATED_START
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(TileLayer): public LayerDecorator {
-public:
-    /**
-     * @brief The constructor creates a builder with the name
-     * @param name Layer name
-     */
-    explicit TileLayer(const std::string& name = "");
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer pointer to generic builder
-     */
-    explicit TileLayer(const Layer::Ptr& layer);
-    /**
-     * @brief The constructor creates a builder from generic builder
-     * @param layer constant pointer to generic builder
-     */
-    explicit TileLayer(const Layer::CPtr& layer);
-    /**
-     * @brief Sets the name for the layer
-     * @param name Layer name
-     * @return reference to layer builder
-     */
-    TileLayer& setName(const std::string& name);
-
-    /**
-     * @brief Returns input port
-     * @return Input port
-     */
-    const Port& getInputPort() const;
-    /**
-     * @brief Sets input port
-     * @param port Input port
-     * @return reference to layer builder
-     */
-    TileLayer& setInputPort(const Port& port);
-    /**
-     * @brief Returns output port
-     * @return Output port
-     */
-    const Port& getOutputPort() const;
-    /**
-     * @brief Sets output port
-     * @param port Output port
-     * @return reference to layer builder
-     */
-    TileLayer& setOutputPort(const Port& port);
-    /**
-     * @brief Returns axis
-     * @return Axis
-     */
-    size_t getAxis() const;
-    /**
-     * @brief Sets axis
-     * @param axis Axis
-     * @return reference to layer builder
-     */
-    TileLayer& setAxis(size_t axis);
-    /**
-     * @brief Returns tiles
-     * @return Tiles
-     */
-    size_t getTiles() const;
-    /**
-     * @brief Sets tiles
-     * @param tiles Tiles
-     * @return reference to layer builder
-     */
-    TileLayer& setTiles(size_t tiles);
-};
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
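Every builder header deleted above carries the same guidance: use the ngraph API instead. As a hedged sketch of that migration (shapes and repeat counts below are illustrative, not taken from this diff), the removed TileLayer builder corresponds roughly to constructing ngraph::opset1::Tile directly:

```cpp
#include <cstdint>
#include <memory>
#include <vector>

#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset1.hpp>

// Builds data -> Tile as an ngraph::Function; this is what the deprecated
// Builder::TileLayer used to describe. All shapes here are placeholders.
std::shared_ptr<ngraph::Function> makeTileFunction() {
    auto data = std::make_shared<ngraph::opset1::Parameter>(
        ngraph::element::f32, ngraph::Shape{2, 3});
    auto repeats = ngraph::opset1::Constant::create(
        ngraph::element::i64, ngraph::Shape{2}, std::vector<int64_t>{2, 1});
    auto tile = std::make_shared<ngraph::opset1::Tile>(data, repeats);
    return std::make_shared<ngraph::Function>(
        ngraph::NodeVector{tile}, ngraph::ParameterVector{data});
}
```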
index 40f6264..4a36a3d 100644 (file)
@@ -17,7 +17,7 @@
 #include "details/ie_exception_conversion.hpp"
 #include "details/os/os_filesystem.hpp"
 #include "ie_blob.h"
-#include "ie_cnn_network.h"
+#include "cpp/ie_cnn_network.h"
 #include "ie_common.h"
 #include "ie_icnn_net_reader.h"
 
index 4f9428e..b6ec009 100644 (file)
@@ -19,8 +19,8 @@
 #include "cpp/ie_infer_request.hpp"
 #include "cpp/ie_memory_state.hpp"
 #include "details/ie_exception_conversion.hpp"
+#include "details/ie_so_loader.h"
 #include "ie_iexecutable_network.hpp"
-#include "ie_plugin_ptr.hpp"
 
 namespace InferenceEngine {
 
@@ -29,7 +29,7 @@ namespace InferenceEngine {
  */
 class ExecutableNetwork {
     IExecutableNetwork::Ptr actual;
-    InferenceEnginePluginPtr plg;
+    details::SharedObjectLoader::Ptr plg;
 
 public:
     /**
@@ -50,7 +50,7 @@ public:
      * @param actual Initialized shared pointer
      * @param plg Plugin to use
      */
-    explicit ExecutableNetwork(IExecutableNetwork::Ptr actual, InferenceEnginePluginPtr plg = {})
+    explicit ExecutableNetwork(IExecutableNetwork::Ptr actual, details::SharedObjectLoader::Ptr plg = {})
         : actual(actual), plg(plg) {
         //  plg can be null, but not the actual
         if (actual == nullptr) {
@@ -129,7 +129,6 @@ public:
      * Wraps IExecutableNetwork::Export.
      *
      * @see Core::ImportNetwork
-     * @see InferencePlugin::ImportNetwork
      *
      * @param modelFileName Full path to the location of the exported file
      */
@@ -143,7 +142,6 @@ public:
      * Wraps IExecutableNetwork::Export.
      *
      * @see Core::ImportNetwork
-     * @see InferencePlugin::ImportNetwork
      *
      * @param networkModel network model output stream
      */
index 86509fc..6c5ff95 100644 (file)
@@ -14,8 +14,8 @@
 #include <string>
 
 #include "details/ie_exception_conversion.hpp"
+#include "details/ie_so_loader.h"
 #include "ie_iinfer_request.hpp"
-#include "ie_plugin_ptr.hpp"
 
 namespace InferenceEngine {
 
@@ -63,7 +63,7 @@ public:
  */
 class InferRequest {
     IInferRequest::Ptr actual;
-    InferenceEnginePluginPtr plg;
+    InferenceEngine::details::SharedObjectLoader::Ptr plg;
     std::shared_ptr<details::ICompletionCallbackWrapper> callback;
 
     static void callWrapper(InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode code) {
@@ -80,6 +80,20 @@ public:
     InferRequest() = default;
 
     /**
+     * @brief Constructs InferRequest from the initialized std::shared_ptr
+     * @param request Initialized shared pointer to IInferRequest interface
+     * @param splg Plugin to use. This is required to ensure that InferRequest can work properly even if plugin object is destroyed.

+     */
+    explicit InferRequest(IInferRequest::Ptr request,
+                          InferenceEngine::details::SharedObjectLoader::Ptr splg = {}):
+                          actual(request), plg(splg) {
+        //  plg can be null, but not the actual
+        if (actual == nullptr) {
+            THROW_IE_EXCEPTION << "InferRequest wrapper was not initialized.";
+        }
+    }
+
+    /**
      * @brief Destructor
      */
     ~InferRequest() {
@@ -195,18 +209,6 @@ public:
     }
 
     /**
-     * constructs InferRequest from the initialized shared_pointer
-     * @param request Initialized shared pointer to IInferRequest interface
-     * @param plg Plugin to use. This is required to ensure that InferRequest can work properly even if plugin object is destroyed.
-     */
-    explicit InferRequest(IInferRequest::Ptr request, InferenceEnginePluginPtr plg = {}): actual(request), plg(plg) {
-        //  plg can be null, but not the actual
-        if (actual == nullptr) {
-            THROW_IE_EXCEPTION << "InferRequest wrapper was not initialized.";
-        }
-    }
-
-    /**
      * @brief Start inference of specified input(s) in asynchronous mode
      *
      * @note It returns immediately. Inference starts also immediately.
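For context, a hedged sketch of the client-side flow these wrapper changes serve ("model.xml" and "CPU" are placeholders): the request is created by the executable network, which now passes the shared-object loader along so the plugin library stays loaded for the request's lifetime.

```cpp
#include <ie_core.hpp>

int main() {
    InferenceEngine::Core ie;
    auto executable = ie.LoadNetwork(ie.ReadNetwork("model.xml"), "CPU");
    InferenceEngine::InferRequest request = executable.CreateInferRequest();
    request.StartAsync();  // returns immediately, inference runs in the background
    request.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);
    return 0;
}
```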
index 708c196..99da414 100644 (file)
@@ -7,7 +7,9 @@
  */
 
 #pragma once
+
 #include <string>
+#include <ie_imemory_state.hpp>
 
 namespace InferenceEngine {
 
index 7512121..e75fdff 100644 (file)
@@ -15,7 +15,7 @@
 
 #include "cpp/ie_executable_network.hpp"
 #include "details/ie_exception_conversion.hpp"
-#include "ie_cnn_network.h"
+#include "cpp/ie_cnn_network.h"
 #include "ie_plugin.hpp"
 #include "ie_plugin_ptr.hpp"
 
@@ -78,8 +78,8 @@ public:
         IExecutableNetwork::Ptr ret;
         IE_SUPPRESS_DEPRECATED_START
         CALL_STATUS_FNC(LoadNetwork, ret, network, config);
-        IE_SUPPRESS_DEPRECATED_END
         return ExecutableNetwork(ret, actual);
+        IE_SUPPRESS_DEPRECATED_END
     }
 
     /**
@@ -94,9 +94,9 @@ public:
         IExecutableNetwork::Ptr ret;
         IE_SUPPRESS_DEPRECATED_START
         CALL_STATUS_FNC(LoadNetwork, ret, network, config);
-        IE_SUPPRESS_DEPRECATED_END
         if (ret.get() == nullptr) THROW_IE_EXCEPTION << "Internal error: pointer to executable network is null";
         return ExecutableNetwork(ret, actual);
+        IE_SUPPRESS_DEPRECATED_END
     }
 
     /**
@@ -137,8 +137,8 @@ public:
         IExecutableNetwork::Ptr ret;
         IE_SUPPRESS_DEPRECATED_START
         CALL_STATUS_FNC(ImportNetwork, ret, modelFileName, config);
-        IE_SUPPRESS_DEPRECATED_END
         return ExecutableNetwork(ret, actual);
+        IE_SUPPRESS_DEPRECATED_END
     }
 
     /**
diff --git a/inference-engine/include/details/ie_inetwork_iterator.hpp b/inference-engine/include/details/ie_inetwork_iterator.hpp
deleted file mode 100644 (file)
index 21848d2..0000000
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @brief A header file for the INetworkIterator class
- * @file ie_inetwork_iterator.hpp
- */
-#pragma once
-#include <ie_network.hpp>
-#include <iterator>
-#include <list>
-#include <memory>
-#include <unordered_map>
-#include <unordered_set>
-#include <utility>
-#include <vector>
-
-namespace InferenceEngine {
-namespace details {
-
-template <class NT, class LT>
-class INFERENCE_ENGINE_NN_BUILDER_DEPRECATED INetworkIterator
-    : public std::iterator<std::input_iterator_tag, std::shared_ptr<LT>> {
-public:
-    explicit INetworkIterator(NT* network, bool toEnd): network(network), currentIdx(0) {}
-    explicit INetworkIterator(NT* network): network(network), currentIdx(0) {
-        if (!network) return;
-        const auto& inputs = network->getInputs();
-
-        std::vector<std::shared_ptr<LT>> allInputs;
-        for (const auto& input : inputs) {
-            allInputs.push_back(std::dynamic_pointer_cast<LT>(input));
-        }
-
-        forestDFS(
-            allInputs,
-            [&](std::shared_ptr<LT> current) {
-                sortedLayers.push_back(current);
-            },
-            false);
-
-        std::reverse(std::begin(sortedLayers), std::end(sortedLayers));
-        currentLayer = getNextLayer();
-    }
-
-    IE_SUPPRESS_DEPRECATED_START
-
-    bool operator!=(const INetworkIterator& that) const {
-        return !operator==(that);
-    }
-
-    bool operator==(const INetworkIterator& that) const {
-        return network == that.network && currentLayer == that.currentLayer;
-    }
-
-    typename INetworkIterator::reference operator*() {
-        if (nullptr == currentLayer) {
-            THROW_IE_EXCEPTION << "iterator out of bound";
-        }
-        return currentLayer;
-    }
-
-    INetworkIterator& operator++() {
-        currentLayer = getNextLayer();
-        return *this;
-    }
-
-    const INetworkIterator<NT, LT> operator++(int) {
-        INetworkIterator<NT, LT> retval = *this;
-        ++(*this);
-        return retval;
-    }
-
-    IE_SUPPRESS_DEPRECATED_END
-
-private:
-    std::vector<std::shared_ptr<LT>> sortedLayers;
-    std::shared_ptr<LT> currentLayer;
-    NT* network = nullptr;
-    size_t currentIdx;
-
-    std::shared_ptr<LT> getNextLayer() {
-        return (sortedLayers.size() > currentIdx) ? sortedLayers[currentIdx++] : nullptr;
-    }
-
-    template <class T>
-    inline void forestDFS(const std::vector<std::shared_ptr<LT>>& heads, const T& visit, bool bVisitBefore) {
-        if (heads.empty()) {
-            return;
-        }
-
-        std::unordered_map<idx_t, bool> visited;
-        for (auto& layer : heads) {
-            DFS(visited, layer, visit, bVisitBefore);
-        }
-    }
-
-    template <class T>
-    inline void DFS(std::unordered_map<idx_t, bool>& visited, const std::shared_ptr<LT>& layer, const T& visit,
-                    bool visitBefore) {
-        if (layer == nullptr) {
-            return;
-        }
-
-        if (visitBefore) visit(layer);
-
-        visited[layer->getId()] = false;
-        for (const auto& connection : network->getLayerConnections(layer->getId())) {
-            if (connection.to().layerId() == layer->getId()) {
-                continue;
-            }
-            const auto outLayer = network->getLayer(connection.to().layerId());
-            if (!outLayer) THROW_IE_EXCEPTION << "Couldn't get layer with id: " << connection.to().layerId();
-            auto i = visited.find(outLayer->getId());
-            if (i != visited.end()) {
-                /**
-                 * cycle detected we entered still not completed node
-                 */
-                if (!i->second) {
-                    THROW_IE_EXCEPTION << "Sorting not possible, due to existed loop.";
-                }
-                continue;
-            }
-
-            DFS(visited, outLayer, visit, visitBefore);
-        }
-        if (!visitBefore) visit(layer);
-        visited[layer->getId()] = true;
-    }
-};
-
-}  // namespace details
-}  // namespace InferenceEngine
index df219cd..643c836 100644 (file)
@@ -188,6 +188,10 @@ public:
         return *this;
     }
 
+    operator std::shared_ptr<Loader>() const noexcept {
+        return _so_loader;
+    }
+
 protected:
     /**
      * @brief Gets a smart pointer to the DLL
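A hedged illustration of what the new conversion enables (the plugin file name is a placeholder): an SOPointer can now hand its underlying loader to wrappers such as ExecutableNetwork and InferRequest, which hold a details::SharedObjectLoader::Ptr as shown elsewhere in this diff.

```cpp
#include <details/ie_so_pointer.hpp>
#include <ie_icnn_net_reader.h>

// The smart pointer keeps the shared library loaded; the implicit conversion
// added above exposes that loader so other wrappers can extend its lifetime.
void shareLoader() {
    InferenceEngine::details::SOPointer<InferenceEngine::ICNNNetReader> reader("inference_engine_ir_reader");
    InferenceEngine::details::SharedObjectLoader::Ptr loader = reader;  // operator std::shared_ptr<Loader>()
    (void)loader;  // would be passed on to ExecutableNetwork / InferRequest wrappers
}
```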
index eab0954..cfa165a 100644 (file)
@@ -27,6 +27,11 @@ private:
 
 public:
     /**
+     * @brief A shared pointer to SharedObjectLoader
+     */
+    using Ptr = std::shared_ptr<InferenceEngine::details::SharedObjectLoader>;
+
+    /**
      * @brief Loads a library with the name specified. The library is loaded according to
      *        the POSIX rules for dlopen
      * @param pluginName Full or relative path to the library
index 955375c..2c9bcf6 100644 (file)
@@ -49,6 +49,11 @@ private:
     }
 
 public:
+    /**
+     * @brief A shared pointer to SharedObjectLoader
+     */
+    using Ptr = std::shared_ptr<SharedObjectLoader>;
+
 #ifdef ENABLE_UNICODE_PATH_SUPPORT
     /**
      * @brief Loads a library with the name specified. The library is loaded according to the
@@ -70,7 +75,7 @@ public:
     explicit SharedObjectLoader(LPCSTR pluginName) {
         ExcludeCurrentDirectory();
 
-        shared_object = LoadLibrary(pluginName);
+        shared_object = LoadLibraryA(pluginName);
         if (!shared_object) {
             char cwd[1024];
             THROW_IE_EXCEPTION << "Cannot load library '" << pluginName << "': " << GetLastError()
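The LoadLibrary to LoadLibraryA change avoids a macro pitfall rather than altering behavior in ANSI builds; a minimal illustration:

```cpp
#include <windows.h>

// LoadLibrary is a macro: under UNICODE builds it expands to LoadLibraryW,
// which takes LPCWSTR and would not accept a narrow pluginName string.
// LoadLibraryA names the narrow-string variant explicitly and always
// compiles against an LPCSTR argument.
HMODULE loadPluginLibrary(LPCSTR pluginName) {
    return LoadLibraryA(pluginName);
}
```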
index 539ccbd..a16e0ea 100644 (file)
 #define INFERENCE_ENGINE_DEPRECATED(msg)
 #endif
 
-#define INFERENCE_ENGINE_NN_BUILDER_DEPRECATED \
-    INFERENCE_ENGINE_DEPRECATED("Use ngraph API. NN Builder API will be removed in 2020.3")
-#define INFERENCE_ENGINE_NN_BUILDER_API_CLASS(...) \
-    INFERENCE_ENGINE_NN_BUILDER_DEPRECATED         \
-    INFERENCE_ENGINE_API_CLASS(__VA_ARGS__)
-
 #if defined IMPLEMENT_INFERENCE_ENGINE_API || defined IMPLEMENT_INFERENCE_ENGINE_PLUGIN
 # define INFERENCE_ENGINE_INTERNAL(msg)
 #else
diff --git a/inference-engine/include/ie_builders.hpp b/inference-engine/include/ie_builders.hpp
deleted file mode 100644 (file)
index 3f588c5..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @brief A header file for NN Builder API
- * @file ie_builders.hpp
- */
-
-#pragma once
-
-#include <builders/ie_argmax_layer.hpp>
-#include <builders/ie_clamp_layer.hpp>
-#include <builders/ie_concat_layer.hpp>
-#include <builders/ie_const_layer.hpp>
-#include <builders/ie_convolution_layer.hpp>
-#include <builders/ie_crop_layer.hpp>
-#include <builders/ie_ctc_greedy_decoder_layer.hpp>
-#include <builders/ie_deconvolution_layer.hpp>
-#include <builders/ie_detection_output_layer.hpp>
-#include <builders/ie_eltwise_layer.hpp>
-#include <builders/ie_elu_layer.hpp>
-#include <builders/ie_fully_connected_layer.hpp>
-#include <builders/ie_grn_layer.hpp>
-#include <builders/ie_gru_sequence_layer.hpp>
-#include <builders/ie_input_layer.hpp>
-#include <builders/ie_layer_builder.hpp>
-#include <builders/ie_lrn_layer.hpp>
-#include <builders/ie_lstm_sequence_layer.hpp>
-#include <builders/ie_memory_layer.hpp>
-#include <builders/ie_mvn_layer.hpp>
-#include <builders/ie_network_builder.hpp>
-#include <builders/ie_norm_layer.hpp>
-#include <builders/ie_normalize_layer.hpp>
-#include <builders/ie_output_layer.hpp>
-#include <builders/ie_permute_layer.hpp>
-#include <builders/ie_pooling_layer.hpp>
-#include <builders/ie_power_layer.hpp>
-#include <builders/ie_prelu_layer.hpp>
-#include <builders/ie_prior_box_clustered_layer.hpp>
-#include <builders/ie_prior_box_layer.hpp>
-#include <builders/ie_proposal_layer.hpp>
-#include <builders/ie_psroi_pooling_layer.hpp>
-#include <builders/ie_region_yolo_layer.hpp>
-#include <builders/ie_relu6_layer.hpp>
-#include <builders/ie_relu_layer.hpp>
-#include <builders/ie_reorg_yolo_layer.hpp>
-#include <builders/ie_resample_layer.hpp>
-#include <builders/ie_reshape_layer.hpp>
-#include <builders/ie_rnn_sequence_layer.hpp>
-#include <builders/ie_roi_pooling_layer.hpp>
-#include <builders/ie_scale_shift_layer.hpp>
-#include <builders/ie_sigmoid_layer.hpp>
-#include <builders/ie_simpler_nms_layer.hpp>
-#include <builders/ie_softmax_layer.hpp>
-#include <builders/ie_split_layer.hpp>
-#include <builders/ie_tanh_layer.hpp>
-#include <builders/ie_tile_layer.hpp>
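With the umbrella ie_builders.hpp header gone, graphs are assembled as ngraph::Function objects and wrapped into CNNNetwork. A minimal hedged sketch (the Parameter to Relu layer choice is illustrative only; see also the Tile example earlier):

```cpp
#include <memory>

#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset1.hpp>

#include <cpp/ie_cnn_network.h>

// Assemble Parameter -> Relu directly as an ngraph::Function, then wrap it
// into CNNNetwork; this replaces the deleted Builder::Network workflow.
InferenceEngine::CNNNetwork buildReluNetwork() {
    auto param = std::make_shared<ngraph::opset1::Parameter>(
        ngraph::element::f32, ngraph::Shape{1, 3});
    auto relu = std::make_shared<ngraph::opset1::Relu>(param);
    auto fn = std::make_shared<ngraph::Function>(
        ngraph::NodeVector{relu}, ngraph::ParameterVector{param});
    return InferenceEngine::CNNNetwork(fn);
}
```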
diff --git a/inference-engine/include/ie_context.hpp b/inference-engine/include/ie_context.hpp
deleted file mode 100644 (file)
index c85bc98..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @brief This is a header file for the IE Context class
- *
- * @file ie_context.hpp
- */
-#pragma once
-
-#include <ie_iextension.h>
-
-#include <details/caseless.hpp>
-#include <map>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief This class implements object
- */
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(Context) {
-public:
-    Context();
-
-    /**
-     * @brief Registers extension within the context
-     *
-     * @param ext Pointer to already loaded extension
-     */
-    void addExtension(const IShapeInferExtensionPtr& ext);
-
-    /**
-     * @brief Registers Shape Infer implementation within the Context
-     *
-     * @param type Layer type
-     * @param impl Shape Infer implementation
-     */
-    IE_SUPPRESS_DEPRECATED_START
-    void addShapeInferImpl(const std::string& type, const IShapeInferImpl::Ptr& impl);
-
-    /**
-     * @brief Returns the shape infer implementation by layer type
-     *
-     * @param type Layer type
-     * @return Shape Infer implementation
-     */
-    IShapeInferImpl::Ptr getShapeInferImpl(const std::string& type);
-
-private:
-    details::caseless_map<std::string, IShapeInferImpl::Ptr> shapeInferImpls;
-    IE_SUPPRESS_DEPRECATED_END
-};
-
-}  // namespace InferenceEngine
index 6d9c89d..50307e2 100644 (file)
@@ -14,7 +14,7 @@
 #include <string>
 #include <vector>
 
-#include "cpp/ie_plugin_cpp.hpp"
+#include <cpp/ie_executable_network.hpp>
 #include "details/os/os_filesystem.hpp"
 #include "ie_extension.h"
 #include "ie_remote_context.hpp"
 namespace InferenceEngine {
 
 /**
+ * @brief Response structure encapsulating information about supported layers
+ */
+struct QueryNetworkResult {
+    /**
+     * @brief A map of supported layers:
+     * - key - a layer name
+     * - value - a device name to which the layer is assigned
+     */
+    std::map<std::string, std::string> supportedLayersMap;
+
+    /**
+     * @brief A status code
+     */
+    StatusCode rc = OK;
+
+    /**
+     * @brief Response message
+     */
+    ResponseDesc resp;
+};
+
+/**
  * @brief This class represents Inference Engine Core entity.
  *
  * It can throw exceptions safely for the application, where it is properly handled.
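A hedged sketch of how the new QueryNetworkResult is typically consumed ("model.xml" and "CPU" are placeholders); supportedLayersMap reports, per layer, the device it would be assigned to:

```cpp
#include <ie_core.hpp>

#include <iostream>

int main() {
    InferenceEngine::Core ie;
    auto network = ie.ReadNetwork("model.xml");
    auto res = ie.QueryNetwork(network, "CPU");
    for (const auto& entry : res.supportedLayersMap) {
        std::cout << entry.first << " -> " << entry.second << '\n';
    }
    return 0;
}
```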
@@ -97,7 +119,7 @@ public:
      * Users can create as many networks as they need and use
      *        them simultaneously (up to the limitation of the hardware resources)
      *
-     * @param network CNNNetwork object acquired from CNNNetReader
+     * @param network CNNNetwork object acquired from Core::ReadNetwork
      * @param deviceName Name of device to load network to
      * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
      * operation
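Continuing that sketch, the optional config map applies only to this load call; the thread-count key shown is the CPU plugin's setting and is an assumption about the target device:

```cpp
#include <map>
#include <string>

#include <ie_core.hpp>

// Per-call configuration passed to LoadNetwork; other plugins expose
// their own keys in place of the CPU thread-count setting shown here.
InferenceEngine::ExecutableNetwork loadOnCpu(InferenceEngine::Core& ie,
                                             const InferenceEngine::CNNNetwork& network) {
    const std::map<std::string, std::string> config = {{"CPU_THREADS_NUM", "4"}};
    return ie.LoadNetwork(network, "CPU", config);
}
```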
@@ -115,7 +137,7 @@ public:
 
     /**
      * @brief Creates an executable network from a network object within a specified remote context.
-     * @param network CNNNetwork object acquired from CNNNetReader
+     * @param network CNNNetwork object acquired from Core::ReadNetwork
      * @param context Pointer to RemoteContext object
      * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
      * operation
@@ -231,7 +253,7 @@ public:
      * prefix to identify library full name
      *
      * @param deviceName A device name to register plugin for. If device name is not specified, then it's taken from
-     * plugin using InferenceEnginePluginPtr::GetName function
+     * plugin itself.
      */
     void RegisterPlugin(const std::string& pluginName, const std::string& deviceName);
 
index cd5318c..0fd7ae1 100644 (file)
@@ -34,7 +34,8 @@ public:
 };
 
 /**
- * @brief The SOCreatorTrait class specialization for IExtension case, defines the name of the fabric method for
+ * @deprecated Implement the IExtension interface instead
+ * @brief The SOCreatorTrait class specialization for the IShapeInferExtension case; defines the name of the factory method for
  * creating IExtension object in DLL
  */
 template <>
@@ -205,7 +206,9 @@ public:
      *
      * @param name Full or relative path to extension library
      */
+    IE_SUPPRESS_DEPRECATED_START_WIN
     explicit ShapeInferExtension(const file_name_t& name): actual(name) {}
+    IE_SUPPRESS_DEPRECATED_END_WIN
 
     /**
      * @brief Gets the extension version information
index 21b611f..5efd9be 100644 (file)
@@ -134,13 +134,13 @@ IE_SUPPRESS_DEPRECATED_START
 namespace details {
 
 /**
- * @brief This class defines the name of the fabric for creating an IHeteroInferencePlugin object in DLL
+ * @brief This class defines the name of the factory for creating an ICNNNetReader object in DLL
  */
 template<>
 class SOCreatorTrait<ICNNNetReader> {
 public:
     /**
-     * @brief A name of the fabric for creating IInferencePlugin object in DLL
+     * @brief A name of the factory for creating an ICNNNetReader object in DLL
      */
     static constexpr auto name = "CreateICNNNetReader";
 };
index c7eca27..004d2f1 100644 (file)
@@ -75,6 +75,10 @@ public:
      *
      * For single and multiple outputs networks.
      *
+     * This method needs to be called to find output names, which are used later
+     * when calling InferenceEngine::InferRequest::GetBlob or InferenceEngine::InferRequest::SetBlob
+     *
      * @param out Reference to the OutputsDataMap object
      */
     virtual void getOutputsInfo(OutputsDataMap& out) const noexcept = 0;
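A hedged sketch of the pairing the updated comment describes: names discovered through getOutputsInfo are exactly the keys later passed to InferRequest::GetBlob (shown here via the CNNNetwork wrapper rather than the raw interface):

```cpp
#include <cpp/ie_cnn_network.h>
#include <cpp/ie_infer_request.hpp>

// After inference, fetch each output blob by the name reported in the
// OutputsDataMap returned by getOutputsInfo.
void collectOutputs(const InferenceEngine::CNNNetwork& network,
                    InferenceEngine::InferRequest& request) {
    for (const auto& output : network.getOutputsInfo()) {
        InferenceEngine::Blob::Ptr result = request.GetBlob(output.first);
        (void)result;  // read inference results from the blob here
    }
}
```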
@@ -84,8 +88,8 @@ public:
      * object.
      *
      * For single and multiple inputs networks.
-     * This method must be called to find out input names for using them later during filling of a map
-     * of blobs passed later to InferenceEngine::IInferencePlugin::Infer()
+     * This method needs to be called to find out input names, which are used later
+     * when calling InferenceEngine::InferRequest::SetBlob
      *
      * @param inputs Reference to InputsDataMap object.
      */
index d8ef110..105fe11 100644 (file)
@@ -86,10 +86,12 @@ public:
         float mn = (std::numeric_limits<float>::max)();
         float mx = (std::numeric_limits<float>::min)();
 
+        IE_SUPPRESS_DEPRECATED_START_WIN
         for (int i = 0; i < statCount; i++) {
             _minOutputs.push_back(mn);
             _maxOutputs.push_back(mx);
         }
+        IE_SUPPRESS_DEPRECATED_END_WIN
     }
 
 public:
index 3908817..4012ff8 100644 (file)
@@ -43,9 +43,9 @@ public:
     /**
      * @brief Gets the Executable network output Data node information.
      *
-     * The received info is stored in the given ::ConstOutputsDataMap node.
-     * This method need to be called to find output names for using them later during filling of a map
-     * of blobs passed to InferenceEngine::IInferencePlugin::Infer()
+     * The received info is stored in the given InferenceEngine::ConstOutputsDataMap node.
+     * This method needs to be called to find output names, which are used later
+     * when calling InferenceEngine::InferRequest::GetBlob or InferenceEngine::InferRequest::SetBlob
      *
      * @param out Reference to the ::ConstOutputsDataMap object
      * @param resp Optional: pointer to an already allocated object to contain information in case of failure
@@ -57,8 +57,8 @@ public:
      * @brief Gets the executable network input Data node information.
      *
      * The received info is stored in the given ::ConstInputsDataMap object.
-     * This method need to be called to find out input names for using them later during filling of a map
-     * of blobs passed to InferenceEngine::IInferencePlugin::Infer()
+     * This method needs to be called to find out input names, which are used later
+     * when calling InferenceEngine::InferRequest::SetBlob
      *
      * @param inputs Reference to ::ConstInputsDataMap object.
      * @param resp Optional: pointer to an already allocated object to contain information in case of failure
@@ -81,7 +81,6 @@ public:
      * @brief Exports the current executable network.
      *
      * @see Core::ImportNetwork
-     * @see IInferencePlugin::ImportNetwork
      *
      * @param modelFileName Full path to the location of the exported file
      * @param resp Optional: pointer to an already allocated object to contain information in case of failure
@@ -93,7 +92,6 @@ public:
      * @brief Exports the current executable network.
      *
      * @see Core::ImportNetwork
-     * @see IInferencePlugin::ImportNetwork
      *
      * @param networkModel Network model output stream
      * @param resp Optional: pointer to an already allocated object to contain information in case of failure
index 563da85..d75bf5b 100644 (file)
@@ -281,6 +281,9 @@ public:
     INFERENCE_ENGINE_DEPRECATED("Use IExtension::getImplementation to get a concrete implementation")
     virtual StatusCode getFactoryFor(ILayerImplFactory*& factory, const CNNLayer* cnnLayer,
                                      ResponseDesc* resp) noexcept {
+        (void)factory;
+        (void)cnnLayer;
+        (void)resp;
         return NOT_IMPLEMENTED;
     }
     IE_SUPPRESS_DEPRECATED_END
@@ -296,6 +299,9 @@ public:
      */
     INFERENCE_ENGINE_DEPRECATED("Use IExtension::getImplTypes to get implementation types for a particular node")
     virtual StatusCode getPrimitiveTypes(char**& types, unsigned int& size, ResponseDesc* resp) noexcept {
+        (void)types;
+        (void)size;
+        (void)resp;
         return NOT_IMPLEMENTED;
     }
 
@@ -322,6 +328,7 @@ public:
      * @return vector of strings
      */
     virtual std::vector<std::string> getImplTypes(const std::shared_ptr<ngraph::Node>& node) {
+        (void)node;
         return {};
     }
 
@@ -332,6 +339,8 @@ public:
      * @return shared pointer to implementation
      */
     virtual ILayerImpl::Ptr getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) {
+        (void)node;
+        (void)implType;
         return nullptr;
     }
 };
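The added (void) casts are the conventional C++ idiom for parameters a default implementation deliberately ignores; without them the now-unused arguments would trigger -Wunused-parameter (GCC/Clang) or C4100 (MSVC) in warning-strict builds. A minimal illustration outside this codebase:

```cpp
// A default handler that ignores its inputs but keeps a warning-free build.
int defaultHandler(int code, const char* message) {
    (void)code;     // mark as intentionally unused
    (void)message;
    return -1;      // NOT_IMPLEMENTED-style sentinel
}
```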
index fa40cdd..a0462ac 100644 (file)
@@ -127,7 +127,9 @@ public:
     /**
      * @brief If suggested to fuse - a pointer to the layer which needs to be fused with this layer
      */
+    IE_SUPPRESS_DEPRECATED_START_WIN
     Ptr _fusedWith;
+    IE_SUPPRESS_DEPRECATED_END_WIN
 
     /**
      * @brief Convenience user values to store in this object as extra data
@@ -174,25 +176,18 @@ public:
      *
      * @param layer Reference to the layer to be fused with
      */
+    IE_SUPPRESS_DEPRECATED_START_WIN
     void fuse(Ptr& layer) {
         _fusedWith = layer;
     }
+    IE_SUPPRESS_DEPRECATED_END_WIN
 
     /**
      * @brief Returns the first element of the input data for this layer
      *
      * @return A smart pointer to the input data element
      */
-    virtual const DataPtr input() const {
-        if (insData.empty()) {
-            THROW_IE_EXCEPTION << "Internal error: input data is empty";
-        }
-        auto lockedFirstInsData = insData[0].lock();
-        if (!lockedFirstInsData) {
-            THROW_IE_EXCEPTION << "Internal error: unable to lock weak_ptr\n";
-        }
-        return lockedFirstInsData;
-    }
+    virtual const DataPtr input() const;
 
     /**
      * @brief Checks if the input data and layer data are legitimate
@@ -206,30 +201,13 @@ public:
      * @return float value if parsing was successful
      * @throws InferenceEngineException in case of parsing error
      */
-    static float ie_parse_float(const std::string& str) {
-        if (str == "-inf") {
-            return -std::numeric_limits<float>::infinity();
-        } else if (str == "inf") {
-            return std::numeric_limits<float>::infinity();
-        } else {
-            float res;
-            std::stringstream val_stream(str);
-            val_stream.imbue(std::locale("C"));
-            val_stream >> res;
-            if (!val_stream.eof()) THROW_IE_EXCEPTION;
-            return res;
-        }
-    }
+    static float ie_parse_float(const std::string& str);
+
     /**
      * @brief serialize float with c_locale formating
      * used for default values serializing
      */
-    static std::string ie_serialize_float(float value) {
-        std::stringstream val_stream;
-        val_stream.imbue(std::locale("C"));
-        val_stream << value;
-        return val_stream.str();
-    }
+    static std::string ie_serialize_float(float value);
 
     /**
      * @brief Gets float value for the given parameter
@@ -238,15 +216,7 @@ public:
      * @param def default value of the parameter if not found
      * @return float value
      */
-    float GetParamAsFloat(const char* param, float def) const {
-        std::string val = GetParamAsString(param, ie_serialize_float(def).c_str());
-        try {
-            return ie_parse_float(val);
-        } catch (...) {
-            THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
-                               << val << " cannot be casted to float.";
-        }
-    }
+    float GetParamAsFloat(const char* param, float def) const;
 
     /**
      * @brief Returns a float value for the given layer parameter
@@ -254,15 +224,7 @@ public:
      * @param param Name of the layer parameter
      * @return A float value for the specified parameter
      */
-    float GetParamAsFloat(const char* param) const {
-        std::string val = GetParamAsString(param);
-        try {
-            return ie_parse_float(val);
-        } catch (...) {
-            THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
-                               << val << " cannot be casted to float.";
-        }
-    }
+    float GetParamAsFloat(const char* param) const;
 
     /**
      * @brief Returns a vector of float values for the given parameter or returns the default value
@@ -271,23 +233,7 @@ public:
      * @param def Default value of the parameter if not found
      * @return vector of float values
      */
-    std::vector<float> GetParamAsFloats(const char* param, std::vector<float> def) const {
-        std::string vals = GetParamAsString(param, "");
-        std::vector<float> result;
-        std::istringstream stream(vals);
-        std::string str;
-        if (vals.empty()) return def;
-        while (getline(stream, str, ',')) {
-            try {
-                float val = ie_parse_float(str);
-                result.push_back(val);
-            } catch (...) {
-                THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
-                                   << ". Value " << vals << " cannot be casted to floats.";
-            }
-        }
-        return result;
-    }
+    std::vector<float> GetParamAsFloats(const char* param, std::vector<float> def) const;
 
     /**
      * @brief Returns a vector of float values for the given parameter
@@ -295,22 +241,7 @@ public:
      * @param param Name of the layer parameter
      * @return vector of float values
      */
-    std::vector<float> GetParamAsFloats(const char* param) const {
-        std::string vals = GetParamAsString(param);
-        std::vector<float> result;
-        std::istringstream stream(vals);
-        std::string str;
-        while (getline(stream, str, ',')) {
-            try {
-                float val = ie_parse_float(str);
-                result.push_back(val);
-            } catch (...) {
-                THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
-                                   << ". Value " << vals << " cannot be casted to floats.";
-            }
-        }
-        return result;
-    }
+    std::vector<float> GetParamAsFloats(const char* param) const;
 
     /**
      * @brief Returns an integer value for the given parameter or returns the default value
@@ -319,15 +250,7 @@ public:
      * @param def Default value of the parameter if not found
      * @return An int value for the specified parameter
      */
-    int GetParamAsInt(const char* param, int def) const {
-        std::string val = GetParamAsString(param, std::to_string(def).c_str());
-        try {
-            return std::stoi(val);
-        } catch (...) {
-            THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
-                               << val << " cannot be casted to int.";
-        }
-    }
+    int GetParamAsInt(const char* param, int def) const;
 
     /**
      * @brief Returns an integer value for the given parameter
@@ -335,15 +258,7 @@ public:
      * @param param Name of the layer parameter
      * @return An int value for the specified parameter
      */
-    int GetParamAsInt(const char* param) const {
-        std::string val = GetParamAsString(param);
-        try {
-            return std::stoi(val);
-        } catch (...) {
-            THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
-                               << val << " cannot be casted to int.";
-        }
-    }
+    int GetParamAsInt(const char* param) const;
 
     /**
      * @brief Returns a vector of int values for the given parameter or returns the default value
@@ -352,22 +267,7 @@ public:
      * @param def Default value of the parameter if not found
      * @return vector of int values
      */
-    std::vector<int> GetParamAsInts(const char* param, std::vector<int> def) const {
-        std::string vals = GetParamAsString(param, "");
-        std::vector<int> result;
-        std::istringstream stream(vals);
-        std::string str;
-        if (vals.empty()) return def;
-        while (getline(stream, str, ',')) {
-            try {
-                result.push_back(std::stoi(str));
-            } catch (...) {
-                THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
-                                   << ". Value " << vals << " cannot be casted to int.";
-            }
-        }
-        return result;
-    }
+    std::vector<int> GetParamAsInts(const char* param, std::vector<int> def) const;
 
     /**
      * @brief Returns a vector of int values for the given parameter
@@ -375,21 +275,8 @@ public:
      * @param param Name of the layer parameter
      * @return vector of int values
      */
-    std::vector<int> GetParamAsInts(const char* param) const {
-        std::string vals = GetParamAsString(param);
-        std::vector<int> result;
-        std::istringstream stream(vals);
-        std::string str;
-        while (getline(stream, str, ',')) {
-            try {
-                result.push_back(std::stoi(str));
-            } catch (...) {
-                THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
-                                   << ". Value " << vals << " cannot be casted to int.";
-            }
-        }
-        return result;
-    }
+    std::vector<int> GetParamAsInts(const char* param) const;
+
     /**
      * @brief Returns an unsigned integer value for the given parameter or returns the default value
      *
@@ -397,20 +284,7 @@ public:
      * @param def Default value of the parameter if not found
      * @return An unsigned integer value for the specified parameter
      */
-    unsigned int GetParamAsUInt(const char* param, unsigned int def) const {
-        std::string val = GetParamAsString(param, std::to_string(def).c_str());
-        std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer " + name +
-                              ". Value " + val + " cannot be casted to int.";
-        try {
-            int value = std::stoi(val);
-            if (value < 0) {
-                THROW_IE_EXCEPTION << message;
-            }
-            return static_cast<unsigned int>(value);
-        } catch (...) {
-            THROW_IE_EXCEPTION << message;
-        }
-    }
+    unsigned int GetParamAsUInt(const char* param, unsigned int def) const;
 
     /**
      * @brief Returns an unsigned integer value for the given parameter
@@ -418,20 +292,7 @@ public:
      * @param param Name of the layer parameter
      * @return An unsigned integer value for the specified parameter
      */
-    unsigned int GetParamAsUInt(const char* param) const {
-        std::string val = GetParamAsString(param);
-        std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer " + name +
-                              ". Value " + val + " cannot be casted to unsigned int.";
-        try {
-            int value = std::stoi(val);
-            if (value < 0) {
-                THROW_IE_EXCEPTION << message;
-            }
-            return static_cast<unsigned int>(value);
-        } catch (...) {
-            THROW_IE_EXCEPTION << message;
-        }
-    }
+    unsigned int GetParamAsUInt(const char* param) const;
 
     /**
      * @brief Returns a vector of unsigned int values for the given parameter or returns the default value
@@ -440,27 +301,7 @@ public:
      * @param def Default value of the parameter if not found
      * @return vector of unsigned int values
      */
-    std::vector<unsigned int> GetParamAsUInts(const char* param, std::vector<unsigned int> def) const {
-        std::string vals = GetParamAsString(param, "");
-        std::vector<unsigned int> result;
-        std::istringstream stream(vals);
-        std::string str;
-        std::string message = "Cannot parse parameter " + std::string(param) + " " + str + " from IR for layer " +
-                              name + ". Value " + vals + " cannot be casted to unsigned int.";
-        if (vals.empty()) return def;
-        while (getline(stream, str, ',')) {
-            try {
-                int value = std::stoi(str);
-                if (value < 0) {
-                    THROW_IE_EXCEPTION << message;
-                }
-                result.push_back(static_cast<unsigned int>(value));
-            } catch (...) {
-                THROW_IE_EXCEPTION << message;
-            }
-        }
-        return result;
-    }
+    std::vector<unsigned int> GetParamAsUInts(const char* param, std::vector<unsigned int> def) const;
 
     /**
      * @brief Returns a vector of unsigned int values for the given parameter
@@ -468,26 +309,8 @@ public:
      * @param param Name of the layer parameter
      * @return vector of unsigned int values
      */
-    std::vector<unsigned int> GetParamAsUInts(const char* param) const {
-        std::string vals = GetParamAsString(param);
-        std::vector<unsigned int> result;
-        std::istringstream stream(vals);
-        std::string str;
-        std::string message = "Cannot parse parameter " + std::string(param) + " " + str + " from IR for layer " +
-                              name + ". Value " + vals + " cannot be casted to int.";
-        while (getline(stream, str, ',')) {
-            try {
-                int value = std::stoi(str);
-                if (value < 0) {
-                    THROW_IE_EXCEPTION << message;
-                }
-                result.push_back(static_cast<unsigned int>(value));
-            } catch (...) {
-                THROW_IE_EXCEPTION << message;
-            }
-        }
-        return result;
-    }
+    std::vector<unsigned int> GetParamAsUInts(const char* param) const;
+
     /**
      * @brief Returns a boolean value for the given parameter.
      *
@@ -496,44 +319,15 @@ public:
      * @param def Default value of the parameter if not found
      * @return A bool value for the specified parameter
      */
-    bool GetParamAsBool(const char* param, bool def) const {
-        std::string val = GetParamAsString(param, std::to_string(def).c_str());
-        std::string loweredCaseValue;
-        std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
-            return std::tolower(value);
-        });
-
-        bool result = false;
-
-        if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
-            // attempting parse using non alpha bool
-            return (GetParamAsInt(param, def) != 0);
-        }
+    bool GetParamAsBool(const char* param, bool def) const;
 
-        return result;
-    }
     /**
      * @brief Returns a boolean value for the given parameter
      *
      * @param param Name of the layer parameter
      * @return A bool value for the specified parameter
      */
-    bool GetParamAsBool(const char* param) const {
-        std::string val = GetParamAsString(param);
-        std::string loweredCaseValue;
-        std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
-            return std::tolower(value);
-        });
-
-        bool result = false;
-
-        if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
-            // attempting parse using non alpha bool
-            return (GetParamAsInt(param) != 0);
-        }
-
-        return result;
-    }
+    bool GetParamAsBool(const char* param) const;
 
     /**
      * @brief Returns a string value for the given parameter or returns the default one
@@ -542,13 +336,7 @@ public:
      * @param def Default value of the parameter if not found
      * @return A string value
      */
-    std::string GetParamAsString(const char* param, const char* def) const {
-        auto it = params.find(param);
-        if (it == params.end() || it->second.empty()) {
-            return def;
-        }
-        return (*it).second;
-    }
+    std::string GetParamAsString(const char* param, const char* def) const;
 
     /**
      * @brief Checks the param presence in the layer
@@ -556,13 +344,7 @@ public:
      * @param param Name of the layer parameter
      * @return a bool depending param presence
      */
-    bool CheckParamPresence(const char* param) const {
-        auto it = params.find(param);
-        if (it == params.end()) {
-            return false;
-        }
-        return true;
-    }
+    bool CheckParamPresence(const char* param) const;
 
     /**
      * @brief Returns a string value for the given parameter.
@@ -571,13 +353,7 @@ public:
      * @param param Name of the layer parameter
      * @return A string value
      */
-    std::string GetParamAsString(const char* param) const {
-        auto it = params.find(param);
-        if (it == params.end()) {
-            THROW_IE_EXCEPTION << "No such parameter name '" << param << "' for layer " << name;
-        }
-        return (*it).second;
-    }
+    std::string GetParamAsString(const char* param) const;
 
     /**
      * @brief Gets the parameter as a std::vector<std::string>
@@ -585,21 +361,7 @@ public:
      * @param def The default values if case of parameter is not found
      * @return The parameter as strings.
      */
-    std::vector<std::string> GetParamAsStrings(const char* param, std::vector<std::string> def) const {
-        std::string vals = GetParamAsString(param, "");
-        std::vector<std::string> result;
-        std::istringstream stream(vals);
-        std::string str;
-        if (vals.empty()) return def;
-        while (getline(stream, str, ',')) {
-            try {
-                result.push_back(str);
-            } catch (...) {
-                THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ".";
-            }
-        }
-        return result;
-    }
+    std::vector<std::string> GetParamAsStrings(const char* param, std::vector<std::string> def) const;
 
     /**
      * @brief Map of pairs: (parameter name, parameter value)
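The GetParamAs* helpers above keep their documented behavior; only the bodies move out of the header. A standalone sketch mirroring the removed ie_parse_float body, to record the contract: "inf" and "-inf" map to infinities, and everything else parses under the "C" locale, independent of the process-global one (the original throws an Inference Engine exception where std::runtime_error is used here):

```cpp
#include <limits>
#include <locale>
#include <sstream>
#include <stdexcept>
#include <string>

// Locale-independent float parsing, as the removed inline helper did it.
float parseFloatCLocale(const std::string& str) {
    if (str == "-inf") return -std::numeric_limits<float>::infinity();
    if (str == "inf") return std::numeric_limits<float>::infinity();
    float res;
    std::stringstream val_stream(str);
    val_stream.imbue(std::locale("C"));  // "3.14" parses regardless of global locale
    val_stream >> res;
    if (!val_stream.eof()) throw std::runtime_error("cannot parse '" + str + "' as float");
    return res;
}
```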
@@ -1881,7 +1643,7 @@ public:
 /**
  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
  * @brief This class represents a standard Space To Batch layer
- * 
+ *
  * Space To Batch picks from input tensor according parameters
  */
 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(SpaceToBatchLayer): public CNNLayer {
@@ -1911,7 +1673,7 @@ public:
 /**
  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
  * @brief This class represents a standard Batch To Space layer
- * 
+ *
  * Batch To Space picks from input tensor according parameters
  */
 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(BatchToSpaceLayer): public CNNLayer {
@@ -2304,6 +2066,20 @@ public:
 
 /**
  * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
+ * @brief This class represents a standard ScatterElementsUpdate layer
+ */
+class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ScatterElementsUpdateLayer): public CNNLayer {
+public:
+    /**
+     * @brief Creates a new ScatterElementsUpdateLayer instance.
+     */
+    using CNNLayer::CNNLayer;
+
+    ~ScatterElementsUpdateLayer() override;
+};
+
+/**
+ * @deprecated Migrate to IR v10 and work with ngraph::Function directly. The method will be removed in 2020.3
  * @brief This class represents an onnx ExperimentalDetectronPriorGridGenerator Layer
  */
 class INFERENCE_ENGINE_INTERNAL_CNNLAYER_CLASS(ExperimentalDetectronPriorGridGeneratorLayer): public CNNLayer {
index 2123657..fcc9b87 100644 (file)
@@ -10,6 +10,7 @@
 #pragma once
 
 #include <vector>
+#include <details/ie_exception.hpp>
 
 namespace InferenceEngine {
 
diff --git a/inference-engine/include/ie_network.hpp b/inference-engine/include/ie_network.hpp
deleted file mode 100644 (file)
index 11deaf0..0000000
+++ /dev/null
@@ -1,516 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-/**
- * @brief A header file for the Inference Engine Network interface
- * 
- * @file ie_network.hpp
- */
-#pragma once
-
-#include <ie_blob.h>
-#include <ie_layouts.h>
-
-#include <ie_context.hpp>
-#include <ie_parameter.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <utility>
-#include <vector>
-
-namespace InferenceEngine {
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief A type of network objects indexes.
- */
-using idx_t = size_t;
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief This class contains a pair from layerId and port index
- */
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(PortInfo) {
-public:
-    /**
-     * @brief The constructor creates a PortInfo object for port 0
-     *
-     * @param layerID Layer id
-     */
-    PortInfo(idx_t layerID): layer(layerID), port(0) {}  // NOLINT
-
-    /**
-     * @brief The constructor creates a PortInfo object
-     *
-     * @param layerID Layer id
-     * @param portID Port id
-     */
-    PortInfo(idx_t layerID, idx_t portID): layer(layerID), port(portID) {}
-
-    /**
-     * @brief Get layer id
-     *
-     * @return Layer id
-     */
-    idx_t layerId() const {
-        return layer;
-    }
-
-    /**
-     * @brief Get port id
-     *
-     * @return Port id
-     */
-    idx_t portId() const {
-        return port;
-    }
-
-    IE_SUPPRESS_DEPRECATED_START
-
-    /**
-     * @brief Compares the given PortInfo object with the current one
-     *
-     * @param portInfo PortInfo object to compare with
-     * @return true if the given PortInfo object is equal to the current one, false - otherwise
-     */
-    bool operator==(const PortInfo& portInfo) const {
-        return layer == portInfo.layerId() && port == portInfo.portId();
-    }
-
-    /**
-     * @brief Checks if the given PortInfo object is not equal to the current one
-     *
-     * @param portInfo PortInfo object to compare with
-     * @return true if the given PortInfo object is not equal to the current one, false - otherwise
-     */
-    bool operator!=(const PortInfo& portInfo) const {
-        return !(*this == portInfo);
-    }
-
-    IE_SUPPRESS_DEPRECATED_END
-
-private:
-    idx_t layer;
-    idx_t port;
-};
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief This class is the main object to describe the Inference Engine connection.
- */
-class INFERENCE_ENGINE_NN_BUILDER_DEPRECATED Connection {
-public:
-    IE_SUPPRESS_DEPRECATED_START
-
-    /**
-     * @brief Constructor of a connection object.
-     *
-     * @param input pair of the index of input layer and the index of output port
-     * @param output pair of the index of output layer and the index of input port
-     */
-    Connection(const PortInfo& input, const PortInfo& output): input(input), output(output) {}
-
-    /**
-     * @brief Compares the given Connection with the current one
-     *
-     * @param connection Connection to compare with
-     * @return true if the given Connection is equal to the current one, false - otherwise
-     */
-    bool operator==(const Connection& connection) const {
-        return input == connection.from() && output == connection.to();
-    }
-
-    /**
-     * @brief Checks if the given Connection is not equal to the current one
-     *
-     * @param connection Connection to compare with
-     * @return true if the given Connection is not equal to the current one, false - otherwise
-     */
-    bool operator!=(const Connection& connection) const {
-        return !(*this == connection);
-    }
-
-    /**
-     * Returns a constant reference to a pair of input layer index and output port index.
-     * @return pair of the index of input layer and the index of output port
-     */
-    const PortInfo& from() const {
-        return input;
-    }
-
-    /**
-     * Returns a constant reference to a pair of output layer index and input port index.
-     * @return pair of the index of output layer and the index of input port
-     */
-    const PortInfo& to() const {
-        return output;
-    }
-
-private:
-    PortInfo input;
-    PortInfo output;
-
-    IE_SUPPRESS_DEPRECATED_END
-};
-
-/**
- * @deprecated Use ngraph API instead.
- * This class describes port data
- */
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(PortData) {
-public:
-    IE_SUPPRESS_DEPRECATED_START
-
-    /**
-     * @brief A shared pointer to the PortData object
-     */
-    using Ptr = std::shared_ptr<PortData>;
-
-    IE_SUPPRESS_DEPRECATED_END
-
-    /**
-     * @brief Default constructor
-     */
-    PortData();
-
-    /**
-     * @brief Creates port data with precision and shape
-     *
-     * @param shape Dimensions
-     * @param precision Precision
-     */
-    PortData(const SizeVector& shape, const Precision& precision);
-
-    /**
-     * @brief Virtual destructor
-     */
-    virtual ~PortData() = default;
-
-    /**
-     * @brief Returns data
-     *
-     * @return Blob with data
-     */
-    const Blob::Ptr& getData() const;
-
-    /**
-     * @brief Sets data
-     *
-     * @param data Blob with data
-     */
-    void setData(const Blob::Ptr& data);
-
-    /**
-     * @brief Returns data parameters
-     *
-     * @return Map of parameters
-     */
-    const std::map<std::string, Parameter>& getParameters() const noexcept;
-
-    /**
-     * @brief Sets new shapes for data
-     *
-     * @param shape New shapes
-     */
-    void setShape(const SizeVector& shape);
-
-private:
-    Blob::Ptr data;
-    std::map<std::string, Parameter> parameters;
-
-    void createData(const TensorDesc& desc);
-};
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief This class is the main object to describe the Inference Engine port.
- */
-class INFERENCE_ENGINE_NN_BUILDER_API_CLASS(Port) {
-public:
-    /**
-     * @brief Default constructor of a port object
-     */
-    Port();
-
-    /**
-     * @brief Constructor of a port object with shapes
-     *
-     * @param shapes port shapes
-     * @param precision Port precision
-     */
-    Port(const SizeVector& shapes, const Precision& precision = Precision::UNSPECIFIED);
-
-    /**
-     * @brief Virtual destructor
-     */
-    virtual ~Port() = default;
-
-    IE_SUPPRESS_DEPRECATED_START
-
-    /**
-     * @brief Copy constructor.
-     * @param port object to copy
-     */
-    Port(const Port& port);
-
-    /**
-     * @brief Compares the given Port with the current one
-     *
-     * @param rhs Port to compare with
-     * @return true if the given Port is equal to the current one, false - otherwise
-     */
-    bool operator==(const Port& rhs) const;
-
-    /**
-     * @brief Compares the given Port with the current one
-     *
-     * @param rhs Port to compare with
-     * @return true if the given Port is NOT equal to the current one, false - otherwise
-     */
-    bool operator!=(const Port& rhs) const;
-
-    IE_SUPPRESS_DEPRECATED_END
-
-    /**
-     * @brief Returns a constant reference to a vector with shapes
-     *
-     * Shapes should be initialized if shape is empty.
-     * @return constant reference to shapes
-     */
-    const SizeVector& shape() const noexcept;
-
-    /**
-     * @brief Sets new shapes for current port
-     *
-     * @param shape New shapes
-     */
-    void setShape(const SizeVector& shape);
-
-    /**
-     * @brief Returns a constant reference to parameters
-     *
-     * @return Map with parameters
-     */
-    const std::map<std::string, Parameter>& getParameters() const noexcept;
-
-    /**
-     * @brief Sets new parameters for current port
-     *
-     * @param params New parameters
-     */
-    void setParameters(const std::map<std::string, Parameter>& params) noexcept;
-
-    /**
-     * @brief Sets the new parameter for current port
-     *
-     * @param name Name of parameter
-     * @param param New value
-     */
-    void setParameter(const std::string& name, const Parameter& param);
-
-    IE_SUPPRESS_DEPRECATED_START
-
-    /**
-     * @brief Returns port data
-     *
-     * @return Port data
-     */
-    const PortData::Ptr& getData() const noexcept;
-
-    /**
-     * @brief Sets new port data for current port
-     *
-     * @param data Port data
-     */
-    void setData(const PortData::Ptr& data);
-
-private:
-    std::map<std::string, Parameter> parameters;
-    PortData::Ptr data;
-
-    IE_SUPPRESS_DEPRECATED_END
-};
-
-class INetwork;
-template <class T>
-class INetwotkIterator;
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief This class is the main interface to describe the Inference Engine layer.
- *
- * All methods here are constant and do not throw exceptions.
- */
-class INFERENCE_ENGINE_NN_BUILDER_DEPRECATED ILayer {
-public:
-    IE_SUPPRESS_DEPRECATED_START
-
-    /**
-     * @brief A shared pointer to the const ILayer object
-     */
-    using CPtr = std::shared_ptr<const ILayer>;
-
-    IE_SUPPRESS_DEPRECATED_END
-
-    /**
-     * @brief Virtual destructor for the layer interface
-     */
-    virtual ~ILayer() = default;
-
-    /**
-     * @brief Returns a id of the layer
-     *
-     * @return Layer id
-     */
-    virtual idx_t getId() const noexcept = 0;
-
-    /**
-     * @brief Returns a layer name
-     *
-     * @return Layer name
-     */
-    virtual const std::string& getName() const noexcept = 0;
-
-    /**
-     * @brief Returns a layer type
-     * @return Layer type
-     */
-    virtual const std::string& getType() const noexcept = 0;
-
-    /**
-     * @brief Returns a constant smart pointer reference to a Parameters interface
-     *
-     * @return Parameters interface smart pointer
-     */
-    virtual const std::map<std::string, Parameter>& getParameters() const noexcept = 0;
-
-    IE_SUPPRESS_DEPRECATED_START
-
-    /**
-     * @brief Returns a constant reference to a vector with input ports
-     *
-     * @return Vector of input ports
-     */
-    virtual const std::vector<Port>& getInputPorts() const noexcept = 0;
-
-    /**
-     * @brief Returns a constant reference to a vector with output ports
-     *
-     * @return Vector of output ports
-     */
-    virtual const std::vector<Port>& getOutputPorts() const noexcept = 0;
-
-    IE_SUPPRESS_DEPRECATED_END
-};
-
-namespace details {
-
-template <class NT, class LT>
-class INFERENCE_ENGINE_NN_BUILDER_DEPRECATED INetworkIterator;
-
-}  // namespace details
-
-/**
- * @deprecated Use ngraph API instead.
- * @brief This class is the main interface to describe the Inference Engine network.
- *
- * All methods here are constant and do not throw exceptions.
- */
-class INFERENCE_ENGINE_NN_BUILDER_DEPRECATED INetwork {
-public:
-    IE_SUPPRESS_DEPRECATED_START
-
-    /**
-     * @brief A shared pointer to the constant INetwork object.
-     */
-    using CPtr = std::shared_ptr<const INetwork>;
-    /**
-     * @brief A constant iterator for INetwork definition
-     */
-    using const_iterator = details::INetworkIterator<const INetwork, const ILayer>;
-
-    IE_SUPPRESS_DEPRECATED_END
-
-    /**
-     * @brief Virtual destructor for the network interface
-     */
-    virtual ~INetwork() = default;
-
-    /**
-     * @brief Begin network iterator
-     *
-     * @return const INetwork iterator
-     */
-    virtual const_iterator begin() const noexcept = 0;
-
-    /**
-     * @brief End network iterator
-     *
-     * @return const INetwork iterator
-     */
-    virtual const_iterator end() const noexcept = 0;
-
-    /**
-     * @brief Returns a number of layers in the network.
-     *
-     * @return Layers count
-     */
-    virtual size_t size() const noexcept = 0;
-
-    IE_SUPPRESS_DEPRECATED_START
-
-    /**
-     * @brief Returns a constant smart pointer to a Layer interface.
-     *
-     * If the layer is missing, returns nullptr.
-     *
-     * @param id Id of the Layer
-     * @return Layer interface smart pointer
-     */
-    virtual const ILayer::CPtr getLayer(idx_t id) const noexcept = 0;
-
-    /**
-     * @brief Returns a constant vector of input layers.
-     *
-     * @return Vector of input layers
-     */
-    virtual const std::vector<ILayer::CPtr> getInputs() const noexcept = 0;
-
-    /**
-     * @brief Returns a constant vector of output layers.
-     *
-     * @return Vector of output layers
-     */
-    virtual const std::vector<ILayer::CPtr> getOutputs() const noexcept = 0;
-
-    /**
-     * @brief Returns a constant vector of connections for specific layer.
-     *
-     * If the layer is missing, returns empty vector.
-     *
-     * @param layerId layer index
-     * @return Vector of connections
-     */
-    virtual const std::vector<Connection> getLayerConnections(idx_t layerId) const noexcept = 0;
-
-    /**
-     * @brief Returns a network context
-     *
-     * @return const reference to Context
-     */
-    virtual const Context& getContext() const noexcept = 0;
-
-    IE_SUPPRESS_DEPRECATED_END
-
-    /**
-     * @brief Returns a network name.
-     * @return Network name
-     */
-    virtual const std::string& getName() const noexcept = 0;
-};
-
-}  // namespace InferenceEngine
-
-#include <details/ie_inetwork_iterator.hpp>
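The deleted NN Builder interfaces above (INetwork, ILayer, Connection) map onto direct graph traversal in ngraph. A hedged sketch of the equivalent walk, assuming a function obtained via CNNNetwork::getFunction() as in the note earlier:

    #include <memory>
    #include <ngraph/function.hpp>

    void walkTopology(const std::shared_ptr<ngraph::Function>& f) {
        for (const auto& op : f->get_ordered_ops()) {       // ops take the role of ILayer
            for (const auto& input : op->inputs()) {        // edges take the role of Connection
                auto* producer = input.get_source_output().get_node();  // akin to Connection::from()
                (void)producer;
            }
        }
    }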
index bb904ef..4c85d39 100644 (file)
@@ -296,12 +296,14 @@ private:
         }
 
         template <class U>
-        typename std::enable_if<!HasOperatorEqual<U>::value, bool>::type equal(const Any& left, const Any& rhs) const {
+        typename std::enable_if<!HasOperatorEqual<U>::value, bool>::type
+        equal(const Any& left, const Any& rhs) const {
             THROW_IE_EXCEPTION << "Parameter doesn't contain equal operator";
         }
 
         template <class U>
-        typename std::enable_if<HasOperatorEqual<U>::value, bool>::type equal(const Any& left, const Any& rhs) const {
+        typename std::enable_if<HasOperatorEqual<U>::value, bool>::type
+        equal(const Any& left, const Any& rhs) const {
             return dyn_cast<U>(&left) == dyn_cast<U>(&rhs);
         }
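The reflowed equal() overloads above dispatch on a HasOperatorEqual trait through enable_if. A self-contained sketch of such a detection trait (a stand-in; the header's actual trait implementation may differ):

    #include <type_traits>
    #include <utility>

    template <typename T, typename = void>
    struct HasOperatorEqual : std::false_type {};

    // The specialization is selected only when t == t is a well-formed expression
    template <typename T>
    struct HasOperatorEqual<T, decltype(void(std::declval<T>() == std::declval<T>()))>
        : std::true_type {};

    struct Opaque {};  // no operator==
    static_assert(HasOperatorEqual<int>::value, "int supports ==");
    static_assert(!HasOperatorEqual<Opaque>::value, "Opaque does not");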
 
index 6f80897..82b975c 100644 (file)
@@ -20,6 +20,7 @@
 
 #include "details/ie_no_copy.hpp"
 #include "ie_api.h"
+#include "ie_core.hpp"
 #include "ie_error.hpp"
 #include "ie_iexecutable_network.hpp"
 #include "ie_version.hpp"
 namespace InferenceEngine {
 
 /**
- * @brief Responce structure encapsulating information about supported layer
- */
-struct QueryNetworkResult {
-    /**
-     * @brief A map of supported layers:
-     * - key - a layer name
-     * - value - a device name on which layer is assigned
-     */
-    std::map<std::string, std::string> supportedLayersMap;
-
-    /**
-     * @brief A status code
-     */
-    StatusCode rc = OK;
-
-    /**
-     * @brief Response mssage
-     */
-    ResponseDesc resp;
-};
-
-/**
  * @deprecated Use InferenceEngine::Core instead. Will be removed in 2020.3
  * @brief This class is a main plugin interface
  */
@@ -102,7 +81,7 @@ public:
      *        them simultaneously (up to the limitation of the hardware resources)
      *
      * @param ret Reference to a shared ptr of the returned network interface
-     * @param network Network object acquired from CNNNetReader
+     * @param network Network object acquired from Core::ReadNetwork
      * @param config Map of pairs: (config parameter name, config parameter value) relevant only for this load operation
      * @param resp Pointer to the response message that holds a description of an error if any occurred
      * @return Status code of the operation. InferenceEngine::OK if succeeded
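With networks now acquired from Core::ReadNetwork rather than CNNNetReader, the end-to-end replacement flow looks roughly like this (a sketch; paths and device name are placeholders):

    #include <ie_core.hpp>

    int main() {
        InferenceEngine::Core ie;
        InferenceEngine::CNNNetwork network = ie.ReadNetwork("model.xml", "model.bin");
        InferenceEngine::ExecutableNetwork executable = ie.LoadNetwork(network, "CPU");
        InferenceEngine::InferRequest request = executable.CreateInferRequest();
        request.Infer();  // fill input blobs before this in real code
        return 0;
    }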
index 49a6212..199c613 100644 (file)
@@ -192,7 +192,7 @@ DECLARE_CONFIG_KEY(CPU_THREADS_NUM);
 /**
  * @brief The name for setting CPU affinity per thread option.
  *
- * It is passed to IInferencePlugin::SetConfig(), this option should be used with values:
+ * It is passed to Core::SetConfig(); this option should be used with the following values:
  * PluginConfigParams::YES (pinning threads to cores, best for static benchmarks),
  * PluginConfigParams::NUMA (pinning threads to NUMA nodes, best for real-life, contended cases);
  * this is a TBB-specific knob, and the only pinning option (beyond 'NO', below) on the Windows*
@@ -206,7 +206,7 @@ DECLARE_CONFIG_VALUE(NUMA);
 /**
  * @brief Optimize CPU execution to maximize throughput.
  *
- * It is passed to IInferencePlugin::SetConfig(), this option should be used with values:
+ * It is passed to Core::SetConfig(); this option should be used with the following values:
  * - KEY_CPU_THROUGHPUT_NUMA creates as many streams as needed to accommodate NUMA and avoid associated penalties
  * - KEY_CPU_THROUGHPUT_AUTO creates the bare minimum of streams to improve performance;
  *   this is the most portable option if you have no insights into how many cores your target machine will have
@@ -220,7 +220,7 @@ DECLARE_CONFIG_KEY(CPU_THROUGHPUT_STREAMS);
 /**
  * @brief Optimize GPU plugin execution to maximize throughput.
  *
- * It is passed to IInferencePlugin::SetConfig(), this option should be used with values:
+ * It is passed to Core::SetConfig(); this option should be used with the following values:
  * - KEY_GPU_THROUGHPUT_AUTO creates the bare minimum of streams that might improve performance in some cases;
  *   this option allows enabling a throttle hint for the OpenCL queue, thus reducing CPU load without a significant
  * performance drop
@@ -232,7 +232,7 @@ DECLARE_CONFIG_KEY(GPU_THROUGHPUT_STREAMS);
 /**
  * @brief The name for setting performance counters option.
  *
- * It is passed to IInferencePlugin::SetConfig(), this option should be used with values:
+ * It is passed to Core::SetConfig(); this option should be used with the following values:
  * PluginConfigParams::YES or PluginConfigParams::NO
  */
 DECLARE_CONFIG_KEY(PERF_COUNT);
@@ -260,7 +260,7 @@ DECLARE_CONFIG_KEY(DUMP_QUANTIZED_GRAPH_AS_IR);
 /**
  * @brief The key controls threading inside Inference Engine.
  *
- * It is passed to IInferencePlugin::SetConfig(), this option should be used with values:
+ * It is passed to Core::SetConfig(); this option should be used with the following values:
  * PluginConfigParams::YES or PluginConfigParams::NO
  */
 DECLARE_CONFIG_KEY(SINGLE_THREAD);
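Each of the keys above is now documented against Core::SetConfig(). A usage sketch with the CONFIG_KEY/CONFIG_VALUE expansion macros from this header (the particular key choice is illustrative):

    #include <ie_core.hpp>
    #include <ie_plugin_config.hpp>
    #include <map>
    #include <string>

    void configureCpu(InferenceEngine::Core& ie) {
        std::map<std::string, std::string> config = {
            {CONFIG_KEY(CPU_BIND_THREAD), CONFIG_VALUE(YES)},  // pin threads to cores
            {CONFIG_KEY(CPU_THROUGHPUT_STREAMS), CONFIG_VALUE(CPU_THROUGHPUT_AUTO)},
            {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(NO)}};
        ie.SetConfig(config, "CPU");
    }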
index bb8de30..5140555 100644 (file)
@@ -117,7 +117,7 @@ public:
     template <typename T,
         typename std::enable_if<!std::is_pointer<T>::value && !std::is_reference<T>::value, int>::type = 0,
         typename std::enable_if<std::is_base_of<RemoteContext, T>::value, int>::type = 0>
-        bool is() noexcept {
+    bool is() noexcept {
         return dynamic_cast<T*>(this) != nullptr;
     }
 
@@ -130,7 +130,7 @@ public:
     template <typename T,
         typename std::enable_if<!std::is_pointer<T>::value && !std::is_reference<T>::value, int>::type = 0,
         typename std::enable_if<std::is_base_of<RemoteContext, T>::value, int>::type = 0>
-        bool is() const noexcept {
+    bool is() const noexcept {
         return dynamic_cast<const T*>(this) != nullptr;
     }
 
@@ -143,7 +143,7 @@ public:
     template <typename T,
         typename std::enable_if<!std::is_pointer<T>::value && !std::is_reference<T>::value, int>::type = 0,
         typename std::enable_if<std::is_base_of<RemoteContext, T>::value, int>::type = 0>
-        T * as() noexcept {
+    T * as() noexcept {
         return dynamic_cast<T*>(this);
     }
 
@@ -156,7 +156,7 @@ public:
     template <typename T,
         typename std::enable_if<!std::is_pointer<T>::value && !std::is_reference<T>::value, int>::type = 0,
         typename std::enable_if<std::is_base_of<RemoteContext, T>::value, int>::type = 0>
-        const T * as() const noexcept {
+    const T * as() const noexcept {
         return dynamic_cast<const T*>(this);
     }
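For context, the realigned is()/as() helpers are non-throwing, dynamic_cast-based type queries on a RemoteContext. A usage sketch (treating gpu::ClContext and its header as the concrete GPU remote type is an assumption here):

    #include <ie_remote_context.hpp>
    #include <gpu/gpu_context_api_ocl.hpp>  // assumed location of gpu::ClContext

    void inspect(const InferenceEngine::RemoteContext::Ptr& ctx) {
        if (ctx->is<InferenceEngine::gpu::ClContext>()) {           // safe type test, never throws
            auto* cl = ctx->as<InferenceEngine::gpu::ClContext>();  // nullptr on mismatch
            (void)cl;  // OpenCL-specific surface available here
        }
    }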
 
index 6415dfe..70bbb27 100644 (file)
@@ -3,19 +3,15 @@
 //
 
 /**
- * @brief A header file that provides a set of convenience utility functions and the main include file for all other .h
- * files.
- *
+ * @brief A header file that provides the minimal required set of the Inference Engine API.
  * @file inference_engine.hpp
  */
 #pragma once
 
-#include <cpp/ie_cnn_net_reader.h>
 #include <ie_api.h>
 #include <ie_blob.h>
 #include <ie_layers.h>
-
-#include <algorithm>
+#include <cpp/ie_cnn_net_reader.h>
 #include <cpp/ie_executable_network.hpp>
 #include <cpp/ie_plugin_cpp.hpp>
 #include <ie_core.hpp>
 #include <ie_plugin_config.hpp>
 #include <ie_plugin_dispatcher.hpp>
 #include <ie_version.hpp>
-#include <memory>
-#include <numeric>
-#include <vector>
-
-/**
- * @brief Inference Engine API
- */
-namespace InferenceEngine {
-
-/**
- * @deprecated InferenceEngine utility functions are not a part of public API
- * @brief Gets the top n results from a tblob
- *
- * @param n Top n count
- * @param input 1D tblob that contains probabilities
- * @param output Vector of indexes for the top n places
- */
-template <class T>
-INFERENCE_ENGINE_DEPRECATED(
-    "InferenceEngine utility functions are not a part of public API. Will be removed in 2020.3")
-inline void TopResults(unsigned int n, TBlob<T>& input, std::vector<unsigned>& output) {
-    SizeVector dims = input.getTensorDesc().getDims();
-    size_t input_rank = dims.size();
-    if (!input_rank || !dims[0]) THROW_IE_EXCEPTION << "Input blob has incorrect dimensions!";
-    size_t batchSize = dims[0];
-    std::vector<unsigned> indexes(input.size() / batchSize);
-
-    n = static_cast<unsigned>(std::min<size_t>((size_t)n, input.size()));
-
-    output.resize(n * batchSize);
-
-    for (size_t i = 0; i < batchSize; i++) {
-        size_t offset = i * (input.size() / batchSize);
-        T* batchData = input.data();
-        batchData += offset;
-
-        std::iota(std::begin(indexes), std::end(indexes), 0);
-        std::partial_sort(std::begin(indexes), std::begin(indexes) + n, std::end(indexes),
-                          [&batchData](unsigned l, unsigned r) {
-                              return batchData[l] > batchData[r];
-                          });
-        for (unsigned j = 0; j < n; j++) {
-            output.at(i * n + j) = indexes.at(j);
-        }
-    }
-}
-
-#define TBLOB_TOP_RESULT(precision)                                                           \
-    case InferenceEngine::Precision::precision: {                                             \
-        using myBlobType = InferenceEngine::PrecisionTrait<Precision::precision>::value_type; \
-        TBlob<myBlobType>& tblob = dynamic_cast<TBlob<myBlobType>&>(input);                   \
-        TopResults(n, tblob, output);                                                         \
-        break;                                                                                \
-    }
-
-/**
- * @deprecated InferenceEngine utility functions are not a part of public API
- * @brief Gets the top n results from a blob
- *
- * @param n Top n count
- * @param input 1D blob that contains probabilities
- * @param output Vector of indexes for the top n places
- */
-INFERENCE_ENGINE_DEPRECATED(
-    "InferenceEngine utility functions are not a part of public API. Will be removed in 2020.3")
-inline void TopResults(unsigned int n, Blob& input, std::vector<unsigned>& output) {
-    IE_SUPPRESS_DEPRECATED_START
-    switch (input.getTensorDesc().getPrecision()) {
-        TBLOB_TOP_RESULT(FP32);
-        TBLOB_TOP_RESULT(FP16);
-        TBLOB_TOP_RESULT(Q78);
-        TBLOB_TOP_RESULT(I16);
-        TBLOB_TOP_RESULT(U8);
-        TBLOB_TOP_RESULT(I8);
-        TBLOB_TOP_RESULT(U16);
-        TBLOB_TOP_RESULT(I32);
-        TBLOB_TOP_RESULT(U64);
-        TBLOB_TOP_RESULT(I64);
-    default:
-        THROW_IE_EXCEPTION << "cannot locate blob for precision: " << input.getTensorDesc().getPrecision();
-    }
-    IE_SUPPRESS_DEPRECATED_END
-}
-
-#undef TBLOB_TOP_RESULT
-
-/**
- * @deprecated InferenceEngine utility functions are not a part of public API
- * @brief Copies a 8-bit RGB image to the blob.
- *
- * Throws an exception in case of dimensions or input size mismatch
- *
- * @tparam data_t Type of the target blob
- * @param RGB8 8-bit RGB image
- * @param RGB8_size Size of the image
- * @param blob Target blob to write image to
- */
-template <typename data_t>
-INFERENCE_ENGINE_DEPRECATED(
-    "InferenceEngine utility functions are not a part of public API. Will be removed in 2020.3")
-void copyFromRGB8(uint8_t* RGB8, size_t RGB8_size, InferenceEngine::TBlob<data_t>* blob) {
-    SizeVector dims = blob->getTensorDesc().getDims();
-    if (4 != dims.size())
-        THROW_IE_EXCEPTION << "Cannot write data to input blob! Blob has incorrect dimensions size " << dims.size();
-    size_t num_channels = dims[1];  // because RGB
-    size_t num_images = dims[0];
-    size_t w = dims[3];
-    size_t h = dims[2];
-    size_t nPixels = w * h;
-
-    if (RGB8_size != w * h * num_channels * num_images)
-        THROW_IE_EXCEPTION << "input pixels mismatch, expecting " << w * h * num_channels * num_images
-                           << " bytes, got: " << RGB8_size;
-
-    std::vector<data_t*> dataArray;
-    for (unsigned int n = 0; n < num_images; n++) {
-        for (unsigned int i = 0; i < num_channels; i++) {
-            if (!n && !i && dataArray.empty()) {
-                dataArray.push_back(blob->data());
-            } else {
-                dataArray.push_back(dataArray.at(n * num_channels + i - 1) + nPixels);
-            }
-        }
-    }
-    for (size_t n = 0; n < num_images; n++) {
-        size_t n_num_channels = n * num_channels;
-        size_t n_num_channels_nPixels = n_num_channels * nPixels;
-        for (size_t i = 0; i < nPixels; i++) {
-            size_t i_num_channels = i * num_channels + n_num_channels_nPixels;
-            for (size_t j = 0; j < num_channels; j++) {
-                dataArray.at(n_num_channels + j)[i] = RGB8[i_num_channels + j];
-            }
-        }
-    }
-}
-
-/**
- * @deprecated InferenceEngine utility functions are not a part of public API
- * @brief Splits the RGB channels to either I16 Blob or float blob.
- *
- * The image buffer is assumed to be packed with no support for strides.
- *
- * @param imgBufRGB8 Packed 24bit RGB image (3 bytes per pixel: R-G-B)
- * @param lengthbytesSize Size in bytes of the RGB image. It is equal to amount of pixels times 3 (number of channels)
- * @param input Blob to contain the split image (to 3 channels)
- */
-INFERENCE_ENGINE_DEPRECATED(
-    "InferenceEngine utility functions are not a part of public API. Will be removed in 2020.3")
-inline void ConvertImageToInput(unsigned char* imgBufRGB8, size_t lengthbytesSize, Blob& input) {
-    IE_SUPPRESS_DEPRECATED_START
-    TBlob<float>* float_input = dynamic_cast<TBlob<float>*>(&input);
-    if (float_input != nullptr) copyFromRGB8(imgBufRGB8, lengthbytesSize, float_input);
-
-    TBlob<short>* short_input = dynamic_cast<TBlob<short>*>(&input);
-    if (short_input != nullptr) copyFromRGB8(imgBufRGB8, lengthbytesSize, short_input);
-
-    TBlob<uint8_t>* byte_input = dynamic_cast<TBlob<uint8_t>*>(&input);
-    if (byte_input != nullptr) copyFromRGB8(imgBufRGB8, lengthbytesSize, byte_input);
-    IE_SUPPRESS_DEPRECATED_END
-}
-
-/**
- * @deprecated InferenceEngine utility functions are not a part of public API
- * @brief Copies data from a certain precision to float
- *
- * @param dst Pointer to an output float buffer, must be allocated before the call
- * @param src Source blob to take data from
- */
-template <typename T>
-INFERENCE_ENGINE_DEPRECATED(
-    "InferenceEngine utility functions are not a part of public API. Will be removed in 2020.3")
-void copyToFloat(float* dst, const InferenceEngine::Blob* src) {
-    if (!dst) {
-        return;
-    }
-    const InferenceEngine::TBlob<T>* t_blob = dynamic_cast<const InferenceEngine::TBlob<T>*>(src);
-    if (t_blob == nullptr) {
-        THROW_IE_EXCEPTION << "input type is " << src->getTensorDesc().getPrecision() << " but input is not "
-                           << typeid(T).name();
-    }
-
-    const T* srcPtr = t_blob->readOnly();
-    if (srcPtr == nullptr) {
-        THROW_IE_EXCEPTION << "Input data was not allocated.";
-    }
-    for (size_t i = 0; i < t_blob->size(); i++) dst[i] = srcPtr[i];
-}
-
-}  // namespace InferenceEngine
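With TopResults and the other convenience helpers removed from the public header, equivalent logic belongs in application code. A standalone sketch over a raw float buffer (not part of any Inference Engine API):

    #include <algorithm>
    #include <cstddef>
    #include <numeric>
    #include <vector>

    // Indices of the n largest values in data[0..size), highest first
    std::vector<size_t> topN(const float* data, size_t size, size_t n) {
        std::vector<size_t> idx(size);
        std::iota(idx.begin(), idx.end(), 0);
        n = std::min(n, size);
        std::partial_sort(idx.begin(), idx.begin() + n, idx.end(),
                          [data](size_t l, size_t r) { return data[l] > data[r]; });
        idx.resize(n);
        return idx;
    }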
index 8dc97aa..3c00465 100644 (file)
@@ -15,6 +15,8 @@ if (CMAKE_BUILD_TYPE STREQUAL "")
     set(CMAKE_BUILD_TYPE "Release")
 endif()
 
+set_property(GLOBAL PROPERTY USE_FOLDERS ON)
+
 if (NOT(BIN_FOLDER))
     string(TOLOWER ${CMAKE_SYSTEM_PROCESSOR} ARCH)
     if(ARCH STREQUAL "x86_64" OR ARCH STREQUAL "amd64") # Windows detects Intel's 64-bit CPU as AMD64
@@ -82,6 +84,16 @@ else()
     endif()
 endif()
 
+if(APPLE)
+    set(CMAKE_MACOSX_RPATH ON)
+endif()
+
+set(CMAKE_POLICY_DEFAULT_CMP0063 NEW)
+set(CMAKE_POSITION_INDEPENDENT_CODE ON)
+set(CMAKE_CXX_VISIBILITY_PRESET hidden)
+set(CMAKE_C_VISIBILITY_PRESET hidden)
+set(CMAKE_VISIBILITY_INLINES_HIDDEN ON)
+
 ####################################
 ## to use C++11; can be overwritten via the cmake command line
 if(NOT DEFINED CMAKE_CXX_STANDARD)
@@ -102,6 +114,7 @@ set (BUILD_TESTING OFF)
 
 if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/gflags")
     add_subdirectory(thirdparty/gflags)
+    set_target_properties(gflags_nothreads_static PROPERTIES FOLDER thirdparty)
 endif()
 
 if (CMAKE_CXX_COMPILER_ID STREQUAL GNU)
@@ -191,10 +204,14 @@ macro(ie_add_sample)
         target_compile_definitions(${IE_SAMPLE_NAME} PRIVATE USE_OPENCV)
     endif()
 
-    if(WIN32)
-        set_target_properties(${IE_SAMPLE_NAME} PROPERTIES COMPILE_PDB_NAME ${IE_SAMPLE_NAME})
+    set(folder_name cpp_samples)
+    if(IE_SAMPLE_NAME MATCHES ".*_c$")
+        set(folder_name c_samples)
     endif()
 
+    set_target_properties(${IE_SAMPLE_NAME} PROPERTIES FOLDER ${folder_name}
+                                                       COMPILE_PDB_NAME ${IE_SAMPLE_NAME})
+
     if(IE_SAMPLE_INCLUDE_DIRECTORIES)
         target_include_directories(${IE_SAMPLE_NAME} PRIVATE ${IE_SAMPLE_INCLUDE_DIRECTORIES})
     endif()
@@ -210,7 +227,11 @@ macro(ie_add_sample)
     add_dependencies(ie_samples ${IE_SAMPLE_NAME})
 
     if(COMMAND add_cpplint_target AND NOT IE_SAMPLE_EXCLUDE_CPPLINT)
-        add_cpplint_target(${IE_SAMPLE_NAME}_cpplint FOR_TARGETS ${IE_SAMPLE_NAME})
+        if(folder_name STREQUAL "c_samples")
+            set(custom_filters "-readability/casting,-runtime/printf")
+        endif()
+        add_cpplint_target(${IE_SAMPLE_NAME}_cpplint FOR_TARGETS ${IE_SAMPLE_NAME}
+                           CUSTOM_FILTERS ${custom_filters})
     endif()
 endmacro()
 
index be7325b..800e0f7 100644 (file)
@@ -164,7 +164,7 @@ int main(int argc, char *argv[]) {
         if (FLAGS_d.find("CPU") != std::string::npos && !FLAGS_l.empty()) {
             // The CPU (MKLDNN) extension is loaded as a shared library and passed as a pointer to the base extension
             const auto extension_ptr = InferenceEngine::make_so_pointer<InferenceEngine::IExtension>(FLAGS_l);
-            ie.AddExtension(extension_ptr, "CPU");
+            ie.AddExtension(extension_ptr);
             slog::info << "CPU (MKLDNN) extensions is loaded " << FLAGS_l << slog::endl;
         }
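The recurring sample change above drops the explicit "CPU" device argument: extensions are now registered on the Core object itself. Minimal sketch (library path is a placeholder):

    #include <ie_core.hpp>

    void loadCpuExtension(InferenceEngine::Core& ie) {
        auto extension =
            InferenceEngine::make_so_pointer<InferenceEngine::IExtension>("libcustom_ops.so");
        ie.AddExtension(extension);  // no device name required any more
    }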
 
index 8c7d2fa..c761eae 100644 (file)
@@ -77,7 +77,7 @@ int main(int argc, char *argv[]) {
         if (!FLAGS_l.empty()) {
             // CPU(MKLDNN) extensions are loaded as a shared library and passed as a pointer to base extension
             IExtensionPtr extension_ptr = make_so_pointer<IExtension>(FLAGS_l);
-            ie.AddExtension(extension_ptr, "CPU");
+            ie.AddExtension(extension_ptr);
             slog::info << "CPU Extension loaded: " << FLAGS_l << slog::endl;
         }
         if (!FLAGS_c.empty()) {
index 6d93513..48dbed9 100644 (file)
@@ -39,4 +39,5 @@ target_compile_definitions(${TARGET_NAME} PRIVATE IMPLEMENT_FORMAT_READER)
 target_include_directories(${TARGET_NAME} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}"
                                                  "${CMAKE_CURRENT_SOURCE_DIR}/..")
 
-set_target_properties(${TARGET_NAME} PROPERTIES COMPILE_PDB_NAME ${TARGET_NAME})
+set_target_properties(${TARGET_NAME} PROPERTIES COMPILE_PDB_NAME ${TARGET_NAME}
+                                                FOLDER cpp_samples)
index 292adc5..7e4674a 100644 (file)
@@ -37,7 +37,7 @@ int main(int argc, char* argv[]) {
         if (device_name.find("CPU") != std::string::npos) {
             inPlaceExtension = std::make_shared<InPlaceExtension>();
             // register sample's custom kernel (CustomReLU)
-            ie.AddExtension(inPlaceExtension, "CPU");
+            ie.AddExtension(inPlaceExtension);
         }
         // -----------------------------------------------------------------------------------------------------
 
index 2aaf80e..f3ea08a 100644 (file)
@@ -86,7 +86,7 @@ int main(int argc, char *argv[]) {
         if (!FLAGS_l.empty()) {
             // CPU(MKLDNN) extensions are loaded as a shared library and passed as a pointer to base extension
             IExtensionPtr extension_ptr = make_so_pointer<IExtension>(FLAGS_l);
-            ie.AddExtension(extension_ptr, "CPU");
+            ie.AddExtension(extension_ptr);
             slog::info << "CPU Extension loaded: " << FLAGS_l << slog::endl;
         }
 
index d98457f..0e2da0c 100644 (file)
@@ -69,7 +69,7 @@ int main(int argc, char *argv[]) {
         if (!FLAGS_l.empty()) {
             // CPU(MKLDNN) extensions are loaded as a shared library and passed as a pointer to base extension
             IExtensionPtr extension_ptr = make_so_pointer<IExtension>(FLAGS_l);
-            ie.AddExtension(extension_ptr, "CPU");
+            ie.AddExtension(extension_ptr);
             slog::info << "CPU Extension loaded: " << FLAGS_l << slog::endl;
         }
         if (!FLAGS_c.empty()) {
index 6873def..1fa9111 100644 (file)
@@ -25,11 +25,13 @@ ie_add_plugin(NAME ${TARGET_NAME}
               SOURCES ${MAIN_SRC} ${LIBRARY_HEADERS}
               VERSION_DEFINES_FOR cldnn_engine.cpp)
 
-target_link_libraries(${TARGET_NAME} PRIVATE ${INTEL_ITT_LIBS} inference_engine inference_engine_lp_transformations clDNN_lib pugixml)
+target_link_libraries(${TARGET_NAME} PRIVATE ${INTEL_ITT_LIBS} inference_engine inference_engine_lp_transformations
+                                             clDNN_lib pugixml inference_engine_transformations)
 
 set (CLDNN_TOP_FOLDER ${IE_MAIN_SOURCE_DIR}/thirdparty/clDNN)
 target_include_directories(${TARGET_NAME} PRIVATE
         ${CMAKE_CURRENT_SOURCE_DIR}
+        $<TARGET_PROPERTY:inference_engine_transformations,INTERFACE_INCLUDE_DIRECTORIES>
         ${CLDNN__IOCL_ICD_INCDIRS}
         ${CLDNN_TOP_FOLDER})
 
@@ -42,9 +44,9 @@ add_custom_command(TARGET ${TARGET_NAME} POST_BUILD
 # install
 
 install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/cldnn_global_custom_kernels
-        DESTINATION ${IE_CPACK_LIBRARY_PATH}
+        DESTINATION ${IE_CPACK_RUNTIME_PATH}
         COMPONENT gpu)
 
 install(FILES "${clDNN_SOURCE_DIR}/kernel_selector/core/cache/cache.json"
-        DESTINATION ${IE_CPACK_LIBRARY_PATH}
+        DESTINATION ${IE_CPACK_RUNTIME_PATH}
         COMPONENT gpu)
index 25bd481..139c295 100644 (file)
@@ -258,14 +258,11 @@ void Config::adjustKeyMapValues() {
         default: break;
         }
         key_config_map[PluginConfigParams::KEY_TUNING_MODE] = tm;
-        if (!tuningConfig.cache_file_path.empty())
-            key_config_map[PluginConfigParams::KEY_TUNING_FILE] = tuningConfig.cache_file_path;
+        key_config_map[PluginConfigParams::KEY_TUNING_FILE] = tuningConfig.cache_file_path;
     }
 
-    if (!graph_dumps_dir.empty())
-        key_config_map[CLDNNConfigParams::KEY_CLDNN_GRAPH_DUMPS_DIR] = graph_dumps_dir;
-    if (!sources_dumps_dir.empty())
-        key_config_map[CLDNNConfigParams::KEY_CLDNN_SOURCES_DUMPS_DIR] = sources_dumps_dir;
+    key_config_map[CLDNNConfigParams::KEY_CLDNN_GRAPH_DUMPS_DIR] = graph_dumps_dir;
+    key_config_map[CLDNNConfigParams::KEY_CLDNN_SOURCES_DUMPS_DIR] = sources_dumps_dir;
 
     key_config_map[PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS] = std::to_string(throughput_streams);
     key_config_map[PluginConfigParams::KEY_DEVICE_ID] = device_id;
index 4161630..e41e649 100644 (file)
 #include "ie_plugin_config.hpp"
 #include "details/caseless.hpp"
 #include <details/ie_cnn_network_tools.h>
+#include <ngraph/opsets/opset2.hpp>
+#include <ngraph/op/fused/gelu.hpp>
+#include <generic_ie.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.hpp>
+#include <transformations/convert_opset2_to_opset1/convert_opset2_to_opset1.hpp>
+#include "convert_function_to_cnn_network.hpp"
 
 #undef min
 #undef max
@@ -48,6 +54,48 @@ struct clDNNEngine::impl {
     CLDNNPlugin::Config m_config;
 };
 
+cldnn::device_info clDNNEngine::GetDeviceInfo(const std::map<std::string, std::string> &config) const {
+    auto device_info = device_map.begin()->second.get_info();
+    if (config.find(PluginConfigParams::KEY_DEVICE_ID) != config.end()) {
+        auto val = config.at(PluginConfigParams::KEY_DEVICE_ID);
+        if (device_map.find(val) == device_map.end()) {
+            THROW_IE_EXCEPTION << "Invalid device ID: " << val;
+        }
+        device_info = device_map.at(val).get_info();
+    }
+
+    return device_info;
+}
+
+InferenceEngine::ICNNNetwork::Ptr clDNNEngine::CloneNetwork(const InferenceEngine::ICNNNetwork& network) const {
+    std::shared_ptr<ICNNNetwork> clonedNetwork(nullptr);
+    if (network.getFunction()) {
+        const auto transformations_callback = [](const std::shared_ptr<const ::ngraph::Node> &node) -> bool {
+            return std::dynamic_pointer_cast<const ::ngraph::opset2::Gelu>(node) != nullptr;
+        };
+        CNNNetwork net(network.getFunction());
+        auto nGraphFunc = net.getFunction();
+        // Disable shape inference (WA for generic operations)
+        ::ngraph::op::GenericIE::DisableReshape noReshape(nGraphFunc);
+
+        // Note: instead of running all conversion transformations, you can compose your own transformation pipeline
+        ngraph::pass::ConvertOpSet2ToOpSet1(transformations_callback).run_on_function(nGraphFunc);
+        ngraph::pass::ConvertOpSet1ToLegacy(transformations_callback).run_on_function(nGraphFunc);
+        clonedNetwork = InferenceEngine::details::convertFunctionToICNNNetwork(nGraphFunc, network);
+    } else {
+        clonedNetwork = cloneNet(network);
+    }
+
+    auto implNetwork = std::dynamic_pointer_cast<InferenceEngine::details::CNNNetworkImpl>(clonedNetwork);
+    if (implNetwork) {
+        // valid for CNNNetworkImpl only, since ICNNNetwork has no API to modify the network
+        ConstTransformer transformator(implNetwork.get());
+        transformator.fullTrim();
+    }
+
+    return clonedNetwork;
+}
+
 clDNNEngine::clDNNEngine() : m_defaultContext(nullptr) {
     _pluginName = "GPU";
     _impl = std::make_shared<impl>();
@@ -103,13 +151,8 @@ ExecutableNetworkInternal::Ptr clDNNEngine::LoadExeNetworkImpl(const InferenceEn
     check_inputs(_networkInputs);
 
     CLDNNPlugin::Config conf = _impl->m_config;
-    auto iter = device_map.find(conf.device_id);
-    auto device_info = iter != device_map.end() ?
-                       iter->second.get_info() :
-                       device_map.begin()->second.get_info();
-
+    auto device_info = GetDeviceInfo(config);
     conf.enableInt8 = device_info.supports_imad || device_info.supports_immad;
-
     conf.UpdateFromMap(config);
 
     if (conf.enableDynamicBatch) {
@@ -143,11 +186,7 @@ ExecutableNetworkInternal::Ptr clDNNEngine::LoadExeNetworkImpl(const InferenceEn
 
     context = m_defaultContext;
 
-    auto clonedNetwork = cloneNet(network);
-    ConstTransformer transformator(clonedNetwork.get());
-    transformator.fullTrim();
-
-    return std::make_shared<CLDNNExecNetwork>(*clonedNetwork, context, conf);
+    return std::make_shared<CLDNNExecNetwork>(*CloneNetwork(network), context, conf);
 }
 
 ExecutableNetworkInternal::Ptr clDNNEngine::LoadExeNetworkImpl(const InferenceEngine::ICore * /*core*/, const InferenceEngine::ICNNNetwork &network,
@@ -163,25 +202,15 @@ ExecutableNetworkInternal::Ptr clDNNEngine::LoadExeNetworkImpl(const InferenceEn
     }
 
     CLDNNPlugin::Config conf = getContextImpl(casted)->GetConfig();
-    auto iter = device_map.find(conf.device_id);
-    auto device_info = iter != device_map.end() ?
-                       iter->second.get_info() :
-                       device_map.begin()->second.get_info();
-
+    auto device_info = GetDeviceInfo(config);
     conf.enableInt8 = device_info.supports_imad || device_info.supports_immad;
-
-    // TODO - change this when context config and network config will be separated
     conf.UpdateFromMap(config);
 
     if (conf.enableDynamicBatch) {
         conf.max_dynamic_batch = static_cast<int>(network.getBatchSize());
     }
 
-    auto clonedNetwork = cloneNet(network);
-    ConstTransformer transformator(clonedNetwork.get());
-    transformator.fullTrim();
-
-    return std::make_shared<CLDNNExecNetwork>(*clonedNetwork, casted, conf);
+    return std::make_shared<CLDNNExecNetwork>(*CloneNetwork(network), casted, conf);
 }
 
 RemoteContext::Ptr clDNNEngine::CreateContext(const ParamMap& params) {
@@ -218,6 +247,9 @@ void clDNNEngine::QueryNetwork(const ICNNNetwork& network, const std::map<std::s
     std::vector <CNNLayer::Ptr> concats;
     std::vector <CNNLayer::Ptr> nextLayerDependent;
 
+    // Verify device id
+    GetDeviceInfo(config);
+
     std::vector<CNNLayerPtr> sortedLayers = CNNNetSortTopologically(network);
     for (auto layer : sortedLayers) {
         if (CaselessEq<std::string>()(layer->type, "DetectionOutput")) {
index c2f024a..50c06c8 100644 (file)
@@ -25,6 +25,8 @@ class clDNNEngine : public InferenceEngine::InferencePluginInternal,
 
     CLDNNRemoteCLContext::Ptr m_defaultContext;
 
+    cldnn::device_info GetDeviceInfo(const std::map<std::string, std::string> &config) const;
+    InferenceEngine::ICNNNetwork::Ptr CloneNetwork(const InferenceEngine::ICNNNetwork& network) const;
 public:
     clDNNEngine();
 
index 0f217db..f7c1c7d 100644 (file)
@@ -250,6 +250,7 @@ void CLDNNInferRequest::copyInputData(std::shared_ptr<cldnn::network> network,
     case Precision::BOOL: {
         uint8_t* blob_ptr = const_cast<uint8_t*>(locked.as<const uint8_t*>()) + offset;
         network->set_input_data(internalName, cldnn::memory::attach(inputLayout, blob_ptr, n));
+        break;
     }
     default:
         THROW_IE_EXCEPTION << "The plugin does not support input " << inputBlob.getTensorDesc().getPrecision() << " precision";
@@ -808,6 +809,28 @@ void CLDNNInferRequest::GetPerformanceCounts(
     }
 }
 
+namespace {
+
+template <typename T>
+void copyToFloat(float* dst, const InferenceEngine::Blob* src) {
+    if (!dst) {
+        return;
+    }
+    const InferenceEngine::TBlob<T>* t_blob = dynamic_cast<const InferenceEngine::TBlob<T>*>(src);
+    if (t_blob == nullptr) {
+        THROW_IE_EXCEPTION << "input type is " << src->getTensorDesc().getPrecision() << " but input is not "
+                           << typeid(T).name();
+    }
+
+    const T* srcPtr = t_blob->readOnly();
+    if (srcPtr == nullptr) {
+        THROW_IE_EXCEPTION << "Input data was not allocated.";
+    }
+    for (size_t i = 0; i < t_blob->size(); i++) dst[i] = srcPtr[i];
+}
+
+}  // namespace
+
 void CLDNNInferRequest::PrepareInput(const cldnn::primitive_id &inputName, const Blob &inputBlob) {
     // Get input layout
     if (m_graph->GetInputLayouts().find(inputName) == m_graph->GetInputLayouts().end()) {
@@ -837,9 +860,7 @@ void CLDNNInferRequest::PrepareInput(const cldnn::primitive_id &inputName, const
         // clDNN doesn't support I16 input precision, so we always have to convert input data to fp32 precision
         const cldnn::memory& fp32_mem = inputsMemory.at(inputName+fp32_suffix);
         cldnn::pointer<float> ptr = fp32_mem.pointer<float>();
-        IE_SUPPRESS_DEPRECATED_START
-        InferenceEngine::copyToFloat<int16_t>(ptr.data(), &inputBlob);
-        IE_SUPPRESS_DEPRECATED_END
+        copyToFloat<int16_t>(ptr.data(), &inputBlob);
         _nw_ptr->set_input_data(internalName, fp32_mem);
     } else if (is_same_buffer(inputBlob, memory)) {
         // If input memory was allocated by cldnn engine and wasn't overwritten by user set_input_data method won't copy input data.
index c18dc0f..00173b5 100644 (file)
@@ -567,6 +567,7 @@ Program::LayerType Program::LayerTypeFromStr(const std::string &str) {
         { "Sinh" , Sinh },
         { "Cosh" , Cosh },
         { "Swish" , Swish },
+        { "Gelu" , Gelu },
         { "Atanh" , Atanh },
         { "Floor" , Floor },
         { "Ceil" , Ceil },
@@ -1142,6 +1143,7 @@ void Program::CreateSingleLayerPrimitive(cldnn::topology& topology, InferenceEng
         case SoftPlus:
         case SoftSign:
         case Swish:
+        case Gelu:
             CreateActivationPrimitive(topology, layer, LayerTypeFromStr(layer->type));
             break;
         case LRN: CreateLRNPrimitive(topology, layer);
@@ -2760,6 +2762,8 @@ void Program::CreateActivationPrimitive(cldnn::topology& topology, InferenceEngi
             activationType = ELU;
         } else if (activation_type == "swish")  {
             activationType = Swish;
+        } else if (activation_type == "gelu")  {
+            activationType = Gelu;
         } else if (activation_type == "relu")  {
             activationType = ReLU;
         } else if (activation_type == "relu6")  {
@@ -2948,6 +2952,11 @@ void Program::CreateActivationPrimitive(cldnn::topology& topology, InferenceEngi
         func = cldnn::activation_func::swish;
         break;
     }
+    case Gelu:
+    {
+        func = cldnn::activation_func::gelu;
+        break;
+    }
     case Sign:
     {
         func = cldnn::activation_func::sign;
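Gelu is routed to cldnn::activation_func::gelu above. For reference, a scalar sketch of the widely used erf-based GELU definition (assuming the kernel follows it):

    #include <cmath>

    // gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
    float gelu_ref(float x) {
        return 0.5f * x * (1.0f + std::erf(x / std::sqrt(2.0f)));
    }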
@@ -4222,11 +4231,80 @@ void Program::CreateSelectPrimitive(cldnn::topology& topology, InferenceEngine::
     ValidateLayer(layer, 3);
     auto inputPrimitives = GetPrevLayersPrimitives(layer);
 
-    auto layerName = layer_type_name_ID(layer);
-    auto primitive = cldnn::select(layerName, inputPrimitives[0], inputPrimitives[1], inputPrimitives[2]);
+    auto selectLayerName = layer_type_name_ID(layer);
+
+    auto outDims = layer->outData[0]->getTensorDesc().getDims();
+    auto outDimsN = outDims.size();
+
+    std::string broadcast_type = layer->GetParamAsString("auto_broadcast", "numpy");
+
+    if ((broadcast_type != "none") && (broadcast_type != "numpy")) {
+        THROW_CLDNN_EXCEPTION("Unsupported broadcast type (" + broadcast_type +
+                                  ") in layer " + selectLayerName);
+    }
+
+    auto selectSpecificTensor = [](const InferenceEngine::SizeVector& dims, int def = 1) {
+        switch (dims.size()) {
+        case 0: return cldnn::tensor(cldnn::batch(def), cldnn::feature(def), cldnn::spatial(def, def));
+        case 1: return cldnn::tensor(cldnn::batch(def), cldnn::feature(def), cldnn::spatial(dims[0], def));
+        case 2: return cldnn::tensor(cldnn::batch(def), cldnn::feature(def), cldnn::spatial(dims[1], dims[0]));
+        case 3: return cldnn::tensor(cldnn::batch(def), cldnn::feature(dims[0]), cldnn::spatial(dims[2], dims[1]));
+        case 4: return cldnn::tensor(cldnn::batch(dims[0]), cldnn::feature(dims[1]), cldnn::spatial(dims[3], dims[2]));
+        case 5: return cldnn::tensor(cldnn::batch(dims[0]), cldnn::feature(dims[1]), cldnn::spatial(dims[4], dims[3], dims[2]));
+        case 6: return cldnn::tensor(cldnn::batch(dims[0]), cldnn::feature(dims[1]), cldnn::spatial(dims[5], dims[4], dims[3], dims[2]));
+        default: THROW_CLDNN_EXCEPTION("Invalid dimensions size(" << dims.size() << ") for Select layer");
+        }
+    };
+
+    if (broadcast_type == "numpy") {
+        // Preprocess inputs
+        for (size_t i = 0; i < inputPrimitives.size(); ++i) {
+            auto inputDims = layer->insData[i].lock()->getTensorDesc().getDims();
+            auto inputDimsN = inputDims.size();
+
+            // Add reorder if changing number of dimensions requires changing format
+            auto targetFormat = defaultFormatForDims(outDimsN);
+
+            if (targetFormat.value != defaultFormatForDims(inputDimsN).value) {
+                auto reorderName = selectLayerName + "_cldnn_in" + std::to_string(i) + "_reorder";
+                auto targetDatatype = DataTypeFromPrecision(layer->precision);
+                auto reorderPrim = cldnn::reorder(reorderName, inputPrimitives[i], targetFormat, targetDatatype);
+
+                topology.add(reorderPrim);
+                AddInnerPrimitiveToProfiler(reorderName, selectLayerName, layer);
+
+                inputPrimitives[i] = reorderName;
+            }
+
+            // Reshape the input if its rank differs from the output rank, or the Select-specific shape matches the default one
+            if (inputDimsN != outDimsN || inputDimsN < 4) {
+                auto reshapeName = selectLayerName + "_cldnn_in" + std::to_string(i) + "_reshape";
+
+                // Extend input dimensions to the same size as output dimensions by prepending ones
+                inputDims.insert(inputDims.begin(), outDimsN - inputDimsN, 1ul);
+
+                auto targetShape = selectSpecificTensor(inputDims);
+
+                auto reshapePrim = cldnn::reshape(reshapeName, inputPrimitives[i], targetShape);
+
+                topology.add(reshapePrim);
+                AddInnerPrimitiveToProfiler(reshapeName, selectLayerName, layer);
+
+                inputPrimitives[i] = reshapeName;
+            }
+        }
+    }
+
+    auto primitive = cldnn::select(
+        selectLayerName,
+        inputPrimitives[0],
+        inputPrimitives[1],
+        inputPrimitives[2],
+        cldnn::padding(),
+        broadcast_type);
 
     topology.add(primitive);
-    AddPrimitiveToProfiler(layerName, layer);
+    AddPrimitiveToProfiler(selectLayerName, layer);
 }
 
 bool Program::IsValidSplitConvMerge(const InferenceEngine::SplitLayer *splitLayer) const {
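A worked illustration of the rank alignment done in the numpy-broadcast branch of CreateSelectPrimitive above (shapes are hypothetical):

    #include <cstddef>
    #include <vector>

    void alignRankExample() {
        std::vector<size_t> inputDims = {4, 5};  // Select input of rank 2
        const size_t outDimsN = 4;               // Select output of rank 4
        // Same insert as in the diff: prepend ones up to the output rank
        inputDims.insert(inputDims.begin(), outDimsN - inputDims.size(), 1ul);
        // inputDims is now {1, 1, 4, 5}
    }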
index c982acf..b1cf269 100644 (file)
@@ -198,6 +198,7 @@ public:
         SoftPlus,
         SoftSign,
         Swish,
+        Gelu,
         Sin,
         Sinh,
         Cos,
index c3e5244..6825717 100644 (file)
@@ -1339,9 +1339,8 @@ void GNAPluginNS::backend::AMIntelDNN::InitGNAStruct(intel_nnet_type_t *ptr_nnet
                                 comp.num_columns_out,
                                 comp.op.affine.num_bytes_per_bias,
                                 comp.op.affine.ptr_biases),
-                        createGna2Tensor1D(
+                        createGna2TensorPwl(
                                 0,
-                                1,
                                 nullptr),  // Temporary PWL; a non-null operand is required by Gna2OperationInitRecurrent
                         create_uint32_parameter(1));    // TODO: GNA2: Handle other delays
                 AdvanceOperationIfAllApplied(component, i, gnaOperation);
@@ -1402,9 +1401,8 @@ void GNAPluginNS::backend::AMIntelDNN::InitGNAStruct(intel_nnet_type_t *ptr_nnet
                                 comp.op.conv1D.num_filters,
                                 comp.op.conv1D.num_bytes_per_bias,
                                 comp.op.conv1D.ptr_biases),
-                        createGna2Tensor1D(
+                        createGna2TensorPwl(
                                 0,
-                                1,
                                 nullptr),  // Temporary PWL; a non-null operand is required by Gna2OperationInitConvolution
                         create_shape1D_parameter(
                                 comp.op.conv1D.num_feature_maps * comp.op.conv1D.num_feature_map_columns),
@@ -1535,7 +1533,7 @@ void GNAPluginNS::backend::AMIntelDNN::InitGNAStruct(intel_nnet_type_t *ptr_nnet
                         || ((component[i - 1].operation == kDnnMaxPoolOp) &&
                         (component[i - 2].operation == kDnnConvolutional1dOp))) {
                         if (gnaOperation->Operands[PwlOpIdx] == nullptr) {
-                            HelperGna2OperationSetOperand(gnaOperation, gnaUserAllocator, gnaUserFree, PwlOpIdx, createGna2Tensor1D(1, 1, nullptr));
+                            HelperGna2OperationSetOperand(gnaOperation, gnaUserAllocator, gnaUserFree, PwlOpIdx, createGna2TensorPwl(1, nullptr));
                         }
                         auto& pwlTensor = const_cast<Gna2Tensor&>(*gnaOperation->Operands[PwlOpIdx]);
                         pwlTensor = HelperGna2TensorInit1D(comp.op.pwl.num_segments, Gna2DataTypePwlSegment, comp.op.pwl.ptr_segments);
@@ -1544,7 +1542,7 @@ void GNAPluginNS::backend::AMIntelDNN::InitGNAStruct(intel_nnet_type_t *ptr_nnet
                                 THROW_GNA_EXCEPTION << "CNN output NumberOfDimensions != 3";
                             }
                             if (outputTensor.Shape.Dimensions[0] * outputTensor.Shape.Dimensions[1] * outputTensor.Shape.Dimensions[2] !=
-                                comp.num_columns_out) {
+                                comp.num_columns_out * comp.num_rows_out) {
                                 THROW_GNA_EXCEPTION << "PWL after CNN output size mismatch";
                             }
                         }
index 7038782..f8338c6 100644 (file)
@@ -52,30 +52,34 @@ void * ExportSueLegacyUsingGnaApi2(
 }
 
 
-void ExportLdForNoMmu(uint32_t modelId, std::ostream & outStream) {
+void ExportLdForDeviceVersion(
+    uint32_t modelId,
+    std::ostream & outStream,
+    const Gna2DeviceVersion deviceVersionToExport) {
+
     uint32_t exportConfig;
     auto status = Gna2ModelExportConfigCreate(gnaUserAllocatorAlignedPage, &exportConfig);
     GNADeviceHelper::checkGna2Status(status);
 
     status = Gna2ModelExportConfigSetSource(exportConfig, 0, modelId);
     GNADeviceHelper::checkGna2Status(status);
-    status = Gna2ModelExportConfigSetTarget(exportConfig, Gna2DeviceVersionEmbedded3_0);
+    status = Gna2ModelExportConfigSetTarget(exportConfig, deviceVersionToExport);
     GNADeviceHelper::checkGna2Status(status);
 
-    void * ldNoMmu;
-    uint32_t ldNoMmuSize;
+    void * ldDump;
+    uint32_t ldDumpSize;
 
     status = Gna2ModelExport(exportConfig,
         Gna2ModelExportComponentLayerDescriptors,
-        &ldNoMmu, &ldNoMmuSize);
+        &ldDump, &ldDumpSize);
     GNADeviceHelper::checkGna2Status(status);
 
-    outStream.write(static_cast<char*>(ldNoMmu), ldNoMmuSize);
+    outStream.write(static_cast<char*>(ldDump), ldDumpSize);
 
     status = Gna2ModelExportConfigRelease(exportConfig);
     GNADeviceHelper::checkGna2Status(status);
 
-    gnaUserFree(ldNoMmu);
+    gnaUserFree(ldDump);
 }
 
 void ExportGnaDescriptorPartiallyFilled(uint32_t number_of_layers, std::ostream& outStream) {
index fcf3e4b..04d8b10 100644 (file)
@@ -6,6 +6,7 @@
 
 #if GNA_LIB_VER == 2
 
+#include "gna2-common-api.h"
 #include "gna2-model-suecreek-header.h"
 
 #include <cstdint>
@@ -15,7 +16,11 @@ void * ExportSueLegacyUsingGnaApi2(
     uint32_t modelId,
     Gna2ModelSueCreekHeader* modelHeader);
 
-void ExportLdForNoMmu(uint32_t modelId, std::ostream & outStream);
+void ExportLdForDeviceVersion(
+    uint32_t modelId,
+    std::ostream & outStream,
+    Gna2DeviceVersion deviceVersionToExport);
+
 void ExportGnaDescriptorPartiallyFilled(uint32_t numberOfLayers, std::ostream & outStream);
 
 #endif
index 626ae0f..9397962 100644 (file)
@@ -64,6 +64,14 @@ Gna2Tensor * createGna2Tensor1D(uint32_t x, uint32_t byteSize, void* data) {
     return input;
 }
 
+Gna2Tensor * createGna2TensorPwl(uint32_t x, void* data) {
+    auto ret = createGna2Tensor1D(x, 1, data);
+    ret->Type = Gna2DataTypePwlSegment;
+    if (data == nullptr)
+        ret->Mode = Gna2TensorModeDisabled;
+    return ret;
+}
+
 Gna2Tensor * createGna2BiasTensor1D(uint32_t x, uint32_t byteSize, void* data) {
     const auto input = reinterpret_cast<Gna2Tensor*>(gnaUserAllocator(sizeof(Gna2Tensor)));
     if (byteSize == 8) {
index c8699e3..c8ef8cd 100644 (file)
@@ -46,6 +46,8 @@ Gna2Tensor HelperGna2TensorInit3D(uint32_t x, uint32_t y, uint32_t z, Gna2DataTy
 
 Gna2Tensor * createGna2Tensor1D(uint32_t x, uint32_t byteSize, void* data);
 
+Gna2Tensor * createGna2TensorPwl(uint32_t x, void* data);
+
 Gna2Tensor * createGna2BiasTensor1D(uint32_t x, uint32_t byteSize, void* data);
 
 Gna2Tensor * createGna2Tensor2D(uint32_t x, uint32_t y, uint32_t byteSize, void* data);
index 3e7f881..4605cae 100644 (file)
@@ -171,14 +171,18 @@ GNADeviceHelper::DumpResult GNADeviceHelper::dumpXnn(const uint32_t modelId) {
 
 #if GNA_LIB_VER == 2
 
-void GNADeviceHelper::dumpXnnNoMmu(const uint32_t modelId, std::ostream & outStream) {
+void GNADeviceHelper::dumpXnnForDeviceVersion(
+    const uint32_t modelId,
+    std::ostream & outStream,
+    const Gna2DeviceVersion targetDeviceVersion) {
+
     Gna2ModelSueCreekHeader sueHeader;
     auto ptr = ExportSueLegacyUsingGnaApi2(modelId, &sueHeader);
     gnaUserFree(ptr);
 
     ExportGnaDescriptorPartiallyFilled(sueHeader.NumberOfLayers, outStream);
 
-    ExportLdForNoMmu(modelId, outStream);
+    ExportLdForDeviceVersion(modelId, outStream, targetDeviceVersion);
     if (dumpXNNROPtr == nullptr) {
         THROW_GNA_EXCEPTION << "Bad RO pointer (nullptr)";
     }
index 1df6dc6..ee97bd4 100644 (file)
@@ -142,7 +142,9 @@ public:
 #else
 
     DumpResult dumpXnn(const uint32_t modelId);
-    void dumpXnnNoMmu(const uint32_t modelId, std::ostream & outStream);
+    void dumpXnnForDeviceVersion(const uint32_t modelId,
+        std::ostream & outStream,
+        Gna2DeviceVersion targetDeviceVersion);
 #endif
     void free(void * ptr);
 
index a5b352c..60ff272 100644 (file)
@@ -350,6 +350,7 @@ void GNAGraphCompiler::PowerPrimitive(InferenceEngine::CNNLayerPtr layer) {
     uint32_t num_rows_in = FROM_IR_DIM(input, 1);
     uint32_t num_columns_in = FROM_IR_DIM(input, 2);
     uint32_t num_rows_out = num_rows_in;
+    uint32_t num_padding = ALIGN(num_rows_in, 8) - num_rows_in;
 
     void* ptr_inputs = nullptr;
     void* ptr_outputs = nullptr;
@@ -359,9 +360,9 @@ void GNAGraphCompiler::PowerPrimitive(InferenceEngine::CNNLayerPtr layer) {
     auto& currentComponent = dnnComponents.addComponent(layer->name, "power");
 
     dnn->InitAffineComponent(currentComponent,
-        num_rows_in,
+        num_rows_in + num_padding,
         num_columns_in,
-        num_rows_out,
+        num_rows_out + num_padding,
         input->getPrecision().size(),
         outputs->getPrecision().size(),
         // TODO: only fp32 and Int16 tested
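The added num_padding follows GNA's 8-element row granularity. A small self-checking sketch of the assumed ALIGN semantics (align_up is a stand-in for the plugin's macro):

    #include <cstdint>

    // Assumed semantics: round x up to the next multiple of n
    constexpr uint32_t align_up(uint32_t x, uint32_t n) { return (x + n - 1) / n * n; }

    static_assert(align_up(10, 8) - 10 == 6, "10 rows require 6 rows of padding");
    static_assert(align_up(16, 8) - 16 == 0, "already-aligned sizes get no padding");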
index 3d462a9..0eef490 100644 (file)
@@ -66,6 +66,7 @@ inline std::vector<int> CNNLayerFindInsDataIdxes(DataPtr sourceData, CNNLayerPtr
 inline InferenceEngine::CNNLayerPtr  CNNNetPrevLayer(const InferenceEngine::CNNLayerPtr & layer, int idx = 0) {
     if (CNNNetHasPrevLayer(layer.get(), idx)) {
         auto prevData = layer->insData[idx].lock();
+        IE_ASSERT(prevData != nullptr);
         return prevData->getCreatorLayer().lock();
     } else {
         THROW_IE_EXCEPTION << "Layer " << layer->name << " has no previous layer";
index 1099911..74f3af1 100644 (file)
@@ -239,6 +239,7 @@ void GNAModelSerial::Export(void * basePointer, size_t gnaGraphSize, std::ostrea
         out.elements_count = ep.elements_count;
         out.descriptor_offset = offsetFromBase(ep.descriptor_ptr);
         out.scaleFactor = ep.scaleFactor;
+        out.element_size = ep.element_size;
         return out;
     };
     /**
index 27caace..28dacfb 100644 (file)
@@ -108,7 +108,7 @@ struct ModelHeader {
 class GNAModelSerial {
  public:
     /*
-     * In runtime endpoint mostly same as in serial version, except pf descriptor field
+     * In runtime endpoint mostly same as in serial version, except for the descriptor field
      */
     struct RuntimeEndPoint {
         /**
index 23f0cdc..1a96164 100644 (file)
@@ -524,6 +524,18 @@ void GNAPlugin::LoadNetwork(ICNNNetwork &network) {
         // gets output layer pointer in original topology not in cloned
         auto outLayer = outPort.second->getCreatorLayer().lock();
 
+        // Memory layers are not dnnComponents, hence we need to make the switch via the identity layer
+        if (outLayer->type == "Memory") {
+            // traverse memory connections to find the corresponding output memory
+            for (auto && memConnection : graphCompiler.memory_connection) {
+                if (memConnection.second.getInput()->name == outLayer->name) {
+                    // if connection is found, replace memory input layer with memory output layer
+                    outLayer = memConnection.second.getOutput();
+                    break;
+                }
+            }
+        }
+
         // searching for outData represented in GNA blob
         // using ufs - upper first search
         gnalog() << "[UFS] searching for : "<< outPort.first << " representation in GNA\n";
@@ -745,8 +757,24 @@ void GNAPlugin::createRequestConfigsForGnaModels() {
         gnaRequestConfigToRequestIdMap.push_back(std::make_tuple(requestConfigId, -1, InferenceEngine::BlobMap()));
     }
 }
+
 #endif
 
+int GNAPlugin::GetDeviceVersionFromString(const std::string deviceString) {
+    constexpr uint32_t embeddedSuffix = 0xE;
+    if (deviceString.empty())
+        return 0x100 + embeddedSuffix;
+    if (deviceString.size() == 4 && deviceString.substr(0, 3) == "GNA") {
+        int version = deviceString[3] - '0';
+        if (version > 0) {
+            version <<= 8;
+            version += embeddedSuffix;
+            return version;
+        }
+    }
+    THROW_GNA_EXCEPTION << "Wrong GNA generation for embedded model dump: " << deviceString;
+}
+
 void GNAPlugin::DumpXNNToFile() const {
     // TODO: output precision as well as pointer might be incorrect, LSTM for sure
     // GNA seems to automatically set layer 0 as output and adjust its pointer / precision / size respectively
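
GetDeviceVersionFromString above encodes the generation digit of a "GNA<n>" string into the high byte and adds the 0xE embedded suffix, so "GNA1" (and the empty default) maps to 0x10E and "GNA3" to 0x30E; the 0x10E case is what DumpXNNToFile later compares against Gna2DeviceVersionEmbedded1_0. A small standalone reimplementation of the same rule, for illustration only:

    #include <cassert>
    #include <stdexcept>
    #include <string>

    int deviceVersionFromString(const std::string& s) {
        constexpr int embeddedSuffix = 0xE;
        if (s.empty())
            return 0x100 + embeddedSuffix;                 // default: GNA1 embedded
        if (s.size() == 4 && s.compare(0, 3, "GNA") == 0) {
            const int version = s[3] - '0';
            if (version > 0)
                return (version << 8) + embeddedSuffix;    // digit goes into the high byte
        }
        throw std::invalid_argument("Wrong GNA generation: " + s);
    }

    int main() {
        assert(deviceVersionFromString("")     == 0x10E);
        assert(deviceVersionFromString("GNA1") == 0x10E);
        assert(deviceVersionFromString("GNA3") == 0x30E);
        return 0;
    }
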
@@ -754,17 +782,15 @@ void GNAPlugin::DumpXNNToFile() const {
         return;
     }
 
-    if (config.dumpXNNGeneration != "GNA1" &&
-        config.dumpXNNGeneration != "GNA3" &&
-        !config.dumpXNNGeneration.empty()) {
-        THROW_GNA_EXCEPTION << "Wrong GNA generation for embedded model dump: " << config.dumpXNNGeneration;
-    }
+    const auto versionInt = GetDeviceVersionFromString(config.dumpXNNGeneration);
 
     if (!gnadevice) {
         THROW_GNA_EXCEPTION << "Cannot generate XNNDump for float network";
     }
     std::ofstream dumpStream(config.dumpXNNPath, std::ios::out | std::ios::binary);
 #if GNA_LIB_VER == 1
+    if (versionInt != 0x10E)
+        THROW_GNA_EXCEPTION << "Wrong GNA version for embedded model dump: " << config.dumpXNNGeneration;
     auto dump = gnadevice->dumpXnn(&std::get<0>(nnets.front())->obj, ptr_active_indices, num_active_indices);
     dump.header.rw_region_size = gnamem->getRWBytes();
     dump.header.input_scaling_factor = inputsDesc->inputScaleFactors.front();
@@ -773,7 +799,7 @@ void GNAPlugin::DumpXNNToFile() const {
     dumpStream.write(reinterpret_cast<char*>(dump.model.get()), dump.header.model_size);
 #else
     auto const modelId = gnadevice->createModel(std::get<0>(gnaModels.front())->obj);
-    if (config.dumpXNNGeneration != "GNA3") {
+    if (versionInt == Gna2DeviceVersionEmbedded1_0) {
         auto dump = gnadevice->dumpXnn(modelId);
         dump.header.RwRegionSize = gnamem->getRWBytes();
         dump.header.InputScalingFactor = inputsDesc->inputScaleFactors.front();
@@ -781,7 +807,9 @@ void GNAPlugin::DumpXNNToFile() const {
         dumpStream.write(reinterpret_cast<char*>(&dump.header), sizeof(Gna2ModelSueCreekHeader));
         dumpStream.write(reinterpret_cast<char*>(dump.model.get()), dump.header.ModelSize);
     } else {
-        gnadevice->dumpXnnNoMmu(modelId, dumpStream);
+        static_assert(sizeof(versionInt) >= sizeof(Gna2DeviceVersion), "");
+        gnadevice->dumpXnnForDeviceVersion(modelId, dumpStream,
+            *reinterpret_cast<const Gna2DeviceVersion*>(&versionInt));
     }
     gnadevice->releseModel(modelId);
 #endif
@@ -1175,7 +1203,7 @@ InferenceEngine::IExecutableNetwork::Ptr GNAPlugin::ImportNetwork(const std::str
     DumpXNNToFile();
 
 #ifdef PLOT
-    dnn->WriteGraphWizModel("gna-blob.dot");
+    dnn->WriteGraphWizModel("gna-blob-imported.dot");
 #endif
 #if GNA_LIB_VER == 2
     createRequestConfigsForGnaModels();
index 10ccc7b..f59d525 100644 (file)
@@ -73,6 +73,8 @@ class GNAPlugin : public InferenceEngine::IInferencePluginInternal, public std::
     void createRequestConfigsForGnaModels();
 #endif
 
+    static int GetDeviceVersionFromString(const std::string deviceString);
+
     std::shared_ptr<GNADeviceHelper> gnadevice;
     /**
      * @brief size of RW segment without extra memory for parallel execution
index 55a047d..e9660bb 100644 (file)
@@ -3,7 +3,6 @@
 //
 
 #include "ie_metric_helpers.hpp"
-#include "cpp/ie_cnn_net_reader.h"
 #include "hetero_executable_network.hpp"
 #include "hetero_async_infer_request.hpp"
 #include "ie_util_internal.hpp"
@@ -23,7 +22,6 @@
 #include <array>
 #include <cstdint>
 
-#include <ie_plugin_dispatcher.hpp>
 #include "details/caseless.hpp"
 #include "ie_plugin_config.hpp"
 #include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
@@ -424,24 +422,24 @@ HeteroExecutableNetwork::HeteroExecutableNetwork(std::istream&
             executableNetwork = pluginAPI->ImportNetwork(heteroModel, supportedConfig);
         } catch(InferenceEngine::details::InferenceEngineException& ie_ex) {
             if (std::string::npos != std::string{ie_ex.what()}.find(NOT_IMPLEMENTED_str)) {
-                IE_SUPPRESS_DEPRECATED_START
-                CNNNetReader reader;
+                // read XML content
                 std::string xmlString;
                 std::getline(heteroModel, xmlString);
-                reader.ReadNetwork(xmlString.data(), xmlString.size());
                 std::uint64_t dataSize = 0;
                 heteroModel.read(reinterpret_cast<char*>(&dataSize), sizeof(dataSize));
+
+                // read blob content
+                InferenceEngine::Blob::Ptr dataBlob;
                 if (0 != dataSize) {
-                    auto dataBlob = InferenceEngine::make_shared_blob<std::uint8_t>(
+                    dataBlob = InferenceEngine::make_shared_blob<std::uint8_t>(
                         InferenceEngine::TensorDesc(InferenceEngine::Precision::U8,
                                                     {static_cast<std::size_t>(dataSize)},
                                                     InferenceEngine::Layout::C));
                     dataBlob->allocate();
                     heteroModel.read(dataBlob->buffer(), dataSize);
-                    reader.SetWeights(std::move(dataBlob));
                 }
-                cnnnetwork = reader.getNetwork();
-                IE_SUPPRESS_DEPRECATED_END
+
+                cnnnetwork = _plugin->GetCore()->ReadNetwork(xmlString, std::move(dataBlob));
                 auto inputs = cnnnetwork.getInputsInfo();
                 auto inputsNode = subnetworkNode.child("inputs");
                 for (auto inputNode = inputsNode.child("input"); !inputNode.empty(); inputNode = inputNode.next_sibling("input")) {
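
The replacement drops the deprecated CNNNetReader in favour of ICore::ReadNetwork while keeping the serialized stream layout it parses: one line of IR XML, a binary uint64 weights size, then the raw weights bytes. A minimal round-trip of that layout (standalone, with a plain std::string standing in for the Inference Engine blob):

    #include <cassert>
    #include <cstdint>
    #include <sstream>
    #include <string>

    int main() {
        // Writer side: XML line, then <u64 size><raw bytes>, as the importer expects.
        std::stringstream model(std::ios::in | std::ios::out | std::ios::binary);
        const std::string xml = "<net name=\"subnet\"/>";
        const std::string weights = "\x01\x02\x03\x04";
        model << xml << '\n';
        const std::uint64_t dataSize = weights.size();
        model.write(reinterpret_cast<const char*>(&dataSize), sizeof(dataSize));
        model.write(weights.data(), static_cast<std::streamsize>(weights.size()));

        // Reader side, mirroring the import path above.
        std::string xmlString;
        std::getline(model, xmlString);
        std::uint64_t readSize = 0;
        model.read(reinterpret_cast<char*>(&readSize), sizeof(readSize));
        std::string blob(readSize, '\0');
        model.read(&blob[0], static_cast<std::streamsize>(readSize));

        assert(xmlString == xml && blob == weights);
        return 0;
    }
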
@@ -600,22 +598,107 @@ void HeteroExecutableNetwork::GetConfig(const std::string &name, InferenceEngine
         IE_ASSERT(it != _config.end());
         result = it->second == YES ? true : false;
     } else {
+        // find config key among plugin config keys
+        for (auto&& desc : networks) {
+            auto execNetwork = desc._network;
+            auto param = execNetwork.GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS));
+            for (auto && configKey : param.as<std::vector<std::string>>()) {
+                if (configKey == name) {
+                    result = execNetwork.GetConfig(configKey);
+                    return;
+                }
+            }
+        }
+
         THROW_IE_EXCEPTION << "Unsupported ExecutableNetwork config key: " << name;
     }
 }
 
+using Metrics = std::map<std::string, Parameter>;
+
+namespace {
+
+void collectPluginMetrics(std::vector<std::string> & baseMetrics,
+                          const std::vector<::Metrics> pluginMetrics) {
+    // check whether the metric has unique name and value among all the plugins
+    auto isMetricValueUnique = [&](const std::string & key,
+                                    const Parameter & value) -> bool {
+        if (std::find(baseMetrics.begin(), baseMetrics.end(), key) != baseMetrics.end())
+            return false;
+
+        for (auto && metrics : pluginMetrics) {
+            for (auto && metric : metrics)
+                if (key == metric.first && value != metric.second)
+                    return false;
+        }
+
+        return true;
+    };
+
+    // collect only unique metrics
+    std::vector<std::string> uniqueMetrics;
+    for (auto && metrics : pluginMetrics) {
+        for (auto && metric : metrics) {
+            if (isMetricValueUnique(metric.first, metric.second)) {
+                uniqueMetrics.push_back(metric.first);
+            }
+        }
+    }
+
+    // add plugin specific metrics which don't conflict with base ones
+    std::copy(uniqueMetrics.begin(), uniqueMetrics.end(), std::back_inserter(baseMetrics));
+}
+
+}  // namespace
+
 void HeteroExecutableNetwork::GetMetric(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *) const {
     if (METRIC_KEY(SUPPORTED_METRICS) == name) {
-        result = IE_SET_METRIC(SUPPORTED_METRICS, std::vector<std::string>{
+        std::vector<std::string> heteroMetrics = {
             METRIC_KEY(NETWORK_NAME),
             METRIC_KEY(SUPPORTED_METRICS),
             METRIC_KEY(SUPPORTED_CONFIG_KEYS),
-            METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)});
+            METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)
+        };
+
+        {
+            std::vector<::Metrics> pluginMetrics;
+            for (auto&& desc : networks) {
+                auto execNetwork = desc._network;
+                auto param = execNetwork.GetMetric(METRIC_KEY(SUPPORTED_METRICS));
+                ::Metrics metrics;
+                for (auto && metricName : param.as<std::vector<std::string>>()) {
+                    metrics[metricName] = execNetwork.GetMetric(metricName);
+                }
+                pluginMetrics.push_back(std::move(metrics));
+            }
+
+            collectPluginMetrics(heteroMetrics, pluginMetrics);
+        }
+
+        result = IE_SET_METRIC(SUPPORTED_METRICS, heteroMetrics);
     } else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
-        result = IE_SET_METRIC(SUPPORTED_CONFIG_KEYS, std::vector<std::string>{
+        std::vector<std::string> heteroConfigKeys = {
             "TARGET_FALLBACK",
             HETERO_CONFIG_KEY(DUMP_GRAPH_DOT),
-            CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS)});
+            CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS)
+        };
+
+        {
+            std::vector<::Metrics> pluginConfigKeys;
+            for (auto&& desc : networks) {
+                auto execNetwork = desc._network;
+                auto param = execNetwork.GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS));
+                ::Metrics configKeys;
+                for (auto && metricName : param.as<std::vector<std::string>>()) {
+                    configKeys[metricName] = execNetwork.GetConfig(metricName);
+                }
+                pluginConfigKeys.push_back(std::move(configKeys));
+            }
+
+            collectPluginMetrics(heteroConfigKeys, pluginConfigKeys);
+        }
+
+        result = IE_SET_METRIC(SUPPORTED_CONFIG_KEYS, heteroConfigKeys);
     } else if (METRIC_KEY(NETWORK_NAME) == name) {
         result = IE_SET_METRIC(NETWORK_NAME, _name);
     } else if (METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS) == name) {
@@ -625,6 +708,18 @@ void HeteroExecutableNetwork::GetMetric(const std::string &name, InferenceEngine
         }
         result = IE_SET_METRIC(OPTIMAL_NUMBER_OF_INFER_REQUESTS, value);
     } else {
+        // find metric key among plugin metrics
+        for (auto&& desc : networks) {
+            auto execNetwork = desc._network;
+            auto param = execNetwork.GetMetric(METRIC_KEY(SUPPORTED_METRICS));
+            for (auto && metricKey : param.as<std::vector<std::string>>()) {
+                if (metricKey == name) {
+                    result = execNetwork.GetMetric(metricKey);
+                    return;
+                }
+            }
+        }
+
         THROW_IE_EXCEPTION << "Unsupported ExecutableNetwork metric: " << name;
     }
 }
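
collectPluginMetrics above forwards a plugin metric only when its name is not already claimed by HETERO and every underlying plugin that reports it agrees on the value; conflicting entries are silently dropped. A simplified standalone model of that merge rule, with std::string standing in for InferenceEngine::Parameter and hypothetical metric names as test data:

    #include <algorithm>
    #include <cassert>
    #include <map>
    #include <string>
    #include <vector>

    using Metrics = std::map<std::string, std::string>;

    void collect(std::vector<std::string>& base, const std::vector<Metrics>& perPlugin) {
        for (const auto& metrics : perPlugin) {
            for (const auto& metric : metrics) {
                bool unique = std::find(base.begin(), base.end(), metric.first) == base.end();
                for (const auto& other : perPlugin)
                    for (const auto& candidate : other)
                        if (candidate.first == metric.first && candidate.second != metric.second)
                            unique = false;
                if (unique)
                    base.push_back(metric.first);
            }
        }
    }

    int main() {
        std::vector<std::string> base = {"NETWORK_NAME"};
        // Two plugins disagree on the same key, so it is not merged into the base list.
        collect(base, {{{"DEVICE_ARCHITECTURE", "GNA"}}, {{"DEVICE_ARCHITECTURE", "CPU"}}});
        assert(base.size() == 1);
        return 0;
    }
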
index 6c9fe6e..953fa90 100644 (file)
@@ -297,12 +297,15 @@ Parameter Engine::GetMetric(const std::string& name, const std::map<std::string,
     if (METRIC_KEY(SUPPORTED_METRICS) == name) {
         IE_SET_METRIC_RETURN(SUPPORTED_METRICS, std::vector<std::string>{
             METRIC_KEY(SUPPORTED_METRICS),
+            METRIC_KEY(FULL_DEVICE_NAME),
             METRIC_KEY(SUPPORTED_CONFIG_KEYS)});
     } else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
         IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, std::vector<std::string>{
             HETERO_CONFIG_KEY(DUMP_GRAPH_DOT),
             "TARGET_FALLBACK",
             CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS)});
+    } else if (METRIC_KEY(FULL_DEVICE_NAME) == name) {
+        IE_SET_METRIC_RETURN(FULL_DEVICE_NAME, std::string{"HETERO"});
     } else {
         THROW_IE_EXCEPTION << "Unsupported Plugin metric: " << name;
     }
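
With FULL_DEVICE_NAME now reported by the HETERO plugin, the device can be queried through the standard Core metric API; a minimal usage sketch, assuming the usual 2020-era Inference Engine headers:

    #include <ie_core.hpp>
    #include <ie_plugin_config.hpp>
    #include <iostream>

    int main() {
        InferenceEngine::Core core;
        const auto fullName =
            core.GetMetric("HETERO", METRIC_KEY(FULL_DEVICE_NAME)).as<std::string>();
        std::cout << fullName << std::endl;  // expected: HETERO
        return 0;
    }
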
index 68d6d0f..030dc76 100644 (file)
@@ -6,7 +6,6 @@
 
 #include "description_buffer.hpp"
 #include "ie_icore.hpp"
-#include "ie_error.hpp"
 #include "cpp_interfaces/impl/ie_plugin_internal.hpp"
 #include "cpp/ie_plugin_cpp.hpp"
 #include <memory>
index 54c9a23..832d69c 100644 (file)
@@ -174,38 +174,12 @@ target_compile_definitions(${TARGET_NAME} PRIVATE IMPLEMENT_INFERENCE_ENGINE_API
 ie_register_plugins(MAIN_TARGET ${TARGET_NAME}
                     POSSIBLE_PLUGINS HeteroPlugin clDNNPlugin GNAPlugin MKLDNNPlugin myriadPlugin)
 
-# Create NN Builder
-
-file(GLOB NN_BUILDER_LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/builders/*.cpp)
-
-# disable deprecated warnings for NN Builder
-
-function(nn_builder_disable_warnings)
-    disable_deprecated_warnings()
-    set_source_files_properties(${NN_BUILDER_LIBRARY_SRC} PROPERTIES COMPILE_FLAGS "${ie_c_cxx_deprecated}")
-endfunction()
-
-nn_builder_disable_warnings()
-
-add_library(${TARGET_NAME}_nn_builder SHARED ${NN_BUILDER_LIBRARY_SRC})
-
-target_compile_definitions(${TARGET_NAME}_nn_builder PRIVATE IMPLEMENT_INFERENCE_ENGINE_API)
-
-target_link_libraries(${TARGET_NAME}_nn_builder PUBLIC ${TARGET_NAME})
-
-target_include_directories(${TARGET_NAME}_nn_builder PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}"
-    $<TARGET_PROPERTY:inference_engine_plugin_api,INTERFACE_INCLUDE_DIRECTORIES>
-    "${IE_MAIN_SOURCE_DIR}/src/legacy_api/src")
-
-add_cpplint_target(${TARGET_NAME}_nn_builder_cpplint FOR_TARGETS ${TARGET_NAME}_nn_builder)
-
 # Static library used for unit tests which are always built
 
 add_library(${TARGET_NAME}_s STATIC
             $<TARGET_OBJECTS:${TARGET_NAME}_obj>
             $<TARGET_OBJECTS:${TARGET_NAME}_common_obj>
             $<TARGET_OBJECTS:${TARGET_NAME}_legacy_obj>
-            ${NN_BUILDER_LIBRARY_SRC}
             ${IE_STATIC_DEPENDENT_FILES})
 
 set_ie_threading_interface_for(${TARGET_NAME}_s)
@@ -232,7 +206,7 @@ endif()
 
 # export targets
 
-export(TARGETS ${TARGET_NAME} ${TARGET_NAME}_nn_builder NAMESPACE IE:: APPEND FILE "${CMAKE_BINARY_DIR}/targets.cmake")
+export(TARGETS ${TARGET_NAME} NAMESPACE IE:: APPEND FILE "${CMAKE_BINARY_DIR}/targets.cmake")
 
 configure_file(
     "${IE_MAIN_SOURCE_DIR}/cmake/config.cmake.in"
@@ -249,7 +223,7 @@ configure_file(
 add_library(xbyak INTERFACE)
 target_include_directories(xbyak INTERFACE ${IE_MAIN_SOURCE_DIR}/thirdparty/mkl-dnn/src/cpu/xbyak)
 
-ie_developer_export_targets(${TARGET_NAME} ${TARGET_NAME}_plugin_api ${TARGET_NAME}_nn_builder xbyak)
+ie_developer_export_targets(${TARGET_NAME} ${TARGET_NAME}_plugin_api xbyak)
 
 # install
 
@@ -283,7 +257,7 @@ ie_cpack_add_component(core REQUIRED DEPENDS ${core_components})
 
 install(DIRECTORY "${IE_MAIN_SOURCE_DIR}/include" DESTINATION ${IE_CPACK_IE_DIR}
         COMPONENT core)
-install(TARGETS ${TARGET_NAME} ${TARGET_NAME}_nn_builder
+install(TARGETS ${TARGET_NAME}
         RUNTIME DESTINATION ${IE_CPACK_RUNTIME_PATH} COMPONENT core
         ARCHIVE DESTINATION ${IE_CPACK_ARCHIVE_PATH} COMPONENT core
         LIBRARY DESTINATION ${IE_CPACK_LIBRARY_PATH} COMPONENT core)
@@ -293,5 +267,5 @@ install(FILES "${OpenVINO_BINARY_DIR}/share/ie_parallel.cmake"
         DESTINATION ${IE_CPACK_IE_DIR}/share
         COMPONENT core)
 install(FILES $<TARGET_FILE_DIR:${TARGET_NAME}>/plugins.xml
-        DESTINATION ${IE_CPACK_LIBRARY_PATH}
+        DESTINATION ${IE_CPACK_RUNTIME_PATH}
         COMPONENT core)
diff --git a/inference-engine/src/inference_engine/builders/ie_argmax_layer.cpp b/inference-engine/src/inference_engine/builders/ie_argmax_layer.cpp
deleted file mode 100644 (file)
index e85502e..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_argmax_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::ArgMaxLayer::ArgMaxLayer(const std::string& name): LayerDecorator("ArgMax", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(1);
-}
-
-Builder::ArgMaxLayer::ArgMaxLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("ArgMax");
-}
-
-Builder::ArgMaxLayer::ArgMaxLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("ArgMax");
-}
-
-Builder::ArgMaxLayer& Builder::ArgMaxLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::ArgMaxLayer::getPort() const {
-    return getLayer()->getInputPorts()[0];
-}
-
-Builder::ArgMaxLayer& Builder::ArgMaxLayer::setPort(const Port &port) {
-    getLayer()->getInputPorts()[0] = port;
-    getLayer()->getOutputPorts()[0] = port;
-    return *this;
-}
-
-int Builder::ArgMaxLayer::getAxis() const {
-    return getLayer()->getParameters().at("axis");
-}
-Builder::ArgMaxLayer& Builder::ArgMaxLayer::setAxis(int axis) {
-    getLayer()->getParameters()["axis"] = axis;
-    return *this;
-}
-size_t Builder::ArgMaxLayer::getTopK() const {
-    return getLayer()->getParameters().at("top_k");
-}
-Builder::ArgMaxLayer& Builder::ArgMaxLayer::setTopK(size_t topK) {
-    getLayer()->getParameters()["top_k"] = topK;
-    return *this;
-}
-size_t Builder::ArgMaxLayer::getOutMaxVal() const {
-    return getLayer()->getParameters().at("out_max_val");
-}
-Builder::ArgMaxLayer& Builder::ArgMaxLayer::setOutMaxVal(size_t outMaxVal) {
-    getLayer()->getParameters()["out_max_val"] = outMaxVal;
-    return *this;
-}
-
-REG_VALIDATOR_FOR(ArgMax, [] (const InferenceEngine::Builder::Layer::CPtr& input_layer, bool partial) {
-    if (!input_layer->getInputPorts().empty() &&
-        !input_layer->getOutputPorts().empty() &&
-        !input_layer->getInputPorts()[0].shape().empty() &&
-        !input_layer->getOutputPorts()[0].shape().empty() &&
-        input_layer->getInputPorts()[0].shape() != input_layer->getOutputPorts()[0].shape()) {
-        THROW_IE_EXCEPTION << "Input and output ports should be equal";
-    }
-    Builder::ArgMaxLayer layer(input_layer);
-    if (layer.getAxis() > 1) {
-        THROW_IE_EXCEPTION << "axis supports only 0 and 1 values.";
-    }
-    if (layer.getOutMaxVal() > 1) {
-        THROW_IE_EXCEPTION << "OutMaxVal supports only 0 and 1 values.";
-    }
-});
-
-REG_CONVERTER_FOR(ArgMax, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["axis"] = cnnLayer->GetParamAsInt("axis");
-    layer.getParameters()["top_k"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("top_k"));
-    layer.getParameters()["out_max_val"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("out_max_val"));
-});
-
-
diff --git a/inference-engine/src/inference_engine/builders/ie_batch_normalization_layer.cpp b/inference-engine/src/inference_engine/builders/ie_batch_normalization_layer.cpp
deleted file mode 100644 (file)
index 1296444..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_batch_normalization_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::BatchNormalizationLayer::BatchNormalizationLayer(const std::string& name): LayerDecorator("BatchNormalization", name) {
-    getLayer()->getInputPorts().resize(3);
-    getLayer()->getInputPorts()[1].setParameter("type", "weights");
-    getLayer()->getInputPorts()[2].setParameter("type", "biases");
-    getLayer()->getOutputPorts().resize(1);
-    setEpsilon(0.00000001f);
-}
-
-Builder::BatchNormalizationLayer::BatchNormalizationLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("BatchNormalization");
-}
-
-Builder::BatchNormalizationLayer::BatchNormalizationLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("BatchNormalization");
-}
-
-Builder::BatchNormalizationLayer& Builder::BatchNormalizationLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::BatchNormalizationLayer::getPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::BatchNormalizationLayer& Builder::BatchNormalizationLayer::setPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-
-float Builder::BatchNormalizationLayer::getEpsilon() const {
-    return getLayer()->getParameters().at("epsilon");
-}
-Builder::BatchNormalizationLayer& Builder::BatchNormalizationLayer::setEpsilon(float eps) {
-    getLayer()->getParameters()["epsilon"] = eps;
-    return *this;
-}
-
-REG_VALIDATOR_FOR(BatchNormalization, [](const Builder::Layer::CPtr& layer, bool partial)  {
-    Builder::BatchNormalizationLayer batchNormBuilder(layer);
-    if (partial)
-        return;
-    auto weights = layer->getInputPorts()[1].getData()->getData();
-    auto biases = layer->getInputPorts()[2].getData()->getData();
-    if (!weights || weights->cbuffer() == nullptr || !biases || biases->cbuffer() == nullptr)
-        THROW_IE_EXCEPTION << "Cannot create BatchNormalization layer! Weights and biases are required!";
-});
-
-REG_CONVERTER_FOR(BatchNormalization, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["epsilon"] = cnnLayer->GetParamAsFloat("epsilon");
-});
\ No newline at end of file
diff --git a/inference-engine/src/inference_engine/builders/ie_clamp_layer.cpp b/inference-engine/src/inference_engine/builders/ie_clamp_layer.cpp
deleted file mode 100644 (file)
index 02289b9..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_clamp_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::ClampLayer::ClampLayer(const std::string& name): LayerDecorator("Clamp", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(1);
-    setMinValue(0.0f);
-    setMaxValue(1.0f);
-}
-
-Builder::ClampLayer::ClampLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("Clamp");
-}
-
-Builder::ClampLayer::ClampLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("Clamp");
-}
-
-Builder::ClampLayer& Builder::ClampLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::ClampLayer::getPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::ClampLayer& Builder::ClampLayer::setPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-
-float Builder::ClampLayer::getMaxValue() const {
-    return getLayer()->getParameters().at("max");
-}
-
-Builder::ClampLayer& Builder::ClampLayer::setMaxValue(float maxValue) {
-    getLayer()->getParameters()["max"] = maxValue;
-    return *this;
-}
-
-float Builder::ClampLayer::getMinValue() const {
-    return getLayer()->getParameters().at("min");
-}
-
-Builder::ClampLayer& Builder::ClampLayer::setMinValue(float minValue) {
-    getLayer()->getParameters()["min"] = minValue;
-    return *this;
-}
-
-REG_VALIDATOR_FOR(Clamp, [](const InferenceEngine::Builder::Layer::CPtr& input_layer, bool partial) {
-    Builder::ClampLayer layer(input_layer);
-    if (layer.getMinValue() > layer.getMaxValue()) {
-        THROW_IE_EXCEPTION << "MinValue should be less or equal MaxValue";
-    }
-    if (!input_layer->getInputPorts().empty() &&
-        !input_layer->getOutputPorts().empty() &&
-        !input_layer->getInputPorts()[0].shape().empty() &&
-        !input_layer->getOutputPorts()[0].shape().empty() &&
-        input_layer->getInputPorts()[0].shape() != input_layer->getOutputPorts()[0].shape()) {
-        THROW_IE_EXCEPTION << "Input and output ports should be equal";
-    }
-});
-
-REG_CONVERTER_FOR(Clamp, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["max"] = cnnLayer->GetParamAsFloat("max", 0);
-    layer.getParameters()["min"] = cnnLayer->GetParamAsFloat("min", 0);
-});
diff --git a/inference-engine/src/inference_engine/builders/ie_concat_layer.cpp b/inference-engine/src/inference_engine/builders/ie_concat_layer.cpp
deleted file mode 100644 (file)
index 5e92fc0..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_concat_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::ConcatLayer::ConcatLayer(const std::string& name): LayerDecorator("Concat", name) {
-    getLayer()->getOutputPorts().resize(1);
-    setAxis(1);
-}
-
-Builder::ConcatLayer::ConcatLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("Concat");
-}
-
-Builder::ConcatLayer::ConcatLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("Concat");
-}
-
-Builder::ConcatLayer& Builder::ConcatLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::ConcatLayer::getOutputPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::ConcatLayer& Builder::ConcatLayer::setOutputPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    return *this;
-}
-
-const std::vector<Port>& Builder::ConcatLayer::getInputPorts() const {
-    return getLayer()->getInputPorts();
-}
-
-Builder::ConcatLayer& Builder::ConcatLayer::setInputPorts(const std::vector<Port>& ports) {
-    getLayer()->getInputPorts() = ports;
-    return *this;
-}
-
-size_t Builder::ConcatLayer::getAxis() const {
-    return getLayer()->getParameters().at("axis");
-}
-
-Builder::ConcatLayer& Builder::ConcatLayer::setAxis(size_t axis) {
-    getLayer()->getParameters()["axis"] = axis;
-    return *this;
-}
-
-REG_VALIDATOR_FOR(Concat, [] (const InferenceEngine::Builder::Layer::CPtr &input_layer, bool partial) {
-    if (partial) {
-        return;
-    }
-    Builder::ConcatLayer layer(input_layer);
-    if (layer.getInputPorts().size() < 1) {
-        THROW_IE_EXCEPTION << "Layer " << layer.getName() << " contains incorrect input ports. "
-                           << "It takes at least two Blobs";
-    }
-    for (size_t i = 1; i < layer.getInputPorts().size(); ++i) {
-        if (layer.getInputPorts()[i - 1].shape().size() != layer.getInputPorts()[i].shape().size()) {
-            THROW_IE_EXCEPTION << "Layer " << layer.getName() << " contains incorrect input ports. "
-                               << "It should have equal number of dimensions";
-        }
-    }
-    if (layer.getInputPorts()[0].shape().size() != layer.getOutputPort().shape().size()) {
-        THROW_IE_EXCEPTION << "Layer " << layer.getName() << " contains incorrect input and output ports "
-                           << "It should have equal number of dimensions";
-    }
-    if (layer.getAxis() >= layer.getOutputPort().shape().size()) {
-        THROW_IE_EXCEPTION << "Layer " << layer.getName() << "contains incorrect axis. "
-                           << "It should be >= 0 and < number of port's dimensions.";
-    }
-    for (size_t i = 0; i < layer.getOutputPort().shape().size(); ++i) {
-        if (i == layer.getAxis()) {
-            size_t sumInputDimensions = 0;
-            for (const Port& port : layer.getInputPorts()) {
-                sumInputDimensions += port.shape()[i];
-            }
-            if (sumInputDimensions != layer.getOutputPort().shape()[i]) {
-                THROW_IE_EXCEPTION << "Layer " << layer.getName() << " contains incorrect input and output ports "
-                                   << "Sum of input port's dimensions in the given axis should be equal to output ports dimension in the same axis.";
-            }
-        } else {
-            for (const Port& port : layer.getInputPorts()) {
-                if (port.shape()[i] != layer.getOutputPort().shape()[i]) {
-                    THROW_IE_EXCEPTION << "Layer " << layer.getName() << " contains incorrect input and output ports. "
-                                       << "It should have equal dimensions in axis different from given";
-                }
-            }
-        }
-    }
-});
-
-REG_CONVERTER_FOR(Concat, [] (const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["axis"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("axis", 1));
-});
-
diff --git a/inference-engine/src/inference_engine/builders/ie_convolution_layer.cpp b/inference-engine/src/inference_engine/builders/ie_convolution_layer.cpp
deleted file mode 100644 (file)
index a94d14f..0000000
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_convolution_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <vector>
-#include <string>
-#include <limits>
-
-using namespace InferenceEngine;
-
-Builder::ConvolutionLayer::ConvolutionLayer(const std::string& name): LayerDecorator("Convolution", name) {
-    getLayer()->getInputPorts().resize(3);
-    getLayer()->getInputPorts()[1].setParameter("type", "weights");
-    getLayer()->getInputPorts()[2].setParameter("type", "biases");
-    getLayer()->getOutputPorts().resize(1);
-    setGroup(1);
-    setKernel({});
-    setOutDepth(0);
-    setStrides({});
-    setDilation({});
-    setPaddingsEnd({});
-    setPaddingsBegin({});
-}
-
-Builder::ConvolutionLayer::ConvolutionLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("Convolution");
-}
-
-Builder::ConvolutionLayer::ConvolutionLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("Convolution");
-}
-
-Builder::ConvolutionLayer &Builder::ConvolutionLayer::setName(const std::string &name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::ConvolutionLayer::getInputPort() const {
-    return getLayer()->getInputPorts()[0];
-}
-
-Builder::ConvolutionLayer& Builder::ConvolutionLayer::setInputPort(const Port& port) {
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-
-const Port& Builder::ConvolutionLayer::getOutputPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::ConvolutionLayer& Builder::ConvolutionLayer::setOutputPort(const Port& port) {
-    getLayer()->getOutputPorts()[0] = port;
-    return *this;
-}
-
-const std::vector<size_t> Builder::ConvolutionLayer::getKernel() const {
-    return getLayer()->getParameters().at("kernel");
-}
-Builder::ConvolutionLayer& Builder::ConvolutionLayer::setKernel(const std::vector<size_t>& kernel) {
-    getLayer()->getParameters()["kernel"] = kernel;
-    return *this;
-}
-
-const std::vector<size_t> Builder::ConvolutionLayer::getStrides() const {
-    return getLayer()->getParameters().at("strides");
-}
-Builder::ConvolutionLayer& Builder::ConvolutionLayer::setStrides(const std::vector<size_t>& strides) {
-    getLayer()->getParameters()["strides"] = strides;
-    return *this;
-}
-
-const std::vector<size_t> Builder::ConvolutionLayer::getDilation() const {
-    return getLayer()->getParameters().at("dilations");
-}
-Builder::ConvolutionLayer& Builder::ConvolutionLayer::setDilation(const std::vector<size_t>& dilation) {
-    getLayer()->getParameters()["dilations"] = dilation;
-    return *this;
-}
-
-const std::vector<size_t> Builder::ConvolutionLayer::getPaddingsBegin() const {
-    return getLayer()->getParameters().at("pads_begin");
-}
-Builder::ConvolutionLayer& Builder::ConvolutionLayer::setPaddingsBegin(const std::vector<size_t>& paddings) {
-    getLayer()->getParameters()["pads_begin"] = paddings;
-    return *this;
-}
-
-const std::vector<size_t> Builder::ConvolutionLayer::getPaddingsEnd() const {
-    return getLayer()->getParameters().at("pads_end");
-}
-Builder::ConvolutionLayer& Builder::ConvolutionLayer::setPaddingsEnd(const std::vector<size_t>& paddings) {
-    getLayer()->getParameters()["pads_end"] = paddings;
-    return *this;
-}
-
-size_t Builder::ConvolutionLayer::getGroup() const {
-    return getLayer()->getParameters().at("group");
-}
-Builder::ConvolutionLayer& Builder::ConvolutionLayer::setGroup(size_t group) {
-    getLayer()->getParameters()["group"] = group;
-    return *this;
-}
-
-size_t Builder::ConvolutionLayer::getOutDepth() const {
-    return getLayer()->getParameters().at("output");
-}
-Builder::ConvolutionLayer& Builder::ConvolutionLayer::setOutDepth(size_t outDepth) {
-    getLayer()->getParameters()["output"] = outDepth;
-    return *this;
-}
-
-REG_VALIDATOR_FOR(Convolution, [] (const InferenceEngine::Builder::Layer::CPtr& layer, bool partial) {
-    // WA for old IRs
-    if (layer->getParameters().find("kernel") == layer->getParameters().end() &&
-        layer->getParameters().find("kernel-x") != layer->getParameters().end() &&
-        layer->getParameters().find("kernel-y") != layer->getParameters().end())
-        return;
-
-    Builder::ConvolutionLayer convBuilder(layer);
-    std::vector<size_t> l_kernel = convBuilder.getKernel();
-    std::vector<size_t> l_dilation = convBuilder.getDilation();
-    std::vector<size_t> l_paddingBegin = convBuilder.getPaddingsBegin();
-    std::vector<size_t> l_paddingEnd = convBuilder.getPaddingsEnd();
-    std::vector<size_t> l_strides = convBuilder.getStrides();
-
-    if (l_paddingBegin.empty() && !l_kernel.empty())
-        l_paddingBegin.resize(l_kernel.size(), 0);
-    if (l_paddingEnd.empty() && !l_kernel.empty())
-        l_paddingEnd.resize(l_kernel.size(), 0);
-    if (l_dilation.empty() && !l_kernel.empty())
-        l_dilation.resize(l_kernel.size(), 1);
-    if (l_strides.empty() && !l_kernel.empty())
-        l_strides.resize(l_kernel.size(), 1);
-
-    if (l_kernel.empty()) {
-        THROW_IE_EXCEPTION << "Kernel is empty!";
-    }
-
-    if (l_paddingBegin.size() != l_paddingEnd.size()) {
-        THROW_IE_EXCEPTION << "Padding_begin dimension is not equal to padding_end dimension";
-    }
-
-    if (!l_paddingBegin.empty() && l_kernel.size() != l_paddingBegin.size()) {
-        THROW_IE_EXCEPTION << "Padding dimension is not equal to kernel dimension";
-    }
-
-    if (l_kernel.size() != l_strides.size()) {
-        THROW_IE_EXCEPTION << "Stride dimension is not equal to kernel dimension";
-    }
-
-    if (!l_dilation.empty() && l_kernel.size() != l_dilation.size()) {
-        THROW_IE_EXCEPTION << "Dilation dimension is not equal to kernel dimension";
-    }
-
-    if (convBuilder.getOutDepth() == 0) {
-        THROW_IE_EXCEPTION << "OutDepth parameter should be more than 0";
-    }
-
-    for (size_t kernel_dim : l_kernel) {
-        if (kernel_dim == 0) {
-            THROW_IE_EXCEPTION << "Kernel dimensions should be more than 0";
-        }
-    }
-
-    for (size_t i_stride : l_strides) {
-        if (i_stride == 0) {
-            THROW_IE_EXCEPTION << "Strides should be more than 0";
-        }
-    }
-
-    for (size_t dil : l_dilation) {
-        if (dil == 0)
-            THROW_IE_EXCEPTION << "Dilation should be more than 0";
-    }
-
-    if (!convBuilder.getGroup())
-        THROW_IE_EXCEPTION << "Group should be more than 0";
-
-    if (convBuilder.getInputPort().shape().empty())
-        return;
-
-    const size_t IC = convBuilder.getInputPort().shape()[1];
-    if (IC % convBuilder.getGroup())
-        THROW_IE_EXCEPTION << "Number of input channels (" << IC <<
-                           ") is not divided by group number (" << convBuilder.getGroup() << ")";
-
-    size_t weight_size = convBuilder.getOutDepth() * IC / convBuilder.getGroup();
-    for (size_t kernel_dim : l_kernel) {
-        if (0 != kernel_dim && weight_size > std::numeric_limits<size_t>::max() / kernel_dim) {
-            THROW_IE_EXCEPTION << "Weight size exceeds the size_t max";
-        }
-        weight_size *= kernel_dim;
-    }
-
-    if (partial)
-        return;
-
-    const auto weights = layer->getInputPorts()[1].getData()->getData();
-    if (weights->size() != weight_size) {
-        THROW_IE_EXCEPTION << "Weight size is not correct!";
-    }
-
-    const auto biases = layer->getInputPorts()[2].getData()->getData();
-    if (biases && biases->cbuffer() && biases->size() != convBuilder.getOutDepth())
-        THROW_IE_EXCEPTION << "Biases size is incorrect!";
-});
-
-REG_CONVERTER_FOR(Convolution, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    // WA for old IRs
-    if (cnnLayer->params.find("kernel") == cnnLayer->params.end() &&
-        cnnLayer->params.find("kernel-x") != cnnLayer->params.end() &&
-        cnnLayer->params.find("kernel-y") != cnnLayer->params.end())
-        return;
-
-    std::vector<unsigned int> tmp = cnnLayer->GetParamAsUInts("kernel");
-    std::vector<size_t> cur(tmp.size());
-    for (size_t i = 0; i < tmp.size(); ++i) {
-        cur[i] = static_cast<size_t>(tmp[i]);
-    }
-    layer.getParameters()["kernel"] = cur;
-
-    tmp = cnnLayer->GetParamAsUInts("strides");
-    cur.resize(tmp.size());
-    for (size_t i = 0; i < tmp.size(); ++i) {
-        cur[i] = static_cast<size_t>(tmp[i]);
-    }
-    layer.getParameters()["strides"] = cur;
-
-    tmp = cnnLayer->GetParamAsUInts("dilations");
-    cur.resize(tmp.size());
-    for (size_t i = 0; i < tmp.size(); ++i) {
-        cur[i] = static_cast<size_t>(tmp[i]);
-    }
-    layer.getParameters()["dilations"] = cur;
-
-    tmp = cnnLayer->GetParamAsUInts("pads_begin");
-    cur.resize(tmp.size());
-    for (size_t i = 0; i < tmp.size(); ++i) {
-        cur[i] = static_cast<size_t>(tmp[i]);
-    }
-    layer.getParameters()["pads_begin"] = cur;
-
-    tmp = cnnLayer->GetParamAsUInts("pads_end");
-    cur.resize(tmp.size());
-    for (size_t i = 0; i < tmp.size(); ++i) {
-        cur[i] = static_cast<size_t>(tmp[i]);
-    }
-    layer.getParameters()["pads_end"] = cur;
-
-    layer.getParameters()["group"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("group"));
-    layer.getParameters()["output"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("output"));
-});
diff --git a/inference-engine/src/inference_engine/builders/ie_crop_layer.cpp b/inference-engine/src/inference_engine/builders/ie_crop_layer.cpp
deleted file mode 100644 (file)
index 9ae5c8a..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_crop_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::CropLayer::CropLayer(const std::string& name): LayerDecorator("Crop", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(2);
-}
-
-Builder::CropLayer::CropLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("Crop");
-}
-
-Builder::CropLayer::CropLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("Crop");
-}
-
-Builder::CropLayer& Builder::CropLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const std::vector<Port>& Builder::CropLayer::getInputPorts() const {
-    return getLayer()->getInputPorts();
-}
-
-Builder::CropLayer& Builder::CropLayer::setInputPorts(const std::vector<Port>& ports) {
-    getLayer()->getInputPorts() = ports;
-    return *this;
-}
-
-const Port& Builder::CropLayer::getOutputPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::CropLayer& Builder::CropLayer::setOutputPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    return *this;
-}
-
-const std::vector<size_t> Builder::CropLayer::getAxis() const {
-    return getLayer()->getParameters().at("axis");
-}
-
-Builder::CropLayer& Builder::CropLayer::setAxis(const std::vector<size_t>& axis) {
-    getLayer()->getParameters()["axis"] = axis;
-    return *this;
-}
-
-const std::vector<size_t> Builder::CropLayer::getOffset() const {
-    return getLayer()->getParameters().at("offset");
-}
-
-Builder::CropLayer& Builder::CropLayer::setOffset(const std::vector<size_t>& offsets) {
-    getLayer()->getParameters()["offset"] = offsets;
-    return *this;
-}
-
-REG_VALIDATOR_FOR(Crop, [] (const InferenceEngine::Builder::Layer::CPtr& input_layer, bool partial) {
-    if (input_layer->getInputPorts().size() != 2) {
-        THROW_IE_EXCEPTION << "Incorrect parameters for getLayer() " << input_layer->getName()
-                           << " should have 2 input ports.";
-    }
-    if (input_layer->getOutputPorts().size() != 1) {
-        THROW_IE_EXCEPTION << "Incorrect parameters for getLayer() " << input_layer->getName()
-                           << " should have 1 output port";
-    }
-    Builder::CropLayer layer(input_layer);
-    if (layer.getAxis().size() != layer.getOffset().size()) {
-        THROW_IE_EXCEPTION <<  "Incorrect parameters for getLayer() " << input_layer->getName()
-                           << ". Axis size must be equal to the size of Offset";
-    }
-    for (size_t i = 0; i < layer.getAxis().size(); ++i) {
-        const size_t index = layer.getAxis()[i];
-        if (index >= layer.getInputPorts()[0].shape().size()) {
-            THROW_IE_EXCEPTION << "Incorrect parameters for getLayer() " << input_layer->getName()
-                               << ". Each element of Axis should be less than input shape length";
-        }
-        if (layer.getOutputPort().shape()[index] != layer.getInputPorts()[1].shape()[index]) {
-            THROW_IE_EXCEPTION <<  "Incorrect parameters for getLayer() " << input_layer->getName()
-                               << ". The second input shapes should have the same value as the output shapes in the indexes contained in Axis";
-        }
-        if (layer.getInputPorts()[0].shape()[index] < layer.getOutputPort().shape()[index] + layer.getOffset()[i]) {
-            THROW_IE_EXCEPTION <<  "Incorrect parameters for getLayer() " << input_layer->getName()
-                               << ". The sum of offset and output shape in the " << i + 1 << " dimension is bigger then input shape size";
-        }
-    }
-});
-
-REG_CONVERTER_FOR(Crop, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    std::vector<unsigned int> tmp = cnnLayer->GetParamAsUInts("axis");
-    layer.getParameters()["axis"] = std::vector<size_t>(tmp.size());
-    for (size_t i = 0; i < tmp.size(); ++i) {
-        layer.getParameters()["axis"].as<std::vector<size_t>>()[i] = static_cast<size_t>(tmp[i]);
-    }
-
-    tmp = cnnLayer->GetParamAsUInts("offset");
-    layer.getParameters()["offset"] = std::vector<size_t>(tmp.size());
-    for (size_t i = 0; i < tmp.size(); ++i) {
-        layer.getParameters()["offset"].as<std::vector<size_t>>()[i] = static_cast<size_t>(tmp[i]);
-    }
-});
diff --git a/inference-engine/src/inference_engine/builders/ie_ctc_greedy_decoder_layer.cpp b/inference-engine/src/inference_engine/builders/ie_ctc_greedy_decoder_layer.cpp
deleted file mode 100644 (file)
index 665a5a1..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_ctc_greedy_decoder_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::CTCGreedyDecoderLayer::CTCGreedyDecoderLayer(const std::string& name): LayerDecorator("CTCGreedyDecoder", name) {
-    getLayer()->getOutputPorts().resize(1);
-}
-
-Builder::CTCGreedyDecoderLayer::CTCGreedyDecoderLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("CTCGreedyDecoder");
-}
-
-Builder::CTCGreedyDecoderLayer::CTCGreedyDecoderLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("CTCGreedyDecoder");
-}
-
-Builder::CTCGreedyDecoderLayer& Builder::CTCGreedyDecoderLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-const std::vector<Port>& Builder::CTCGreedyDecoderLayer::getInputPorts() const {
-    return getLayer()->getInputPorts();
-}
-Builder::CTCGreedyDecoderLayer& Builder::CTCGreedyDecoderLayer::setInputPorts(const std::vector<Port>& ports) {
-    getLayer()->getInputPorts() = ports;
-    return *this;
-}
-const Port& Builder::CTCGreedyDecoderLayer::getOutputPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-Builder::CTCGreedyDecoderLayer& Builder::CTCGreedyDecoderLayer::setOutputPort(const Port& port) {
-    getLayer()->getOutputPorts()[0] = port;
-    return *this;
-}
-bool Builder::CTCGreedyDecoderLayer::getCTCMergeRepeated() const {
-    return getLayer()->getParameters().at("ctc_merge_repeated");
-}
-Builder::CTCGreedyDecoderLayer& Builder::CTCGreedyDecoderLayer::setCTCMergeRepeated(bool flag) {
-    getLayer()->getParameters()["ctc_merge_repeated"] = flag;
-    return *this;
-}
-
-REG_VALIDATOR_FOR(CTCGreedyDecoder, [](const InferenceEngine::Builder::Layer::CPtr& input_layer, bool partial) {
-    Builder::CTCGreedyDecoderLayer layer(input_layer);
-
-    if (layer.getInputPorts().empty() || layer.getInputPorts().size() > 2) {
-        THROW_IE_EXCEPTION << "Input ports are wrong in layer " << layer.getName() <<
-                           ". There are should be 1 or 2 input ports";
-    }
-});
-
-REG_CONVERTER_FOR(CTCGreedyDecoder, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["ctc_merge_repeated"] = cnnLayer->GetParamAsBool("ctc_merge_repeated", false);
-});
diff --git a/inference-engine/src/inference_engine/builders/ie_deconvolution_layer.cpp b/inference-engine/src/inference_engine/builders/ie_deconvolution_layer.cpp
deleted file mode 100644 (file)
index 00e87d6..0000000
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_deconvolution_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-#include <limits>
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::DeconvolutionLayer::DeconvolutionLayer(const std::string& name): ConvolutionLayer(name) {
-    getLayer()->setType("Deconvolution");
-}
-Builder::DeconvolutionLayer::DeconvolutionLayer(const Layer::Ptr& layer): ConvolutionLayer(layer->getName()) {
-    this->getLayer() = layer;
-    checkType("Deconvolution");
-}
-Builder::DeconvolutionLayer::DeconvolutionLayer(const Layer::CPtr& layer): ConvolutionLayer(layer->getName()) {
-    this->getLayer().reset();
-    cLayer = layer;
-    checkType("Deconvolution");
-}
-
-REG_VALIDATOR_FOR(Deconvolution, [] (const InferenceEngine::Builder::Layer::CPtr& layer, bool partial) {
-    // WA for old IRs
-    if (layer->getParameters().find("kernel") == layer->getParameters().end() &&
-        layer->getParameters().find("kernel-x") != layer->getParameters().end() &&
-        layer->getParameters().find("kernel-y") != layer->getParameters().end())
-        return;
-    Builder::DeconvolutionLayer deconvBuilder(layer);
-    std::vector<size_t> l_kernel = deconvBuilder.getKernel();
-    std::vector<size_t> l_dilation = deconvBuilder.getDilation();
-    std::vector<size_t> l_paddingBegin = deconvBuilder.getPaddingsBegin();
-    std::vector<size_t> l_paddingEnd = deconvBuilder.getPaddingsEnd();
-    std::vector<size_t> l_strides = deconvBuilder.getStrides();
-
-    if (l_paddingBegin.empty() && !l_kernel.empty())
-        l_paddingBegin.resize(l_kernel.size(), 0);
-    if (l_paddingEnd.empty() && !l_kernel.empty())
-        l_paddingEnd.resize(l_kernel.size(), 0);
-    if (l_dilation.empty() && !l_kernel.empty())
-        l_dilation.resize(l_kernel.size(), 1);
-    if (l_strides.empty() && !l_kernel.empty())
-        l_strides.resize(l_kernel.size(), 1);
-
-    if (l_kernel.empty()) {
-        THROW_IE_EXCEPTION << "Kernel is empty!";
-    }
-
-    if (l_paddingBegin.size() != l_paddingEnd.size()) {
-        THROW_IE_EXCEPTION << "Padding_begin dimension is not equal to padding_end dimension";
-    }
-
-    if (!l_paddingBegin.empty() && l_kernel.size() != l_paddingBegin.size()) {
-        THROW_IE_EXCEPTION << "Padding dimension is not equal to kernel dimension";
-    }
-
-    if (l_kernel.size() != l_strides.size()) {
-        THROW_IE_EXCEPTION << "Stride dimension is not equal to kernel dimension";
-    }
-
-    if (!l_dilation.empty() && l_kernel.size() != l_dilation.size()) {
-        THROW_IE_EXCEPTION << "Dilation dimension is not equal to kernel dimension";
-    }
-
-    if (deconvBuilder.getOutDepth() == 0) {
-        THROW_IE_EXCEPTION << "OutDepth parameter should be more than 0";
-    }
-
-    for (size_t kernel_dim : l_kernel) {
-        if (kernel_dim == 0) {
-            THROW_IE_EXCEPTION << "Kernel dimensions should be more than 0";
-        }
-    }
-
-    for (size_t i_stride : l_strides) {
-        if (i_stride == 0) {
-            THROW_IE_EXCEPTION << "Strides should be more than 0";
-        }
-    }
-
-    for (size_t dil : l_dilation) {
-        if (dil == 0)
-            THROW_IE_EXCEPTION << "Dilation should be more than 0";
-    }
-
-    if (!deconvBuilder.getGroup())
-        THROW_IE_EXCEPTION << "Group should be more than 0";
-
-    if (deconvBuilder.getInputPort().shape().empty())
-        return;
-
-    const size_t IC = deconvBuilder.getInputPort().shape()[1];
-    if (IC % deconvBuilder.getGroup())
-        THROW_IE_EXCEPTION << "Number of input channels (" << IC <<
-                           ") is not divided by group number (" << deconvBuilder.getGroup() << ")";
-
-    size_t weight_size = deconvBuilder.getOutDepth() * IC / deconvBuilder.getGroup();
-    for (size_t kernel_dim : l_kernel) {
-        if (weight_size > std::numeric_limits<size_t>::max() / kernel_dim) {
-            THROW_IE_EXCEPTION << "Weight size exceeds the size_t max";
-        }
-        weight_size *= kernel_dim;
-    }
-
-    if (partial)
-        return;
-
-    const auto weights = layer->getInputPorts()[1].getData()->getData();
-    if (weights->size() != weight_size) {
-        THROW_IE_EXCEPTION << "Weight size is not correct!";
-    }
-
-    const auto biases = layer->getInputPorts()[2].getData()->getData();
-    if (biases && biases->cbuffer() && biases->size() != deconvBuilder.getOutDepth())
-        THROW_IE_EXCEPTION << "Biases size is incorrect!";
-});
-
-REG_CONVERTER_FOR(Deconvolution, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    // WA for old IRs
-    if (cnnLayer->params.find("kernel") == cnnLayer->params.end() &&
-        cnnLayer->params.find("kernel-x") != cnnLayer->params.end() &&
-        cnnLayer->params.find("kernel-y") != cnnLayer->params.end())
-        return;
-    std::vector<unsigned int> tmp = cnnLayer->GetParamAsUInts("kernel");
-    std::vector<size_t> cur(tmp.size());
-    for (size_t i = 0; i < tmp.size(); ++i) {
-        cur[i] = static_cast<size_t>(tmp[i]);
-    }
-    layer.getParameters()["kernel"] = cur;
-
-    tmp = cnnLayer->GetParamAsUInts("strides");
-    cur.resize(tmp.size());
-    for (size_t i = 0; i < tmp.size(); ++i) {
-        cur[i] = static_cast<size_t>(tmp[i]);
-    }
-    layer.getParameters()["strides"] = cur;
-
-    tmp = cnnLayer->GetParamAsUInts("dilations");
-    cur.resize(tmp.size());
-    for (size_t i = 0; i < tmp.size(); ++i) {
-        cur[i] = static_cast<size_t>(tmp[i]);
-    }
-    layer.getParameters()["dilations"] = cur;
-
-    tmp = cnnLayer->GetParamAsUInts("pads_begin");
-    cur.resize(tmp.size());
-    for (size_t i = 0; i < tmp.size(); ++i) {
-        cur[i] = static_cast<size_t>(tmp[i]);
-    }
-    layer.getParameters()["pads_begin"] = cur;
-
-    tmp = cnnLayer->GetParamAsUInts("pads_end");
-    cur.resize(tmp.size());
-    for (size_t i = 0; i < tmp.size(); ++i) {
-        cur[i] = static_cast<size_t>(tmp[i]);
-    }
-    layer.getParameters()["pads_end"] = cur;
-
-    layer.getParameters()["group"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("group"));
-    layer.getParameters()["output"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("output"));
-});
\ No newline at end of file
diff --git a/inference-engine/src/inference_engine/builders/ie_deformable_convolution_layer.cpp b/inference-engine/src/inference_engine/builders/ie_deformable_convolution_layer.cpp
deleted file mode 100644 (file)
index b8f6294..0000000
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_deformable_convolution_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-#include <limits>
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::DeformableConvolutionLayer::DeformableConvolutionLayer(const std::string& name): ConvolutionLayer(name) {
-    getLayer()->setType("DeformableConvolution");
-    setDeformableGroup(1);
-}
-Builder::DeformableConvolutionLayer::DeformableConvolutionLayer(const Layer::Ptr& layer): ConvolutionLayer(layer->getName()) {
-    this->getLayer() = layer;
-    checkType("DeformableConvolution");
-}
-Builder::DeformableConvolutionLayer::DeformableConvolutionLayer(const Layer::CPtr& layer): ConvolutionLayer(layer->getName()) {
-    this->getLayer().reset();
-    cLayer = layer;
-    checkType("DeformableConvolution");
-}
-
-size_t Builder::DeformableConvolutionLayer::getDeformableGroup() const {
-    return getLayer()->getParameters().at("deformable_group");
-}
-Builder::DeformableConvolutionLayer& Builder::DeformableConvolutionLayer::setDeformableGroup(size_t deformableGroup) {
-    getLayer()->getParameters()["deformable_group"] = deformableGroup;
-    return *this;
-}
-
-REG_VALIDATOR_FOR(DeformableConvolution, [] (const InferenceEngine::Builder::Layer::CPtr& layer, bool partial) {
-    // WA for old IRs
-    if (layer->getParameters().find("kernel") == layer->getParameters().end() &&
-        layer->getParameters().find("kernel-x") != layer->getParameters().end() &&
-        layer->getParameters().find("kernel-y") != layer->getParameters().end())
-        return;
-    Builder::DeformableConvolutionLayer deformableConvBuilder(layer);
-    std::vector<size_t> l_kernel = deformableConvBuilder.getKernel();
-    std::vector<size_t> l_dilation = deformableConvBuilder.getDilation();
-    std::vector<size_t> l_paddingBegin = deformableConvBuilder.getPaddingsBegin();
-    std::vector<size_t> l_paddingEnd = deformableConvBuilder.getPaddingsEnd();
-    std::vector<size_t> l_strides = deformableConvBuilder.getStrides();
-
-    if (l_paddingBegin.empty() && !l_kernel.empty())
-        l_paddingBegin.resize(l_kernel.size(), 0);
-    if (l_paddingEnd.empty() && !l_kernel.empty())
-        l_paddingEnd.resize(l_kernel.size(), 0);
-    if (l_dilation.empty() && !l_kernel.empty())
-        l_dilation.resize(l_kernel.size(), 1);
-    if (l_strides.empty() && !l_kernel.empty())
-        l_strides.resize(l_kernel.size(), 1);
-
-    if (l_kernel.empty()) {
-        THROW_IE_EXCEPTION << "Kernel is empty!";
-    }
-
-    if (l_paddingBegin.size() != l_paddingEnd.size()) {
-        THROW_IE_EXCEPTION << "Padding_begin dimension is not equal to padding_end dimension";
-    }
-
-    if (!l_paddingBegin.empty() && l_kernel.size() != l_paddingBegin.size()) {
-        THROW_IE_EXCEPTION << "Padding dimension is not equal to kernel dimension";
-    }
-
-    if (l_kernel.size() != l_strides.size()) {
-        THROW_IE_EXCEPTION << "Stride dimension is not equal to kernel dimension";
-    }
-
-    if (!l_dilation.empty() && l_kernel.size() != l_dilation.size()) {
-        THROW_IE_EXCEPTION << "Dilation dimension is not equal to kernel dimension";
-    }
-
-    if (deformableConvBuilder.getOutDepth() == 0) {
-        THROW_IE_EXCEPTION << "OutDepth parameter should be more than 0";
-    }
-
-    for (size_t kernel_dim : l_kernel) {
-        if (kernel_dim == 0) {
-            THROW_IE_EXCEPTION << "Kernel dimensions should be more than 0";
-        }
-    }
-
-    for (size_t i_stride : l_strides) {
-        if (i_stride == 0) {
-            THROW_IE_EXCEPTION << "Strides should be more than 0";
-        }
-    }
-
-    for (size_t dil : l_dilation) {
-        if (dil == 0)
-            THROW_IE_EXCEPTION << "Dilation should be more than 0";
-    }
-
-    if (!deformableConvBuilder.getGroup())
-        THROW_IE_EXCEPTION << "Group should be more than 0";
-
-    if (!deformableConvBuilder.getDeformableGroup())
-        THROW_IE_EXCEPTION << "Deformable group should be more than 0";
-
-    if (deformableConvBuilder.getInputPort().shape().empty())
-        return;
-
-    const size_t IC = deformableConvBuilder.getInputPort().shape()[1];
-    if (IC % deformableConvBuilder.getGroup())
-        THROW_IE_EXCEPTION << "Number of input channels (" << IC <<
-                           ") is not divided by group number (" << deformableConvBuilder.getGroup() << ")";
-
-    size_t weight_size = deformableConvBuilder.getOutDepth() * IC / deformableConvBuilder.getGroup();
-    for (size_t kernel_dim : l_kernel) {
-        if (0 != kernel_dim && weight_size > std::numeric_limits<size_t>::max() / kernel_dim) {
-            THROW_IE_EXCEPTION << "Weight size exceeds the size_t max";
-        }
-        weight_size *= kernel_dim;
-    }
-
-    if (partial)
-        return;
-
-    const auto weights = layer->getInputPorts()[1].getData()->getData();
-    if (weights->size() != weight_size) {
-        THROW_IE_EXCEPTION << "Weight size is not correct!";
-    }
-
-    const auto biases = layer->getInputPorts()[2].getData()->getData();
-    if (biases && biases->cbuffer() && biases->size() != deformableConvBuilder.getOutDepth())
-        THROW_IE_EXCEPTION << "Biases size is incorrect!";
-});
-
-REG_CONVERTER_FOR(DeformableConvolution, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    // WA for old IRs
-    if (cnnLayer->params.find("kernel") == cnnLayer->params.end() &&
-        cnnLayer->params.find("kernel-x") != cnnLayer->params.end() &&
-        cnnLayer->params.find("kernel-y") != cnnLayer->params.end())
-        return;
-    std::vector<unsigned int> tmp = cnnLayer->GetParamAsUInts("kernel");
-    std::vector<size_t> cur(tmp.size());
-    for (size_t i = 0; i < tmp.size(); ++i) {
-        cur[i] = static_cast<size_t>(tmp[i]);
-    }
-    layer.getParameters()["kernel"] = cur;
-
-    tmp = cnnLayer->GetParamAsUInts("strides");
-    cur.resize(tmp.size());
-    for (size_t i = 0; i < tmp.size(); ++i) {
-        cur[i] = static_cast<size_t>(tmp[i]);
-    }
-    layer.getParameters()["strides"] = cur;
-
-    tmp = cnnLayer->GetParamAsUInts("dilations");
-    cur.resize(tmp.size());
-    for (size_t i = 0; i < tmp.size(); ++i) {
-        cur[i] = static_cast<size_t>(tmp[i]);
-    }
-    layer.getParameters()["dilations"] = cur;
-
-    tmp = cnnLayer->GetParamAsUInts("pads_begin");
-    cur.resize(tmp.size());
-    for (size_t i = 0; i < tmp.size(); ++i) {
-        cur[i] = static_cast<size_t>(tmp[i]);
-    }
-    layer.getParameters()["pads_begin"] = cur;
-
-    tmp = cnnLayer->GetParamAsUInts("pads_end");
-    cur.resize(tmp.size());
-    for (size_t i = 0; i < tmp.size(); ++i) {
-        cur[i] = static_cast<size_t>(tmp[i]);
-    }
-    layer.getParameters()["pads_end"] = cur;
-
-    layer.getParameters()["group"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("group"));
-    layer.getParameters()["output"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("output"));
-
-    layer.getParameters()["deformable_group"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("deformable_group"));
-});
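
For reference, a minimal usage sketch of the removed DeformableConvolutionLayer builder, reconstructed from the getters the validator above exercises. It assumes the pre-removal headers are still on the include path and that the inherited ConvolutionLayer builder exposes setters matching those getters (setKernel, setStrides, setDilation, setPaddingsBegin, setPaddingsEnd, setGroup, setOutDepth); the values are illustrative:

    #include <builders/ie_deformable_convolution_layer.hpp>

    using namespace InferenceEngine;

    Builder::DeformableConvolutionLayer makeDeformableConv() {
        Builder::DeformableConvolutionLayer conv("def_conv");
        conv.setKernel({3, 3})           // every kernel dimension must be > 0
            .setStrides({1, 1})          // must have the same rank as the kernel
            .setDilation({1, 1})
            .setPaddingsBegin({1, 1})    // begin/end paddings must match in rank
            .setPaddingsEnd({1, 1});
        conv.setGroup(1);                // must divide the input channel count
        conv.setOutDepth(64);            // must be > 0
        conv.setDeformableGroup(2);      // must be > 0 (defaults to 1)
        return conv;
    }
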
diff --git a/inference-engine/src/inference_engine/builders/ie_detection_output_layer.cpp b/inference-engine/src/inference_engine/builders/ie_detection_output_layer.cpp
deleted file mode 100644 (file)
index 4a6c105..0000000
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_detection_output_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <cfloat>
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::DetectionOutputLayer::DetectionOutputLayer(const std::string& name): LayerDecorator("DetectionOutput", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(2);
-    setBackgroudLabelId(-1);
-}
-
-Builder::DetectionOutputLayer::DetectionOutputLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("DetectionOutput");
-}
-
-Builder::DetectionOutputLayer::DetectionOutputLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("DetectionOutput");
-}
-
-Builder::DetectionOutputLayer& Builder::DetectionOutputLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const std::vector<Port>& Builder::DetectionOutputLayer::getInputPorts() const {
-    return getLayer()->getInputPorts();
-}
-
-Builder::DetectionOutputLayer& Builder::DetectionOutputLayer::setInputPorts(const std::vector<Port> &ports) {
-    if (ports.size() != 3)
-        THROW_IE_EXCEPTION << "Incorrect number of inputs for DetectionOutput getLayer().";
-    getLayer()->getInputPorts() = ports;
-    return *this;
-}
-
-const Port& Builder::DetectionOutputLayer::getOutputPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::DetectionOutputLayer& Builder::DetectionOutputLayer::setOutputPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    return *this;
-}
-
-size_t Builder::DetectionOutputLayer::getNumClasses() const {
-    return getLayer()->getParameters().at("num_classes");
-}
-Builder::DetectionOutputLayer& Builder::DetectionOutputLayer::setNumClasses(size_t num) {
-    getLayer()->getParameters()["num_classes"] = num;
-    return *this;
-}
-int Builder::DetectionOutputLayer::getBackgroudLabelId() const {
-    return getLayer()->getParameters().at("background_label_id");
-}
-Builder::DetectionOutputLayer& Builder::DetectionOutputLayer::setBackgroudLabelId(int labelId) {
-    getLayer()->getParameters()["background_label_id"] = labelId;
-    return *this;
-}
-int Builder::DetectionOutputLayer::getTopK() const {
-    return getLayer()->getParameters().at("top_k");
-}
-Builder::DetectionOutputLayer& Builder::DetectionOutputLayer::setTopK(int topK) {
-    getLayer()->getParameters()["top_k"] = topK;
-    return *this;
-}
-int Builder::DetectionOutputLayer::getKeepTopK() const {
-    return getLayer()->getParameters().at("keep_top_k");
-}
-Builder::DetectionOutputLayer& Builder::DetectionOutputLayer::setKeepTopK(int topK) {
-    getLayer()->getParameters()["keep_top_k"] = topK;
-    return *this;
-}
-int Builder::DetectionOutputLayer::getNumOrientClasses() const {
-    return getLayer()->getParameters().at("num_orient_classes");
-}
-Builder::DetectionOutputLayer& Builder::DetectionOutputLayer::setNumOrientClasses(int numClasses) {
-    getLayer()->getParameters()["num_orient_classes"] = numClasses;
-    return *this;
-}
-std::string Builder::DetectionOutputLayer::getCodeType() const {
-    return getLayer()->getParameters().at("code_type");
-}
-Builder::DetectionOutputLayer& Builder::DetectionOutputLayer::setCodeType(std::string type) {
-    getLayer()->getParameters()["code_type"] = type;
-    return *this;
-}
-int Builder::DetectionOutputLayer::getInterpolateOrientation() const {
-    return getLayer()->getParameters().at("interpolate_orientation");
-}
-Builder::DetectionOutputLayer& Builder::DetectionOutputLayer::setInterpolateOrientation(int orient) {
-    getLayer()->getParameters()["interpolate_orientation"] = orient;
-    return *this;
-}
-float Builder::DetectionOutputLayer::getNMSThreshold() const {
-    return getLayer()->getParameters().at("nms_threshold");
-}
-Builder::DetectionOutputLayer& Builder::DetectionOutputLayer::setNMSThreshold(float threshold) {
-    getLayer()->getParameters()["nms_threshold"] = threshold;
-    return *this;
-}
-float Builder::DetectionOutputLayer::getConfidenceThreshold() const {
-    return getLayer()->getParameters().at("confidence_threshold");
-}
-Builder::DetectionOutputLayer& Builder::DetectionOutputLayer::setConfidenceThreshold(float threshold) {
-    getLayer()->getParameters()["confidence_threshold"] = threshold;
-    return *this;
-}
-bool Builder::DetectionOutputLayer::getShareLocation() const {
-    return getLayer()->getParameters().at("share_location");
-}
-Builder::DetectionOutputLayer& Builder::DetectionOutputLayer::setShareLocation(bool flag) {
-    getLayer()->getParameters()["share_location"] = flag;
-    return *this;
-}
-bool Builder::DetectionOutputLayer::getVariantEncodedInTarget() const {
-    return getLayer()->getParameters().at("variance_encoded_in_target");
-}
-Builder::DetectionOutputLayer& Builder::DetectionOutputLayer::setVariantEncodedInTarget(bool flag) {
-    getLayer()->getParameters()["variance_encoded_in_target"] = flag;
-    return *this;
-}
-
-REG_VALIDATOR_FOR(DetectionOutput, [](const InferenceEngine::Builder::Layer::CPtr& input_layer, bool partial) {
-    Builder::DetectionOutputLayer layer(input_layer);
-    if (layer.getNumClasses() == 0) {
-        THROW_IE_EXCEPTION << "NumClasses parameter is wrong in layer " << layer.getName() <<
-                           ". It should be > 0.";
-    }
-    if (layer.getCodeType() != "caffe.PriorBoxParameter.CENTER_SIZE" &&
-        layer.getCodeType() != "caffe.PriorBoxParameter.CORNER") {
-        THROW_IE_EXCEPTION << "CodeType parameter is wrong in layer " << layer.getName() <<
-                           ". It should be equal to 'caffe.PriorBoxParameter.CORNER' or 'caffe.PriorBoxParameter.CENTER_SIZE'";
-    }
-    if (layer.getBackgroudLabelId() < -1) {
-        THROW_IE_EXCEPTION << "BackgroundLabelId parameter is wrong in layer " << layer.getName() <<
-                           ". It should be >= 0 if this one is an Id of existing label else it should be equal to -1";
-    }
-    if (layer.getNMSThreshold() < 0) {
-        THROW_IE_EXCEPTION << "NMSThreshold parameter is wrong in layer " << layer.getName() <<
-                           ". It should be >= 0.";
-    }
-    if (layer.getConfidenceThreshold() < 0) {
-        THROW_IE_EXCEPTION << "ConfidenceThreshold parameter is wrong in layer " << layer.getName() <<
-                           ". It should be >= 0.";
-    }
-});
-
-REG_CONVERTER_FOR(DetectionOutput, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["num_classes"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("num_classes"));
-    layer.getParameters()["background_label_id"] = cnnLayer->GetParamAsInt("background_label_id", 0);
-    layer.getParameters()["top_k"] = cnnLayer->GetParamAsInt("top_k", -1);
-    layer.getParameters()["keep_top_k"] = cnnLayer->GetParamAsInt("keep_top_k", -1);
-    layer.getParameters()["num_orient_classes"] = cnnLayer->GetParamAsInt("num_orient_classes", 0);
-    layer.getParameters()["code_type"] = cnnLayer->GetParamAsString("code_type", "caffe.PriorBoxParameter.CORNER");
-    layer.getParameters()["interpolate_orientation"] = cnnLayer->GetParamAsInt("interpolate_orientation", 1);
-    layer.getParameters()["nms_threshold"] = cnnLayer->GetParamAsFloat("nms_threshold");
-    layer.getParameters()["confidence_threshold"] = cnnLayer->GetParamAsFloat("confidence_threshold", -FLT_MAX);
-    layer.getParameters()["share_location"] = cnnLayer->GetParamAsBool("share_location", true);
-    layer.getParameters()["variance_encoded_in_target"] = cnnLayer->GetParamAsBool("variance_encoded_in_target", false);
-});
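
A similar sketch for the removed DetectionOutputLayer builder, using only setters defined above; the three input ports (location, confidence, prior boxes) and their shapes are illustrative, and Port's shape-vector constructor is assumed from the pre-removal ie_network.hpp:

    #include <builders/ie_detection_output_layer.hpp>

    using namespace InferenceEngine;

    Builder::DetectionOutputLayer makeDetectionOutput() {
        Builder::DetectionOutputLayer det("detection_out");
        det.setNumClasses(21);                                   // must be > 0
        det.setBackgroudLabelId(0);                              // >= 0, or -1 for no background class
        det.setCodeType("caffe.PriorBoxParameter.CENTER_SIZE");  // or caffe.PriorBoxParameter.CORNER
        det.setNMSThreshold(0.45f);                              // must be >= 0
        det.setConfidenceThreshold(0.01f);                       // must be >= 0
        det.setTopK(400);
        det.setKeepTopK(200);
        det.setShareLocation(true);
        // setInputPorts() accepts exactly three ports: loc, conf, priors.
        det.setInputPorts({Port({1, 16}), Port({1, 8}), Port({1, 2, 16})});
        return det;
    }
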
diff --git a/inference-engine/src/inference_engine/builders/ie_eltwise_layer.cpp b/inference-engine/src/inference_engine/builders/ie_eltwise_layer.cpp
deleted file mode 100644 (file)
index 9ca8108..0000000
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_eltwise_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::EltwiseLayer::EltwiseLayer(const std::string& name): LayerDecorator("Eltwise", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(2);
-    setEltwiseType(EltwiseType::SUM);
-}
-
-Builder::EltwiseLayer::EltwiseLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("Eltwise");
-
-    std::string operatorStr = getLayer()->getParameters()["operation"];
-    if (operatorStr == "max") {
-        type = MAX;
-    } else if (operatorStr == "sum") {
-        type = SUM;
-    } else if (operatorStr == "mul") {
-        type = MUL;
-    } else if (operatorStr == "sub") {
-        type = SUB;
-    } else if (operatorStr == "div") {
-        type = DIV;
-    } else if (operatorStr == "min") {
-        type = MIN;
-    } else if (operatorStr == "squared_diff") {
-        type = SQUARED_DIFF;
-    }
-}
-
-Builder::EltwiseLayer::EltwiseLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("Eltwise");
-
-    const auto cLayer = static_cast<const EltwiseLayer*>(this)->getLayer();
-
-    std::string operatorStr = cLayer->getParameters().at("operation");
-    if (operatorStr == "max") {
-        type = MAX;
-    } else if (operatorStr == "sum") {
-        type = SUM;
-    } else if (operatorStr == "mul") {
-        type = MUL;
-    } else if (operatorStr == "sub") {
-        type = SUB;
-    } else if (operatorStr == "div") {
-        type = DIV;
-    } else if (operatorStr == "min") {
-        type = MIN;
-    } else if (operatorStr == "squared_diff") {
-        type = SQUARED_DIFF;
-    }
-}
-
-Builder::EltwiseLayer& Builder::EltwiseLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const std::vector<Port>& Builder::EltwiseLayer::getInputPorts() const {
-    return getLayer()->getInputPorts();
-}
-
-Builder::EltwiseLayer& Builder::EltwiseLayer::setInputPorts(const std::vector<Port>& ports) {
-    getLayer()->getInputPorts() = ports;
-    return *this;
-}
-
-const Port& Builder::EltwiseLayer::getOutputPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::EltwiseLayer& Builder::EltwiseLayer::setOutputPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    return *this;
-}
-
-const std::vector<float> Builder::EltwiseLayer::getScales() const {
-    return getLayer()->getParameters().at("scales");
-}
-
-// TODO: IR doesn't contain Scales!!!
-Builder::EltwiseLayer& Builder::EltwiseLayer::setScales(const std::vector<float>& scales) {
-    getLayer()->getParameters()["scales"] = scales;
-    return *this;
-}
-
-Builder::EltwiseLayer::EltwiseType Builder::EltwiseLayer::getEltwiseType() const {
-    return type;
-}
-
-Builder::EltwiseLayer& Builder::EltwiseLayer::setEltwiseType(Builder::EltwiseLayer::EltwiseType type) {
-    this->type = type;
-    std::string operatorStr;
-    switch (type) {
-        case MAX:
-            operatorStr = "max";
-            break;
-        case SUM:
-            operatorStr = "sum";
-            break;
-        case MUL:
-            operatorStr = "mul";
-            break;
-        case SUB:
-            operatorStr = "sub";
-            break;
-        case DIV:
-            operatorStr = "div";
-            break;
-        case MIN:
-            operatorStr = "min";
-            break;
-        case SQUARED_DIFF:
-            operatorStr = "squared_diff";
-            break;
-    }
-    getLayer()->getParameters()["operation"] = operatorStr;
-    return *this;
-}
-
-REG_VALIDATOR_FOR(Eltwise, [](const InferenceEngine::Builder::Layer::CPtr& input_layer, bool partial) {
-    Builder::EltwiseLayer layer(input_layer);
-
-    if (layer.getInputPorts().size() < 2) {
-        THROW_IE_EXCEPTION << "Input ports are incorrect in the layer " << layer.getName()
-                           << ". Number of input ports should be >= 2.";
-    }
-    if (partial && (layer.getInputPorts()[0].shape().empty() || layer.getInputPorts()[1].shape().empty() ||
-            layer.getOutputPort().shape().empty()))
-        return;
-
-    if (layer.getInputPorts()[0].shape() != layer.getInputPorts()[1].shape()) {
-        THROW_IE_EXCEPTION << "Input ports are incorrect in the layer " << layer.getName()
-                           << ". They should have equal dimensions";
-    }
-
-    if (layer.getInputPorts()[0].shape() != layer.getOutputPort().shape()) {
-        THROW_IE_EXCEPTION << "Layer " << layer.getName() << " have different input and output ports. "
-                           << "They should have equal dimensions.";
-    }
-});
-
-REG_CONVERTER_FOR(Eltwise, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["scales"] = cnnLayer->GetParamAsFloats("scales", {});
-    layer.getParameters()["operation"] = cnnLayer->GetParamAsString("operation");
-});
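
A sketch for the removed EltwiseLayer builder; per the validator above, both input shapes and the output shape must match (shapes illustrative):

    #include <builders/ie_eltwise_layer.hpp>

    using namespace InferenceEngine;

    Builder::EltwiseLayer makeEltwiseSum() {
        Builder::EltwiseLayer sum("sum_node");
        sum.setEltwiseType(Builder::EltwiseLayer::SUM);  // stored as operation="sum"
        sum.setInputPorts({Port({1, 3, 224, 224}), Port({1, 3, 224, 224})});
        sum.setOutputPort(Port({1, 3, 224, 224}));       // must equal the input shapes
        return sum;
    }
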
diff --git a/inference-engine/src/inference_engine/builders/ie_elu_layer.cpp b/inference-engine/src/inference_engine/builders/ie_elu_layer.cpp
deleted file mode 100644 (file)
index d4dfe91..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_elu_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::ELULayer::ELULayer(const std::string& name): LayerDecorator("ELU", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(1);
-    setAlpha(1);
-}
-
-Builder::ELULayer::ELULayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("ELU");
-}
-
-Builder::ELULayer::ELULayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("ELU");
-}
-
-Builder::ELULayer& Builder::ELULayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::ELULayer::getPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::ELULayer& Builder::ELULayer::setPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-
-float Builder::ELULayer::getAlpha() const {
-    return getLayer()->getParameters().at("alpha");
-}
-
-Builder::ELULayer& Builder::ELULayer::setAlpha(float alpha) {
-    getLayer()->getParameters()["alpha"] = alpha;
-    return *this;
-}
-
-REG_VALIDATOR_FOR(ELU, [] (const InferenceEngine::Builder::Layer::CPtr& input_layer, bool partial) {
-    if (!input_layer->getInputPorts().empty() &&
-        !input_layer->getOutputPorts().empty() &&
-        !input_layer->getInputPorts()[0].shape().empty() &&
-        !input_layer->getOutputPorts()[0].shape().empty() &&
-        input_layer->getInputPorts()[0].shape() != input_layer->getOutputPorts()[0].shape()) {
-        THROW_IE_EXCEPTION << "Input and output ports should be equal";
-    }
-    Builder::ELULayer layer(input_layer);
-    if (layer.getAlpha() < 0) {
-        THROW_IE_EXCEPTION << "Alpha should be >= 0";
-    }
-});
-
-REG_CONVERTER_FOR(ELU, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["alpha"] = cnnLayer->GetParamAsFloat("alpha", 0);
-});
-
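
A sketch for the removed ELULayer builder; setPort() assigns the same shape to both the input and the output port, which keeps the validator's shape-equality check satisfied:

    #include <builders/ie_elu_layer.hpp>

    using namespace InferenceEngine;

    Builder::ELULayer makeElu() {
        Builder::ELULayer elu("elu_node");
        elu.setAlpha(1.0f);                 // must be >= 0
        elu.setPort(Port({1, 64, 56, 56}));
        return elu;
    }
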
diff --git a/inference-engine/src/inference_engine/builders/ie_fully_connected_layer.cpp b/inference-engine/src/inference_engine/builders/ie_fully_connected_layer.cpp
deleted file mode 100644 (file)
index 78b3fe7..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_fully_connected_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::FullyConnectedLayer::FullyConnectedLayer(const std::string& name): LayerDecorator("FullyConnected", name) {
-    getLayer()->getInputPorts().resize(3);
-    getLayer()->getInputPorts()[1].setParameter("type", "weights");
-    getLayer()->getInputPorts()[2].setParameter("type", "biases");
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getParameters()["out-size"] = 0;
-}
-
-Builder::FullyConnectedLayer::FullyConnectedLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("FullyConnected");
-}
-
-Builder::FullyConnectedLayer::FullyConnectedLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("FullyConnected");
-}
-
-Builder::FullyConnectedLayer &Builder::FullyConnectedLayer::setName(const std::string &name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::FullyConnectedLayer::getInputPort() const {
-    return getLayer()->getInputPorts()[0];
-}
-
-Builder::FullyConnectedLayer& Builder::FullyConnectedLayer::setInputPort(const Port& port) {
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-
-const Port& Builder::FullyConnectedLayer::getOutputPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::FullyConnectedLayer& Builder::FullyConnectedLayer::setOutputPort(const Port& port) {
-    getLayer()->getOutputPorts()[0] = port;
-    return *this;
-}
-
-size_t Builder::FullyConnectedLayer::getOutputNum() const {
-    return getLayer()->getParameters().at("out-size");
-}
-
-Builder::FullyConnectedLayer& Builder::FullyConnectedLayer::setOutputNum(size_t outNum) {
-    getLayer()->getParameters()["out-size"] = outNum;
-    return *this;
-}
-
-REG_VALIDATOR_FOR(FullyConnected, [](const InferenceEngine::Builder::Layer::CPtr& layer, bool partial) {
-});
-
-REG_CONVERTER_FOR(FullyConnected, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["out-size"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("out-size", 0));
-});
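
A sketch for the removed FullyConnectedLayer builder; weights and biases travel through input ports 1 and 2 (tagged in the constructor above), so only the data ports and the out-size are set here (shapes illustrative):

    #include <builders/ie_fully_connected_layer.hpp>

    using namespace InferenceEngine;

    Builder::FullyConnectedLayer makeFc() {
        Builder::FullyConnectedLayer fc("fc1000");
        fc.setInputPort(Port({1, 2048}));
        fc.setOutputPort(Port({1, 1000}));
        fc.setOutputNum(1000);              // stored as the "out-size" parameter
        return fc;
    }
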
diff --git a/inference-engine/src/inference_engine/builders/ie_grn_layer.cpp b/inference-engine/src/inference_engine/builders/ie_grn_layer.cpp
deleted file mode 100644 (file)
index 27c7fc4..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_grn_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::GRNLayer::GRNLayer(const std::string& name): LayerDecorator("GRN", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(1);
-    setBeta(0);
-}
-
-Builder::GRNLayer::GRNLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("GRN");
-}
-
-Builder::GRNLayer::GRNLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("GRN");
-}
-
-Builder::GRNLayer& Builder::GRNLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::GRNLayer::getPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::GRNLayer& Builder::GRNLayer::setPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-
-float Builder::GRNLayer::getBeta() const {
-    return getLayer()->getParameters().at("beta");
-}
-
-Builder::GRNLayer& Builder::GRNLayer::setBeta(float beta) {
-    getLayer()->getParameters()["beta"] = beta;
-    return *this;
-}
-
-REG_CONVERTER_FOR(GRN, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["beta"] = static_cast<size_t>(cnnLayer->GetParamAsFloat("beta"));
-});
\ No newline at end of file
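
The GRNLayer builder reduces to a single parameter; a minimal sketch (shape illustrative):

    #include <builders/ie_grn_layer.hpp>

    using namespace InferenceEngine;

    Builder::GRNLayer makeGrn() {
        Builder::GRNLayer grn("grn_node");
        grn.setBeta(1.0f);
        grn.setPort(Port({1, 24, 128, 128}));
        return grn;
    }
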
diff --git a/inference-engine/src/inference_engine/builders/ie_gru_sequence_layer.cpp b/inference-engine/src/inference_engine/builders/ie_gru_sequence_layer.cpp
deleted file mode 100644 (file)
index 0091ec2..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_gru_sequence_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::GRUSequenceLayer::GRUSequenceLayer(const std::string& name): LayerDecorator("GRUSequence", name) {
-    getLayer()->getOutputPorts().resize(2);
-    getLayer()->getInputPorts().resize(5);
-    getLayer()->getInputPorts()[1].setParameter("type", "weights");
-    getLayer()->getInputPorts()[2].setParameter("type", "biases");
-    getLayer()->getInputPorts()[3].setParameter("type", "optional");
-}
-
-Builder::GRUSequenceLayer::GRUSequenceLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("GRUSequence");
-}
-
-Builder::GRUSequenceLayer::GRUSequenceLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("GRUSequence");
-}
-
-Builder::GRUSequenceLayer& Builder::GRUSequenceLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const std::vector<Port>& Builder::GRUSequenceLayer::getInputPorts() const {
-    return getLayer()->getInputPorts();
-}
-
-Builder::GRUSequenceLayer& Builder::GRUSequenceLayer::setInputPorts(const std::vector<Port>& ports) {
-    getLayer()->getInputPorts() = ports;
-    return *this;
-}
-
-const std::vector<Port>& Builder::GRUSequenceLayer::getOutputPorts() const {
-    return getLayer()->getOutputPorts();
-}
-
-Builder::GRUSequenceLayer& Builder::GRUSequenceLayer::setOutputPorts(const std::vector<Port>& ports) {
-    getLayer()->getOutputPorts() = ports;
-    return *this;
-}
-int Builder::GRUSequenceLayer::getHiddenSize() const {
-    return getLayer()->getParameters().at("hidden_size");
-}
-Builder::GRUSequenceLayer& Builder::GRUSequenceLayer::setHiddenSize(int size) {
-    getLayer()->getParameters()["hidden_size"] = size;
-    return *this;
-}
-bool Builder::GRUSequenceLayer::getSequenceDim() const {
-    return getLayer()->getParameters().at("sequence_dim");
-}
-Builder::GRUSequenceLayer& Builder::GRUSequenceLayer::setSqquenceDim(bool flag) {
-    getLayer()->getParameters()["sequence_dim"] = flag;
-    return *this;
-}
-const std::vector<std::string>& Builder::GRUSequenceLayer::getActivations() const {
-    return getLayer()->getParameters().at("activations");
-}
-Builder::GRUSequenceLayer& Builder::GRUSequenceLayer::setActivations(const std::vector<std::string>& activations) {
-    getLayer()->getParameters()["activations"] = activations;
-    return *this;
-}
-const std::vector<float>& Builder::GRUSequenceLayer::getActivationsAlpha() const {
-    return getLayer()->getParameters().at("activations_alpha");
-}
-Builder::GRUSequenceLayer& Builder::GRUSequenceLayer::setActivationsAlpha(const std::vector<float>& activations) {
-    getLayer()->getParameters()["activations_alpha"] = activations;
-    return *this;
-}
-const std::vector<float>& Builder::GRUSequenceLayer::getActivationsBeta() const {
-    return getLayer()->getParameters().at("activations_beta");
-}
-Builder::GRUSequenceLayer& Builder::GRUSequenceLayer::setActivationsBeta(const std::vector<float>& activations) {
-    getLayer()->getParameters()["activations_beta"] = activations;
-    return *this;
-}
-float Builder::GRUSequenceLayer::getClip() const {
-    return getLayer()->getParameters().at("clip");
-}
-Builder::GRUSequenceLayer& Builder::GRUSequenceLayer::setClip(float clip) {
-    getLayer()->getParameters()["clip"] = clip;
-    return *this;
-}
-
-bool Builder::GRUSequenceLayer::getLinearBeforeReset() const {
-    return getLayer()->getParameters().at("linear_before_reset");
-}
-Builder::GRUSequenceLayer& Builder::GRUSequenceLayer::setLinearBeforeReset(bool flag) {
-    getLayer()->getParameters()["linear_before_reset"] = flag;
-    return *this;
-}
-const std::string& Builder::GRUSequenceLayer::getDirection() const {
-    return getLayer()->getParameters().at("direction");
-}
-Builder::GRUSequenceLayer& Builder::GRUSequenceLayer::setDirection(const std::string& direction) {
-    getLayer()->getParameters()["direction"] = direction;
-    return *this;
-}
-
-REG_CONVERTER_FOR(GRUSequence, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["hidden_size"] = cnnLayer->GetParamAsInt("hidden_size");
-    layer.getParameters()["sequence_dim"] = cnnLayer->GetParamAsBool("sequence_dim", true);
-    std::vector<std::string> activations;
-    std::istringstream stream(cnnLayer->GetParamAsString("activations"));
-    std::string str;
-    while (getline(stream, str, ',')) {
-         activations.push_back(str);
-    }
-    layer.getParameters()["activations"] = activations;
-    layer.getParameters()["activations_alpha"] = cnnLayer->GetParamAsFloats("activations_alpha");
-    layer.getParameters()["activations_beta"] = cnnLayer->GetParamAsFloats("activations_beta");
-    layer.getParameters()["clip"] = cnnLayer->GetParamAsFloat("clip");
-    layer.getParameters()["linear_before_reset"] = cnnLayer->GetParamAsBool("linear_before_reset", true);
-    layer.getParameters()["direction"] = cnnLayer->GetParamAsString("direction", "");
-});
-
-
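
A sketch for the removed GRUSequenceLayer builder. Note that the converter above parses "activations" from the IR as a comma-separated string (e.g. "sigmoid,tanh"), while the builder takes the already-split vector:

    #include <builders/ie_gru_sequence_layer.hpp>

    #include <string>
    #include <vector>

    using namespace InferenceEngine;

    Builder::GRUSequenceLayer makeGruSequence() {
        Builder::GRUSequenceLayer gru("gru_seq");
        gru.setHiddenSize(128);
        gru.setSqquenceDim(true);                 // setter name as declared (sic)
        gru.setActivations({"sigmoid", "tanh"});
        gru.setClip(0.0f);                        // 0 disables clipping
        gru.setLinearBeforeReset(false);
        gru.setDirection("forward");
        return gru;
    }
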
diff --git a/inference-engine/src/inference_engine/builders/ie_lrn_layer.cpp b/inference-engine/src/inference_engine/builders/ie_lrn_layer.cpp
deleted file mode 100644 (file)
index afb3c28..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_lrn_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::LRNLayer::LRNLayer(const std::string& name): LayerDecorator("LRN", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(1);
-    setSize(1);
-    setAlpha(1e-4);
-    setBeta(0.75f);
-    setBias(1.0f);
-}
-
-Builder::LRNLayer::LRNLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("LRN");
-}
-
-Builder::LRNLayer::LRNLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("LRN");
-}
-
-Builder::LRNLayer& Builder::LRNLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::LRNLayer::getPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::LRNLayer& Builder::LRNLayer::setPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-
-size_t Builder::LRNLayer::getSize() const {
-    return getLayer()->getParameters().at("size");
-}
-
-Builder::LRNLayer& Builder::LRNLayer::setSize(size_t size) {
-    getLayer()->getParameters()["size"] = size;
-    return *this;
-}
-
-float Builder::LRNLayer::getAlpha() const {
-    return getLayer()->getParameters().at("alpha");
-}
-
-Builder::LRNLayer& Builder::LRNLayer::setAlpha(float alpha) {
-    getLayer()->getParameters()["alpha"] = alpha;
-    return *this;
-}
-
-float Builder::LRNLayer::getBeta() const {
-    return getLayer()->getParameters().at("beta");
-}
-
-Builder::LRNLayer& Builder::LRNLayer::setBeta(float beta) {
-    getLayer()->getParameters()["beta"] = beta;
-    return *this;
-}
-
-float Builder::LRNLayer::getBias() const {
-    return getLayer()->getParameters().at("bias");
-}
-
-Builder::LRNLayer& Builder::LRNLayer::setBias(float bias) {
-    getLayer()->getParameters()["bias"] = bias;
-    return *this;
-}
-
-REG_VALIDATOR_FOR(LRN, [](const Builder::Layer::CPtr &input_layer, bool partial) {
-    Builder::LRNLayer layer(input_layer);
-    if (layer.getAlpha() <= 0) {
-        THROW_IE_EXCEPTION << "Alpha should be > 0";
-    }
-    if (layer.getBeta() <= 0) {
-        THROW_IE_EXCEPTION << "Beta should be > 0";
-    }
-    if (layer.getSize() == 0) {
-        THROW_IE_EXCEPTION << "Size should be > 0";
-    }
-    if (!input_layer->getInputPorts().empty() &&
-        !input_layer->getOutputPorts().empty() &&
-        !input_layer->getInputPorts()[0].shape().empty() &&
-        !input_layer->getOutputPorts()[0].shape().empty() &&
-        input_layer->getInputPorts()[0].shape() != input_layer->getOutputPorts()[0].shape()) {
-        THROW_IE_EXCEPTION << "Input and output ports should be equal";
-    }
-});
-
-REG_CONVERTER_FOR(LRN, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["bias"] = cnnLayer->GetParamAsFloat("bias", 1.0f);
-    layer.getParameters()["beta"] = cnnLayer->GetParamAsFloat("beta", 0.75f);
-    layer.getParameters()["alpha"] = cnnLayer->GetParamAsFloat("alpha", 1e-4f);
-    layer.getParameters()["size"] = cnnLayer->GetParamAsUInt("size", 1);
-});
\ No newline at end of file
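
A sketch for the removed LRNLayer builder; the constructor installs defaults (size 1, alpha 1e-4, beta 0.75, bias 1.0), overridden here to typical AlexNet-style values for illustration:

    #include <builders/ie_lrn_layer.hpp>

    using namespace InferenceEngine;

    Builder::LRNLayer makeLrn() {
        Builder::LRNLayer lrn("norm1");
        lrn.setSize(5);          // must be > 0
        lrn.setAlpha(1e-4f);     // must be > 0
        lrn.setBeta(0.75f);      // must be > 0
        lrn.setBias(1.0f);
        lrn.setPort(Port({1, 96, 55, 55}));
        return lrn;
    }
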
diff --git a/inference-engine/src/inference_engine/builders/ie_lstm_sequence_layer.cpp b/inference-engine/src/inference_engine/builders/ie_lstm_sequence_layer.cpp
deleted file mode 100644 (file)
index edca01e..0000000
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_lstm_sequence_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::LSTMSequenceLayer::LSTMSequenceLayer(const std::string& name): LayerDecorator("LSTMSequence", name) {
-    getLayer()->getOutputPorts().resize(3);
-    getLayer()->getInputPorts().resize(7);
-    getLayer()->getInputPorts()[1].setParameter("type", "weights");
-    getLayer()->getInputPorts()[2].setParameter("type", "biases");
-    getLayer()->getInputPorts()[3].setParameter("type", "optional");
-    getLayer()->getInputPorts()[6].setParameter("type", "weights");
-}
-
-Builder::LSTMSequenceLayer::LSTMSequenceLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("LSTMSequence");
-}
-
-Builder::LSTMSequenceLayer::LSTMSequenceLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("LSTMSequence");
-}
-
-Builder::LSTMSequenceLayer& Builder::LSTMSequenceLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const std::vector<Port>& Builder::LSTMSequenceLayer::getInputPorts() const {
-    return getLayer()->getInputPorts();
-}
-
-Builder::LSTMSequenceLayer& Builder::LSTMSequenceLayer::setInputPorts(const std::vector<Port>& ports) {
-    getLayer()->getInputPorts() = ports;
-    return *this;
-}
-
-const std::vector<Port>& Builder::LSTMSequenceLayer::getOutputPorts() const {
-    return getLayer()->getOutputPorts();
-}
-
-Builder::LSTMSequenceLayer& Builder::LSTMSequenceLayer::setOutputPorts(const std::vector<Port>& ports) {
-    getLayer()->getOutputPorts() = ports;
-    return *this;
-}
-int Builder::LSTMSequenceLayer::getHiddenSize() const {
-    return getLayer()->getParameters().at("hidden_size");
-}
-Builder::LSTMSequenceLayer& Builder::LSTMSequenceLayer::setHiddenSize(int size) {
-    getLayer()->getParameters()["hidden_size"] = size;
-    return *this;
-}
-bool Builder::LSTMSequenceLayer::getSequenceDim() const {
-    return getLayer()->getParameters().at("sequence_dim");
-}
-Builder::LSTMSequenceLayer& Builder::LSTMSequenceLayer::setSqquenceDim(bool flag) {
-    getLayer()->getParameters()["sequence_dim"] = flag;
-    return *this;
-}
-const std::vector<std::string>& Builder::LSTMSequenceLayer::getActivations() const {
-    return getLayer()->getParameters().at("activations");
-}
-Builder::LSTMSequenceLayer& Builder::LSTMSequenceLayer::setActivations(const std::vector<std::string>& activations) {
-    getLayer()->getParameters()["activations"] = activations;
-    return *this;
-}
-const std::vector<float>& Builder::LSTMSequenceLayer::getActivationsAlpha() const {
-    return getLayer()->getParameters().at("activations_alpha");
-}
-Builder::LSTMSequenceLayer& Builder::LSTMSequenceLayer::setActivationsAlpha(const std::vector<float>& activations) {
-    getLayer()->getParameters()["activations_alpha"] = activations;
-    return *this;
-}
-const std::vector<float>& Builder::LSTMSequenceLayer::getActivationsBeta() const {
-    return getLayer()->getParameters().at("activations_beta");
-}
-Builder::LSTMSequenceLayer& Builder::LSTMSequenceLayer::setActivationsBeta(const std::vector<float>& activations) {
-    getLayer()->getParameters()["activations_beta"] = activations;
-    return *this;
-}
-float Builder::LSTMSequenceLayer::getClip() const {
-    return getLayer()->getParameters().at("clip");
-}
-Builder::LSTMSequenceLayer& Builder::LSTMSequenceLayer::setClip(float clip) {
-    getLayer()->getParameters()["clip"] = clip;
-    return *this;
-}
-
-bool Builder::LSTMSequenceLayer::getInputForget() const {
-    return getLayer()->getParameters().at("input_forget");
-}
-Builder::LSTMSequenceLayer& Builder::LSTMSequenceLayer::setInputForget(bool flag) {
-    getLayer()->getParameters()["input_forget"] = flag;
-    return *this;
-}
-const std::string& Builder::LSTMSequenceLayer::getDirection() const {
-    return getLayer()->getParameters().at("direction");
-}
-Builder::LSTMSequenceLayer& Builder::LSTMSequenceLayer::setDirection(const std::string& direction) {
-    getLayer()->getParameters()["direction"] = direction;
-    return *this;
-}
-
-REG_CONVERTER_FOR(LSTMSequence, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["hidden_size"] = cnnLayer->GetParamAsInt("hidden_size");
-    layer.getParameters()["sequence_dim"] = cnnLayer->GetParamAsBool("sequence_dim", true);
-    std::vector<std::string> activations;
-    std::istringstream stream(cnnLayer->GetParamAsString("activations"));
-    std::string str;
-    while (getline(stream, str, ',')) {
-         activations.push_back(str);
-    }
-    layer.getParameters()["activations"] = activations;
-    layer.getParameters()["activations_alpha"] = cnnLayer->GetParamAsFloats("activations_alpha");
-    layer.getParameters()["activations_beta"] = cnnLayer->GetParamAsFloats("activations_beta");
-    layer.getParameters()["clip"] = cnnLayer->GetParamAsFloat("clip");
-    layer.getParameters()["input_forget"] = cnnLayer->GetParamAsBool("input_forget", true);
-    layer.getParameters()["direction"] = cnnLayer->GetParamAsString("direction", "");
-});
-
-
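
The LSTMSequenceLayer builder mirrors the GRU one above, with input_forget in place of linear_before_reset and seven input ports (weights on ports 1 and 6, biases on port 2, port 3 optional). A minimal sketch:

    #include <builders/ie_lstm_sequence_layer.hpp>

    #include <string>
    #include <vector>

    using namespace InferenceEngine;

    Builder::LSTMSequenceLayer makeLstmSequence() {
        Builder::LSTMSequenceLayer lstm("lstm_seq");
        lstm.setHiddenSize(256);
        lstm.setActivations({"sigmoid", "tanh", "tanh"});
        lstm.setClip(0.0f);
        lstm.setInputForget(false);
        lstm.setDirection("forward");
        return lstm;
    }
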
diff --git a/inference-engine/src/inference_engine/builders/ie_memory_layer.cpp b/inference-engine/src/inference_engine/builders/ie_memory_layer.cpp
deleted file mode 100644 (file)
index 5b69008..0000000
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_memory_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::MemoryLayer::MemoryLayer(const std::string& name): LayerDecorator("Memory", name) {
-    setSize(2);
-}
-
-Builder::MemoryLayer::MemoryLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("Memory");
-}
-
-Builder::MemoryLayer::MemoryLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("Memory");
-}
-
-Builder::MemoryLayer& Builder::MemoryLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::MemoryLayer::getInputPort() const {
-    if (getLayer()->getInputPorts().empty()) {
-        THROW_IE_EXCEPTION << "No inputs ports for layer: " << getLayer()->getName();
-    }
-    return getLayer()->getInputPorts()[0];
-}
-
-Builder::MemoryLayer& Builder::MemoryLayer::setInputPort(const Port &port) {
-    getLayer()->getInputPorts().resize(1);
-    getLayer()->getInputPorts()[0] = port;
-    setIndex(0);
-    return *this;
-}
-
-const Port& Builder::MemoryLayer::getOutputPort() const {
-    if (getLayer()->getOutputPorts().empty()) {
-        THROW_IE_EXCEPTION << "No output ports for layer: " << getLayer()->getName();
-    }
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::MemoryLayer& Builder::MemoryLayer::setOutputPort(const Port &port) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getOutputPorts()[0] = port;
-    setIndex(1);
-    return *this;
-}
-
-const std::string Builder::MemoryLayer::getId() const {
-    return getLayer()->getParameters().at("id");
-}
-Builder::MemoryLayer& Builder::MemoryLayer::setId(const std::string& id) {
-    getLayer()->getParameters()["id"] = id;
-    return *this;
-}
-size_t Builder::MemoryLayer::getIndex() const {
-    return getLayer()->getParameters().at("index");
-}
-Builder::MemoryLayer& Builder::MemoryLayer::setIndex(size_t index) {
-    if (index > 1)
-        THROW_IE_EXCEPTION << "Index supports only 0 and 1 values.";
-    getLayer()->getParameters()["index"] = index;
-    return *this;
-}
-size_t Builder::MemoryLayer::getSize() const {
-    return getLayer()->getParameters().at("size");
-}
-Builder::MemoryLayer& Builder::MemoryLayer::setSize(size_t size) {
-    if (size != 2)
-        THROW_IE_EXCEPTION << "Only size equal 2 is supported.";
-    getLayer()->getParameters()["size"] = size;
-    return *this;
-}
-REG_VALIDATOR_FOR(Memory, [](const InferenceEngine::Builder::Layer::CPtr& layer, bool partial) {
-});
-
-REG_CONVERTER_FOR(Memory, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["id"] = cnnLayer->GetParamAsString("id", 0);
-    layer.getParameters()["index"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("index", 0));
-    layer.getParameters()["size"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("size", 0));
-});
-
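
Memory layers come in pairs that share an id: per the setters above, setInputPort() pins index 0 and setOutputPort() pins index 1. A sketch of such a pair (state shape illustrative):

    #include <builders/ie_memory_layer.hpp>

    using namespace InferenceEngine;

    void makeMemoryPair() {
        Builder::MemoryLayer writeEnd("state_in");
        writeEnd.setId("state_1");
        writeEnd.setInputPort(Port({1, 128}));   // also sets index = 0

        Builder::MemoryLayer readEnd("state_out");
        readEnd.setId("state_1");                // the shared id links the pair
        readEnd.setOutputPort(Port({1, 128}));   // also sets index = 1
    }
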
diff --git a/inference-engine/src/inference_engine/builders/ie_mvn_layer.cpp b/inference-engine/src/inference_engine/builders/ie_mvn_layer.cpp
deleted file mode 100644 (file)
index fd313a7..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_mvn_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::MVNLayer::MVNLayer(const std::string& name): LayerDecorator("MVN", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(1);
-    setEpsilon(9.999999717180685e-10f);
-    setNormalize(true);
-    setAcrossChannels(true);
-}
-
-Builder::MVNLayer::MVNLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("MVN");
-}
-
-Builder::MVNLayer::MVNLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("MVN");
-}
-
-Builder::MVNLayer& Builder::MVNLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::MVNLayer::getPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::MVNLayer& Builder::MVNLayer::setPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-
-bool Builder::MVNLayer::getAcrossChannels() const {
-    return getLayer()->getParameters().at("across_channels");
-}
-Builder::MVNLayer& Builder::MVNLayer::setAcrossChannels(bool flag) {
-    getLayer()->getParameters()["across_channels"] = flag ? 1 : 0;
-    return *this;
-}
-bool Builder::MVNLayer::getNormalize() const {
-    return getLayer()->getParameters().at("normalize_variance");
-}
-Builder::MVNLayer& Builder::MVNLayer::setNormalize(bool flag) {
-    getLayer()->getParameters()["normalize_variance"] = flag ? 1 : 0;
-    return *this;
-}
-float Builder::MVNLayer::getEpsilon() const {
-    return getLayer()->getParameters().at("eps");
-}
-Builder::MVNLayer& Builder::MVNLayer::setEpsilon(float eps) {
-    getLayer()->getParameters()["eps"] = eps;
-    return *this;
-}
-
-REG_VALIDATOR_FOR(MVN, [](const Builder::Layer::CPtr& input_layer, bool partial) {
-    Builder::MVNLayer layer(input_layer);
-    if (layer.getEpsilon() <= 0) {
-        THROW_IE_EXCEPTION << "Epsilon should be > 0";
-    }
-    if (!input_layer->getInputPorts().empty() &&
-        !input_layer->getOutputPorts().empty() &&
-        !input_layer->getInputPorts()[0].shape().empty() &&
-        !input_layer->getOutputPorts()[0].shape().empty() &&
-        input_layer->getInputPorts()[0].shape() != input_layer->getOutputPorts()[0].shape()) {
-        THROW_IE_EXCEPTION << "Input and output ports should be equal";
-    }
-});
-
-REG_CONVERTER_FOR(MVN, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["across_channels"] = cnnLayer->GetParamAsBool("across_channels", 0);
-    layer.getParameters()["normalize_variance"] = cnnLayer->GetParamAsBool("normalize_variance", 0);
-    layer.getParameters()["eps"] = cnnLayer->GetParamAsFloat("eps", 0);
-});
\ No newline at end of file
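
A sketch for the removed MVNLayer builder; the constructor defaults to across-channels normalization with variance normalization enabled, spelled out explicitly here:

    #include <builders/ie_mvn_layer.hpp>

    using namespace InferenceEngine;

    Builder::MVNLayer makeMvn() {
        Builder::MVNLayer mvn("mvn_node");
        mvn.setAcrossChannels(true);
        mvn.setNormalize(true);
        mvn.setEpsilon(1e-9f);               // must be > 0
        mvn.setPort(Port({1, 3, 224, 224}));
        return mvn;
    }
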
diff --git a/inference-engine/src/inference_engine/builders/ie_network_builder_converter.cpp b/inference-engine/src/inference_engine/builders/ie_network_builder_converter.cpp
deleted file mode 100644 (file)
index 4daa169..0000000
+++ /dev/null
@@ -1,329 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <ie_builders.hpp>
-#include <ie_cnn_layer_builder.h>
-#include <cnn_network_impl.hpp>
-
-#include <memory>
-#include <vector>
-#include <unordered_set>
-#include <string>
-
-using namespace InferenceEngine;
-
-class BaseConverter {
-public:
-    explicit BaseConverter(const std::string& type): type(type) {}
-
-    virtual CNNLayer::Ptr createLayer(const std::shared_ptr<const ILayer>& layer, Precision precision) = 0;
-    virtual bool canCreate(const std::string& nodeType) const = 0;
-
-protected:
-    std::string type;
-};
-
-template <class CLT>
-class LayerConverter: public BaseConverter {
-public:
-    explicit LayerConverter(const std::string& type): BaseConverter(type) {}
-
-    CNNLayer::Ptr createLayer(const std::shared_ptr<const ILayer>& layer, Precision precision) override {
-        LayerParams params = {layer->getName(), layer->getType(), precision};
-        auto res = std::make_shared<CLT>(params);
-
-        auto * weightLayerPtr = dynamic_cast<WeightableLayer *>(res.get());
-
-        for (const auto& port : layer->getInputPorts()) {
-            if (port.getParameters().find("type") == port.getParameters().end() ||
-                    port.getData()->getData()->cbuffer() == nullptr)
-                continue;
-            res->blobs[port.getParameters().at("type")] = port.getData()->getData();
-            if (weightLayerPtr == nullptr)
-                continue;
-            if (port.getParameters().at("type").as<std::string>() == "weights") {
-                weightLayerPtr->_weights = port.getData()->getData();
-            } else if (port.getParameters().at("type").as<std::string>() == "biases") {
-                weightLayerPtr->_biases = port.getData()->getData();
-            }
-        }
-
-        // For constant layers
-        for (auto& it : layer->getParameters()) {
-            if (it.second.is<Blob::CPtr>()) {
-                res->blobs[it.first] = std::const_pointer_cast<Blob>(it.second.as<Blob::CPtr>());
-            } else if (it.second.is<Blob::Ptr>()) {
-                res->blobs[it.first] = it.second.as<Blob::Ptr>();
-            }
-        }
-
-        res->params = InferenceEngine::Builder::convertParameters2Strings(layer->getParameters());
-        return res;
-    }
-
-    bool canCreate(const std::string& nodeType) const override {
-        details::CaselessEq<std::string> comparator;
-        return comparator(nodeType, type);
-    }
-};
-
-class ActivationConverter: public BaseConverter {
-public:
-    ActivationConverter(): BaseConverter("Activation") {}
-
-    CNNLayer::Ptr createLayer(const std::shared_ptr<const ILayer>& layer, Precision precision) override {
-        LayerParams params = {layer->getName(), layer->getType(), precision};
-        static details::caseless_map<std::string, std::shared_ptr<BaseConverter>> activationCreators = {
-                {"relu", std::make_shared<LayerConverter<InferenceEngine::ReLULayer>>("ReLU")},
-                {"prelu", std::make_shared<LayerConverter<InferenceEngine::PReLULayer>>("PReLU")},
-                {"clamp", std::make_shared<LayerConverter<InferenceEngine::ClampLayer>>("Clamp")},
-                {"elu", std::make_shared<LayerConverter<InferenceEngine::CNNLayer>>("ELU")},
-                {"sigmoid", std::make_shared<LayerConverter<InferenceEngine::CNNLayer>>("Sigmoid")},
-                {"tanh", std::make_shared<LayerConverter<InferenceEngine::CNNLayer>>("TanH")},
-        };
-
-        auto typeIt = layer->getParameters().find("type");
-        if (typeIt == layer->getParameters().end())
-            THROW_IE_EXCEPTION << "Unsupported Activation layer. Type is unknown.";
-
-        auto activationBuilder = activationCreators.find(typeIt->second);
-        if (activationBuilder == activationCreators.end()) {
-            THROW_IE_EXCEPTION << "Unsupported Activation layer type: " << typeIt->second.as<std::string>();
-        }
-
-        auto activation = activationBuilder->second->createLayer(layer, precision);
-
-        activation->type = activationBuilder->first;
-        activation->params.erase("type");
-        activation->validateLayer();
-        return activation;
-    }
-
-    bool canCreate(const std::string& nodeType) const override {
-        details::CaselessEq<std::string> comparator;
-        return comparator(nodeType, type);
-    }
-};
-
-class RNNSequenceConverter: public BaseConverter {
-public:
-    RNNSequenceConverter(): BaseConverter("RNN") {}
-
-    CNNLayer::Ptr createLayer(const std::shared_ptr<const ILayer>& layer, Precision precision) override {
-        auto rnnLayer = LayerConverter<InferenceEngine::RNNSequenceLayer>("RNN").createLayer(layer, precision);
-        rnnLayer->type = "RNN";
-        std::string type = layer->getType();
-        size_t pos = type.find("Sequence");
-        if (pos != std::string::npos)
-            type.erase(pos);
-        rnnLayer->params["cell_type"] = type;
-        return rnnLayer;
-    }
-
-    bool canCreate(const std::string& nodeType) const override {
-        static const details::caseless_set<std::string> supportedRnnTypes {
-            "LSTMSequence", "GRUSequence", "RNNSequence"
-        };
-        return supportedRnnTypes.find(nodeType) != supportedRnnTypes.end();
-    }
-};
-
-const std::shared_ptr<ICNNNetwork> Builder::convertToICNNNetwork(const INetwork::CPtr& network) {
-    auto createCNNLayer = [](const std::shared_ptr<const ILayer>& layer, Precision precision) {
-        static std::vector<std::shared_ptr<BaseConverter>> convertors = {
-                std::make_shared<LayerConverter<InferenceEngine::PowerLayer>>("Power"),
-                std::make_shared<LayerConverter<InferenceEngine::ConvolutionLayer>>("Convolution"),
-                std::make_shared<LayerConverter<InferenceEngine::DeformableConvolutionLayer>>("DeformableConvolution"),
-                std::make_shared<LayerConverter<InferenceEngine::DeconvolutionLayer>>("Deconvolution"),
-                std::make_shared<LayerConverter<InferenceEngine::PoolingLayer>>("Pooling"),
-                std::make_shared<LayerConverter<InferenceEngine::FullyConnectedLayer>>("InnerProduct"),
-                std::make_shared<LayerConverter<InferenceEngine::FullyConnectedLayer>>("FullyConnected"),
-                std::make_shared<LayerConverter<InferenceEngine::NormLayer>>("LRN"),
-                std::make_shared<LayerConverter<InferenceEngine::NormLayer>>("Norm"),
-                std::make_shared<LayerConverter<InferenceEngine::SoftMaxLayer>>("Softmax"),
-                std::make_shared<LayerConverter<InferenceEngine::SoftMaxLayer>>("LogSoftmax"),
-                std::make_shared<LayerConverter<InferenceEngine::GRNLayer>>("GRN"),
-                std::make_shared<LayerConverter<InferenceEngine::MVNLayer>>("MVN"),
-                std::make_shared<LayerConverter<InferenceEngine::ReLULayer>>("ReLU"),
-                std::make_shared<LayerConverter<InferenceEngine::ClampLayer>>("Clamp"),
-                std::make_shared<LayerConverter<InferenceEngine::SplitLayer>>("Split"),
-                std::make_shared<LayerConverter<InferenceEngine::SplitLayer>>("Slice"),
-                std::make_shared<LayerConverter<InferenceEngine::ConcatLayer>>("Concat"),
-                std::make_shared<LayerConverter<InferenceEngine::EltwiseLayer>>("Eltwise"),
-                std::make_shared<LayerConverter<InferenceEngine::ScaleShiftLayer>>("ScaleShift"),
-                std::make_shared<LayerConverter<InferenceEngine::PReLULayer>>("PReLU"),
-                std::make_shared<LayerConverter<InferenceEngine::CropLayer>>("Crop"),
-                std::make_shared<LayerConverter<InferenceEngine::ReshapeLayer>>("Reshape"),
-                std::make_shared<LayerConverter<InferenceEngine::ReshapeLayer>>("Flatten"),
-                std::make_shared<LayerConverter<InferenceEngine::TileLayer>>("Tile"),
-                std::make_shared<LayerConverter<InferenceEngine::PadLayer>>("Pad"),
-                std::make_shared<ActivationConverter>(),
-                std::make_shared<RNNSequenceConverter>(),
-                std::make_shared<LayerConverter<InferenceEngine::BatchNormalizationLayer>>("BatchNormalization"),
-        };
-        for (auto &convertor : convertors) {
-            if (!convertor->canCreate(layer->getType()))
-                continue;
-            return convertor->createLayer(layer, precision);
-        }
-        static LayerConverter<CNNLayer> genericCreator("");
-        return genericCreator.createLayer(layer, precision);
-    };
-
-    auto keep_input_info = [](std::unique_ptr<details::CNNNetworkImpl>& network, DataPtr &in_data,
-            PreProcessInfo preProc) {
-        InputInfo::Ptr info(new InputInfo());
-        info->getPreProcess() = preProc;
-        info->setInputData(in_data);
-        Precision prc = info->getPrecision();
-
-        // Convert precision into native format (keep element size)
-        prc = prc == Precision::Q78 ? Precision::I16 :
-              prc == Precision::FP16 ? Precision::FP32 :
-              static_cast<Precision::ePrecision>(prc);
-
-        info->setPrecision(prc);
-        network->setInputInfo(info);
-    };
-
-    std::unique_ptr<details::CNNNetworkImpl> cnnNetworkImpl(new details::CNNNetworkImpl());
-
-    Precision detectedPrecision = Precision::UNSPECIFIED;
-    for (const auto& layer : *network) {
-        for (const auto& port : layer->getInputPorts()) {
-            Precision prc = port.getData()->getData()->getTensorDesc().getPrecision();
-            if (prc != Precision::UNSPECIFIED) {
-                detectedPrecision = prc;
-                break;
-            }
-        }
-        for (const auto& port : layer->getOutputPorts()) {
-            Precision prc = port.getData()->getData()->getTensorDesc().getPrecision();
-            if (prc != Precision::UNSPECIFIED) {
-                detectedPrecision = prc;
-                break;
-            }
-        }
-        if (detectedPrecision != Precision::UNSPECIFIED)
-            break;
-    }
-    if (detectedPrecision == Precision::UNSPECIFIED)
-        detectedPrecision = Precision::FP32;
-
-    details::CaselessEq<std::string> eq;
-    cnnNetworkImpl->setName(network->getName());
-    IE_SUPPRESS_DEPRECATED_START
-    cnnNetworkImpl->setPrecision(Precision::UNSPECIFIED);
-    IE_SUPPRESS_DEPRECATED_END
-    for (const auto& layer : *network) {
-        bool isInternalLayer = eq(layer->getType(), "Const");
-        for (const auto& connection : network->getLayerConnections(layer->getId())) {
-            if (!isInternalLayer)
-                break;
-            if (connection.from().layerId() != layer->getId())
-                continue;
-            const auto& port = network->getLayer(connection.to().layerId())->getInputPorts()[connection.to().portId()];
-            isInternalLayer = isInternalLayer &&
-                    port.getParameters().find("type") != port.getParameters().end();
-        }
-        isInternalLayer = isInternalLayer || eq(layer->getType(), "Output");
-
-        if (isInternalLayer)
-            continue;
-
-        CNNLayerPtr cnnLayer = createCNNLayer(layer, detectedPrecision);
-        if (cnnLayer == nullptr)
-            THROW_IE_EXCEPTION << "Could not create CNN layer '" << layer->getName() << "'";
-        if (cnnNetworkImpl->getPrecision() == Precision::UNSPECIFIED) {
-            cnnNetworkImpl->setPrecision(cnnLayer->precision);
-        } else if (cnnNetworkImpl->getPrecision() != Precision::MIXED &&
-                   cnnNetworkImpl->getPrecision() != cnnLayer->precision) {
-            cnnNetworkImpl->setPrecision(Precision::MIXED);
-        }
-
-        auto connections = network->getLayerConnections(layer->getId());
-        std::unordered_set<idx_t> inputNum, outputNum;
-        for (const auto& connection : connections) {
-            if (connection.from().layerId() != layer->getId()) {
-                const auto& port = layer->getInputPorts()[connection.to().portId()];
-                if (port.getParameters().find("type") == port.getParameters().end())
-                    inputNum.insert(connection.to().portId());
-            } else {
-                outputNum.insert(connection.from().portId());
-            }
-        }
-        cnnLayer->insData.resize(inputNum.size());
-        cnnLayer->outData.resize(outputNum.size());
-        cnnNetworkImpl->addLayer(cnnLayer);
-    }
-
-    for (const auto& layer : *network) {
-        auto connections = network->getLayerConnections(layer->getId());
-        CNNLayerPtr cnnLayer;
-        StatusCode sts = cnnNetworkImpl->getLayerByName(layer->getName().c_str(), cnnLayer, nullptr);
-
-        if (sts != OK && (eq(layer->getType(), "Output") || eq(layer->getType(), "Const")))
-            continue;
-        else if (sts != OK)
-            THROW_IE_EXCEPTION << "Cannot find CNNLayer by name " << layer->getName();
-
-        for (const auto& connection : connections) {
-            if (connection.from().layerId() != layer->getId())
-                continue;
-
-            const auto& outLayer = network->getLayer(connection.to().layerId());
-
-            CNNLayerPtr cnnOutLayer;
-            sts = cnnNetworkImpl->getLayerByName(outLayer->getName().c_str(), cnnOutLayer, nullptr);
-            if (sts != OK && !eq(outLayer->getType(), "Output") && !eq(outLayer->getType(), "Const"))
-                THROW_IE_EXCEPTION << "Cannot find CNNLayer by name " << outLayer->getName();
-
-            std::string dataName = layer->getName();
-            if (cnnLayer->outData.size() > 1) {
-                dataName += "." + std::to_string(connection.from().portId());
-            }
-            DataPtr& data = cnnNetworkImpl->getData(dataName);
-            if (!data) {
-                TensorDesc dataDesc(detectedPrecision, layer->getOutputPorts()[connection.from().portId()].shape(),
-                                    TensorDesc::getLayoutByDims(layer->getOutputPorts()[connection.from().portId()].shape()));
-                data = std::make_shared<Data>(dataName, dataDesc);
-                data->getCreatorLayer() = cnnLayer;
-            }
-            cnnLayer->outData[connection.from().portId()] = data;
-
-            idx_t realPortId(0);
-            const auto inputPorts = outLayer->getInputPorts();
-            for (size_t i = 0; i < connection.to().portId() && i < inputPorts.size(); i++) {
-                if (inputPorts[i].getParameters().find("type") == inputPorts[i].getParameters().end())
-                    realPortId++;
-            }
-            if (cnnOutLayer) {
-                data->getInputTo()[outLayer->getName()] = cnnOutLayer;
-                cnnOutLayer->insData[realPortId] = data;
-            } else {
-                cnnNetworkImpl->addOutput(data->getName());
-            }
-        }
-
-        cnnLayer->validateLayer();
-        if (eq(cnnLayer->type, "Input")) {
-            PreProcessInfo preProc;
-            if (layer->getParameters().find("preProcess") != layer->getParameters().end())
-                preProc = layer->getParameters().at("preProcess");
-            keep_input_info(cnnNetworkImpl, *cnnLayer->outData.begin(), preProc);
-        }
-    }
-
-    // Set default output precision to FP32 (for backward compatibility)
-    OutputsDataMap outputsInfo;
-    cnnNetworkImpl->getOutputsInfo(outputsInfo);
-    for (auto outputInfo : outputsInfo) {
-        if (outputInfo.second->getPrecision() != Precision::FP32 &&
-            outputInfo.second->getPrecision() != Precision::I32) {
-            outputInfo.second->setPrecision(Precision::FP32);
-        }
-    }
-
-    return std::shared_ptr<ICNNNetwork>(cnnNetworkImpl.release());
-}
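
For reference, the precision handling inside the keep_input_info helper above reduces to a small pure function; a minimal sketch, assuming only the InferenceEngine::Precision enum (the function name is illustrative):

    #include <ie_precision.hpp>

    using InferenceEngine::Precision;

    // Q78 and FP16 are mapped to natively supported element types;
    // every other precision passes through unchanged.
    static Precision toNativePrecision(Precision prc) {
        if (prc == Precision::Q78)  return Precision::I16;   // keeps the 2-byte element size
        if (prc == Precision::FP16) return Precision::FP32;  // widened to native float
        return prc;
    }
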
diff --git a/inference-engine/src/inference_engine/builders/ie_norm_layer.cpp b/inference-engine/src/inference_engine/builders/ie_norm_layer.cpp
deleted file mode 100644 (file)
index 575be22..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_norm_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::NormLayer::NormLayer(const std::string& name): LayerDecorator("Norm", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(1);
-    setAcrossMaps(false);
-    setSize(0);
-    setAlpha(0);
-    setBeta(0);
-}
-
-Builder::NormLayer::NormLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("Norm");
-}
-
-Builder::NormLayer::NormLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("Norm");
-}
-
-Builder::NormLayer& Builder::NormLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::NormLayer::getPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::NormLayer& Builder::NormLayer::setPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-
-size_t Builder::NormLayer::getSize() const {
-    return getLayer()->getParameters().at("local-size");
-}
-
-Builder::NormLayer& Builder::NormLayer::setSize(size_t size) {
-    getLayer()->getParameters()["local-size"] = size;
-    return *this;
-}
-
-float Builder::NormLayer::getAlpha() const {
-    return getLayer()->getParameters().at("alpha");
-}
-
-Builder::NormLayer& Builder::NormLayer::setAlpha(float alpha) {
-    getLayer()->getParameters()["alpha"] = alpha;
-    return *this;
-}
-
-float Builder::NormLayer::getBeta() const {
-    return getLayer()->getParameters().at("beta");
-}
-
-Builder::NormLayer& Builder::NormLayer::setBeta(float beta) {
-    getLayer()->getParameters()["beta"] = beta;
-    return *this;
-}
-
-bool Builder::NormLayer::getAcrossMaps() const {
-    return getLayer()->getParameters().at("region").as<std::string>() == "across";
-}
-
-Builder::NormLayer& Builder::NormLayer::setAcrossMaps(bool acrossMap)  {
-    std::string value = acrossMap ? "across" : "same";
-    getLayer()->getParameters()["region"] = value;
-    return *this;
-}
-
-Builder::NormLayer::NormType Builder::NormLayer::getRegion() const {
-    return getAcrossMaps() ? Builder::NormLayer::NormType::ACROSS_CHANNELS :
-                             Builder::NormLayer::NormType::WITHIN_CHANNEL;
-}
-Builder::NormLayer& Builder::NormLayer::setRegion(Builder::NormLayer::NormType type) {
-    setAcrossMaps(type == Builder::NormLayer::NormType::ACROSS_CHANNELS);
-    return *this;
-}
-
-REG_VALIDATOR_FOR(Norm, [] (const InferenceEngine::Builder::Layer::CPtr& input_layer, bool partial) {
-    Builder::NormLayer layer(input_layer);
-    if (layer.getAlpha() <= 0) {
-        THROW_IE_EXCEPTION << "Alpha should be > 0";
-    }
-    if (layer.getBeta() <= 0) {
-        THROW_IE_EXCEPTION << "Beta should be > 0";
-    }
-    if (layer.getSize() == 0) {
-        THROW_IE_EXCEPTION << "Size should be > 0";
-    }
-    if (!input_layer->getInputPorts().empty() &&
-        !input_layer->getOutputPorts().empty() &&
-        !input_layer->getInputPorts()[0].shape().empty() &&
-        !input_layer->getOutputPorts()[0].shape().empty() &&
-        input_layer->getInputPorts()[0].shape() != input_layer->getOutputPorts()[0].shape()) {
-        THROW_IE_EXCEPTION << "Input and output port shapes should be equal";
-    }
-});
-
-REG_CONVERTER_FOR(Norm, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["local-size"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("local-size", 0));
-    layer.getParameters()["alpha"] = cnnLayer->GetParamAsFloat("alpha", 0);
-    layer.getParameters()["beta"] = cnnLayer->GetParamAsFloat("beta", 0);
-});
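
A minimal usage sketch of the NormLayer builder deleted above (the layer name and parameter values are illustrative, not from the source). Note that the validator registered in this file rejects non-positive alpha, beta, and size, so the zero defaults set by the constructor would not pass validation as-is:

    #include <builders/ie_norm_layer.hpp>

    using namespace InferenceEngine;

    void buildLrn() {
        Builder::NormLayer lrn("lrn1");   // hypothetical layer name
        lrn.setSize(5)                    // "local-size"
           .setAlpha(1e-4f)
           .setBeta(0.75f)
           .setAcrossMaps(true);          // "region" = "across"
        // getRegion() now reports NormType::ACROSS_CHANNELS.
    }
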
diff --git a/inference-engine/src/inference_engine/builders/ie_normalize_layer.cpp b/inference-engine/src/inference_engine/builders/ie_normalize_layer.cpp
deleted file mode 100644 (file)
index c3321cc..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_normalize_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::NormalizeLayer::NormalizeLayer(const std::string& name): LayerDecorator("Normalize", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(1);
-    setAcrossMaps(false);
-    setChannelShared(false);
-    setEpsilon(0.0000001f);
-}
-
-Builder::NormalizeLayer::NormalizeLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("Normalize");
-}
-
-Builder::NormalizeLayer::NormalizeLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("Normalize");
-}
-
-Builder::NormalizeLayer& Builder::NormalizeLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::NormalizeLayer::getPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::NormalizeLayer& Builder::NormalizeLayer::setPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-
-bool Builder::NormalizeLayer::getAcrossMaps() const {
-    return getLayer()->getParameters().at("region");
-}
-
-Builder::NormalizeLayer& Builder::NormalizeLayer::setAcrossMaps(bool acrossMap)  {
-    getLayer()->getParameters()["region"] = acrossMap ? 1 : 0;
-    return *this;
-}
-
-bool Builder::NormalizeLayer::getChannelShared() const {
-    return getLayer()->getParameters().at("channel_shared");
-}
-
-Builder::NormalizeLayer& Builder::NormalizeLayer::setChannelShared(bool channelShared)  {
-    getLayer()->getParameters()["channel_shared"] = channelShared ? 1 : 0;
-    return *this;
-}
-
-float Builder::NormalizeLayer::getEpsilon() const {
-    return getLayer()->getParameters().at("eps");
-}
-
-Builder::NormalizeLayer& Builder::NormalizeLayer::setEpsilon(float eps) {
-    getLayer()->getParameters()["eps"] = eps;
-    return *this;
-}
-
-REG_VALIDATOR_FOR(Normalize, [] (const InferenceEngine::Builder::Layer::CPtr& input_layer, bool partial) {
-    Builder::NormalizeLayer layer(input_layer);
-    if (layer.getEpsilon() <= 0) {
-        THROW_IE_EXCEPTION << "Epsilon should be > 0";
-    }
-    if (!input_layer->getInputPorts().empty() &&
-        !input_layer->getOutputPorts().empty() &&
-        !input_layer->getInputPorts()[0].shape().empty() &&
-        !input_layer->getOutputPorts()[0].shape().empty() &&
-        input_layer->getInputPorts()[0].shape() != input_layer->getOutputPorts()[0].shape()) {
-        THROW_IE_EXCEPTION << "Input and output port shapes should be equal";
-    }
-});
-
-REG_CONVERTER_FOR(Normalize, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["region"] = cnnLayer->GetParamAsBool("region", false);
-    layer.getParameters()["channel_shared"] = cnnLayer->GetParamAsBool("channel_shared", false);
-    layer.getParameters()["eps"] = cnnLayer->GetParamAsFloat("eps", 0);
-});
-
diff --git a/inference-engine/src/inference_engine/builders/ie_output_layer_layer.cpp b/inference-engine/src/inference_engine/builders/ie_output_layer_layer.cpp
deleted file mode 100644 (file)
index 7de9e2d..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_output_layer.hpp>
-
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::OutputLayer::OutputLayer(const std::string& name): LayerDecorator("Output", name) {
-    getLayer()->getInputPorts().resize(1);
-}
-
-Builder::OutputLayer::OutputLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("Output");
-}
-
-Builder::OutputLayer::OutputLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("Output");
-}
-
-Builder::OutputLayer& Builder::OutputLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::OutputLayer::getPort() const {
-    return getLayer()->getInputPorts()[0];
-}
-
-Builder::OutputLayer& Builder::OutputLayer::setPort(const Port &port) {
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-
-REG_VALIDATOR_FOR(Output, [] (const InferenceEngine::Builder::Layer::CPtr& input_layer, bool partial) {});
diff --git a/inference-engine/src/inference_engine/builders/ie_permute_layer.cpp b/inference-engine/src/inference_engine/builders/ie_permute_layer.cpp
deleted file mode 100644 (file)
index c8f1159..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_permute_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <string>
-#include <vector>
-
-using namespace InferenceEngine;
-
-Builder::PermuteLayer::PermuteLayer(const std::string& name): LayerDecorator("Permute", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(1);
-}
-
-Builder::PermuteLayer::PermuteLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("Permute");
-}
-
-Builder::PermuteLayer::PermuteLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("Permute");
-}
-
-Builder::PermuteLayer& Builder::PermuteLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::PermuteLayer::getOutputPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::PermuteLayer& Builder::PermuteLayer::setOutputPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    return *this;
-}
-
-const Port& Builder::PermuteLayer::getInputPort() const {
-    return getLayer()->getInputPorts()[0];
-}
-
-Builder::PermuteLayer& Builder::PermuteLayer::setInputPort(const Port &port) {
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-
-const std::vector<size_t> Builder::PermuteLayer::getOrder() const {
-    return getLayer()->getParameters().at("order");
-}
-Builder::PermuteLayer& Builder::PermuteLayer::setOrder(const std::vector<size_t>& order) {
-    getLayer()->getParameters()["order"] = order;
-    return *this;
-}
-
-REG_CONVERTER_FOR(Permute, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    std::vector<unsigned int> tmp = cnnLayer->GetParamAsUInts("order");
-    layer.getParameters()["order"] = std::vector<size_t>(tmp.size());
-    for (size_t i = 0; i < tmp.size(); ++i) {
-        layer.getParameters()["order"].as<std::vector<size_t>>()[i] = static_cast<size_t>(tmp[i]);
-    }
-});
\ No newline at end of file
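
The copy loop in the Permute converter above widens each element from unsigned int to size_t; the vector range constructor expresses the same thing directly. A sketch (function name illustrative):

    #include <cstddef>
    #include <vector>

    // Equivalent to the converter's element-by-element copy.
    std::vector<size_t> toOrder(const std::vector<unsigned int>& raw) {
        return std::vector<size_t>(raw.begin(), raw.end());
    }
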
diff --git a/inference-engine/src/inference_engine/builders/ie_power_layer.cpp b/inference-engine/src/inference_engine/builders/ie_power_layer.cpp
deleted file mode 100644 (file)
index 75ae9c9..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_power_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::PowerLayer::PowerLayer(const std::string& name): LayerDecorator("Power", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(1);
-    setPower(1);
-    setScale(1);
-    setShift(0);
-}
-
-Builder::PowerLayer::PowerLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("Power");
-}
-
-Builder::PowerLayer::PowerLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("Power");
-}
-
-Builder::PowerLayer& Builder::PowerLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::PowerLayer::getPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::PowerLayer& Builder::PowerLayer::setPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-
-float Builder::PowerLayer::getPower() const {
-    return getLayer()->getParameters().at("power");
-}
-
-Builder::PowerLayer& Builder::PowerLayer::setPower(float power) {
-    getLayer()->getParameters()["power"] = power;
-    return *this;
-}
-
-float Builder::PowerLayer::getScale() const {
-    return getLayer()->getParameters().at("scale");
-}
-
-Builder::PowerLayer& Builder::PowerLayer::setScale(float scale) {
-    getLayer()->getParameters()["scale"] = scale;
-    return *this;
-}
-
-float Builder::PowerLayer::getShift() const {
-    return getLayer()->getParameters().at("shift");
-}
-
-Builder::PowerLayer& Builder::PowerLayer::setShift(float shift) {
-    getLayer()->getParameters()["shift"] = shift;
-    return *this;
-}
-
-REG_CONVERTER_FOR(Power, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["shift"] = cnnLayer->GetParamAsFloat("shift", 0);
-    layer.getParameters()["scale"] = cnnLayer->GetParamAsFloat("scale", 1);
-    layer.getParameters()["power"] = cnnLayer->GetParamAsFloat("power", 1);
-});
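
For reference, the Power layer configured by this builder computes y = (shift + scale * x)^power per element; a scalar sketch of that formula (function name illustrative):

    #include <cmath>

    // y = (shift + scale * x) ^ power, applied element-wise by the layer.
    float powerRef(float x, float power, float scale, float shift) {
        return std::pow(shift + scale * x, power);
    }
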
diff --git a/inference-engine/src/inference_engine/builders/ie_prelu_layer.cpp b/inference-engine/src/inference_engine/builders/ie_prelu_layer.cpp
deleted file mode 100644 (file)
index 2927f3d..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_prelu_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::PReLULayer::PReLULayer(const std::string& name): LayerDecorator("PReLU", name) {
-    getLayer()->getInputPorts().resize(2);
-    getLayer()->getInputPorts()[1].setParameter("type", "weights");
-    getLayer()->getOutputPorts().resize(1);
-    setChannelShared(false);
-}
-
-Builder::PReLULayer::PReLULayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("PReLU");
-}
-
-Builder::PReLULayer::PReLULayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("PReLU");
-}
-
-Builder::PReLULayer& Builder::PReLULayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::PReLULayer::getPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::PReLULayer& Builder::PReLULayer::setPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-
-bool Builder::PReLULayer::getChannelShared() const {
-    return getLayer()->getParameters().at("channel_shared");
-}
-Builder::PReLULayer& Builder::PReLULayer::setChannelShared(bool flag) {
-    getLayer()->getParameters()["channel_shared"] = flag ? 1 : 0;
-    return *this;
-}
-
-REG_CONVERTER_FOR(PReLU, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["channel_shared"] = cnnLayer->GetParamAsBool("channel_shared", false);
-});
\ No newline at end of file
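
A minimal usage sketch of the PReLU builder deleted above; the constructor reserves a second input port tagged "type" = "weights" for the slope tensor (the layer name is illustrative):

    #include <builders/ie_prelu_layer.hpp>

    using namespace InferenceEngine;

    void buildPRelu() {
        Builder::PReLULayer prelu("prelu1");  // hypothetical layer name
        // channel_shared = false: one learned slope per channel, read from
        // the input port marked "type" = "weights" by the constructor.
        prelu.setChannelShared(false);
    }
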
diff --git a/inference-engine/src/inference_engine/builders/ie_prior_box_clustered_layer.cpp b/inference-engine/src/inference_engine/builders/ie_prior_box_clustered_layer.cpp
deleted file mode 100644 (file)
index 25e1abb..0000000
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_prior_box_clustered_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::PriorBoxClusteredLayer::PriorBoxClusteredLayer(const std::string& name): LayerDecorator("PriorBoxClustered", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(2);
-}
-
-Builder::PriorBoxClusteredLayer::PriorBoxClusteredLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("PriorBoxClustered");
-}
-
-Builder::PriorBoxClusteredLayer::PriorBoxClusteredLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("PriorBoxClustered");
-}
-
-Builder::PriorBoxClusteredLayer& Builder::PriorBoxClusteredLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const std::vector<Port>& Builder::PriorBoxClusteredLayer::getInputPorts() const {
-    return getLayer()->getInputPorts();
-}
-
-Builder::PriorBoxClusteredLayer& Builder::PriorBoxClusteredLayer::setInputPorts(const std::vector<Port> &ports) {
-    if (ports.size() != 2)
-        THROW_IE_EXCEPTION << "Incorrect number of inputs for PriorBoxClustered layer.";
-    getLayer()->getInputPorts() = ports;
-    return *this;
-}
-
-const Port& Builder::PriorBoxClusteredLayer::getOutputPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::PriorBoxClusteredLayer& Builder::PriorBoxClusteredLayer::setOutputPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    return *this;
-}
-
-float Builder::PriorBoxClusteredLayer::getVariance() const {
-    return getLayer()->getParameters().at("variance");
-}
-Builder::PriorBoxClusteredLayer& Builder::PriorBoxClusteredLayer::setVariance(float variance) {
-    getLayer()->getParameters()["variance"] = variance;
-    return *this;
-}
-
-float Builder::PriorBoxClusteredLayer::getOffset() const {
-    return getLayer()->getParameters().at("offset");
-}
-Builder::PriorBoxClusteredLayer& Builder::PriorBoxClusteredLayer::setOffset(float offset) {
-    getLayer()->getParameters()["offset"] = offset;
-    return *this;
-}
-
-float Builder::PriorBoxClusteredLayer::getWidth() const {
-    return getLayer()->getParameters().at("width");
-}
-Builder::PriorBoxClusteredLayer& Builder::PriorBoxClusteredLayer::setWidth(float width) {
-    getLayer()->getParameters()["width"] = width;
-    return *this;
-}
-
-float Builder::PriorBoxClusteredLayer::getHeight() const {
-    return getLayer()->getParameters().at("height");
-}
-Builder::PriorBoxClusteredLayer& Builder::PriorBoxClusteredLayer::setHeight(float height) {
-    getLayer()->getParameters()["height"] = height;
-    return *this;
-}
-
-const std::vector<float> Builder::PriorBoxClusteredLayer::getSteps() const {
-    return {getLayer()->getParameters().at("step_h"), getLayer()->getParameters().at("step_w")};
-}
-Builder::PriorBoxClusteredLayer& Builder::PriorBoxClusteredLayer::setSteps(const std::vector<float> steps) {
-    if (steps.size() != 2)
-        THROW_IE_EXCEPTION << "PriorBoxClusteredLayer supports sizes only for height and width dimensions!";
-    getLayer()->getParameters()["step_h"] = steps[0];
-    getLayer()->getParameters()["step_w"] = steps[1];
-    return *this;
-}
-
-const std::vector<float> Builder::PriorBoxClusteredLayer::getImgSizes() const {
-    return {getLayer()->getParameters().at("img_h"), getLayer()->getParameters().at("img_w")};
-}
-Builder::PriorBoxClusteredLayer& Builder::PriorBoxClusteredLayer::setImgSizes(const std::vector<float> sizes) {
-    if (sizes.size() != 2)
-        THROW_IE_EXCEPTION << "PriorBoxClusteredLayer allows specifying only the height and width dimensions of an input image!";
-    getLayer()->getParameters()["img_h"] = sizes[0];
-    getLayer()->getParameters()["img_w"] = sizes[1];
-    return *this;
-}
-
-float Builder::PriorBoxClusteredLayer::getStep() const {
-    return getLayer()->getParameters().at("step");
-}
-Builder::PriorBoxClusteredLayer& Builder::PriorBoxClusteredLayer::setStep(float step) {
-    getLayer()->getParameters()["step"] = step;
-    return *this;
-}
-
-bool Builder::PriorBoxClusteredLayer::getClip() const {
-    return getLayer()->getParameters().at("clip");
-}
-Builder::PriorBoxClusteredLayer& Builder::PriorBoxClusteredLayer::setClip(bool flag) {
-    getLayer()->getParameters()["clip"] = flag;
-    return *this;
-}
-
-bool Builder::PriorBoxClusteredLayer::getFlip() const {
-    return getLayer()->getParameters().at("flip");
-}
-Builder::PriorBoxClusteredLayer& Builder::PriorBoxClusteredLayer::setFlip(bool flag) {
-    getLayer()->getParameters()["flip"] = flag;
-    return *this;
-}
-
-REG_CONVERTER_FOR(PriorBoxClustered, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["flip"] = cnnLayer->GetParamAsBool("flip", false);
-    layer.getParameters()["clip"] = cnnLayer->GetParamAsBool("clip", false);
-    layer.getParameters()["step"] = cnnLayer->GetParamAsFloat("step");
-    layer.getParameters()["img_h"] = cnnLayer->GetParamAsFloat("img_h", 0);
-    layer.getParameters()["img_w"] = cnnLayer->GetParamAsFloat("img_w", 0);
-    layer.getParameters()["step_h"] = cnnLayer->GetParamAsFloat("step_h", 0);
-    layer.getParameters()["step_w"] = cnnLayer->GetParamAsFloat("step_w", 0);
-    layer.getParameters()["height"] = cnnLayer->GetParamAsFloat("height", 0);
-    layer.getParameters()["width"] = cnnLayer->GetParamAsFloat("width", 0);
-    layer.getParameters()["offset"] = cnnLayer->GetParamAsFloat("offset", 0);
-    layer.getParameters()["variance"] = cnnLayer->GetParamAsFloats("variance", {});
-});
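
Usage sketch for the two checked setters above; each requires exactly two values, height first and width second (values illustrative):

    #include <builders/ie_prior_box_clustered_layer.hpp>

    using namespace InferenceEngine;

    void configurePriorBoxClustered(Builder::PriorBoxClusteredLayer& pbc) {
        pbc.setSteps({16.0f, 16.0f});       // {step_h, step_w}
        pbc.setImgSizes({300.0f, 300.0f});  // {img_h, img_w}
        // A vector of any other length throws.
    }
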
diff --git a/inference-engine/src/inference_engine/builders/ie_prior_box_layer.cpp b/inference-engine/src/inference_engine/builders/ie_prior_box_layer.cpp
deleted file mode 100644 (file)
index 5a9eef6..0000000
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_prior_box_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::PriorBoxLayer::PriorBoxLayer(const std::string& name): LayerDecorator("PriorBox", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(2);
-    setScaleAllSizes(true);
-}
-
-Builder::PriorBoxLayer::PriorBoxLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("PriorBox");
-}
-
-Builder::PriorBoxLayer::PriorBoxLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("PriorBox");
-}
-
-Builder::PriorBoxLayer& Builder::PriorBoxLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const std::vector<Port>& Builder::PriorBoxLayer::getInputPorts() const {
-    return getLayer()->getInputPorts();
-}
-
-Builder::PriorBoxLayer& Builder::PriorBoxLayer::setInputPorts(const std::vector<Port> &ports) {
-    if (ports.size() != 2)
-        THROW_IE_EXCEPTION << "Incorrect number of inputs for PriorBox layer.";
-    getLayer()->getInputPorts() = ports;
-    return *this;
-}
-
-const Port& Builder::PriorBoxLayer::getOutputPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::PriorBoxLayer& Builder::PriorBoxLayer::setOutputPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    return *this;
-}
-
-float Builder::PriorBoxLayer::getVariance() const {
-    return getLayer()->getParameters().at("variance");
-}
-Builder::PriorBoxLayer& Builder::PriorBoxLayer::setVariance(float variance) {
-    getLayer()->getParameters()["variance"] = variance;
-    return *this;
-}
-
-float Builder::PriorBoxLayer::getOffset() const {
-    return getLayer()->getParameters().at("offset");
-}
-Builder::PriorBoxLayer& Builder::PriorBoxLayer::setOffset(float offset) {
-    getLayer()->getParameters()["offset"] = offset;
-    return *this;
-}
-
-float Builder::PriorBoxLayer::getStep() const {
-    return getLayer()->getParameters().at("step");
-}
-Builder::PriorBoxLayer& Builder::PriorBoxLayer::setStep(float step) {
-    getLayer()->getParameters()["step"] = step;
-    return *this;
-}
-
-size_t Builder::PriorBoxLayer::getMinSize() const {
-    return getLayer()->getParameters().at("min_size");
-}
-Builder::PriorBoxLayer& Builder::PriorBoxLayer::setMinSize(size_t minSize) {
-    getLayer()->getParameters()["min_size"] = minSize;
-    return *this;
-}
-size_t Builder::PriorBoxLayer::getMaxSize() const {
-    return getLayer()->getParameters().at("max_size");
-}
-Builder::PriorBoxLayer& Builder::PriorBoxLayer::setMaxSize(size_t maxSize) {
-    getLayer()->getParameters()["max_size"] = maxSize;
-    return *this;
-}
-
-bool Builder::PriorBoxLayer::getScaleAllSizes() const {
-    return getLayer()->getParameters().at("scale_all_sizes");
-}
-Builder::PriorBoxLayer& Builder::PriorBoxLayer::setScaleAllSizes(bool flag) {
-    getLayer()->getParameters()["scale_all_sizes"] = flag;
-    return *this;
-}
-
-bool Builder::PriorBoxLayer::getClip() const {
-    return getLayer()->getParameters().at("clip");
-}
-Builder::PriorBoxLayer& Builder::PriorBoxLayer::setClip(bool flag) {
-    getLayer()->getParameters()["clip"] = flag;
-    return *this;
-}
-
-bool Builder::PriorBoxLayer::getFlip() const {
-    return getLayer()->getParameters().at("flip");
-}
-Builder::PriorBoxLayer& Builder::PriorBoxLayer::setFlip(bool flag) {
-    getLayer()->getParameters()["flip"] = flag;
-    return *this;
-}
-
-const std::vector<size_t> Builder::PriorBoxLayer::getAspectRatio() const {
-    return getLayer()->getParameters().at("aspect_ratio");
-}
-Builder::PriorBoxLayer& Builder::PriorBoxLayer::setAspectRatio(const std::vector<size_t>& aspectRatio) {
-    getLayer()->getParameters()["aspect_ratio"] = aspectRatio;
-    return *this;
-}
-
-REG_CONVERTER_FOR(PriorBox, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["flip"] = cnnLayer->GetParamAsBool("flip", false);
-    layer.getParameters()["clip"] = cnnLayer->GetParamAsBool("clip", false);
-    layer.getParameters()["scale_all_sizes"] = cnnLayer->GetParamAsBool("scale_all_sizes", true);
-    layer.getParameters()["step"] = cnnLayer->GetParamAsFloat("step", 0);
-    layer.getParameters()["offset"] = cnnLayer->GetParamAsFloat("offset");
-    layer.getParameters()["variance"] = cnnLayer->GetParamAsFloats("variance", {});
-    layer.getParameters()["aspect_ratio"] = cnnLayer->GetParamAsFloats("aspect_ratio", {});
-    layer.getParameters()["min_size"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("min_size", 0));
-    layer.getParameters()["max_size"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("max_size", 0));
-});
diff --git a/inference-engine/src/inference_engine/builders/ie_proposal_layer.cpp b/inference-engine/src/inference_engine/builders/ie_proposal_layer.cpp
deleted file mode 100644 (file)
index 8f97c10..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_proposal_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::ProposalLayer::ProposalLayer(const std::string& name): LayerDecorator("Proposal", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(3);
-}
-
-Builder::ProposalLayer::ProposalLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("Proposal");
-}
-
-Builder::ProposalLayer::ProposalLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("Proposal");
-}
-
-Builder::ProposalLayer& Builder::ProposalLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const std::vector<Port>& Builder::ProposalLayer::getInputPorts() const {
-    return getLayer()->getInputPorts();
-}
-
-Builder::ProposalLayer& Builder::ProposalLayer::setInputPorts(const std::vector<Port> &ports) {
-    if (ports.size() != 3)
-        THROW_IE_EXCEPTION << "Incorrect number of inputs for Proposal layer.";
-    getLayer()->getInputPorts() = ports;
-    return *this;
-}
-
-const Port& Builder::ProposalLayer::getOutputPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::ProposalLayer& Builder::ProposalLayer::setOutputPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    return *this;
-}
-
-size_t Builder::ProposalLayer::getPostNMSTopN() const {
-    return getLayer()->getParameters().at("post_nms_topn");
-}
-Builder::ProposalLayer& Builder::ProposalLayer::setPostNMSTopN(size_t topN) {
-    getLayer()->getParameters()["post_nms_topn"] = topN;
-    return *this;
-}
-size_t Builder::ProposalLayer::getPreNMSTopN() const {
-    return getLayer()->getParameters().at("pre_nms_topn");
-}
-Builder::ProposalLayer& Builder::ProposalLayer::setPreNMSTopN(size_t topN) {
-    getLayer()->getParameters()["pre_nms_topn"] = topN;
-    return *this;
-}
-float Builder::ProposalLayer::getNMSThresh() const {
-    return getLayer()->getParameters().at("nms_thresh");
-}
-Builder::ProposalLayer& Builder::ProposalLayer::setNMSThresh(float thresh) {
-    getLayer()->getParameters()["nms_thresh"] = thresh;
-    return *this;
-}
-size_t Builder::ProposalLayer::getBaseSize() const {
-    return getLayer()->getParameters().at("base_size");
-}
-Builder::ProposalLayer& Builder::ProposalLayer::setBaseSize(size_t baseSize) {
-    getLayer()->getParameters()["base_size"] = baseSize;
-    return *this;
-}
-size_t Builder::ProposalLayer::getMinSize() const {
-    return getLayer()->getParameters().at("min_size");
-}
-Builder::ProposalLayer& Builder::ProposalLayer::setMinSize(size_t minSize) {
-    getLayer()->getParameters()["min_size"] = minSize;
-    return *this;
-}
-size_t Builder::ProposalLayer::getFeatStride() const {
-    return getLayer()->getParameters().at("feat_stride");
-}
-Builder::ProposalLayer& Builder::ProposalLayer::setFeatStride(size_t featStride) {
-    getLayer()->getParameters()["feat_stride"] = featStride;
-    return *this;
-}
-const std::vector<float> Builder::ProposalLayer::getScale() const {
-    return getLayer()->getParameters().at("scale");
-}
-Builder::ProposalLayer& Builder::ProposalLayer::setScale(const std::vector<float>& scales) {
-    getLayer()->getParameters()["scale"] = scales;
-    return *this;
-}
-const std::vector<float> Builder::ProposalLayer::getRatio() const {
-    return getLayer()->getParameters().at("ratio");
-}
-Builder::ProposalLayer& Builder::ProposalLayer::setRatio(const std::vector<float>& ratios) {
-    getLayer()->getParameters()["ratio"] = ratios;
-    return *this;
-}
-
-REG_CONVERTER_FOR(Proposal, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["post_nms_topn"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("post_nms_topn", 0));
-    layer.getParameters()["pre_nms_topn"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("pre_nms_topn", 0));
-    layer.getParameters()["nms_thresh"] = cnnLayer->GetParamAsFloat("nms_thresh", 0);
-    layer.getParameters()["base_size"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("base_size", 0));
-    layer.getParameters()["min_size"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("min_size", 0));
-    layer.getParameters()["feat_stride"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("feat_stride", 0));
-    layer.getParameters()["scale"] = cnnLayer->GetParamAsFloats("scale");
-    layer.getParameters()["ratio"] = cnnLayer->GetParamAsFloats("ratio");
-});
\ No newline at end of file
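
A sketch of a typical Faster R-CNN style configuration through the Proposal builder deleted above (all values illustrative, not from the source):

    #include <builders/ie_proposal_layer.hpp>

    using namespace InferenceEngine;

    void configureProposal(Builder::ProposalLayer& p) {
        p.setPreNMSTopN(6000)   // candidates kept before NMS
         .setPostNMSTopN(300)   // proposals kept after NMS
         .setNMSThresh(0.7f)
         .setBaseSize(16)
         .setMinSize(16)
         .setFeatStride(16)
         .setScale({8.0f, 16.0f, 32.0f})
         .setRatio({0.5f, 1.0f, 2.0f});
    }
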
diff --git a/inference-engine/src/inference_engine/builders/ie_psroi_pooling_layer.cpp b/inference-engine/src/inference_engine/builders/ie_psroi_pooling_layer.cpp
deleted file mode 100644 (file)
index d3b30ad..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_psroi_pooling_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::PSROIPoolingLayer::PSROIPoolingLayer(const std::string& name): LayerDecorator("PSROIPooling", name) {
-    getLayer()->getOutputPorts().resize(1);
-}
-
-Builder::PSROIPoolingLayer::PSROIPoolingLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("PSROIPooling");
-}
-
-Builder::PSROIPoolingLayer::PSROIPoolingLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("PSROIPooling");
-}
-
-Builder::PSROIPoolingLayer& Builder::PSROIPoolingLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-const std::vector<Port>& Builder::PSROIPoolingLayer::getInputPorts() const {
-    return getLayer()->getInputPorts();
-}
-Builder::PSROIPoolingLayer& Builder::PSROIPoolingLayer::setInputPorts(const std::vector<Port>& ports) {
-    if (ports.size() != 2)
-        THROW_IE_EXCEPTION << "PSROIPoolingLayer should have 2 inputs!";
-    getLayer()->getInputPorts() = ports;
-    return *this;
-}
-const Port& Builder::PSROIPoolingLayer::getOutputPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-Builder::PSROIPoolingLayer& Builder::PSROIPoolingLayer::setOutputPort(const Port& port) {
-    getLayer()->getOutputPorts()[0] = port;
-    return *this;
-}
-float Builder::PSROIPoolingLayer::getSpatialScale() const {
-    return getLayer()->getParameters().at("spatial_scale");
-}
-Builder::PSROIPoolingLayer& Builder::PSROIPoolingLayer::setSpatialScale(float spatialScale) {
-    getLayer()->getParameters()["spatial_scale"] = spatialScale;
-    return *this;
-}
-size_t Builder::PSROIPoolingLayer::getOutputDim() const {
-    return getLayer()->getParameters().at("output_dim");
-}
-Builder::PSROIPoolingLayer& Builder::PSROIPoolingLayer::setOutputDim(size_t outDim) {
-    getLayer()->getParameters()["output_dim"] = outDim;
-    return *this;
-}
-size_t Builder::PSROIPoolingLayer::getGroupSize() const {
-    return getLayer()->getParameters().at("group_size");
-}
-Builder::PSROIPoolingLayer& Builder::PSROIPoolingLayer::setGroupSize(size_t size) {
-    getLayer()->getParameters()["group_size"] = size;
-    return *this;
-}
-
-REG_CONVERTER_FOR(PSROIPooling, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["group_size"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("group_size", 0));
-    layer.getParameters()["output_dim"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("output_dim", 0));
-    layer.getParameters()["spatial_scale"] = cnnLayer->GetParamAsFloat("spatial_scale", 0);
-});
diff --git a/inference-engine/src/inference_engine/builders/ie_region_yolo_layer.cpp b/inference-engine/src/inference_engine/builders/ie_region_yolo_layer.cpp
deleted file mode 100644 (file)
index 64eba5f..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_region_yolo_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::RegionYoloLayer::RegionYoloLayer(const std::string& name): LayerDecorator("RegionYolo", name) {
-    getLayer()->getInputPorts().resize(1);
-    getLayer()->getOutputPorts().resize(1);
-}
-
-Builder::RegionYoloLayer::RegionYoloLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("RegionYolo");
-}
-
-Builder::RegionYoloLayer::RegionYoloLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("RegionYolo");
-}
-
-Builder::RegionYoloLayer& Builder::RegionYoloLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-const Port& Builder::RegionYoloLayer::getInputPort() const {
-    return getLayer()->getInputPorts()[0];
-}
-Builder::RegionYoloLayer& Builder::RegionYoloLayer::setInputPort(const Port& port) {
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-const Port& Builder::RegionYoloLayer::getOutputPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-Builder::RegionYoloLayer& Builder::RegionYoloLayer::setOutputPort(const Port& port) {
-    getLayer()->getOutputPorts()[0] = port;
-    return *this;
-}
-
-int Builder::RegionYoloLayer::getCoords() const {
-    return getLayer()->getParameters().at("coords");
-}
-Builder::RegionYoloLayer& Builder::RegionYoloLayer::setCoords(int coords) {
-    getLayer()->getParameters()["coords"] = coords;
-    return *this;
-}
-int Builder::RegionYoloLayer::getClasses() const {
-    return getLayer()->getParameters().at("classes");
-}
-Builder::RegionYoloLayer& Builder::RegionYoloLayer::setClasses(int classes) {
-    getLayer()->getParameters()["classes"] = classes;
-    return *this;
-}
-int Builder::RegionYoloLayer::getNum() const {
-    return getLayer()->getParameters().at("num");
-}
-Builder::RegionYoloLayer& Builder::RegionYoloLayer::setNum(int num) {
-    getLayer()->getParameters()["num"] = num;
-    return *this;
-}
-bool Builder::RegionYoloLayer::getDoSoftMax() const {
-    return getLayer()->getParameters().at("do_softmax");
-}
-Builder::RegionYoloLayer& Builder::RegionYoloLayer::setDoSoftMax(bool flag) {
-    getLayer()->getParameters()["do_softmax"] = flag ? 1 : 0;
-    return *this;
-}
-float Builder::RegionYoloLayer::getAnchors() const {
-    return getLayer()->getParameters().at("anchors");
-}
-Builder::RegionYoloLayer& Builder::RegionYoloLayer::setAnchors(float anchors) {
-    getLayer()->getParameters()["anchors"] = anchors;
-    return *this;
-}
-int Builder::RegionYoloLayer::getMask() const {
-    return getLayer()->getParameters().at("mask");
-}
-Builder::RegionYoloLayer& Builder::RegionYoloLayer::setMask(int mask) {
-    getLayer()->getParameters()["mask"] = mask;
-    return *this;
-}
-size_t Builder::RegionYoloLayer::getAxis() const {
-    return getLayer()->getParameters().at("axis");
-}
-Builder::RegionYoloLayer& Builder::RegionYoloLayer::setAxis(size_t axis) {
-    getLayer()->getParameters()["axis"] = axis;
-    return *this;
-}
-size_t Builder::RegionYoloLayer::getEndAxis() const {
-    return getLayer()->getParameters().at("end_axis");
-}
-Builder::RegionYoloLayer& Builder::RegionYoloLayer::setEndAxis(size_t axis) {
-    getLayer()->getParameters()["end_axis"] = axis;
-    return *this;
-}
-
-REG_CONVERTER_FOR(RegionYolo, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["end_axis"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("end_axis", 0));
-    layer.getParameters()["axis"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("axis", 0));
-    layer.getParameters()["num"] = cnnLayer->GetParamAsInt("num", 0);
-    layer.getParameters()["mask"] = cnnLayer->GetParamAsInt("mask", 0);
-    layer.getParameters()["coords"] = cnnLayer->GetParamAsInt("coords", 0);
-    layer.getParameters()["classes"] = cnnLayer->GetParamAsInt("classes", 0);
-    layer.getParameters()["anchors"] = cnnLayer->GetParamAsFloat("anchors", 0);
-    layer.getParameters()["do_softmax"] = cnnLayer->GetParamAsBool("do_softmax", false);
-});
\ No newline at end of file
diff --git a/inference-engine/src/inference_engine/builders/ie_relu6_layer.cpp b/inference-engine/src/inference_engine/builders/ie_relu6_layer.cpp
deleted file mode 100644 (file)
index 82c56bd..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_relu6_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::ReLU6Layer::ReLU6Layer(const std::string& name): LayerDecorator("ReLU6", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(1);
-    setN(6);
-}
-
-Builder::ReLU6Layer::ReLU6Layer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("ReLU6");
-}
-
-Builder::ReLU6Layer::ReLU6Layer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("ReLU6");
-}
-
-Builder::ReLU6Layer& Builder::ReLU6Layer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::ReLU6Layer::getPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::ReLU6Layer& Builder::ReLU6Layer::setPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-
-float Builder::ReLU6Layer::getN() const {
-    return getLayer()->getParameters().at("n");
-}
-
-Builder::ReLU6Layer& Builder::ReLU6Layer::setN(float n) {
-    getLayer()->getParameters()["n"] = n;
-    return *this;
-}
-
-REG_VALIDATOR_FOR(ReLU6, [] (const InferenceEngine::Builder::Layer::CPtr& input_layer, bool partial) {
-    if (!input_layer->getInputPorts().empty() &&
-        !input_layer->getOutputPorts().empty() &&
-        !input_layer->getInputPorts()[0].shape().empty() &&
-        !input_layer->getOutputPorts()[0].shape().empty() &&
-        input_layer->getInputPorts()[0].shape() != input_layer->getOutputPorts()[0].shape()) {
-        THROW_IE_EXCEPTION << "Input and output port shapes should be equal";
-    }
-});
-
-REG_CONVERTER_FOR(ReLU6, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["n"] = cnnLayer->GetParamAsFloat("n", 0);
-});
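
For reference, ReLU6 clamps activations to the range [0, n], with n = 6 set by the constructor above; a scalar sketch (function name illustrative):

    #include <algorithm>

    // y = min(max(x, 0), n); the builder's "n" parameter defaults to 6.
    float relu6Ref(float x, float n = 6.0f) {
        return std::min(std::max(x, 0.0f), n);
    }
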
diff --git a/inference-engine/src/inference_engine/builders/ie_relu_layer.cpp b/inference-engine/src/inference_engine/builders/ie_relu_layer.cpp
deleted file mode 100644 (file)
index e2786a5..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_relu_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::ReLULayer::ReLULayer(const std::string& name): LayerDecorator("ReLU", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(1);
-    setNegativeSlope(0);
-}
-
-Builder::ReLULayer::ReLULayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("ReLU");
-}
-
-Builder::ReLULayer::ReLULayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("ReLU");
-}
-
-Builder::ReLULayer& Builder::ReLULayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::ReLULayer::getPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::ReLULayer& Builder::ReLULayer::setPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-
-float Builder::ReLULayer::getNegativeSlope() const {
-    return getLayer()->getParameters().at("negative_slope");
-}
-
-Builder::ReLULayer& Builder::ReLULayer::setNegativeSlope(float negativeSlope) {
-    getLayer()->getParameters()["negative_slope"] = negativeSlope;
-    return *this;
-}
-
-REG_VALIDATOR_FOR(ReLU, [] (const InferenceEngine::Builder::Layer::CPtr& input_layer, bool partial) {
-    Builder::ReLULayer layer(input_layer);
-    if (!input_layer->getInputPorts().empty() &&
-        !input_layer->getOutputPorts().empty() &&
-        !input_layer->getInputPorts()[0].shape().empty() &&
-        !input_layer->getOutputPorts()[0].shape().empty() &&
-        input_layer->getInputPorts()[0].shape() != input_layer->getOutputPorts()[0].shape()) {
-        THROW_IE_EXCEPTION << "Input and output port shapes should be equal";
-    }
-});
-
-REG_CONVERTER_FOR(ReLU, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["negative_slope"] = cnnLayer->GetParamAsFloat("negative_slope", 0);
-});
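
The negative_slope parameter above generalizes ReLU to leaky ReLU; a scalar sketch (negative_slope = 0, the constructor's default, gives plain ReLU):

    // y = x for x >= 0, otherwise y = negative_slope * x.
    float reluRef(float x, float negativeSlope) {
        return x >= 0.0f ? x : negativeSlope * x;
    }
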
diff --git a/inference-engine/src/inference_engine/builders/ie_reorg_yolo_layer.cpp b/inference-engine/src/inference_engine/builders/ie_reorg_yolo_layer.cpp
deleted file mode 100644 (file)
index feb42b7..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_reorg_yolo_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::ReorgYoloLayer::ReorgYoloLayer(const std::string& name): LayerDecorator("ReorgYolo", name) {
-    getLayer()->getInputPorts().resize(1);
-    getLayer()->getOutputPorts().resize(1);
-}
-
-Builder::ReorgYoloLayer::ReorgYoloLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("ReorgYolo");
-}
-
-Builder::ReorgYoloLayer::ReorgYoloLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("ReorgYolo");
-}
-
-Builder::ReorgYoloLayer& Builder::ReorgYoloLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-const Port& Builder::ReorgYoloLayer::getInputPort() const {
-    return getLayer()->getInputPorts()[0];
-}
-Builder::ReorgYoloLayer& Builder::ReorgYoloLayer::setInputPort(const Port& port) {
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-const Port& Builder::ReorgYoloLayer::getOutputPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-Builder::ReorgYoloLayer& Builder::ReorgYoloLayer::setOutputPort(const Port& port) {
-    getLayer()->getOutputPorts()[0] = port;
-    return *this;
-}
-int Builder::ReorgYoloLayer::getStride() const {
-    return getLayer()->getParameters().at("stride");
-}
-Builder::ReorgYoloLayer& Builder::ReorgYoloLayer::setStride(int stride) {
-    getLayer()->getParameters()["stride"] = stride;
-    return *this;
-}
-
-REG_CONVERTER_FOR(ReorgYolo, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["stride"] = cnnLayer->GetParamAsInt("stride", 0);
-});
\ No newline at end of file
diff --git a/inference-engine/src/inference_engine/builders/ie_resample_layer.cpp b/inference-engine/src/inference_engine/builders/ie_resample_layer.cpp
deleted file mode 100644 (file)
index 31a5ec7..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_resample_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::ResampleLayer::ResampleLayer(const std::string& name): LayerDecorator("Resample", name) {
-    getLayer()->getInputPorts().resize(1);
-    getLayer()->getOutputPorts().resize(1);
-}
-
-Builder::ResampleLayer::ResampleLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("Resample");
-}
-
-Builder::ResampleLayer::ResampleLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("Resample");
-}
-
-Builder::ResampleLayer& Builder::ResampleLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-const Port& Builder::ResampleLayer::getInputPort() const {
-    return getLayer()->getInputPorts()[0];
-}
-Builder::ResampleLayer& Builder::ResampleLayer::setInputPort(const Port& port) {
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-const Port& Builder::ResampleLayer::getOutputPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-Builder::ResampleLayer& Builder::ResampleLayer::setOutputPort(const Port& port) {
-    getLayer()->getOutputPorts()[0] = port;
-    return *this;
-}
-
-const std::string &Builder::ResampleLayer::getResampleType() const {
-    return getLayer()->getParameters().at("type");
-}
-
-Builder::ResampleLayer &Builder::ResampleLayer::setResampleType(const std::string &type) {
-    getLayer()->getParameters()["type"] = type;
-    return *this;
-}
-
-bool Builder::ResampleLayer::getAntialias() const {
-    return getLayer()->getParameters().at("antialias");
-}
-
-Builder::ResampleLayer &Builder::ResampleLayer::setAntialias(bool antialias) {
-    getLayer()->getParameters()["antialias"] = antialias;
-    return *this;
-}
-
-float Builder::ResampleLayer::getFactor() const {
-    return getLayer()->getParameters().at("factor");
-}
-
-Builder::ResampleLayer &Builder::ResampleLayer::setFactor(float factor) {
-    getLayer()->getParameters()["factor"] = factor;
-    return *this;
-}
-
-size_t Builder::ResampleLayer::getWidth() const {
-    return getLayer()->getParameters().at("width");
-}
-
-Builder::ResampleLayer &Builder::ResampleLayer::setWidth(size_t width) {
-    getLayer()->getParameters()["width"] = width;
-    return *this;
-}
-
-size_t Builder::ResampleLayer::getHeight() const {
-    return getLayer()->getParameters().at("height");
-}
-
-Builder::ResampleLayer &Builder::ResampleLayer::setHeight(size_t height) {
-    getLayer()->getParameters()["height"] = height;
-    return *this;
-}
-
-REG_CONVERTER_FOR(Resample, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["height"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("height", 0));
-    layer.getParameters()["width"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("width", 0));
-    layer.getParameters()["factor"] = cnnLayer->GetParamAsFloat("factor", 0);
-    layer.getParameters()["antialias"] = cnnLayer->GetParamAsBool("antialias", false);
-    layer.getParameters()["type"] = cnnLayer->GetParamAsString("type");
-});
\ No newline at end of file
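
A sketch of the two ways the Resample builder deleted above expresses the output size, either a uniform factor or explicit width/height (values and the type string are illustrative; precedence when both are set is plugin-defined and assumed here):

    #include <builders/ie_resample_layer.hpp>

    using namespace InferenceEngine;

    void configureResample(Builder::ResampleLayer& r) {
        r.setResampleType("caffe.ResampleParameter.NEAREST");  // illustrative "type" value
        r.setFactor(2.0f);               // scale both spatial dims by 2x, or...
        r.setWidth(608).setHeight(608);  // ...pin the output size explicitly
        r.setAntialias(false);
    }
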
diff --git a/inference-engine/src/inference_engine/builders/ie_reshape_layer.cpp b/inference-engine/src/inference_engine/builders/ie_reshape_layer.cpp
deleted file mode 100644 (file)
index 1a66a5b..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_reshape_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::ReshapeLayer::ReshapeLayer(const std::string& name): LayerDecorator("Reshape", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(1);
-}
-
-Builder::ReshapeLayer::ReshapeLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("Reshape");
-}
-
-Builder::ReshapeLayer::ReshapeLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("Reshape");
-}
-
-Builder::ReshapeLayer& Builder::ReshapeLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::ReshapeLayer::getInputPort() const {
-    return getLayer()->getInputPorts()[0];
-}
-
-Builder::ReshapeLayer& Builder::ReshapeLayer::setInputPort(const Port &port) {
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-
-const Port& Builder::ReshapeLayer::getOutputPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::ReshapeLayer& Builder::ReshapeLayer::setOutputPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    return *this;
-}
-
-const std::vector<int> Builder::ReshapeLayer::getDims() const {
-    return getLayer()->getParameters().at("dim");
-}
-
-Builder::ReshapeLayer& Builder::ReshapeLayer::setDims(const std::vector<int>& dims) {
-    getLayer()->getParameters()["dim"] = dims;
-    return *this;
-}
-
-REG_CONVERTER_FOR(Flatten, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["axis"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("axis", 0));
-    layer.getParameters()["dim"] = cnnLayer->GetParamAsInts("dim", {});
-});
-REG_CONVERTER_FOR(Reshape, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["axis"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("axis", 0));
-    layer.getParameters()["dim"] = cnnLayer->GetParamAsInts("dim", {});
-});
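
A minimal usage sketch of the Reshape builder deleted above. The "dim" values follow the usual IR convention (stated here as an assumption, since this file does not restate it): -1 infers one dimension from the total element count and 0 copies the matching input dimension:

    #include <builders/ie_reshape_layer.hpp>

    using namespace InferenceEngine;

    void buildFlatten() {
        Builder::ReshapeLayer flatten("flatten1");  // hypothetical layer name
        flatten.setDims({0, -1});  // keep batch, collapse the remaining dims
    }
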
diff --git a/inference-engine/src/inference_engine/builders/ie_rnn_sequence_layer.cpp b/inference-engine/src/inference_engine/builders/ie_rnn_sequence_layer.cpp
deleted file mode 100644 (file)
index 95cd10c..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_rnn_sequence_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <vector>
-#include <string>
-#include <sstream>
-
-using namespace InferenceEngine;
-
-Builder::RNNSequenceLayer::RNNSequenceLayer(const std::string& name): LayerDecorator("RNNSequence", name) {
-    getLayer()->getOutputPorts().resize(2);
-    getLayer()->getInputPorts().resize(5);
-    getLayer()->getInputPorts()[1].setParameter("type", "weights");
-    getLayer()->getInputPorts()[2].setParameter("type", "biases");
-    getLayer()->getInputPorts()[3].setParameter("type", "optional");
-}
-
-Builder::RNNSequenceLayer::RNNSequenceLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("RNNSequence");
-}
-
-Builder::RNNSequenceLayer::RNNSequenceLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("RNNSequence");
-}
-
-Builder::RNNSequenceLayer& Builder::RNNSequenceLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const std::vector<Port>& Builder::RNNSequenceLayer::getInputPorts() const {
-    return getLayer()->getInputPorts();
-}
-
-Builder::RNNSequenceLayer& Builder::RNNSequenceLayer::setInputPorts(const std::vector<Port>& ports) {
-    getLayer()->getInputPorts() = ports;
-    return *this;
-}
-
-const std::vector<Port>& Builder::RNNSequenceLayer::getOutputPorts() const {
-    return getLayer()->getOutputPorts();
-}
-
-Builder::RNNSequenceLayer& Builder::RNNSequenceLayer::setOutputPorts(const std::vector<Port>& ports) {
-    getLayer()->getOutputPorts() = ports;
-    return *this;
-}
-int Builder::RNNSequenceLayer::getHiddenSize() const {
-    return getLayer()->getParameters().at("hidden_size");
-}
-Builder::RNNSequenceLayer& Builder::RNNSequenceLayer::setHiddenSize(int size) {
-    getLayer()->getParameters()["hidden_size"] = size;
-    return *this;
-}
-bool Builder::RNNSequenceLayer::getSequenceDim() const {
-    return getLayer()->getParameters().at("sequence_dim");
-}
-Builder::RNNSequenceLayer& Builder::RNNSequenceLayer::setSequenceDim(bool flag) {
-    getLayer()->getParameters()["sequence_dim"] = flag;
-    return *this;
-}
-const std::vector<std::string>& Builder::RNNSequenceLayer::getActivations() const {
-    return getLayer()->getParameters().at("activations");
-}
-Builder::RNNSequenceLayer& Builder::RNNSequenceLayer::setActivations(const std::vector<std::string>& activations) {
-    getLayer()->getParameters()["activations"] = activations;
-    return *this;
-}
-const std::vector<float>& Builder::RNNSequenceLayer::getActivationsAlpha() const {
-    return getLayer()->getParameters().at("activations_alpha");
-}
-Builder::RNNSequenceLayer& Builder::RNNSequenceLayer::setActivationsAlpha(const std::vector<float>& activations) {
-    getLayer()->getParameters()["activations_alpha"] = activations;
-    return *this;
-}
-const std::vector<float>& Builder::RNNSequenceLayer::getActivationsBeta() const {
-    return getLayer()->getParameters().at("activations_beta");
-}
-Builder::RNNSequenceLayer& Builder::RNNSequenceLayer::setActivationsBeta(const std::vector<float>& activations) {
-    getLayer()->getParameters()["activations_beta"] = activations;
-    return *this;
-}
-REG_CONVERTER_FOR(RNNSequence, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["hidden_size"] = cnnLayer->GetParamAsInt("hidden_size");
-    layer.getParameters()["sequence_dim"] = cnnLayer->GetParamAsBool("sequence_dim", true);
-    std::vector<std::string> activations;
-    std::istringstream stream(cnnLayer->GetParamAsString("activations"));
-    std::string str;
-    while (getline(stream, str, ',')) {
-        activations.push_back(str);
-    }
-    layer.getParameters()["activations"] = activations;
-    layer.getParameters()["activations_alpha"] = cnnLayer->GetParamAsFloats("activations_alpha");
-    layer.getParameters()["activations_beta"] = cnnLayer->GetParamAsFloats("activations_beta");
-});
-
-
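
The RNNSequence converter above splits the comma-separated "activations" attribute using std::getline over an std::istringstream. The same idiom in isolation (plain C++, no Inference Engine dependencies):

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    int main() {
        std::vector<std::string> activations;
        std::istringstream stream("sigmoid,tanh");
        std::string token;
        // getline with a ',' delimiter consumes the stream one field at a time
        while (std::getline(stream, token, ',')) {
            activations.push_back(token);
        }
        for (const auto& a : activations) std::cout << a << '\n';  // sigmoid, tanh
    }
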
diff --git a/inference-engine/src/inference_engine/builders/ie_roi_pooling_layer.cpp b/inference-engine/src/inference_engine/builders/ie_roi_pooling_layer.cpp
deleted file mode 100644 (file)
index 7ff10f4..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_roi_pooling_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::ROIPoolingLayer::ROIPoolingLayer(const std::string& name): LayerDecorator("ROIPooling", name) {
-    getLayer()->getOutputPorts().resize(1);
-    setPooled({0, 0});
-}
-
-Builder::ROIPoolingLayer::ROIPoolingLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("ROIPooling");
-}
-
-Builder::ROIPoolingLayer::ROIPoolingLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("ROIPooling");
-}
-
-Builder::ROIPoolingLayer& Builder::ROIPoolingLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-const std::vector<Port>& Builder::ROIPoolingLayer::getInputPorts() const {
-    return getLayer()->getInputPorts();
-}
-Builder::ROIPoolingLayer& Builder::ROIPoolingLayer::setInputPorts(const std::vector<Port>& ports) {
-    if (ports.size() != 2)
-        THROW_IE_EXCEPTION << "ROIPoolingLayer should have 2 inputs!";
-    getLayer()->getInputPorts() = ports;
-    return *this;
-}
-const Port& Builder::ROIPoolingLayer::getOutputPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-Builder::ROIPoolingLayer& Builder::ROIPoolingLayer::setOutputPort(const Port& port) {
-    getLayer()->getOutputPorts()[0] = port;
-    return *this;
-}
-float Builder::ROIPoolingLayer::getSpatialScale() const {
-    return getLayer()->getParameters().at("spatial_scale");
-}
-Builder::ROIPoolingLayer& Builder::ROIPoolingLayer::setSpatialScale(float spatialScale) {
-    getLayer()->getParameters()["spatial_scale"] = spatialScale;
-    return *this;
-}
-const std::vector<int> Builder::ROIPoolingLayer::getPooled() const {
-    return {getLayer()->getParameters().at("pooled_h"),
-            getLayer()->getParameters().at("pooled_w")};
-}
-Builder::ROIPoolingLayer& Builder::ROIPoolingLayer::setPooled(const std::vector<int>& pooled) {
-    if (pooled.size() != 2)
-        THROW_IE_EXCEPTION << "ROIPoolingLayer supports only pooled for height and width dimensions";
-    getLayer()->getParameters()["pooled_h"] = pooled[0];
-    getLayer()->getParameters()["pooled_w"] = pooled[1];
-    return *this;
-}
-
-REG_CONVERTER_FOR(ROIPooling, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["pooled_h"] = cnnLayer->GetParamAsInt("pooled_h", 0);
-    layer.getParameters()["pooled_w"] = cnnLayer->GetParamAsInt("pooled_w", 0);
-    layer.getParameters()["spatial_scale"] = cnnLayer->GetParamAsFloat("spatial_scale");
-});
\ No newline at end of file
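
ROIPoolingLayer stored its output size as a strict {pooled_h, pooled_w} pair and rejected any other arity. A minimal sketch of the deleted fluent API, assuming the pre-removal header; the spatial scale value is illustrative:

    #include <builders/ie_roi_pooling_layer.hpp>  // header deleted by this commit

    using namespace InferenceEngine;

    Builder::ROIPoolingLayer makeRoiPool() {
        Builder::ROIPoolingLayer roi("roi_pool");
        roi.setSpatialScale(0.0625f)  // e.g. 1/16 for a stride-16 feature map
           .setPooled({6, 6});        // exactly {pooled_h, pooled_w}; any other size throws
        return roi;
    }
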
diff --git a/inference-engine/src/inference_engine/builders/ie_scale_shift_layer.cpp b/inference-engine/src/inference_engine/builders/ie_scale_shift_layer.cpp
deleted file mode 100644 (file)
index d90e0b1..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_scale_shift_layer.hpp>
-
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::ScaleShiftLayer::ScaleShiftLayer(const std::string& name): LayerDecorator("ScaleShift", name) {
-    getLayer()->getInputPorts().resize(3);
-    getLayer()->getInputPorts()[1].setParameter("type", "weights");
-    getLayer()->getInputPorts()[2].setParameter("type", "biases");
-    getLayer()->getOutputPorts().resize(1);
-}
-
-Builder::ScaleShiftLayer::ScaleShiftLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("ScaleShift");
-}
-
-Builder::ScaleShiftLayer::ScaleShiftLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("ScaleShift");
-}
-
-Builder::ScaleShiftLayer& Builder::ScaleShiftLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::ScaleShiftLayer::getPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::ScaleShiftLayer& Builder::ScaleShiftLayer::setPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
\ No newline at end of file
diff --git a/inference-engine/src/inference_engine/builders/ie_sigmoid_layer.cpp b/inference-engine/src/inference_engine/builders/ie_sigmoid_layer.cpp
deleted file mode 100644 (file)
index daef989..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_sigmoid_layer.hpp>
-
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::SigmoidLayer::SigmoidLayer(const std::string& name): LayerDecorator("Sigmoid", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(1);
-}
-
-Builder::SigmoidLayer::SigmoidLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("Sigmoid");
-}
-
-Builder::SigmoidLayer::SigmoidLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("Sigmoid");
-}
-
-Builder::SigmoidLayer& Builder::SigmoidLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::SigmoidLayer::getPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::SigmoidLayer& Builder::SigmoidLayer::setPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
diff --git a/inference-engine/src/inference_engine/builders/ie_simpler_nms_layer.cpp b/inference-engine/src/inference_engine/builders/ie_simpler_nms_layer.cpp
deleted file mode 100644 (file)
index 966ff8a..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_simpler_nms_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::SimplerNMSLayer::SimplerNMSLayer(const std::string& name): LayerDecorator("SimplerNMS", name) {
-    getLayer()->getOutputPorts().resize(1);
-}
-
-Builder::SimplerNMSLayer::SimplerNMSLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("SimplerNMS");
-}
-
-Builder::SimplerNMSLayer::SimplerNMSLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("SimplerNMS");
-}
-
-Builder::SimplerNMSLayer& Builder::SimplerNMSLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-const std::vector<Port>& Builder::SimplerNMSLayer::getInputPorts() const {
-    return getLayer()->getInputPorts();
-}
-Builder::SimplerNMSLayer& Builder::SimplerNMSLayer::setInputPorts(const std::vector<Port>& ports) {
-    getLayer()->getInputPorts() = ports;
-    return *this;
-}
-const Port& Builder::SimplerNMSLayer::getOutputPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-Builder::SimplerNMSLayer& Builder::SimplerNMSLayer::setOutputPort(const Port& port) {
-    getLayer()->getOutputPorts()[0] = port;
-    return *this;
-}
-
-size_t Builder::SimplerNMSLayer::getPreNMSTopN() const {
-    return getLayer()->getParameters().at("pre_nms_topn");
-}
-Builder::SimplerNMSLayer& Builder::SimplerNMSLayer::setPreNMSTopN(size_t topN) {
-    getLayer()->getParameters()["pre_nms_topn"] = topN;
-    return *this;
-}
-size_t Builder::SimplerNMSLayer::getPostNMSTopN() const {
-    return getLayer()->getParameters().at("post_nms_topn");
-}
-Builder::SimplerNMSLayer& Builder::SimplerNMSLayer::setPostNMSTopN(size_t topN) {
-    getLayer()->getParameters()["post_nms_topn"] = topN;
-    return *this;
-}
-size_t Builder::SimplerNMSLayer::getFeatStride() const {
-    return getLayer()->getParameters().at("feat_stride");
-}
-Builder::SimplerNMSLayer& Builder::SimplerNMSLayer::setFeatStride(size_t featStride) {
-    getLayer()->getParameters()["feat_stride"] = featStride;
-    return *this;
-}
-size_t Builder::SimplerNMSLayer::getMinBoxSize() const {
-    return getLayer()->getParameters().at("min_bbox_size");
-}
-Builder::SimplerNMSLayer& Builder::SimplerNMSLayer::setMinBoxSize(size_t minSize) {
-    getLayer()->getParameters()["min_bbox_size"] = minSize;
-    return *this;
-}
-size_t Builder::SimplerNMSLayer::getScale() const {
-    return getLayer()->getParameters().at("scale");
-}
-Builder::SimplerNMSLayer& Builder::SimplerNMSLayer::setScale(size_t scale) {
-    getLayer()->getParameters()["scale"] = scale;
-    return *this;
-}
-
-float Builder::SimplerNMSLayer::getCLSThreshold() const {
-    return getLayer()->getParameters().at("cls_threshold");
-}
-Builder::SimplerNMSLayer& Builder::SimplerNMSLayer::setCLSThreshold(float threshold) {
-    getLayer()->getParameters()["cls_threshold"] = threshold;
-    return *this;
-}
-float Builder::SimplerNMSLayer::getIOUThreshold() const {
-    return getLayer()->getParameters().at("iou_threshold");
-}
-Builder::SimplerNMSLayer& Builder::SimplerNMSLayer::setIOUThreshold(float threshold) {
-    getLayer()->getParameters()["iou_threshold"] = threshold;
-    return *this;
-}
-
-REG_CONVERTER_FOR(SimplerNMS, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["iou_threshold"] = cnnLayer->GetParamAsFloat("iou_threshold");
-    layer.getParameters()["cls_threshold"] = cnnLayer->GetParamAsFloat("cls_threshold");
-    layer.getParameters()["scale"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("scale"));
-    layer.getParameters()["min_bbox_size"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("min_bbox_size"));
-    layer.getParameters()["feat_stride"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("feat_stride"));
-    layer.getParameters()["pre_nms_topn"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("pre_nms_topn"));
-    layer.getParameters()["post_nms_topn"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("post_nms_topn"));
-});
\ No newline at end of file
diff --git a/inference-engine/src/inference_engine/builders/ie_softmax_layer.cpp b/inference-engine/src/inference_engine/builders/ie_softmax_layer.cpp
deleted file mode 100644 (file)
index fa66c5e..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_softmax_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::SoftMaxLayer::SoftMaxLayer(const std::string& name): LayerDecorator("SoftMax", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(1);
-    setAxis(1);
-}
-
-Builder::SoftMaxLayer::SoftMaxLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("SoftMax");
-}
-
-Builder::SoftMaxLayer::SoftMaxLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("SoftMax");
-}
-
-Builder::SoftMaxLayer& Builder::SoftMaxLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::SoftMaxLayer::getPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::SoftMaxLayer& Builder::SoftMaxLayer::setPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-
-size_t Builder::SoftMaxLayer::getAxis() const {
-    return getLayer()->getParameters().at("axis");
-}
-
-Builder::SoftMaxLayer& Builder::SoftMaxLayer::setAxis(size_t axis) {
-    getLayer()->getParameters()["axis"] = axis;
-    return *this;
-}
-
-REG_CONVERTER_FOR(SoftMax, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["axis"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("axis", 1));
-});
\ No newline at end of file
diff --git a/inference-engine/src/inference_engine/builders/ie_tanh_layer.cpp b/inference-engine/src/inference_engine/builders/ie_tanh_layer.cpp
deleted file mode 100644 (file)
index e5c654f..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_tanh_layer.hpp>
-
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::TanHLayer::TanHLayer(const std::string& name): LayerDecorator("TanH", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(1);
-}
-
-Builder::TanHLayer::TanHLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("TanH");
-}
-
-Builder::TanHLayer::TanHLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("TanH");
-}
-
-Builder::TanHLayer& Builder::TanHLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::TanHLayer::getPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::TanHLayer& Builder::TanHLayer::setPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-
-REG_VALIDATOR_FOR(TanH, [] (const InferenceEngine::Builder::Layer::CPtr& input_layer, bool partial) {
-    if (!input_layer->getInputPorts().empty() &&
-        !input_layer->getOutputPorts().empty() &&
-        !input_layer->getInputPorts()[0].shape().empty() &&
-        !input_layer->getOutputPorts()[0].shape().empty() &&
-        input_layer->getInputPorts()[0].shape() != input_layer->getOutputPorts()[0].shape()) {
-        THROW_IE_EXCEPTION << "Input and output ports should be equal";
-    }
-});
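
REG_VALIDATOR_FOR registered a per-type callback that ran when a builder layer was validated, as the TanH shape check above shows. A sketch of a custom validator in the same style; the MyRelu type name is hypothetical:

    REG_VALIDATOR_FOR(MyRelu, [](const InferenceEngine::Builder::Layer::CPtr& layer, bool partial) {
        // Reject empty output shapes; 'partial' is true while the layer is still being assembled
        if (!partial && layer->getOutputPorts()[0].shape().empty())
            THROW_IE_EXCEPTION << layer->getType() << " layer " << layer->getName()
                               << " must have a non-empty output shape";
    });
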
diff --git a/inference-engine/src/inference_engine/builders/ie_tile_layer.cpp b/inference-engine/src/inference_engine/builders/ie_tile_layer.cpp
deleted file mode 100644 (file)
index 5fa82bf..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_tile_layer.hpp>
-#include <ie_cnn_layer_builder.h>
-
-#include <vector>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::TileLayer::TileLayer(const std::string& name): LayerDecorator("Tile", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getInputPorts().resize(1);
-}
-
-Builder::TileLayer::TileLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("Tile");
-}
-
-Builder::TileLayer::TileLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("Tile");
-}
-
-Builder::TileLayer& Builder::TileLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::TileLayer::getInputPort() const {
-    return getLayer()->getInputPorts()[0];
-}
-
-Builder::TileLayer& Builder::TileLayer::setInputPort(const Port &port) {
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-
-const Port& Builder::TileLayer::getOutputPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::TileLayer& Builder::TileLayer::setOutputPort(const Port &port) {
-    getLayer()->getOutputPorts()[0] = port;
-    return *this;
-}
-
-size_t Builder::TileLayer::getTiles() const {
-    return getLayer()->getParameters().at("tiles");
-}
-
-Builder::TileLayer& Builder::TileLayer::setTiles(size_t tiles) {
-    getLayer()->getParameters()["tiles"] = tiles;
-    return *this;
-}
-
-size_t Builder::TileLayer::getAxis() const {
-    return getLayer()->getParameters().at("axis");
-}
-
-Builder::TileLayer& Builder::TileLayer::setAxis(size_t axis) {
-    getLayer()->getParameters()["axis"] = axis;
-    return *this;
-}
-
-REG_CONVERTER_FOR(Tile, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["axis"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("axis"));
-    layer.getParameters()["tiles"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("tiles"));
-});
\ No newline at end of file
index f5af3b8..9b460bc 100644 (file)
@@ -20,6 +20,7 @@
 // #include <shape_infer/ie_reshaper.hpp>
 #include <string>
 
+#include <transformations/common_optimizations/common_optimizations.hpp>
 #include <transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.hpp>
 #include <transformations/convert_opset2_to_opset1/convert_opset2_to_opset1.hpp>
 #include <transformations/convert_opset1_to_legacy/convert_one_hot_to_one_hot_ie.hpp>
@@ -69,6 +70,7 @@ static std::shared_ptr<ngraph::Function> copyFunction(const std::shared_ptr<cons
     for (auto n : specialized_function->get_ops()) {
         goe_elimination.run_on_node(n);
     }
+    specialized_function->set_friendly_name(func->get_friendly_name());
     return specialized_function;
 }
 
@@ -125,7 +127,7 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const std::shared_ptr<Function>& nGra
         network.setInputInfo(info);
     };
 
-    // Add shape infer method for old operations which are not included to opset1 and opset2
+    // Add shape infer method for old operations which are not included in opset1, opset2, or opset3
     ::ngraph::op::GenericIE::addExtension(_ngraph_function, std::make_shared<ShapeInfer::BuiltInShapeInferHolder>());
 
     reshape();
@@ -189,14 +191,14 @@ void CNNNetworkNGraphImpl::getName(char* pName, size_t len) const noexcept {
     // Description buffer will preserve garbage if external pointer not initialized
     if (len < 1) return;
     memset(pName, 0, len);
-    DescriptionBuffer(pName, len) << _ngraph_function->get_name();
+    DescriptionBuffer(pName, len) << _ngraph_function->get_friendly_name();
 }
 
 const std::string& CNNNetworkNGraphImpl::getName() const noexcept {
     if (cnnNetwork) {
         return cnnNetwork->getName();
     }
-    return _ngraph_function->get_name();
+    return _ngraph_function->get_friendly_name();
 }
 
 InputInfo::Ptr CNNNetworkNGraphImpl::getInput(const std::string& inputName) const noexcept {
@@ -453,6 +455,7 @@ StatusCode CNNNetworkNGraphImpl::serialize(const std::string& xmlPath, const std
         // Disable shape inference (WA for generic operations)
         ::ngraph::op::GenericIE::DisableReshape noReshape(graph);
 
+        ::ngraph::pass::CommonOptimizations().run_on_function(graph);
         ::ngraph::pass::ConvertOpSet2ToOpSet1().run_on_function(graph);
         ::ngraph::pass::ConvertOpSet1ToLegacy().run_on_function(graph);
         network = InferenceEngine::details::convertFunctionToICNNNetwork(graph, *this);
@@ -515,6 +518,7 @@ void CNNNetworkNGraphImpl::convertToCNNNetworkImpl() {
     // Disable shape inference (WA for generic operations)
     ::ngraph::op::GenericIE::DisableReshape noReshape(graph);
 
+    ::ngraph::pass::CommonOptimizations().run_on_function(graph);
     ::ngraph::pass::ConvertOpSet2ToOpSet1().run_on_function(graph);
     ::ngraph::pass::ConvertOpSet1ToLegacy().run_on_function(graph);
     cnnNetwork = InferenceEngine::details::convertFunctionToICNNNetwork(graph, *this);
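
Both hunks above prepend ngraph::pass::CommonOptimizations to the lowering pipeline, so common graph cleanups run before the opset2-to-opset1 and opset1-to-legacy conversions. The resulting pass order, extracted into a standalone helper (a sketch assuming the transformation headers added above are on the include path):

    #include <memory>

    #include <ngraph/function.hpp>
    #include <transformations/common_optimizations/common_optimizations.hpp>
    #include <transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.hpp>
    #include <transformations/convert_opset2_to_opset1/convert_opset2_to_opset1.hpp>

    void lowerToLegacy(const std::shared_ptr<ngraph::Function>& graph) {
        // Order matters: generic optimizations first, then stepwise opset lowering
        ::ngraph::pass::CommonOptimizations().run_on_function(graph);
        ::ngraph::pass::ConvertOpSet2ToOpSet1().run_on_function(graph);
        ::ngraph::pass::ConvertOpSet1ToLegacy().run_on_function(graph);
    }
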
index 1c7c73c..cfb68ab 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <ngraph/opsets/opset.hpp>
 #include "cpp/ie_cnn_net_reader.h"
+#include "cpp/ie_plugin_cpp.hpp"
 #include "cpp_interfaces/base/ie_plugin_base.hpp"
 #include "details/ie_exception_conversion.hpp"
 #include "details/ie_so_pointer.hpp"
@@ -38,10 +39,9 @@ IE_SUPPRESS_DEPRECATED_START
 namespace {
 
 std::once_flag flag;
-std::shared_ptr<InferenceEngine::details::SharedObjectLoader> cnnReaderLoader;
+InferenceEngine::details::SharedObjectLoader::Ptr cnnReaderLoader;
 
-std::shared_ptr<InferenceEngine::details::SharedObjectLoader>
-createCnnReaderLoader() {
+InferenceEngine::details::SharedObjectLoader::Ptr createCnnReaderLoader() {
     std::call_once(flag, [&] () {
         FileUtils::FilePath libraryName = FileUtils::toFilePath(std::string("inference_engine_ir_readers") + std::string(IE_BUILD_POSTFIX));
         FileUtils::FilePath irReadersLibraryPath = FileUtils::makeSharedLibraryName(getInferenceEngineLibraryPath(), libraryName);
@@ -150,6 +150,19 @@ class Core::Impl : public ICore {
         std::vector<FileUtils::FilePath> listOfExtentions;
     };
 
+    /**
+     * Holds the original blob to keep its memory alive (e.g. when the original blob would otherwise go out of scope)
+     */
+    class WeightsHolderBlob : public TBlob<uint8_t> {
+        Blob::CPtr originBlob;
+
+    public:
+        explicit WeightsHolderBlob(const Blob::CPtr& weights) :
+            TBlob<uint8_t>(weights->getTensorDesc(),
+                           weights->cbuffer().as<uint8_t*>()),
+            originBlob(weights) { }
+    };
+
     std::unordered_set<std::string> opsetNames;
     std::vector<IExtensionPtr> extensions;
 
@@ -235,6 +248,60 @@ public:
         return _taskExecutor;
     }
 
+    CNNNetwork ReadNetwork(const std::string& modelPath, const std::string& binPath) const override {
+        IE_PROFILING_AUTO_SCOPE(Core::ReadNetwork)
+        IE_SUPPRESS_DEPRECATED_START
+        ResponseDesc desc;
+        CNNNetReaderPtr cnnReader(createCnnReaderLoader());
+        StatusCode rt = cnnReader->ReadNetwork(modelPath.c_str(), &desc);
+        if (rt != OK) THROW_IE_EXCEPTION << desc.msg;
+        if (cnnReader->getVersion(&desc) >= 10) {
+            cnnReader->addExtensions(getExtensions());
+        }
+        std::string bPath = binPath;
+        if (bPath.empty()) {
+            bPath = modelPath;
+            auto pos = bPath.rfind('.');
+            if (pos != std::string::npos) bPath = bPath.substr(0, pos);
+            bPath += ".bin";
+
+            if (!FileUtils::fileExist(bPath)) bPath.clear();
+        }
+
+        if (!bPath.empty()) {
+            rt = cnnReader->ReadWeights(bPath.c_str(), &desc);
+            if (rt != OK) THROW_IE_EXCEPTION << desc.msg;
+        } else {
+            TBlob<uint8_t>::Ptr weights_ptr;
+            rt = cnnReader->SetWeights(weights_ptr, &desc);
+            if (rt != OK) THROW_IE_EXCEPTION << desc.msg;
+        }
+        IE_SUPPRESS_DEPRECATED_END
+
+        return CNNNetwork(cnnReader);
+    }
+
+    CNNNetwork ReadNetwork(const std::string& model, const Blob::CPtr& weights) const override {
+        IE_PROFILING_AUTO_SCOPE(Core::ReadNetwork)
+        IE_SUPPRESS_DEPRECATED_START
+        ResponseDesc desc;
+        CNNNetReaderPtr cnnReader(createCnnReaderLoader());
+        StatusCode rt = cnnReader->ReadNetwork(model.data(), model.length(), &desc);
+        if (rt != OK) THROW_IE_EXCEPTION << desc.msg;
+        if (cnnReader->getVersion(&desc) >= 10) {
+            cnnReader->addExtensions(getExtensions());
+        }
+        TBlob<uint8_t>::Ptr weights_ptr;
+        if (weights) {
+            weights_ptr = std::make_shared<WeightsHolderBlob>(weights);
+        }
+        rt = cnnReader->SetWeights(weights_ptr, &desc);
+        if (rt != OK) THROW_IE_EXCEPTION << desc.msg;
+        IE_SUPPRESS_DEPRECATED_END
+
+        return CNNNetwork(cnnReader);
+    }
+
     IE_SUPPRESS_DEPRECATED_START
 
     /**
@@ -277,6 +344,11 @@ public:
                     iplugin_api_ptr->SetCore(mutableCore);
                 }
 
+                // Add registered extensions to new plugin
+                for (const auto& ext : extensions) {
+                    plugin->AddExtension(ext, nullptr);
+                }
+
                 InferencePlugin cppPlugin(plugin);
 
                 // configuring
@@ -388,10 +460,18 @@ public:
                 THROW_IE_EXCEPTION << "Cannot add opset with name: " << it.first << ". Opset with the same name already exists.";
             opsetNames.insert(it.first);
         }
+
+        for (auto& plugin : plugins) {
+            IE_SUPPRESS_DEPRECATED_START
+            try {
+                plugin.second.AddExtension(extension);
+            } catch (...) {}
+            IE_SUPPRESS_DEPRECATED_END
+        }
         extensions.emplace_back(extension);
     }
 
-    const std::vector<IExtensionPtr>& getExtensions() {
+    const std::vector<IExtensionPtr>& getExtensions() const {
         return extensions;
     }
 };
@@ -399,6 +479,7 @@ public:
 Core::Impl::Impl() {
     opsetNames.insert("opset1");
     opsetNames.insert("opset2");
+    opsetNames.insert("opset3");
 }
 
 Core::Impl::~Impl() {}
@@ -488,58 +569,11 @@ Parsed<T> parseDeviceNameIntoConfig(const std::string& deviceName, const std::ma
 }  //  namespace
 
 CNNNetwork Core::ReadNetwork(const std::string& modelPath, const std::string& binPath) const {
-    IE_PROFILING_AUTO_SCOPE(Core::ReadNetwork)
-    IE_SUPPRESS_DEPRECATED_START
-    ResponseDesc desc;
-    CNNNetReaderPtr cnnReader(createCnnReaderLoader());
-    StatusCode rt = cnnReader->ReadNetwork(modelPath.c_str(), &desc);
-    if (rt != OK) THROW_IE_EXCEPTION << desc.msg;
-    if (cnnReader->getVersion(&desc) >= 10) {
-        cnnReader->addExtensions(_impl->getExtensions());
-    }
-    std::string bPath = binPath;
-    if (bPath.empty()) {
-        bPath = modelPath;
-        auto pos = bPath.rfind('.');
-        if (pos != std::string::npos) bPath = bPath.substr(0, pos);
-        bPath += ".bin";
-
-        if (!FileUtils::fileExist(bPath)) bPath.clear();
-    }
-
-    if (!bPath.empty()) {
-        rt = cnnReader->ReadWeights(bPath.c_str(), &desc);
-        if (rt != OK) THROW_IE_EXCEPTION << desc.msg;
-    } else {
-        TBlob<uint8_t>::Ptr weights_ptr;
-        rt = cnnReader->SetWeights(weights_ptr, &desc);
-        if (rt != OK) THROW_IE_EXCEPTION << desc.msg;
-    }
-    IE_SUPPRESS_DEPRECATED_END
-
-    return CNNNetwork(cnnReader);
+    return _impl->ReadNetwork(modelPath, binPath);
 }
 
 CNNNetwork Core::ReadNetwork(const std::string& model, const Blob::CPtr& weights) const {
-    IE_PROFILING_AUTO_SCOPE(Core::ReadNetwork)
-    IE_SUPPRESS_DEPRECATED_START
-    ResponseDesc desc;
-    CNNNetReaderPtr cnnReader(createCnnReaderLoader());
-    StatusCode rt = cnnReader->ReadNetwork(model.data(), model.length(), &desc);
-    if (rt != OK) THROW_IE_EXCEPTION << desc.msg;
-    if (cnnReader->getVersion(&desc) >= 10) {
-        cnnReader->addExtensions(_impl->getExtensions());
-    }
-    TBlob<uint8_t>::Ptr weights_ptr;
-    if (weights) {
-        uint8_t* ptr = weights->cbuffer().as<uint8_t*>();
-        weights_ptr = make_shared_blob<uint8_t>(weights->getTensorDesc(), ptr);
-    }
-    rt = cnnReader->SetWeights(weights_ptr, &desc);
-    if (rt != OK) THROW_IE_EXCEPTION << desc.msg;
-    IE_SUPPRESS_DEPRECATED_END
-
-    return CNNNetwork(cnnReader);
+    return _impl->ReadNetwork(model, weights);
 }
 
 ExecutableNetwork Core::LoadNetwork(const CNNNetwork network, const std::string& deviceName,
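
With the reader logic moved into Core::Impl, the two public ReadNetwork overloads become thin forwarders and caller-side usage is unchanged. A usage sketch; the model path and in-memory buffers are placeholders:

    #include <ie_core.hpp>

    int main() {
        InferenceEngine::Core core;

        // From files: the .bin path may be omitted; it is then derived from the .xml path
        auto netFromFile = core.ReadNetwork("model.xml", "model.bin");

        // From memory: the weights blob is wrapped in WeightsHolderBlob internally,
        // so the caller's Blob stays alive for as long as the reader needs it
        std::string xmlText = "...";                    // IR XML loaded elsewhere
        InferenceEngine::Blob::CPtr weights = nullptr;  // weights loaded elsewhere
        auto netFromMemory = core.ReadNetwork(xmlText, weights);
    }
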
index 6605259..505221c 100644 (file)
@@ -36,7 +36,7 @@ std::string getPluginName(const std::string& deviceName) {
     static std::map<std::string, std::string> pluginFromDeviceMap = {
         {"CPU", "MKLDNNPlugin"},    {"GPU", "clDNNPlugin"},         {"FPGA", "dliaPlugin"},
         {"MYRIAD", "myriadPlugin"}, {"HDDL", "HDDLPlugin"},         {"GNA", "GNAPlugin"},
-        {"HETERO", "HeteroPlugin"}, {"MULTI", "MultiDevicePlugin"}};
+        {"HETERO", "HeteroPlugin"}, {"MULTI", "MultiDevicePlugin"}, {"KMB", "kmbPlugin"}};
     auto val = pluginFromDeviceMap.find(deviceName);
 
     if (val == plugunFromDeviceMap.end()) {
index 337fbfa..af153ce 100644 (file)
 namespace InferenceEngine {
 
 #ifdef ENABLE_MKL_DNN
-static Xbyak::util::Cpu cpu;
+static Xbyak::util::Cpu& get_cpu_info() {
+    static Xbyak::util::Cpu cpu;
+    return cpu;
+}
 #endif
 
 bool with_cpu_x86_sse42() {
 #ifdef ENABLE_MKL_DNN
-    return cpu.has(Xbyak::util::Cpu::tSSE42);
+    return get_cpu_info().has(Xbyak::util::Cpu::tSSE42);
 #else
 #if defined(HAVE_SSE)
     return true;
@@ -33,9 +36,21 @@ bool with_cpu_x86_sse42() {
 #endif
 }
 
+bool with_cpu_x86_avx() {
+#ifdef ENABLE_MKL_DNN
+    return get_cpu_info().has(Xbyak::util::Cpu::tAVX);
+#else
+#if defined(HAVE_AVX)
+    return true;
+#else
+    return false;
+#endif
+#endif
+}
+
 bool with_cpu_x86_avx2() {
 #ifdef ENABLE_MKL_DNN
-    return cpu.has(Xbyak::util::Cpu::tAVX2);
+    return get_cpu_info().has(Xbyak::util::Cpu::tAVX2);
 #else
 #if defined(HAVE_AVX2)
     return true;
@@ -47,7 +62,7 @@ bool with_cpu_x86_avx2() {
 
 bool with_cpu_x86_avx512f() {
 #ifdef ENABLE_MKL_DNN
-    return cpu.has(Xbyak::util::Cpu::tAVX512F);
+    return get_cpu_info().has(Xbyak::util::Cpu::tAVX512F);
 #else
 #if defined(HAVE_AVX512)
     return true;
@@ -59,9 +74,9 @@ bool with_cpu_x86_avx512f() {
 
 bool with_cpu_x86_avx512_core() {
 #ifdef ENABLE_MKL_DNN
-       return cpu.has(Xbyak::util::Cpu::tAVX512F  |
-                      Xbyak::util::Cpu::tAVX512DQ |
-                      Xbyak::util::Cpu::tAVX512BW);
+       return get_cpu_info().has(Xbyak::util::Cpu::tAVX512F |
+                                 Xbyak::util::Cpu::tAVX512DQ |
+                                 Xbyak::util::Cpu::tAVX512BW);
 #else
 #if defined(HAVE_AVX512)
     return true;
@@ -73,7 +88,7 @@ bool with_cpu_x86_avx512_core() {
 
 bool with_cpu_x86_bfloat16() {
 #ifdef ENABLE_MKL_DNN
-    return cpu.has(Xbyak::util::Cpu::tAVX512_BF16);
+    return get_cpu_info().has(Xbyak::util::Cpu::tAVX512_BF16);
 #else
     return false;
 #endif
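
Replacing the namespace-scope static Xbyak::util::Cpu with a function-local static is the classic Meyers-singleton fix for static initialization order problems: the object is built on first use, and C++11 guarantees that construction is thread-safe. The pattern in isolation, with a stand-in type instead of Xbyak:

    #include <iostream>

    struct CpuInfo {
        CpuInfo() { std::cout << "probed CPU features\n"; }  // runs once, on first use
        bool hasSse42() const { return true; }               // stand-in for real detection
    };

    static CpuInfo& get_cpu_info() {
        static CpuInfo cpu;  // initialized on first call, thread-safe since C++11
        return cpu;
    }

    int main() {
        // Both calls hit the same instance; construction happened exactly once
        std::cout << get_cpu_info().hasSse42() << get_cpu_info().hasSse42() << '\n';
    }
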
index d4d4b94..6467b6b 100644 (file)
@@ -17,7 +17,6 @@
 #include "ie_profiling.hpp"
 #include "ie_parallel.hpp"
 #include "ie_system_conf.h"
-#include "ie_error.hpp"
 #include "threading/ie_thread_affinity.hpp"
 #include "details/ie_exception.hpp"
 #include "ie_util_internal.hpp"
index f7257bd..7471390 100644 (file)
@@ -43,7 +43,7 @@ StatusCode CNNNetReaderImpl::SetWeights(const TBlob<uint8_t>::Ptr& weights, Resp
             std::stringstream model;
             xmlDoc->save(model);
             network = std::make_shared<CNNNetworkNGraphImpl>(v10Reader.read(model.str(), weights));
-        } else {
+        } else if (weights) {
             _parser->SetWeights(weights);
         }
     } catch (const InferenceEngineException& iee) {
index 9cf8783..437bb4a 100644 (file)
@@ -268,6 +268,7 @@ FormatParser::FormatParser(size_t version): _version(version) {
                 std::make_shared<LayerCreator<UniqueLayer>>("Unique"),
                 std::make_shared<LayerCreator<NonMaxSuppressionLayer>>("NonMaxSuppression"),
                 std::make_shared<LayerCreator<ScatterUpdateLayer>>("ScatterUpdate"),
+                std::make_shared<LayerCreator<ScatterElementsUpdateLayer>>("ScatterElementsUpdate"),
                 std::make_shared<LayerCreator<ExperimentalDetectronPriorGridGeneratorLayer>>("ExperimentalDetectronPriorGridGenerator"),
                 std::make_shared<LayerCreator<ExperimentalDetectronGenerateProposalsSingleImageLayer>>("ExperimentalDetectronGenerateProposalsSingleImage"),
                 std::make_shared<LayerCreator<ExperimentalDetectronTopKROIs>>("ExperimentalDetectronTopKROIs")};
index 6e0ac90..04cb4ef 100644 (file)
@@ -21,6 +21,7 @@
 #include <ngraph/ops.hpp>
 #include <ngraph/opsets/opset.hpp>
 #include <ngraph/opsets/opset2.hpp>
+#include <ngraph/opsets/opset3.hpp>
 
 #include "cnn_network_impl.hpp"
 #include "details/caseless.hpp"
@@ -53,6 +54,7 @@ V10Parser::V10Parser(const std::vector<IExtensionPtr>& exts) {
     // Load default opsets
     opsets["opset1"] = ngraph::get_opset1();
     opsets["opset2"] = ngraph::get_opset2();
+    opsets["opset3"] = ngraph::get_opset3();
 
     // Load custom opsets
     for (const auto& ext : exts) {
@@ -342,36 +344,26 @@ std::shared_ptr<ngraph::Node> V10Parser::createNode(const std::vector<ngraph::Ou
     }
 
     std::shared_ptr<ngraph::Node> ngraphNode;
-    if (opsets.find(params.version) != opsets.end()) {
-        // Create only parameter from opset1
-        if (params.version != "opset1" || params.type == "Parameter") {
-            auto opset = opsets.at(params.version);
+    if (opsets.count(params.version)) {
+        auto opset = opsets.at(params.version);
 
+        for (const auto& creator : creators) {
+            if (creator->shouldCreate(params.type)) {
+                ngraphNode = creator->createLayer(inputs, node, weights, params);
+                break;
+            }
+        }
+
+        if (!ngraphNode) {
             if (!opset.contains_type(params.type)) {
                 THROW_IE_EXCEPTION << "Opset " << params.version << " doesn't contain the operation with type: " << params.type;
             }
+
             ngraphNode = std::shared_ptr<ngraph::Node>(opset.create(params.type));
             ngraphNode->set_arguments(inputs);
             XmlDeserializer visitor(node);
             if (ngraphNode->visit_attributes(visitor))
                 ngraphNode->constructor_validate_and_infer_types();
-            else
-                ngraphNode.reset();
-
-            if (ngraphNode && params.version == "opset2" && params.type != "Parameter") {
-                auto opset1 = opsets.at("opset1");
-                if (opset1.get_types_info().find(ngraphNode->get_type_info()) != opset1.get_types_info().end())
-                    ngraphNode.reset();
-            }
-        }
-    }
-
-    if (!ngraphNode && (params.version == "opset1" || params.version == "opset2")) {
-        for (const auto& creator : creators) {
-            if (creator->shouldCreate(params.type)) {
-                ngraphNode = creator->createLayer(inputs, node, weights, params);
-                break;
-            }
         }
     }
 
index df2e722..9e095be 100644 (file)
@@ -2,10 +2,6 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-/**
- * @brief A header file that provides interface for network reader that is used to build networks from a given IR
- * @file ie_icnn_net_reader.h
- */
 #pragma once
 
 #include <ie_blob.h>
index f17b43a..a2eb698 100644 (file)
@@ -2,10 +2,6 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-/**
- * @brief A header file that provides interface for network reader that is used to build networks from a given IR
- * @file ie_icnn_net_reader.h
- */
 #pragma once
 
 #include <ie_api.h>
index daae95a..17a6c46 100644 (file)
@@ -10,7 +10,6 @@ endif()
 
 file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp)
 file(GLOB_RECURSE PUBLIC_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp ${CMAKE_CURRENT_SOURCE_DIR}/include/*.h)
-file(GLOB_RECURSE NN_BUILDER_LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/builders/*.cpp)
 
 set(PUBLIC_HEADERS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include")
 
@@ -24,7 +23,6 @@ source_group("include" FILES ${PUBLIC_HEADERS})
 
 add_library(${TARGET_NAME}_obj OBJECT
             ${LIBRARY_SRC}
-            ${NN_BUILDER_LIBRARY_SRC}
             ${PUBLIC_HEADERS})
 
 set_ie_threading_interface_for(${TARGET_NAME}_obj)
@@ -38,13 +36,6 @@ target_include_directories(${TARGET_NAME}_obj PRIVATE ${PUBLIC_HEADERS_DIR} ${CM
     $<TARGET_PROPERTY:ngraph::ngraph,INTERFACE_INCLUDE_DIRECTORIES>
     $<TARGET_PROPERTY:pugixml,INTERFACE_INCLUDE_DIRECTORIES>)
 
-function(nn_builder_disable_warnings)
-    disable_deprecated_warnings()
-    set_source_files_properties(${NN_BUILDER_LIBRARY_SRC} PROPERTIES COMPILE_FLAGS "${ie_c_cxx_deprecated}")
-endfunction()
-
-nn_builder_disable_warnings()
-
 # Create shared library
 
 add_library(${TARGET_NAME} SHARED
index cd2e6c3..0771ab5 100644 (file)
@@ -33,7 +33,7 @@ using AllLayers =
                ReshapeLayer*, TileLayer*, ScaleShiftLayer*, PReLULayer*, PowerLayer*, BatchNormalizationLayer*,
                ClampLayer*, TensorIterator*, LSTMCell*, GRUCell*, RNNCell*, RNNSequenceLayer*, QuantizeLayer*,
                BinaryConvolutionLayer*, WeightableLayer*, OneHotLayer*, MathLayer*, ReduceLayer*, UniqueLayer*,
-               NonMaxSuppressionLayer*, ScatterUpdateLayer*, ExperimentalDetectronPriorGridGeneratorLayer*,
+               NonMaxSuppressionLayer*, ScatterUpdateLayer*, ScatterElementsUpdateLayer*, ExperimentalDetectronPriorGridGeneratorLayer*,
                ExperimentalDetectronGenerateProposalsSingleImageLayer*, ExperimentalDetectronTopKROIs*, CNNLayer*>;
 
 template <class Visitor, std::size_t I = 0, typename... Tp>
diff --git a/inference-engine/src/legacy_api/src/builders/ie_const_layer.cpp b/inference-engine/src/legacy_api/src/builders/ie_const_layer.cpp
deleted file mode 100644 (file)
index 7fa2596..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_const_layer.hpp>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::ConstLayer::ConstLayer(const std::string& name): LayerDecorator("Const", name) {
-    getLayer()->getOutputPorts().resize(1);
-    getLayer()->getParameters()["custom"] = Blob::CPtr();
-}
-
-Builder::ConstLayer::ConstLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("Const");
-}
-
-Builder::ConstLayer::ConstLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("Const");
-}
-
-Builder::ConstLayer& Builder::ConstLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::ConstLayer::getPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::ConstLayer& Builder::ConstLayer::setPort(const Port& port) {
-    const auto& data = getLayer()->getOutputPorts()[0].getData();
-    getLayer()->getOutputPorts()[0] = port;
-    getLayer()->getOutputPorts()[0].setData(data);
-    return *this;
-}
-
-Builder::ConstLayer& Builder::ConstLayer::setData(const Blob::CPtr& data) {
-    getLayer()->getParameters()["custom"] = data;
-    getLayer()->getOutputPorts()[0].getData()->setData(std::const_pointer_cast<Blob>(data));
-    return *this;
-}
-
-const Blob::CPtr& Builder::ConstLayer::getData() const {
-    if (getLayer()->getParameters().at("custom").as<Blob::CPtr>().get() !=
-        getLayer()->getOutputPorts()[0].getData()->getData().get())
-        THROW_IE_EXCEPTION << "Constant data output port has incorrect data!";
-    return getLayer()->getParameters().at("custom").as<Blob::CPtr>();
-}
-
-REG_VALIDATOR_FOR(Const, [](const InferenceEngine::Builder::Layer::CPtr& layer, bool partial) {
-    Builder::ConstLayer constBuilder(layer);
-    const auto& data = constBuilder.getData();
-    if (!data || data->cbuffer() == nullptr) THROW_IE_EXCEPTION << "Cannot create Const layer! Data is required!";
-});
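
ConstLayer kept its payload twice, under the "custom" parameter and on the output port's data, and getData() cross-checked the two. A usage sketch, assuming the pre-removal header:

    #include <builders/ie_const_layer.hpp>  // header deleted by this commit
    #include <ie_blob.h>

    using namespace InferenceEngine;

    Builder::ConstLayer makeWeights() {
        TensorDesc desc(Precision::FP32, {4}, Layout::C);
        Blob::Ptr blob = make_shared_blob<float>(desc);
        blob->allocate();

        Builder::ConstLayer weights("weights");
        weights.setData(blob);  // stored under "custom" and mirrored on the output port
        return weights;
    }
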
diff --git a/inference-engine/src/legacy_api/src/builders/ie_input_layer_layer.cpp b/inference-engine/src/legacy_api/src/builders/ie_input_layer_layer.cpp
deleted file mode 100644 (file)
index 50b72a6..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_input_layer.hpp>
-#include <string>
-
-using namespace InferenceEngine;
-
-Builder::InputLayer::InputLayer(const std::string& name): LayerDecorator("Input", name) {
-    getLayer()->getOutputPorts().resize(1);
-}
-
-Builder::InputLayer::InputLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("Input");
-}
-
-Builder::InputLayer::InputLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("Input");
-}
-
-Builder::InputLayer& Builder::InputLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::InputLayer::getPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::InputLayer& Builder::InputLayer::setPort(const Port& port) {
-    getLayer()->getOutputPorts()[0] = port;
-    return *this;
-}
-
-REG_VALIDATOR_FOR(Input, [](const InferenceEngine::Builder::Layer::CPtr& layer, bool partial) {
-    if (layer->getOutputPorts()[0].shape().empty())
-        THROW_IE_EXCEPTION << layer->getType() << " node " << layer->getName() << " should have shape!";
-});
diff --git a/inference-engine/src/legacy_api/src/builders/ie_layer_builder.cpp b/inference-engine/src/legacy_api/src/builders/ie_layer_builder.cpp
deleted file mode 100644 (file)
index 66e7383..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_layer_builder.hpp>
-#include <details/caseless.hpp>
-#include <limits>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-using namespace InferenceEngine;
-
-Builder::Layer::Layer(const std::string& type, const std::string& name)
-    : id((std::numeric_limits<idx_t>::max)()), type(type), name(name) {}
-
-Builder::Layer::Layer(const ILayer::CPtr& layer) {
-    id = layer->getId();
-    name = layer->getName();
-    type = layer->getType();
-    inPorts = layer->getInputPorts();
-    outPorts = layer->getOutputPorts();
-    params = layer->getParameters();
-}
-
-Builder::Layer::Layer(idx_t id, const Builder::Layer& layer): Layer(layer) {
-    this->id = id;
-}
-
-idx_t Builder::Layer::getId() const noexcept {
-    return id;
-}
-
-const std::string& Builder::Layer::getType() const noexcept {
-    return type;
-}
-Builder::Layer& Builder::Layer::setType(const std::string& type) {
-    this->type = type;
-    return *this;
-}
-
-const std::string& Builder::Layer::getName() const noexcept {
-    return name;
-}
-Builder::Layer& Builder::Layer::setName(const std::string& name) {
-    this->name = name;
-    return *this;
-}
-
-const std::map<std::string, Parameter>& Builder::Layer::getParameters() const noexcept {
-    return params;
-}
-std::map<std::string, Parameter>& Builder::Layer::getParameters() {
-    return params;
-}
-Builder::Layer& Builder::Layer::setParameters(const std::map<std::string, Parameter>& params) {
-    getParameters() = params;
-    return *this;
-}
-
-std::vector<Port>& Builder::Layer::getInputPorts() {
-    return inPorts;
-}
-const std::vector<Port>& Builder::Layer::getInputPorts() const noexcept {
-    return inPorts;
-}
-Builder::Layer& Builder::Layer::setInputPorts(const std::vector<Port>& ports) {
-    getInputPorts() = ports;
-    return *this;
-}
-
-std::vector<Port>& Builder::Layer::getOutputPorts() {
-    return outPorts;
-}
-const std::vector<Port>& Builder::Layer::getOutputPorts() const noexcept {
-    return outPorts;
-}
-Builder::Layer& Builder::Layer::setOutputPorts(const std::vector<Port>& ports) {
-    getOutputPorts() = ports;
-    return *this;
-}
-
-const ILayer::CPtr Builder::Layer::build() const {
-    validate(true);
-    return std::static_pointer_cast<const ILayer>(shared_from_this());
-}
-
-void Builder::Layer::addValidator(const std::string& type,
-                                  const std::function<void(const Layer::CPtr&, bool)>& validator) {
-    auto holder = getValidatorsHolder();
-    if (holder->validators.find(type) == holder->validators.end()) holder->validators[type] = validator;
-}
-
-void Builder::Layer::validate(bool partial) const {
-    if (getValidatorsHolder()->validators.find(type) != getValidatorsHolder()->validators.end())
-        getValidatorsHolder()->validators[type](shared_from_this(), partial);
-}
-
-std::shared_ptr<Builder::ValidatorsHolder> Builder::Layer::getValidatorsHolder() {
-    static std::shared_ptr<ValidatorsHolder> localHolder;
-    if (localHolder == nullptr) {
-        localHolder = std::make_shared<ValidatorsHolder>();
-    }
-    return localHolder;
-}
diff --git a/inference-engine/src/legacy_api/src/builders/ie_layer_decorator.cpp b/inference-engine/src/legacy_api/src/builders/ie_layer_decorator.cpp
deleted file mode 100644 (file)
index d704880..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_layer_decorator.hpp>
-#include <details/caseless.hpp>
-#include <memory>
-#include <string>
-#include <vector>
-
-using namespace InferenceEngine;
-using namespace details;
-
-Builder::LayerDecorator::LayerDecorator(const std::string& type, const std::string& name):
-        cLayer(nullptr) {
-    layer = std::make_shared<Layer>(type, name);
-}
-
-Builder::LayerDecorator::LayerDecorator(const Layer::Ptr& layer): cLayer(nullptr), layer(layer) {}
-Builder::LayerDecorator::LayerDecorator(const Layer::CPtr& layer): cLayer(layer), layer(nullptr) {}
-
-Builder::LayerDecorator::LayerDecorator(const Builder::LayerDecorator& rval) {
-    *this = rval;
-}
-
-Builder::LayerDecorator& Builder::LayerDecorator::operator=(const Builder::LayerDecorator& rval) {
-    layer = rval.layer;
-    cLayer = rval.cLayer;
-    return *this;
-}
-
-Builder::LayerDecorator::operator Builder::Layer() const {
-    getLayer()->validate(true);
-    return *getLayer();
-}
-
-Builder::LayerDecorator::operator Builder::Layer::Ptr() {
-    getLayer()->validate(true);
-    return getLayer();
-}
-
-Builder::LayerDecorator::operator Builder::Layer::CPtr() const {
-    getLayer()->validate(true);
-    return getLayer();
-}
-
-const std::string& Builder::LayerDecorator::getType() const {
-    return getLayer()->getType();
-}
-const std::string& Builder::LayerDecorator::getName() const {
-    return getLayer()->getName();
-}
-
-Builder::Layer::Ptr& Builder::LayerDecorator::getLayer() {
-    if (!layer) THROW_IE_EXCEPTION << "Cannot get Layer::Ptr!";
-    return layer;
-}
-
-const Builder::Layer::CPtr Builder::LayerDecorator::getLayer() const {
-    if (!cLayer) {
-        if (!layer) THROW_IE_EXCEPTION << "Cannot get Layer::CPtr!";
-        return std::static_pointer_cast<const Layer>(layer);
-    }
-    return cLayer;
-}
-
-void Builder::LayerDecorator::checkType(const std::string& type) const {
-    if (!details::CaselessEq<std::string>()(getLayer()->getType(), type))
-        THROW_IE_EXCEPTION << "Cannot create " << type << " decorator for layer " << getLayer()->getType();
-}
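
checkType above compares layer type names case-insensitively via details::CaselessEq, so "TanH" and "tanh" name the same type. A standalone sketch of that comparison idiom (plain C++ stand-in for the IE helper):

    #include <algorithm>
    #include <cctype>
    #include <iostream>
    #include <string>

    // Stand-in for InferenceEngine::details::CaselessEq<std::string>
    static bool caselessEq(const std::string& a, const std::string& b) {
        return a.size() == b.size() &&
               std::equal(a.begin(), a.end(), b.begin(), [](unsigned char x, unsigned char y) {
                   return std::tolower(x) == std::tolower(y);
               });
    }

    int main() {
        std::cout << caselessEq("TanH", "tanh") << '\n';  // 1: the names match
    }
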
diff --git a/inference-engine/src/legacy_api/src/builders/ie_network_builder.cpp b/inference-engine/src/legacy_api/src/builders/ie_network_builder.cpp
deleted file mode 100644 (file)
index f708ccc..0000000
+++ /dev/null
@@ -1,643 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <builders/ie_const_layer.hpp>
-#include <builders/ie_input_layer.hpp>
-#include <builders/ie_network_builder.hpp>
-#include <details/caseless.hpp>
-#include <limits>
-#include <map>
-#include <memory>
-#include <shape_infer/ie_reshaper.hpp>
-#include <string>
-#include <unordered_map>
-#include <unordered_set>
-#include <utility>
-#include <vector>
-
-#include "blob_factory.hpp"
-#include "graph_tools.hpp"
-#include "ie_cnn_layer_builder.h"
-#include "ie_profiling.hpp"
-
-using namespace InferenceEngine;
-
-/******************************************************************************
- Network builder
- ******************************************************************************/
-Builder::Network::Network(const std::string& name): Builder::Network(Context(), name) {}
-Builder::Network::Network(const INetwork& network): Builder::Network(Context(), network) {}
-Builder::Network::Network(const ICNNNetwork& network): Builder::Network(Context(), network) {}
-
-Builder::Network::Network(const Context& ieContext, const std::string& name) {
-    parameters["name"] = name;
-    parameters["context"] = ieContext;
-    parameters["version"] = 3;
-    parameters["layers"] = std::vector<Layer::Ptr>();
-    parameters["connections"] = std::vector<Connection>();
-}
-
-Builder::Network::Network(const Context& ieContext, const INetwork& network): Network(ieContext, network.getName()) {
-    for (const auto& layer : network) {
-        parameters["layers"].as<std::vector<Layer::Ptr>>().push_back(std::make_shared<Layer>(layer));
-        const auto layerConnections = network.getLayerConnections(layer->getId());
-        for (const auto& connection : layerConnections) {
-            bool found = false;
-            for (const auto& con : parameters["connections"].as<std::vector<Connection>>()) {
-                if (con == connection) {
-                    found = true;
-                    break;
-                }
-            }
-            if (!found) {
-                parameters["connections"].as<std::vector<Connection>>().push_back(connection);
-            }
-        }
-    }
-}
-
-Builder::Network::Network(const Context& ieContext, const ICNNNetwork& network): Network(ieContext, network.getName()) {
-    parameters["version"] = 0;
-    auto allInputs = CNNNetGetAllInputLayers(network);
-    InputsDataMap inputs;
-    network.getInputsInfo(inputs);
-    if (inputs.empty() && allInputs.empty())
-        THROW_IE_EXCEPTION << "Cannot create graph! No inputs for the topology " << network.getName();
-
-    std::unordered_map<std::string, idx_t> name2id;
-    std::unordered_set<Data*> dataPtrs;
-    std::vector<CNNLayerPtr> queueLayers;
-
-    auto createGenericFromCNNLayer = [&](const CNNLayerPtr& cnnLayer) {
-        for (const auto& data : cnnLayer->insData) {
-            auto lockedData = data.lock();
-            if (!lockedData) continue;
-            if (dataPtrs.find(lockedData.get()) == dataPtrs.end()) {
-                dataPtrs.insert(lockedData.get());
-            }
-        }
-        for (const auto& data : cnnLayer->outData) {
-            if (dataPtrs.find(data.get()) == dataPtrs.end()) {
-                dataPtrs.insert(data.get());
-            }
-        }
-        std::map<std::string, Blob::Ptr> blobs = cnnLayer->blobs;
-        size_t inputsCount(0);
-        for (const auto& data : cnnLayer->insData) {
-            auto lockedData = data.lock();
-            if (!lockedData) continue;
-            inputsCount++;
-        }
-        const auto layer = builderFromCNNLayer(cnnLayer);
-        idx_t layerId = addLayer(layer);
-
-        if (blobs.find("weights") != blobs.end()) {
-            idx_t constLayerId = addLayer(ConstLayer("weights").setData(blobs["weights"]));
-            connect({constLayerId}, {layerId, inputsCount++});
-        }
-        if (blobs.find("biases") != blobs.end()) {
-            if (blobs.find("weights") == blobs.end()) ++inputsCount;
-
-            idx_t constLayerId = addLayer(ConstLayer("biases").setData(blobs["biases"]));
-            connect({constLayerId}, {layerId, inputsCount++});
-        }
-        for (const auto& it : blobs) {
-            if (it.first == "weights" || it.first == "biases") continue;
-            idx_t constLayerId = addLayer(ConstLayer(it.first).setData(it.second));
-            connect({constLayerId}, {layerId, inputsCount++});
-        }
-        name2id[layer.getName()] = layerId;
-        return layerId;
-    };
-
-    auto addPreProcessFor = [&](const InputInfo::Ptr& inputInfo) {
-        auto inputLayer = getLayer(name2id[inputInfo->name()]);
-        if (inputLayer->getType().empty() && inputLayer->getName().empty()) return;
-
-        inputLayer->getParameters()["preProcess"] = inputInfo->getPreProcess();
-    };
-
-    for (auto input : inputs) {
-        auto inputLayer = input.second->getInputData()->getCreatorLayer().lock();
-
-        if (dataPtrs.find(input.second->getInputData().get()) == dataPtrs.end()) {
-            dataPtrs.insert(input.second->getInputData().get());
-        }
-
-        if (!inputLayer) {
-            // For v1 parser
-            inputLayer.reset(new CNNLayer(
-                {input.second->getInputData()->getName(), "Input", input.second->getInputData()->getPrecision()}));
-
-            inputLayer->outData.push_back(input.second->getInputData());
-        }
-        const auto layer =
-            InputLayer(inputLayer->name).setPort(Port(inputLayer->outData[0]->getTensorDesc().getDims()));
-        name2id[layer.getName()] = addLayer(layer);
-
-        for (const auto& nlayer : input.second->getInputData()->getInputTo()) {
-            queueLayers.push_back(nlayer.second);
-        }
-    }
-    for (auto input : allInputs) {
-        auto isRealInput =
-            std::find_if(std::begin(inputs), std::end(inputs), [&](InputsDataMap::value_type& inputInfo) {
-                return inputInfo.second->getInputData()->getName() == input->name;
-            });
-        if (isRealInput != std::end(inputs)) {
-            continue;
-        }
-
-        details::CaselessEq<std::string> eq;
-        CNNLayerPtr cnnLayer = input;
-
-        if (eq(input->type, "Memory")) {
-            auto memoryId = input->GetParamAsString("id");
-            cnnLayer.reset(new CNNLayer({input->name + "/id=" + memoryId, "MemoryInput", input->precision}));
-            cnnLayer->params = input->params;
-            cnnLayer->outData = input->outData;
-        }
-
-        createGenericFromCNNLayer(cnnLayer);
-
-        size_t count_out = 0;
-        for (auto&& outData : input->outData) {
-            for (auto&& nlayer : outData->getInputTo()) {
-                queueLayers.push_back(nlayer.second);
-            }
-            count_out++;
-        }
-    }
-    while (!queueLayers.empty()) {
-        auto cnnLayerPtr = *queueLayers.begin();
-
-        if (name2id.find(cnnLayerPtr->name) == name2id.end()) {
-            createGenericFromCNNLayer(cnnLayerPtr);
-
-            for (auto&& outData : cnnLayerPtr->outData) {
-                for (auto&& nlayer : outData->getInputTo()) {
-                    queueLayers.push_back(nlayer.second);
-                }
-            }
-        }
-
-        queueLayers.erase(queueLayers.begin());
-    }
-    std::map<std::string, DataPtr> output;
-    network.getOutputsInfo(output);
-
-    for (auto it = output.begin(); it != output.end(); it++) {
-        CNNLayerPtr creator = (*it).second->getCreatorLayer().lock();
-        if (name2id.find(creator->name) == name2id.end())
-            THROW_IE_EXCEPTION << "Cannot find output layer " << creator->name;
-
-        auto lastLayer = getLayer(name2id[creator->name]);
-        if (lastLayer->getName() == "" && lastLayer->getType().empty())
-            THROW_IE_EXCEPTION << "Cannot find output layer " << creator->name;
-
-        std::string name = "out_" + lastLayer->getName();
-
-        CNNLayerPtr cnnOutLayer(new CNNLayer({name, "Output", creator->outData[0]->getPrecision()}));
-        cnnOutLayer->insData.push_back((*it).second);
-
-        idx_t outLayerId = createGenericFromCNNLayer(cnnOutLayer);
-
-        idx_t inIdx(0);
-        for (size_t i = 0; i < creator->outData.size(); i++) {
-            if (creator->outData[i] == (*it).second) {
-                inIdx = i;
-                break;
-            }
-        }
-
-        parameters["connections"].as<std::vector<Connection>>().push_back(
-            Connection({lastLayer->getId(), inIdx}, {outLayerId}));
-    }
-
-    for (const auto dataPtr : dataPtrs) {
-        auto cnnInputLayer = dataPtr->getCreatorLayer().lock();
-        idx_t inIdx(0);
-        if (!cnnInputLayer) {
-            // For v1 parser
-            cnnInputLayer.reset(new CNNLayer({dataPtr->getName(), "Input", dataPtr->getPrecision()}));
-        } else {
-            for (size_t i = 0; i < cnnInputLayer->outData.size(); i++) {
-                if (cnnInputLayer->outData[i].get() == dataPtr) {
-                    inIdx = i;
-                    break;
-                }
-            }
-        }
-        for (const auto& it : dataPtr->getInputTo()) {
-            if (name2id.find(cnnInputLayer->name) == name2id.end() || name2id.find(it.second->name) == name2id.end())
-                THROW_IE_EXCEPTION << "Cannot create connections between nodes: " << cnnInputLayer->name << " -> "
-                                   << it.second->name;
-            idx_t outIdx(0);
-
-            for (size_t i = 0; i < it.second->insData.size(); i++) {
-                const auto lockedData = it.second->insData[i].lock();
-                if (lockedData && lockedData.get() == dataPtr) {
-                    outIdx = i;
-                    break;
-                }
-            }
-            parameters["connections"].as<std::vector<Connection>>().push_back(
-                Connection({name2id[cnnInputLayer->name], inIdx}, {name2id[it.second->name], outIdx}));
-        }
-    }
-
-    for (const auto& input : inputs) {
-        addPreProcessFor(input.second);
-    }
-}
-
-const std::vector<Builder::Layer::Ptr>& Builder::Network::getLayers() const {
-    return parameters.at("layers").as<std::vector<Layer::Ptr>>();
-}
-std::vector<Builder::Layer::Ptr>& Builder::Network::getLayers() {
-    return parameters["layers"].as<std::vector<Layer::Ptr>>();
-}
-
-idx_t Builder::Network::addLayer(const std::vector<PortInfo>& inputs, const Layer& layer) {
-    IE_PROFILING_AUTO_SCOPE(Builder::Network::addLayer)
-    auto layer_id = addLayer(layer);
-    for (size_t i = 0; i < inputs.size(); i++) {
-        connect({inputs[i].layerId(), inputs[i].portId()}, {layer_id, i});
-    }
-    return layer_id;
-}
-
-idx_t Builder::Network::addLayer(const Layer& layer) {
-    auto getAvailableId = [&](idx_t defaultId) {
-        if (defaultId == (std::numeric_limits<idx_t>::max)()) defaultId = 0;
-
-        auto it = parameters["layers"].as<std::vector<Layer::Ptr>>().begin();
-        while (it != parameters["layers"].as<std::vector<Layer::Ptr>>().end()) {
-            for (it = parameters["layers"].as<std::vector<Layer::Ptr>>().begin();
-                 it != parameters["layers"].as<std::vector<Layer::Ptr>>().end(); it++) {
-                if ((*it)->getId() == defaultId) {
-                    defaultId++;
-                    break;
-                }
-            }
-        }
-        return defaultId;
-    };
-    auto generateAvailableName = [&](const std::string& name, idx_t id) {
-        const std::string idName = "id" + std::to_string(id);
-        std::string generatedName(name);
-        if (generatedName.empty()) generatedName = idName;
-        bool nameIsUnique(false);
-        while (!nameIsUnique) {
-            nameIsUnique = true;
-            for (const auto& layer : parameters["layers"].as<std::vector<Layer::Ptr>>()) {
-                if (generatedName == layer->getName()) {
-                    nameIsUnique = false;
-                    generatedName += "_" + idName;
-                }
-            }
-        }
-        return generatedName;
-    };
-    idx_t generatedId = getAvailableId(layer.getId());
-    const auto name = generateAvailableName(layer.getName(), generatedId);
-    parameters["layers"].as<std::vector<Layer::Ptr>>().emplace_back(std::make_shared<Layer>(generatedId, layer));
-    parameters["layers"]
-        .as<std::vector<Layer::Ptr>>()[parameters["layers"].as<std::vector<Layer::Ptr>>().size() - 1]
-        ->setName(name);
-    return generatedId;
-}
-
-void Builder::Network::connect(const PortInfo& input, const PortInfo& output) {
-    const auto mergePortData = [&]() -> bool {
-        const auto blobEqualOrEmpty = [](const Blob::Ptr& ref, const Blob::Ptr& test) -> bool {
-            return (ref->size() == test->size() || test->size() == 0) &&
-                   (!memcmp(ref->cbuffer(), test->cbuffer(), test->byteSize())) &&
-                   (ref->getTensorDesc().getPrecision() == test->getTensorDesc().getPrecision() ||
-                    test->getTensorDesc().getPrecision() == Precision::UNSPECIFIED) &&
-                   (ref->getTensorDesc().getLayout() == test->getTensorDesc().getLayout() ||
-                    test->getTensorDesc().getLayout() == Layout::ANY) &&
-                   (ref->getTensorDesc().getDims() == test->getTensorDesc().getDims() ||
-                    test->getTensorDesc().getDims().empty()) &&
-                   (ref->cbuffer().as<char*>() == test->cbuffer().as<char*>() || test->cbuffer() == nullptr);
-        };
-
-        const auto srcPortData = getLayer(input.layerId())->getOutputPorts()[input.portId()].getData();
-        const auto dstPortData = getLayer(output.layerId())->getInputPorts()[output.portId()].getData();
-        if (srcPortData == dstPortData) return true;
-
-        if (srcPortData->getParameters() != dstPortData->getParameters() && !srcPortData->getParameters().empty() &&
-            !dstPortData->getParameters().empty())
-            return false;
-
-        size_t srcDataCount(0), dstDataCount(0);
-        if (!srcPortData->getParameters().empty()) srcDataCount++;
-        if (!dstPortData->getParameters().empty()) dstDataCount++;
-
-        const auto srcBlb = srcPortData->getData();
-        const auto dstBlb = dstPortData->getData();
-        if (srcBlb == dstBlb ||
-            (srcBlb->size() == dstBlb->size() && srcBlb->getTensorDesc() == dstBlb->getTensorDesc() &&
-             ((srcBlb->cbuffer().as<char*>() == dstBlb->cbuffer().as<char*>()) ||
-              (srcBlb->cbuffer() != nullptr && dstBlb->cbuffer() != nullptr &&
-               !memcmp(srcBlb->cbuffer(), dstBlb->cbuffer(), dstBlb->byteSize()))))) {
-            srcDataCount++;
-            dstDataCount++;
-        } else if (blobEqualOrEmpty(srcBlb, dstBlb)) {
-            srcDataCount++;
-        } else if (blobEqualOrEmpty(dstBlb, srcBlb)) {
-            dstDataCount++;
-        } else {
-            return false;
-        }
-
-        if (dstDataCount > srcDataCount) {
-            // Change source and all src destination data
-            for (const auto& connection : getLayerConnections(input.layerId())) {
-                if (connection.from() != input) continue;
-                getLayer(connection.to().layerId())->getInputPorts()[connection.to().portId()].setData(dstPortData);
-            }
-            getLayer(input.layerId())->getOutputPorts()[input.portId()].setData(dstPortData);
-        } else {
-            // Change destination data
-            getLayer(output.layerId())->getInputPorts()[output.portId()].setData(srcPortData);
-        }
-
-        return true;
-    };
-
-    if (!mergePortData()) THROW_IE_EXCEPTION << "Cannot connect two ports with different data!";
-
-    parameters["connections"].as<std::vector<Connection>>().emplace_back(input, output);
-}
-
-void Builder::Network::removeLayer(idx_t layerId) {
-    auto it = parameters["layers"].as<std::vector<Layer::Ptr>>().begin();
-    for (; it != parameters["layers"].as<std::vector<Layer::Ptr>>().end(); it++) {
-        if ((*it)->getId() == layerId) {
-            break;
-        }
-    }
-    if (it != parameters["layers"].as<std::vector<Layer::Ptr>>().end())
-        parameters["layers"].as<std::vector<Layer::Ptr>>().erase(it);
-}
-
-void Builder::Network::disconnect(const Connection& connection) {
-    auto it = parameters["connections"].as<std::vector<Connection>>().begin();
-    for (; it != parameters["connections"].as<std::vector<Connection>>().end(); it++) {
-        if (connection == *it) break;
-    }
-    if (it != parameters["connections"].as<std::vector<Connection>>().end())
-        parameters["connections"].as<std::vector<Connection>>().erase(it);
-
-    try {
-        auto layer = getLayer(connection.to().layerId());
-        layer->getInputPorts()[connection.to().portId()].setData(std::make_shared<PortData>());
-    } catch (InferenceEngine::details::InferenceEngineException& ex) {
-    }
-}
-
-const INetwork::CPtr Builder::Network::build() {
-    validate();
-    InferenceEngine::Builder::Network::Ptr network =
-        std::make_shared<InferenceEngine::Builder::Network>(static_cast<const INetwork&>(*this));
-    return network;
-}
-
-void Builder::Network::validate() {
-    // Check that all ports are connected
-    for (const auto& layer : getLayers()) {
-        std::vector<bool> existInCon(layer->getInputPorts().size());
-        for (size_t i = 0; i < layer->getInputPorts().size(); i++) {
-            if (layer->getInputPorts()[i].getParameters().find("type") !=
-                layer->getInputPorts()[i].getParameters().end())
-                existInCon[i] = true;
-        }
-        std::vector<bool> existOutCon(layer->getOutputPorts().size());
-
-        const auto layerConnections = getLayerConnections(layer->getId());
-        for (const auto& connection : layerConnections) {
-            if (connection.from().layerId() == layer->getId()) {
-                existOutCon[connection.from().portId()] = true;
-                getLayer(connection.to().layerId());
-            }
-            if (connection.to().layerId() == layer->getId()) {
-                existInCon[connection.to().portId()] = true;
-                getLayer(connection.from().layerId());
-            }
-        }
-        bool allPortsConnected = true;
-        for (const auto& cons : {existInCon, existOutCon}) {
-            for (const auto& existCon : cons) {
-                allPortsConnected = allPortsConnected && existCon;
-            }
-        }
-        if (!allPortsConnected)
-            THROW_IE_EXCEPTION << "Not all ports of layer " << layer->getName() << " were connected!";
-    }
-
-    // Check all layers
-    for (const auto& connection : getConnections()) {
-        if (!getLayer(connection.to().layerId()))
-            THROW_IE_EXCEPTION << "Cannot find layer with id: " << connection.to().layerId();
-        if (!getLayer(connection.from().layerId()))
-            THROW_IE_EXCEPTION << "Cannot find layer with id: " << connection.from().layerId();
-    }
-
-    std::map<std::string, SizeVector> inputShapes;
-    for (const auto& input : getInputs()) inputShapes[input->getName()] = input->getOutputPorts()[0].shape();
-
-    ShapeInfer::Reshaper reshaper(this);
-    ResponseDesc resp;
-    StatusCode sts = reshaper.run(inputShapes, &resp);
-    // Not all implementations may be registered if all shapes were read from IR.
-    if (sts == NOT_FOUND) {
-        bool allShapesLooksGood = true;
-        for (const auto& connection : getConnections()) {
-            if (getLayer(connection.from().layerId())->getOutputPorts()[connection.from().portId()].shape() !=
-                    getLayer(connection.to().layerId())->getInputPorts()[connection.to().portId()].shape() ||
-                getLayer(connection.to().layerId())->getInputPorts()[connection.to().portId()].shape().empty()) {
-                allShapesLooksGood = false;
-                break;
-            }
-        }
-        if (allShapesLooksGood) sts = OK;
-    }
-
-    if (sts != OK) THROW_IE_EXCEPTION << resp.msg;
-
-    // Check all parameters
-    for (const auto& layer : getLayers()) {
-        try {
-            layer->build();
-        } catch (InferenceEngine::details::InferenceEngineException& ex) {
-            THROW_IE_EXCEPTION << "Cannot build layer " << layer->getName() << ": " << ex.what();
-        } catch (std::bad_cast& ex) {
-            THROW_IE_EXCEPTION << "Cannot build layer " << layer->getName() << ": " << ex.what();
-        }
-    }
-}
-
-Builder::Network::operator const INetwork::CPtr() {
-    return build();
-}
-
-const ILayer::CPtr Builder::Network::getLayer(idx_t layerId) const noexcept {
-    try {
-        for (auto& layer : getLayers()) {
-            if (layer->getId() == layerId) return layer->build();
-        }
-    } catch (...) {
-    }
-
-    return nullptr;
-}
-
-Builder::Layer::Ptr Builder::Network::getLayer(idx_t layerId) {
-    for (auto& layer : getLayers()) {
-        if (layer->getId() == layerId) return layer;
-    }
-    THROW_IE_EXCEPTION << "Cannot find layer with id: " << layerId;
-}
-
-const std::string& Builder::Network::getName() const noexcept {
-    static std::string errName;
-    try {
-        return parameters.at("name");
-    } catch (...) {
-        return errName;
-    }
-}
-
-const Context& Builder::Network::getContext() const noexcept {
-    static Context errCtx;
-    try {
-        return parameters.at("context");
-    } catch (...) {
-        return errCtx;
-    }
-}
-
-Context& Builder::Network::getContext() noexcept {
-    static Context errCtx;
-    try {
-        return parameters.at("context");
-    } catch (...) {
-        return errCtx;
-    }
-}
-
-Builder::Network::const_iterator Builder::Network::begin() const noexcept {
-    try {
-        return Network::const_iterator(this);
-    } catch (...) {
-        return Network::const_iterator(this, true);
-    }
-}
-
-Builder::Network::const_iterator Builder::Network::end() const noexcept {
-    return Network::const_iterator(this, true);
-}
-
-size_t Builder::Network::size() const noexcept {
-    return static_cast<size_t>(std::distance(std::begin(*this), std::end(*this)));
-}
-
-Builder::Network::iterator Builder::Network::begin() {
-    return Network::iterator(this);
-}
-
-Builder::Network::iterator Builder::Network::end() {
-    return Network::iterator(this, true);
-}
-
-const std::vector<ILayer::CPtr> Builder::Network::getInputs() const noexcept {
-    std::vector<ILayer::CPtr> inputs;
-    try {
-        for (const auto& layer : parameters.at("layers").as<std::vector<Layer::Ptr>>()) {
-            bool isInputLayer = true;
-            for (const auto& connection : getLayerConnections(layer->getId())) {
-                if (connection.to().layerId() == layer->getId()) {
-                    isInputLayer = false;
-                    break;
-                }
-            }
-            if (isInputLayer) {
-                inputs.push_back(layer->build());
-            }
-        }
-    } catch (...) {
-    }
-    return inputs;
-}
-
-std::vector<Builder::Layer::Ptr> Builder::Network::getInputs() {
-    std::vector<Builder::Layer::Ptr> inputs;
-    for (auto& layer : parameters.at("layers").as<std::vector<Layer::Ptr>>()) {
-        bool isInputLayer = true;
-        for (const auto& connection : getLayerConnections(layer->getId())) {
-            if (connection.to().layerId() == layer->getId()) {
-                isInputLayer = false;
-                break;
-            }
-        }
-        if (isInputLayer) {
-            inputs.push_back(layer);
-        }
-    }
-    return inputs;
-}
-
-const std::vector<ILayer::CPtr> Builder::Network::getOutputs() const noexcept {
-    std::vector<ILayer::CPtr> outputs;
-    try {
-        for (const auto& layer : parameters.at("layers").as<std::vector<Layer::Ptr>>()) {
-            bool isOutputLayer = true;
-            for (const auto& connection : getLayerConnections(layer->getId())) {
-                if (connection.from().layerId() == layer->getId()) {
-                    isOutputLayer = false;
-                    break;
-                }
-            }
-            if (isOutputLayer) {
-                outputs.push_back(layer->build());
-            }
-        }
-    } catch (...) {
-    }
-    return outputs;
-}
-
-std::vector<Builder::Layer::Ptr> Builder::Network::getOutputs() {
-    std::vector<Builder::Layer::Ptr> outputs;
-    for (auto& layer : parameters.at("layers").as<std::vector<Layer::Ptr>>()) {
-        bool isOutputLayer = true;
-        for (const auto& connection : getLayerConnections(layer->getId())) {
-            if (connection.from().layerId() == layer->getId()) {
-                isOutputLayer = false;
-                break;
-            }
-        }
-        if (isOutputLayer) {
-            outputs.push_back(layer);
-        }
-    }
-    return outputs;
-}
-
-const std::vector<Connection>& Builder::Network::getConnections() const {
-    return parameters.at("connections").as<std::vector<Connection>>();
-}
-
-const std::vector<Connection> Builder::Network::getLayerConnections(idx_t layerId) const noexcept {
-    std::vector<Connection> layerConnections;
-    try {
-        for (const auto connection : parameters.at("connections").as<std::vector<Connection>>()) {
-            if (connection.from().layerId() == layerId || connection.to().layerId() == layerId)
-                layerConnections.push_back(connection);
-        }
-    } catch (...) {
-    }
-    return layerConnections;
-}
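
A minimal usage sketch of the Builder::Network API deleted above (the topology and all names are hypothetical; only members visible in the removed code are used):

    #include <ie_builders.hpp>

    using namespace InferenceEngine;

    INetwork::CPtr buildTinyNetwork() {
        Builder::Network net("tiny_net");
        // addLayer() returns a generated layer id; the overload taking
        // PortInfo inputs also calls connect() for each of them.
        idx_t in = net.addLayer(Builder::InputLayer("in").setPort(Port({1, 3, 224, 224})));
        idx_t relu = net.addLayer({PortInfo(in)}, Builder::ReLULayer("relu"));
        net.addLayer({PortInfo(relu)}, Builder::OutputLayer("out"));
        // build() runs validate(): port connectivity, shape inference and
        // per-layer parameter checks, then returns an immutable INetwork.
        return net.build();
    }
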
diff --git a/inference-engine/src/legacy_api/src/builders/ie_pooling_layer.cpp b/inference-engine/src/legacy_api/src/builders/ie_pooling_layer.cpp
deleted file mode 100644 (file)
index 63f3cd1..0000000
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <ie_cnn_layer_builder.h>
-
-#include <builders/ie_pooling_layer.hpp>
-#include <string>
-#include <vector>
-
-using namespace InferenceEngine;
-
-Builder::PoolingLayer::PoolingLayer(const std::string& name): LayerDecorator("Pooling", name) {
-    getLayer()->getInputPorts().resize(1);
-    getLayer()->getOutputPorts().resize(1);
-    setKernel({});
-    setStrides({});
-    setPaddingsEnd({});
-    setPaddingsBegin({});
-    setExcludePad(false);
-    setPoolingType(PoolingType::MAX);
-    setRoundingType(RoundingType::CEIL);
-}
-
-Builder::PoolingLayer::PoolingLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("Pooling");
-
-    std::string typeStr = getLayer()->getParameters()["pool-method"];
-    if (typeStr == "max")
-        type = MAX;
-    else if (typeStr == "avg")
-        type = AVG;
-
-    std::string roundTypeStr = getLayer()->getParameters()["rounding_type"];
-    if (roundTypeStr == "ceil")
-        roundingType = CEIL;
-    else if (roundTypeStr == "avg")
-        roundingType = FLOOR;
-}
-
-Builder::PoolingLayer::PoolingLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("Pooling");
-
-    const auto cLayer = static_cast<const PoolingLayer*>(this)->getLayer();
-
-    std::string typeStr = cLayer->getParameters().at("pool-method");
-    if (typeStr == "max")
-        type = MAX;
-    else if (typeStr == "avg")
-        type = AVG;
-
-    std::string roundTypeStr = cLayer->getParameters().at("rounding_type");
-    if (roundTypeStr == "ceil")
-        roundingType = CEIL;
-    else if (roundTypeStr == "avg")
-        roundingType = FLOOR;
-}
-
-Builder::PoolingLayer::operator Builder::Layer() const {
-    Layer genLayer(*getLayer());
-
-    std::vector<size_t> l_kernel = getKernel();
-    std::vector<size_t> l_paddingBegin = getPaddingsBegin();
-    std::vector<size_t> l_paddingEnd = getPaddingsEnd();
-    std::vector<size_t> l_strides = getStrides();
-
-    if (l_paddingBegin.empty() && !l_kernel.empty()) l_paddingBegin.resize(l_kernel.size(), 0);
-    if (l_paddingEnd.empty() && !l_kernel.empty()) l_paddingEnd.resize(l_kernel.size(), 0);
-    if (l_strides.empty() && !l_kernel.empty()) l_strides.resize(l_kernel.size(), 1);
-
-    if (l_kernel.empty() || l_kernel.size() != l_paddingBegin.size() || l_kernel.size() != l_paddingEnd.size() ||
-        l_kernel.size() != l_strides.size())
-        THROW_IE_EXCEPTION << genLayer.getType() << " node " << genLayer.getName() << " contains incorrect parameters!";
-
-    genLayer.getParameters()["kernel"] = l_kernel;
-    genLayer.getParameters()["strides"] = l_strides;
-    genLayer.getParameters()["pads_begin"] = l_paddingBegin;
-    genLayer.getParameters()["pads_end"] = l_paddingEnd;
-    return genLayer;
-}
-
-Builder::PoolingLayer& Builder::PoolingLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::PoolingLayer::getInputPort() const {
-    return getLayer()->getInputPorts()[0];
-}
-
-Builder::PoolingLayer& Builder::PoolingLayer::setInputPort(const Port& port) {
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-
-const Port& Builder::PoolingLayer::getOutputPort() const {
-    return getLayer()->getOutputPorts()[0];
-}
-
-Builder::PoolingLayer& Builder::PoolingLayer::setOutputPort(const Port& port) {
-    getLayer()->getOutputPorts()[0] = port;
-    return *this;
-}
-
-const std::vector<size_t> Builder::PoolingLayer::getKernel() const {
-    return getLayer()->getParameters().at("kernel");
-}
-Builder::PoolingLayer& Builder::PoolingLayer::setKernel(const std::vector<size_t>& kernel) {
-    getLayer()->getParameters()["kernel"] = kernel;
-    return *this;
-}
-
-const std::vector<size_t> Builder::PoolingLayer::getStrides() const {
-    return getLayer()->getParameters().at("strides");
-}
-Builder::PoolingLayer& Builder::PoolingLayer::setStrides(const std::vector<size_t>& strides) {
-    getLayer()->getParameters()["strides"] = strides;
-    return *this;
-}
-
-const std::vector<size_t> Builder::PoolingLayer::getPaddingsBegin() const {
-    return getLayer()->getParameters().at("pads_begin");
-}
-Builder::PoolingLayer& Builder::PoolingLayer::setPaddingsBegin(const std::vector<size_t>& paddings) {
-    getLayer()->getParameters()["pads_begin"] = paddings;
-    return *this;
-}
-
-const std::vector<size_t> Builder::PoolingLayer::getPaddingsEnd() const {
-    return getLayer()->getParameters().at("pads_end");
-}
-Builder::PoolingLayer& Builder::PoolingLayer::setPaddingsEnd(const std::vector<size_t>& paddings) {
-    getLayer()->getParameters()["pads_end"] = paddings;
-    return *this;
-}
-
-Builder::PoolingLayer::PoolingType Builder::PoolingLayer::getPoolingType() const {
-    return type;
-}
-Builder::PoolingLayer& Builder::PoolingLayer::setPoolingType(Builder::PoolingLayer::PoolingType type) {
-    std::string typeStr;
-    switch (type) {
-    case MAX:
-        typeStr = "max";
-        break;
-    case AVG:
-        typeStr = "avg";
-        break;
-    }
-    getLayer()->getParameters()["pool-method"] = typeStr;
-    this->type = type;
-    return *this;
-}
-
-Builder::PoolingLayer::RoundingType Builder::PoolingLayer::getRoundingType() const {
-    return roundingType;
-}
-Builder::PoolingLayer& Builder::PoolingLayer::setRoundingType(Builder::PoolingLayer::RoundingType type) {
-    roundingType = type;
-    std::string typeStr;
-    switch (type) {
-    case CEIL:
-        typeStr = "ceil";
-        break;
-    case FLOOR:
-        typeStr = "floor";
-        break;
-    }
-    getLayer()->getParameters()["rounding_type"] = typeStr;
-    return *this;
-}
-
-bool Builder::PoolingLayer::getExcludePad() const {
-    return getLayer()->getParameters().at("exclude-pad");
-}
-
-Builder::PoolingLayer& Builder::PoolingLayer::setExcludePad(bool exclude) {
-    getLayer()->getParameters()["exclude-pad"] = exclude;
-    return *this;
-}
-
-REG_VALIDATOR_FOR(Pooling, [](const Builder::Layer::CPtr& layer, bool partial) {
-    // WA for old IRs
-    if (layer->getParameters().find("kernel") == layer->getParameters().end() &&
-        layer->getParameters().find("kernel-x") != layer->getParameters().end() &&
-        layer->getParameters().find("kernel-y") != layer->getParameters().end())
-        return;
-
-    Builder::PoolingLayer poolBuilder(layer);
-    std::vector<size_t> l_kernel = poolBuilder.getKernel();
-    std::vector<size_t> l_paddingBegin = poolBuilder.getPaddingsBegin();
-    std::vector<size_t> l_paddingEnd = poolBuilder.getPaddingsEnd();
-    std::vector<size_t> l_strides = poolBuilder.getStrides();
-
-    if (l_paddingBegin.empty() && !l_kernel.empty()) l_paddingBegin.resize(l_kernel.size(), 0);
-    if (l_paddingEnd.empty() && !l_kernel.empty()) l_paddingEnd.resize(l_kernel.size(), 0);
-    if (l_strides.empty() && !l_kernel.empty()) l_strides.resize(l_kernel.size(), 1);
-
-    if (l_kernel.empty() || l_kernel.size() != l_paddingBegin.size() || l_kernel.size() != l_paddingEnd.size() ||
-        l_kernel.size() != l_strides.size())
-        THROW_IE_EXCEPTION << layer->getType() << " node " << layer->getName() << " contains incorrect parameters!";
-});
-
-REG_CONVERTER_FOR(Pooling, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    if (cnnLayer->params.find("kernel") == cnnLayer->params.end() &&
-        cnnLayer->params.find("kernel-x") != cnnLayer->params.end() &&
-        cnnLayer->params.find("kernel-y") != cnnLayer->params.end())
-        return;
-    std::vector<unsigned int> tmp = cnnLayer->GetParamAsUInts("kernel");
-    layer.getParameters()["kernel"] = std::vector<size_t>(tmp.size());
-    for (size_t i = 0; i < tmp.size(); ++i) {
-        layer.getParameters()["kernel"].as<std::vector<size_t>>()[i] = static_cast<size_t>(tmp[i]);
-    }
-
-    tmp = cnnLayer->GetParamAsUInts("strides");
-    layer.getParameters()["strides"] = std::vector<size_t>(tmp.size());
-    for (size_t i = 0; i < tmp.size(); ++i) {
-        layer.getParameters()["strides"].as<std::vector<size_t>>()[i] = static_cast<size_t>(tmp[i]);
-    }
-
-    tmp = cnnLayer->GetParamAsUInts("pads_begin");
-    layer.getParameters()["pads_begin"] = std::vector<size_t>(tmp.size());
-    for (size_t i = 0; i < tmp.size(); ++i) {
-        layer.getParameters()["pads_begin"].as<std::vector<size_t>>()[i] = static_cast<size_t>(tmp[i]);
-    }
-
-    tmp = cnnLayer->GetParamAsUInts("pads_end");
-    layer.getParameters()["pads_end"] = std::vector<size_t>(tmp.size());
-    for (size_t i = 0; i < tmp.size(); ++i) {
-        layer.getParameters()["pads_end"].as<std::vector<size_t>>()[i] = static_cast<size_t>(tmp[i]);
-    }
-
-    layer.getParameters()["exclude-pad"] = cnnLayer->GetParamAsBool("exclude-pad", false);
-    layer.getParameters()["rounding_type"] = cnnLayer->GetParamAsString("rounding_type", "ceil");
-    layer.getParameters()["pool-method"] = cnnLayer->GetParamAsString("pool-method", "max");
-});
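
A minimal sketch of the PoolingLayer builder removed above (kernel and stride values are hypothetical):

    #include <builders/ie_pooling_layer.hpp>

    InferenceEngine::Builder::PoolingLayer pool("pool1");
    pool.setKernel({2, 2})
        .setStrides({2, 2})
        .setPaddingsBegin({0, 0})
        .setPaddingsEnd({0, 0})
        .setPoolingType(InferenceEngine::Builder::PoolingLayer::PoolingType::MAX)
        .setRoundingType(InferenceEngine::Builder::PoolingLayer::RoundingType::FLOOR);
    // The Layer conversion operator checks that kernel, strides and both
    // padding vectors have matching ranks before producing the generic layer.
    InferenceEngine::Builder::Layer generic = pool;
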
diff --git a/inference-engine/src/legacy_api/src/builders/ie_split_layer.cpp b/inference-engine/src/legacy_api/src/builders/ie_split_layer.cpp
deleted file mode 100644 (file)
index 473eea5..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <ie_cnn_layer_builder.h>
-
-#include <builders/ie_split_layer.hpp>
-#include <string>
-#include <vector>
-
-using namespace InferenceEngine;
-
-Builder::SplitLayer::SplitLayer(const std::string& name): LayerDecorator("Split", name) {
-    getLayer()->getInputPorts().resize(1);
-    setAxis(1);
-}
-
-Builder::SplitLayer::SplitLayer(const Layer::Ptr& layer): LayerDecorator(layer) {
-    checkType("Split");
-}
-
-Builder::SplitLayer::SplitLayer(const Layer::CPtr& layer): LayerDecorator(layer) {
-    checkType("Split");
-}
-
-Builder::SplitLayer& Builder::SplitLayer::setName(const std::string& name) {
-    getLayer()->setName(name);
-    return *this;
-}
-
-const Port& Builder::SplitLayer::getInputPort() const {
-    return getLayer()->getInputPorts()[0];
-}
-
-Builder::SplitLayer& Builder::SplitLayer::setInputPort(const Port& port) {
-    getLayer()->getInputPorts()[0] = port;
-    return *this;
-}
-
-const std::vector<Port>& Builder::SplitLayer::getOutputPorts() const {
-    return getLayer()->getOutputPorts();
-}
-
-Builder::SplitLayer& Builder::SplitLayer::setOutputPorts(const std::vector<Port>& ports) {
-    getLayer()->getOutputPorts() = ports;
-    return *this;
-}
-
-size_t Builder::SplitLayer::getAxis() const {
-    return getLayer()->getParameters().at("axis");
-}
-
-Builder::SplitLayer& Builder::SplitLayer::setAxis(size_t axis) {
-    getLayer()->getParameters()["axis"] = axis;
-    return *this;
-}
-
-REG_CONVERTER_FOR(Split, [](const CNNLayerPtr& cnnLayer, Builder::Layer& layer) {
-    layer.getParameters()["axis"] = static_cast<size_t>(cnnLayer->GetParamAsUInt("axis", 1));
-});
\ No newline at end of file
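
A corresponding sketch for the SplitLayer builder removed above (the axis and port count are hypothetical):

    #include <builders/ie_split_layer.hpp>

    InferenceEngine::Builder::SplitLayer split("split1");
    split.setAxis(1);  // stored as parameters["axis"]; the constructor defaults it to 1
    split.setOutputPorts({InferenceEngine::Port(), InferenceEngine::Port()});  // two output branches
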
index 3283cdc..941b3e8 100644 (file)
 #include "ie_cnn_layer_builder_ngraph.h"
 
 #include <debug.h>
+#include <ngraph/opsets/opset1.hpp>
 #include "transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.hpp"
 #include "transformations/utils/utils.hpp"
+#include "transformations/rt_info/fused_names_attribute.hpp"
 
 namespace InferenceEngine {
 namespace details {
@@ -445,6 +447,7 @@ std::shared_ptr<CNNNetworkImpl> convertFunctionToICNNNetwork(const std::shared_p
                 std::make_shared<Builder::NodeConverter<::ngraph::op::Sign>>(),
                 std::make_shared<Builder::NodeConverter<::ngraph::op::Sinh>>(),
                 std::make_shared<Builder::NodeConverter<::ngraph::op::SquaredDifference>>(),
+                std::make_shared<Builder::NodeConverter<::ngraph::op::v1::Select>>(),
                 std::make_shared<Builder::NodeConverter<::ngraph::op::v1::Softmax>>(),
                 std::make_shared<Builder::NodeConverter<::ngraph::op::v1::Split>>(),
                 std::make_shared<Builder::NodeConverter<::ngraph::op::VariadicSplit>>(),
@@ -578,7 +581,14 @@ std::shared_ptr<CNNNetworkImpl> convertFunctionToICNNNetwork(const std::shared_p
         rt_info["keep_constants"] = attr.asVariant();
 
         CNNLayerPtr cnnLayer = createCNNLayer(layer);
-        for (const auto &rt : layer->get_rt_info()) {
+
+        // Set originalLayersNames from FusedNames
+        std::string originalNames = ::ngraph::getFusedNames(layer);
+        if (!originalNames.empty()) {
+            cnnLayer->params["originalLayersNames"] = originalNames;
+        }
+
+        for (const auto &rt : rt_info) {
             Parameter param(rt.second);
             if (param.empty()) continue;
             if (details::CaselessEq<std::string>()(rt.first, "affinity")) {
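
The originalLayersNames handling added above relies on ngraph's fused-names runtime attribute; roughly (node construction elided, the variable name is hypothetical):

    #include <transformations/rt_info/fused_names_attribute.hpp>

    // After transformations fuse several operations into `node`, the
    // attribute keeps the original friendly names; the hunk above copies
    // them into cnnLayer->params["originalLayersNames"].
    std::string names = ::ngraph::getFusedNames(node);  // e.g. "conv1,relu1"
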
diff --git a/inference-engine/src/legacy_api/src/ie_cnn_layer_builder.cpp b/inference-engine/src/legacy_api/src/ie_cnn_layer_builder.cpp
deleted file mode 100644 (file)
index 4144f9d..0000000
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <ie_cnn_layer_builder.h>
-
-#include <limits>
-#include <set>
-#include <sstream>
-
-#include "blob_factory.hpp"
-#include "ie_memcpy.h"
-
-namespace InferenceEngine {
-
-namespace Builder {
-
-IE_SUPPRESS_DEPRECATED_START
-
-ConverterRegister::ConverterRegister(const std::string& type,
-                                     const std::function<void(const CNNLayerPtr&, Layer&)>& converter) {
-    if (getConvertersHolder().converters.find(type) == getConvertersHolder().converters.end())
-        getConvertersHolder().converters[type] = converter;
-}
-
-ConvertersHolder& ConverterRegister::getConvertersHolder() {
-    static Builder::ConvertersHolder holder;
-    return holder;
-}
-
-Layer builderFromCNNLayer(const CNNLayerPtr& cnnLayer) {
-    Builder::Layer layer(cnnLayer->type, cnnLayer->name);
-    std::vector<Port> inputPorts;
-    for (const auto& data : cnnLayer->insData) {
-        auto lockedData = data.lock();
-        if (!lockedData) continue;
-        inputPorts.emplace_back(lockedData->getTensorDesc().getDims());
-    }
-
-    std::vector<Port> outputPorts;
-    for (const auto& data : cnnLayer->outData) {
-        outputPorts.emplace_back(data->getTensorDesc().getDims());
-    }
-
-    size_t inputsCount = inputPorts.size();
-    std::map<std::string, Blob::Ptr> blobs = cnnLayer->blobs;
-    if (blobs.find("weights") != blobs.end()) {
-        auto port = Port();
-        port.setParameter("type", "weights");
-        inputPorts.push_back(port);
-    }
-    if (blobs.find("biases") != blobs.end()) {
-        if (inputsCount == inputPorts.size()) {
-            auto port = Port();
-            port.setParameter("type", "weights");
-            inputPorts.push_back(port);
-        }
-
-        auto port = Port();
-        port.setParameter("type", "biases");
-        inputPorts.push_back(port);
-    }
-    for (const auto& it : blobs) {
-        if (it.first == "weights" || it.first == "biases") continue;
-        auto port = Port();
-        port.setParameter("type", it.first);
-        inputPorts.emplace_back(port);
-    }
-
-    std::map<std::string, Parameter> params;
-    for (const auto& it : cnnLayer->params) {
-        params[it.first] = it.second;
-    }
-
-    layer.setInputPorts(inputPorts).setOutputPorts(outputPorts).setParameters(params);
-
-    Builder::ConverterRegister::convert(cnnLayer, layer);
-
-    return layer;
-}
-
-std::map<std::string, std::string> convertParameters2Strings(const std::map<std::string, Parameter>& parameters) {
-    std::map<std::string, std::string> oldParams;
-    for (const auto& param : parameters) {
-        // skip blobs and ports
-        if (param.second.is<Blob::CPtr>() || param.second.is<Blob::Ptr>() || param.second.is<std::vector<Port>>() ||
-            param.second.is<PreProcessInfo>())
-            continue;
-        if (param.second.is<std::string>() || param.second.is<std::vector<std::string>>()) {
-            oldParams[param.first] = Builder::convertParameter2String<std::string>(param.second);
-        } else if (param.second.is<int>() || param.second.is<std::vector<int>>()) {
-            oldParams[param.first] = Builder::convertParameter2String<int>(param.second);
-        } else if (param.second.is<float>() || param.second.is<std::vector<float>>()) {
-            oldParams[param.first] = Builder::convertParameter2String<float>(param.second);
-        } else if (param.second.is<unsigned int>() || param.second.is<std::vector<unsigned int>>()) {
-            oldParams[param.first] = Builder::convertParameter2String<unsigned int>(param.second);
-        } else if (param.second.is<size_t>() || param.second.is<std::vector<size_t>>()) {
-            oldParams[param.first] = Builder::convertParameter2String<size_t>(param.second);
-        } else if (param.second.is<bool>() || param.second.is<std::vector<bool>>()) {
-            oldParams[param.first] = Builder::convertParameter2String<bool>(param.second);
-        } else {
-            THROW_IE_EXCEPTION << "Parameter " << param.first << " has unsupported parameter type!";
-        }
-    }
-    return oldParams;
-}
-
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace Builder
-}  // namespace InferenceEngine
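
A rough sketch of how the removed convertParameters2Strings was called (parameter names and values are hypothetical); it flattened typed Parameter values into the old string-map form, skipping blobs, ports and preprocessing info:

    std::map<std::string, InferenceEngine::Parameter> params;
    params["stride"] = 2;                               // becomes "2"
    params["scales"] = std::vector<float>{1.0f, 0.5f};  // becomes a comma-separated list
    std::map<std::string, std::string> oldParams =
        InferenceEngine::Builder::convertParameters2Strings(params);
    // Unsupported parameter types made the function throw.
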
index 60b0de1..3a347b4 100644 (file)
@@ -7,10 +7,9 @@
 #include <ie_api.h>
 #include <ie_blob.h>
 #include <ie_layers.h>
+#include <ie_parameter.hpp>
 
-#include <builders/ie_layer_builder.hpp>
 #include <details/caseless.hpp>
-#include <ie_network.hpp>
 #include <map>
 #include <memory>
 #include <string>
@@ -20,38 +19,6 @@ namespace InferenceEngine {
 
 namespace Builder {
 
-IE_SUPPRESS_DEPRECATED_START
-
-Layer builderFromCNNLayer(const CNNLayerPtr& cnnLayer);
-
-struct ConvertersHolder {
-    details::caseless_map<std::string, std::function<void(const CNNLayerPtr& cnnLayer, Layer&)>> converters;
-};
-
-/**
- * @brief This class registers layer validators
- */
-class INFERENCE_ENGINE_API_CLASS(ConverterRegister) {
-public:
-    /**
-     * @brief The constructor registers new layer validator
-     * @param type Layer type
-     * @param validator Layer validator
-     */
-    explicit ConverterRegister(const std::string& type,
-                               const std::function<void(const CNNLayerPtr&, Layer&)>& converter);
-
-    static void convert(const CNNLayerPtr& cnnLayer, Layer& layer) {
-        if (getConvertersHolder().converters.find(layer.getType()) != getConvertersHolder().converters.end())
-            getConvertersHolder().converters[layer.getType()](cnnLayer, layer);
-    }
-
-private:
-    static ConvertersHolder& getConvertersHolder();
-};
-
-IE_SUPPRESS_DEPRECATED_END
-
 #define REG_CONVERTER_FOR(__type, __converter) \
     static InferenceEngine::Builder::ConverterRegister _reg_converter_##__type(#__type, __converter)
 
@@ -73,8 +40,5 @@ inline std::string convertParameter2String<std::string>(const Parameter& paramet
     return parameter.as<std::string>();
 }
 
-INFERENCE_ENGINE_API_CPP(std::map<std::string, std::string>)
-convertParameters2Strings(const std::map<std::string, Parameter>& parameters);
-
 }  // namespace Builder
 }  // namespace InferenceEngine
index 02359c9..62fe7b5 100644 (file)
@@ -505,18 +505,17 @@ CNNLayer::Ptr NodeConverter<ngraph::op::Exp>::createLayer(const std::shared_ptr<
 
 template <>
 CNNLayer::Ptr NodeConverter<ngraph::op::MVN>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
-    LayerParams params = {layer->get_friendly_name(), "MVN",
-                          details::convertPrecision(layer->get_output_element_type(0))};
+    LayerParams params = {layer->get_friendly_name(), "MVN", details::convertPrecision(layer->get_output_element_type(0))};
     auto res = std::make_shared<InferenceEngine::MVNLayer>(params);
     auto castedLayer = ngraph::as_type_ptr<ngraph::op::MVN>(layer);
     if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name;
 
     res->params["eps"] = asString(castedLayer->get_eps());
-    if (castedLayer->get_reduction_axes().size() == castedLayer->get_shape().size()) {
-        res->params["across_channels"] = "1";
-    } else {
-        res->params["across_channels"] = "0";
-    }
+
+    const size_t channelAxis = 1;
+    ngraph::AxisSet reductionAxes = castedLayer->get_reduction_axes();
+    res->params["across_channels"] = asString(reductionAxes.count(channelAxis) > 0);
+
     res->params["normalize_variance"] = asString(castedLayer->get_normalize_variance());
     return res;
 }
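
A concrete reading of the new across_channels logic, for a hypothetical 4D (NCHW) MVN node:

    // reduction_axes = {1, 2, 3} -> across_channels = "1" (the channel axis is reduced)
    // reduction_axes = {2, 3}    -> across_channels = "0"

The old size comparison mislabeled the common {1, 2, 3} case on 4D inputs, where reduction_axes.size() == 3 but shape.size() == 4.
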
@@ -770,6 +769,20 @@ CNNLayer::Ptr NodeConverter<ngraph::op::ConvolutionIE>::createLayer(
     InferenceEngine::Parameter attr(rt_info["keep_constants"]);
     bool keep_constants = attr.as<bool>();
 
+    // These params are added for CPU tests
+    if (rt_info.find("PrimitivesPriority") != rt_info.end()) {
+        attr = rt_info["PrimitivesPriority"];
+        res->params["PrimitivesPriority"] = attr.as<std::string>();
+    }
+    if (rt_info.find("InputMemoryFormats") != rt_info.end()) {
+        attr = rt_info["InputMemoryFormats"];
+        res->params["InputMemoryFormats"] = attr.as<std::string>();
+    }
+    if (rt_info.find("OutputMemoryFormats") != rt_info.end()) {
+        attr = rt_info["OutputMemoryFormats"];
+        res->params["OutputMemoryFormats"] = attr.as<std::string>();
+    }
+
     NodeConverter<ngraph::op::Constant> converter;
     const auto weightsNode = castedLayer->input_value(1).get_node_shared_ptr();
     if (!keep_constants && converter.canCreate(weightsNode)) {
@@ -1377,6 +1390,24 @@ CNNLayer::Ptr NodeConverter<ngraph::op::SquaredDifference>::createLayer(const st
 }
 
 template <>
+CNNLayer::Ptr NodeConverter<ngraph::op::v1::Select>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
+    LayerParams params = {layer->get_friendly_name(), "Select", details::convertPrecision(layer->get_output_element_type(0))};
+
+    auto res = std::make_shared<InferenceEngine::CNNLayer>(params);
+    auto castedLayer = ngraph::as_type_ptr<ngraph::op::v1::Select>(layer);
+    if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name;
+
+    auto broadcast = castedLayer->get_auto_broadcast().m_type;
+    if (broadcast == ngraph::op::AutoBroadcastType::NUMPY) {
+        res->params["auto_broadcast"] = "numpy";
+    } else if (broadcast == ngraph::op::AutoBroadcastType::NONE) {
+        res->params["auto_broadcast"] = "none";
+    }
+
+    return res;
+}
+
+template <>
 CNNLayer::Ptr NodeConverter<ngraph::op::DetectionOutput>::createLayer(
     const std::shared_ptr<ngraph::Node>& layer) const {
     LayerParams params = {layer->get_friendly_name(), "DetectionOutput",
index 378aaac..43f6918 100644 (file)
@@ -9,7 +9,6 @@
 #include <ie_layers.h>
 
 #include <details/caseless.hpp>
-#include <ie_network.hpp>
 #include <map>
 #include <memory>
 #include <ngraph/node.hpp>
diff --git a/inference-engine/src/legacy_api/src/ie_context.cpp b/inference-engine/src/legacy_api/src/ie_context.cpp
deleted file mode 100644 (file)
index 37838f2..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <ie_context.hpp>
-#include <memory>
-#include <shape_infer/built-in/ie_built_in_holder.hpp>
-#include <string>
-#include <vector>
-
-IE_SUPPRESS_DEPRECATED_START
-
-using namespace InferenceEngine;
-
-Context::Context() {
-    auto builtIn = std::make_shared<ShapeInfer::BuiltInShapeInferHolder>();
-    try {
-        addExtension(builtIn);
-    } catch (...) {
-    }
-}
-
-void Context::addExtension(const IShapeInferExtensionPtr& ext) {
-    // Get all shape infer impls
-    char** types = nullptr;
-    unsigned int size = 0;
-    ResponseDesc resp;
-    StatusCode sts = ext->getShapeInferTypes(types, size, &resp);
-    if (sts != OK) THROW_IE_EXCEPTION << "Failed to get types from extension: " << resp.msg;
-    std::vector<std::string> implTypes;
-
-    std::string badLayerTypes;
-    for (int i = 0; i < size; i++) {
-        std::string type(types[i], strlen(types[i]));
-        delete[] types[i];
-        if (shapeInferImpls.find(type) != shapeInferImpls.end()) {
-            if (!badLayerTypes.empty()) badLayerTypes += ", ";
-            badLayerTypes += type;
-        }
-        implTypes.emplace_back(type);
-    }
-    delete[] types;
-
-    if (!badLayerTypes.empty())
-        THROW_IE_EXCEPTION << "Failed to add extension with already registered types: " << badLayerTypes;
-
-    for (const auto& implType : implTypes) {
-        IShapeInferImpl::Ptr impl;
-        sts = ext->getShapeInferImpl(impl, implType.c_str(), &resp);
-        if (sts != OK) THROW_IE_EXCEPTION << "Failed to get implementation for " << implType << "type: " << resp.msg;
-        shapeInferImpls[implType] = impl;
-    }
-}
-
-void Context::addShapeInferImpl(const std::string& type, const IShapeInferImpl::Ptr& impl) {
-    if (shapeInferImpls.find(type) != shapeInferImpls.end())
-        THROW_IE_EXCEPTION << "Failed to add implementation for already registered type: " << type;
-    shapeInferImpls[type] = impl;
-}
-
-IShapeInferImpl::Ptr Context::getShapeInferImpl(const std::string& type) {
-    return shapeInferImpls.find(type) == shapeInferImpls.end() ? nullptr : shapeInferImpls[type];
-}
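
A minimal sketch of how the removed Context registered shape-inference implementations (the type name and implementation are hypothetical):

    InferenceEngine::Context ctx;  // the constructor pre-registers the built-in shape infer holder
    InferenceEngine::IShapeInferImpl::Ptr myImpl;  // assume a custom implementation
    ctx.addShapeInferImpl("MyCustomOp", myImpl);   // throws if "MyCustomOp" is already registered
    auto impl = ctx.getShapeInferImpl("MyCustomOp");  // nullptr when the type is unknown
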
index 9c38fe6..d1266e1 100644 (file)
@@ -721,7 +721,7 @@ void ScaleShiftValidator::parseParams(CNNLayer* layer) {
     if (!casted) {
         THROW_IE_EXCEPTION << "Layer is not instance of ScaleShiftLayer class";
     }
-    if (!casted->params.empty()) {
+    if (casted->params.count("broadcast")) {
         casted->_broadcast = casted->GetParamAsUInt("broadcast", 2);
     }
 }
@@ -771,13 +771,11 @@ void ReshapeValidator::parseParams(CNNLayer* layer) {
         THROW_IE_EXCEPTION << "Layer is not instance of ReshapeLayer class";
     }
     casted->shape.clear();
-    if (!casted->params.empty()) {
-        if (casted->type == "Flatten") {
-            casted->num_axes = casted->GetParamAsInt("end_axis", -1);
-            casted->axis = casted->GetParamAsInt("axis", 0);
-        } else {
-            casted->shape = casted->GetParamAsInts("dim", {});
-        }
+    if (casted->type == "Flatten" && casted->params.count("end_axis") && casted->params.count("axis")) {
+        casted->num_axes = casted->GetParamAsInt("end_axis", -1);
+        casted->axis = casted->GetParamAsInt("axis", 0);
+    } else if (casted->params.count("dim")) {
+        casted->shape = casted->GetParamAsInts("dim", {});
     }
 }
 
@@ -885,7 +883,7 @@ void ReLUValidator::parseParams(CNNLayer* layer) {
     if (!casted) {
         THROW_IE_EXCEPTION << "Layer is not instance of ReLULayer class";
     }
-    if (!casted->params.empty()) {
+    if (casted->params.count("negative_slope")) {
         casted->negative_slope = casted->GetParamAsFloat("negative_slope");
     }
 }
@@ -895,7 +893,7 @@ void ReLUValidator::checkParams(const CNNLayer* layer) {
     if (!casted) {
         THROW_IE_EXCEPTION << "Layer is not instance of ReLULayer class";
     }
-    if (!casted->params.empty()) {
+    if (casted->params.count("negative_slope")) {
         float negative_slope = casted->GetParamAsFloat("negative_slope");
     }
 }
@@ -2682,40 +2680,34 @@ void NormalizeValidator::checkParams(const CNNLayer* layer) {
 SelectValidator::SelectValidator(const std::string& _type): LayerValidator(_type) {}
 
 void SelectValidator::checkShapes(const CNNLayer* layer, const std::vector<SizeVector>& inShapes) const {
-    enum { condition, then_, else_, numOfInputs };
-    auto casted = dynamic_cast<const SelectLayer*>(layer);
-    if (!casted) {
-        THROW_IE_EXCEPTION << layer->name << " Layer is not instance of SelectLayer class";
-    }
+    enum { CONDITION, THEN, ELSE, numOfInputs };
 
     size_t numInputs = inShapes.size();
-    if (numOfInputs != numInputs) THROW_IE_EXCEPTION << " Select can take 3 inputs, but actually it has: " << numInputs;
+    if (numOfInputs != numInputs) THROW_IE_EXCEPTION << "Select layer with name '" << layer->name << "' takes 3 inputs, but actually it has: " << numInputs;
 
-    if (inShapes[then_] != inShapes[else_]) {
-        THROW_IE_EXCEPTION << " Positive input shape should be the same as negative input shape";
-    }
+    size_t new_rank = inShapes[ELSE].size();
+    new_rank = std::max(new_rank, inShapes[THEN].size());
 
-    if (inShapes[condition].size() > inShapes[then_].size()) {
-        THROW_IE_EXCEPTION << " Condition input dimensions count (" << inShapes[condition].size()
-                           << ") should be less or equel then"
-                           << " posititve input dimension count (" << inShapes[then_].size() << ")";
-    }
+    if (inShapes[CONDITION].size() > new_rank)
+        THROW_IE_EXCEPTION << "Select layer with name '" << layer->name << "' has 'Mask' input's rank greater than the broadcasted 'Then' and 'Else' inputs' rank";
 
-    if (inShapes[condition].size() > inShapes[else_].size()) {
-        THROW_IE_EXCEPTION << " Condition input dimensions count (" << inShapes[condition].size()
-                           << ") should be less or equel then"
-                           << " negative input dimension count (" << inShapes[else_].size() << ")";
-    }
+    for (size_t i = 0; i < new_rank; i++) {
+        auto in1 = i < (new_rank - inShapes[THEN].size()) ? 1 : inShapes[THEN][i - (new_rank - inShapes[THEN].size())];
+        auto in2 = i < (new_rank - inShapes[ELSE].size()) ? 1 : inShapes[ELSE][i - (new_rank - inShapes[ELSE].size())];
+
+        size_t tmp = 0;
+        if (in1 == in2 || in1 == 1 || in2 == 1)
+            tmp = std::max(in1, in2);
+        else
+            THROW_IE_EXCEPTION << "Select layer with name '" << layer->name << "' has incompatible 'Then' and 'Else' inputs' shapes";
 
-    for (std::size_t i = 0; i < inShapes[condition].size(); ++i) {
-        const auto& cond_dim = inShapes[condition][inShapes[condition].size() - 1 - i];
-        const auto& then_dim = inShapes[then_][inShapes[then_].size() - 1 - i];
+        auto in0 = i < (new_rank - inShapes[CONDITION].size()) ? 1 : inShapes[CONDITION][i - (new_rank - inShapes[CONDITION].size())];
 
-        if (cond_dim != then_dim && cond_dim != 1) {
-            THROW_IE_EXCEPTION << " Condition input dimension " << (inShapes[condition].size() - 1 - i) << " ("
-                               << cond_dim << ") should be less or equel then posititve and negative"
-                               << " input dimension " << (inShapes[then_].size() - 1 - i) << " (" << then_dim << ")";
-        }
+        if (tmp == in0 || in0 == 1)
+            tmp = std::max(tmp, in0);
+        else
+            THROW_IE_EXCEPTION << "Select layer with name '" << layer->name
+                               << "' has incompatible 'Mask' input's shape and the broadcasted 'Then' and 'Else' inputs' shapes";
     }
 }
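
To make the new broadcast check concrete, consider hypothetical shapes Then = {3, 1, 5}, Else = {4, 5} and Mask = {5}: new_rank is 3; the right-aligned dimension pairs (3, 1), (1, 4) and (5, 5) each satisfy the equal-or-one rule, giving a broadcasted shape of {3, 4, 5}, and Mask's trailing {5} is compatible with it, so checkShapes returns without throwing.
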
 
@@ -3128,6 +3120,61 @@ void ScatterUpdateValidator::checkShapes(const CNNLayer* layer, const vector<Siz
         THROW_IE_EXCEPTION << layer->name << " Precision should be equal for input tensors 'Data' and 'Updates'";
 }
 
+ScatterElementsUpdateValidator::ScatterElementsUpdateValidator(const std::string& _type): LayerValidator(_type) {}
+
+void ScatterElementsUpdateValidator::parseParams(CNNLayer* layer) {
+    auto casted = dynamic_cast<ScatterElementsUpdateLayer*>(layer);
+    if (!casted) {
+        THROW_IE_EXCEPTION << layer->name << " Layer is not instance of ScatterElementsUpdateLayer class";
+    }
+}
+
+void ScatterElementsUpdateValidator::checkShapes(const CNNLayer* layer, const vector<SizeVector>& inShapes) const {
+    auto casted = dynamic_cast<const ScatterElementsUpdateLayer*>(layer);
+    if (!casted) {
+        THROW_IE_EXCEPTION << layer->name << " Layer is not instance of ScatterElementsUpdateLayer class";
+    }
+
+    size_t numInputs = inShapes.size();
+    if (numInputs != 4)
+        THROW_IE_EXCEPTION << layer->name << " ScatterElementsUpdate takes exactly 4 inputs, but actually it has: " << numInputs;
+
+    static constexpr int DATA = 0;
+    static constexpr int INDICES = 1;
+    static constexpr int UPDATES = 2;
+    static constexpr int AXIS = 3;
+
+    if (inShapes[DATA].size() < 1)
+        THROW_IE_EXCEPTION << layer->name << " 'Data' tensor rank must be >= 1";
+
+    if (inShapes[INDICES].size() < 1)
+        THROW_IE_EXCEPTION << layer->name << " 'Indices' tensor rank must be >= 1";
+
+    if (inShapes[UPDATES].size() < 1)
+        THROW_IE_EXCEPTION << layer->name << " 'Updates' tensor rank must be >= 1";
+
+    if (!(inShapes[AXIS].size() == 1 && inShapes[AXIS][0] == 1))
+        THROW_IE_EXCEPTION << layer->name << " 'Axis' tensor must be 1D array of 1 element";
+
+    if (inShapes[INDICES].size() != inShapes[DATA].size())
+        THROW_IE_EXCEPTION << layer->name << " 'Indices' tensor rank must match 'Data' tensor rank";
+
+    if (inShapes[UPDATES].size() != inShapes[DATA].size())
+        THROW_IE_EXCEPTION << layer->name << " 'Updates' tensor rank must match 'Data' tensor rank";
+
+    Precision inIdxPrecision = layer->insData[INDICES].lock()->getTensorDesc().getPrecision();
+    if (inIdxPrecision != Precision::FP32 && inIdxPrecision != Precision::I32)
+        THROW_IE_EXCEPTION << layer->name << " Incorrect input 'Indices' precision. Only FP32 or I32 are supported!";
+
+    Precision inAxisPrecision = layer->insData[AXIS].lock()->getTensorDesc().getPrecision();
+    if (inAxisPrecision != Precision::FP32 && inAxisPrecision != Precision::I32)
+        THROW_IE_EXCEPTION << layer->name << " Incorrect input 'Axis' precision. Only FP32 or I32 are supported!";
+
+    if (layer->insData[DATA].lock()->getTensorDesc().getPrecision() !=
+        layer->insData[UPDATES].lock()->getTensorDesc().getPrecision())
+        THROW_IE_EXCEPTION << layer->name << " Precision should be equal for input tensors 'Data' and 'Updates'";
+}
+
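
For illustration, shapes that pass the new ScatterElementsUpdateValidator checks (values are hypothetical):

    // Data    {3, 4}  - any rank >= 1
    // Indices {2, 4}  - rank must equal Data's rank; precision FP32 or I32
    // Updates {2, 4}  - rank must equal Data's rank; precision must match Data's
    // Axis    {1}     - 1D tensor with exactly one element; precision FP32 or I32
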
 #define REG_LAYER_VALIDATOR_FOR_TYPE(__validator, __type) _validators[#__type] = std::make_shared<__validator>(#__type)
 
 LayerValidators::LayerValidators() {
@@ -3254,6 +3301,7 @@ LayerValidators::LayerValidators() {
     REG_LAYER_VALIDATOR_FOR_TYPE(UniqueValidator, Unique);
     REG_LAYER_VALIDATOR_FOR_TYPE(NMSValidator, NonMaxSuppression);
     REG_LAYER_VALIDATOR_FOR_TYPE(ScatterUpdateValidator, ScatterUpdate);
+    REG_LAYER_VALIDATOR_FOR_TYPE(ScatterElementsUpdateValidator, ScatterElementsUpdate);
 }
 
 }  // namespace InferenceEngine
index 6cbd18d..307007b 100644 (file)
@@ -978,5 +978,14 @@ public:
     void checkShapes(const CNNLayer* layer, const std::vector<SizeVector>& inShapes) const override;
 };
 
+class ScatterElementsUpdateValidator : public LayerValidator {
+public:
+    explicit ScatterElementsUpdateValidator(const std::string& _type);
+
+    void parseParams(CNNLayer* layer) override;
+
+    void checkShapes(const CNNLayer* layer, const std::vector<SizeVector>& inShapes) const override;
+};
+
 }  // namespace details
 }  // namespace InferenceEngine
index e58e562..1ce342c 100644 (file)
@@ -6,9 +6,293 @@
 
 using namespace InferenceEngine;
 
-//
-// ie_layers.h
-//
+const DataPtr CNNLayer::input() const {
+    if (insData.empty()) {
+        THROW_IE_EXCEPTION << "Internal error: input data is empty";
+    }
+    auto lockedFirstInsData = insData[0].lock();
+    if (!lockedFirstInsData) {
+        THROW_IE_EXCEPTION << "Internal error: unable to lock weak_ptr\n";
+    }
+    return lockedFirstInsData;
+}
+
+float CNNLayer::ie_parse_float(const std::string& str) {
+    if (str == "-inf") {
+        return -std::numeric_limits<float>::infinity();
+    } else if (str == "inf") {
+        return std::numeric_limits<float>::infinity();
+    } else {
+        float res;
+        std::stringstream val_stream(str);
+        val_stream.imbue(std::locale("C"));
+        val_stream >> res;
+        if (!val_stream.eof()) THROW_IE_EXCEPTION;
+        return res;
+    }
+}
+
+std::string CNNLayer::ie_serialize_float(float value) {
+    std::stringstream val_stream;
+    val_stream.imbue(std::locale("C"));
+    val_stream << value;
+    return val_stream.str();
+}
+
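
A quick illustration of the parse/serialize pair above: both imbue the "C" locale, so '.' remains the decimal separator regardless of the global locale, and the "inf"/"-inf" spellings round-trip (sample values are arbitrary):

    float a = CNNLayer::ie_parse_float("3.5");        // 3.5f under any global locale
    float b = CNNLayer::ie_parse_float("-inf");       // -std::numeric_limits<float>::infinity()
    std::string s = CNNLayer::ie_serialize_float(a);  // "3.5"
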
+float CNNLayer::GetParamAsFloat(const char* param, float def) const {
+    std::string val = GetParamAsString(param, ie_serialize_float(def).c_str());
+    try {
+        return ie_parse_float(val);
+    } catch (...) {
+        THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
+                           << val << " cannot be casted to float.";
+    }
+}
+
+float CNNLayer::GetParamAsFloat(const char* param) const {
+    std::string val = GetParamAsString(param);
+    try {
+        return ie_parse_float(val);
+    } catch (...) {
+        THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
+                           << val << " cannot be casted to float.";
+    }
+}
+
+std::vector<float> CNNLayer::GetParamAsFloats(const char* param, std::vector<float> def) const {
+    std::string vals = GetParamAsString(param, "");
+    std::vector<float> result;
+    std::istringstream stream(vals);
+    std::string str;
+    if (vals.empty()) return def;
+    while (getline(stream, str, ',')) {
+        try {
+            float val = ie_parse_float(str);
+            result.push_back(val);
+        } catch (...) {
+            THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
+                               << ". Value " << vals << " cannot be casted to floats.";
+        }
+    }
+    return result;
+}
+
+std::vector<float> CNNLayer::GetParamAsFloats(const char* param) const {
+    std::string vals = GetParamAsString(param);
+    std::vector<float> result;
+    std::istringstream stream(vals);
+    std::string str;
+    while (getline(stream, str, ',')) {
+        try {
+            float val = ie_parse_float(str);
+            result.push_back(val);
+        } catch (...) {
+            THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
+                               << ". Value " << vals << " cannot be casted to floats.";
+        }
+    }
+    return result;
+}
+
+int CNNLayer::GetParamAsInt(const char* param, int def) const {
+    std::string val = GetParamAsString(param, std::to_string(def).c_str());
+    try {
+        return std::stoi(val);
+    } catch (...) {
+        THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
+                           << val << " cannot be casted to int.";
+    }
+}
+
+int CNNLayer::GetParamAsInt(const char* param) const {
+    std::string val = GetParamAsString(param);
+    try {
+        return std::stoi(val);
+    } catch (...) {
+        THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
+                           << val << " cannot be casted to int.";
+    }
+}
+
+std::vector<int> CNNLayer::GetParamAsInts(const char* param, std::vector<int> def) const {
+    std::string vals = GetParamAsString(param, "");
+    std::vector<int> result;
+    std::istringstream stream(vals);
+    std::string str;
+    if (vals.empty()) return def;
+    while (getline(stream, str, ',')) {
+        try {
+            result.push_back(std::stoi(str));
+        } catch (...) {
+            THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
+                               << ". Value " << vals << " cannot be casted to int.";
+        }
+    }
+    return result;
+}
+
+std::vector<int> CNNLayer::GetParamAsInts(const char* param) const {
+    std::string vals = GetParamAsString(param);
+    std::vector<int> result;
+    std::istringstream stream(vals);
+    std::string str;
+    while (getline(stream, str, ',')) {
+        try {
+            result.push_back(std::stoi(str));
+        } catch (...) {
+            THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
+                               << ". Value " << vals << " cannot be casted to int.";
+        }
+    }
+    return result;
+}
+
+unsigned int CNNLayer::GetParamAsUInt(const char* param, unsigned int def) const {
+    std::string val = GetParamAsString(param, std::to_string(def).c_str());
+    std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer " + name +
+                          ". Value " + val + " cannot be casted to int.";
+    try {
+        int value = std::stoi(val);
+        if (value < 0) {
+            THROW_IE_EXCEPTION << message;
+        }
+        return static_cast<unsigned int>(value);
+    } catch (...) {
+        THROW_IE_EXCEPTION << message;
+    }
+}
+
+unsigned int CNNLayer::GetParamAsUInt(const char* param) const {
+    std::string val = GetParamAsString(param);
+    std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer " + name +
+                          ". Value " + val + " cannot be casted to unsigned int.";
+    try {
+        int value = std::stoi(val);
+        if (value < 0) {
+            THROW_IE_EXCEPTION << message;
+        }
+        return static_cast<unsigned int>(value);
+    } catch (...) {
+        THROW_IE_EXCEPTION << message;
+    }
+}
+
+std::vector<unsigned int> CNNLayer::GetParamAsUInts(const char* param, std::vector<unsigned int> def) const {
+    std::string vals = GetParamAsString(param, "");
+    std::vector<unsigned int> result;
+    std::istringstream stream(vals);
+    std::string str;
+    std::string message = "Cannot parse parameter " + std::string(param) + " " + str + " from IR for layer " +
+                          name + ". Value " + vals + " cannot be casted to unsigned int.";
+    if (vals.empty()) return def;
+    while (getline(stream, str, ',')) {
+        try {
+            int value = std::stoi(str);
+            if (value < 0) {
+                THROW_IE_EXCEPTION << message;
+            }
+            result.push_back(static_cast<unsigned int>(value));
+        } catch (...) {
+            THROW_IE_EXCEPTION << message;
+        }
+    }
+    return result;
+}
+
+std::vector<unsigned int> CNNLayer::GetParamAsUInts(const char* param) const {
+    std::string vals = GetParamAsString(param);
+    std::vector<unsigned int> result;
+    std::istringstream stream(vals);
+    std::string str;
+    std::string message = "Cannot parse parameter " + std::string(param) + " " + str + " from IR for layer " +
+                          name + ". Value " + vals + " cannot be casted to int.";
+    while (getline(stream, str, ',')) {
+        try {
+            int value = std::stoi(str);
+            if (value < 0) {
+                THROW_IE_EXCEPTION << message;
+            }
+            result.push_back(static_cast<unsigned int>(value));
+        } catch (...) {
+            THROW_IE_EXCEPTION << message;
+        }
+    }
+    return result;
+}
+
+bool CNNLayer::GetParamAsBool(const char* param, bool def) const {
+    std::string val = GetParamAsString(param, std::to_string(def).c_str());
+    std::string loweredCaseValue;
+    std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
+        return static_cast<char>(std::tolower(value));
+    });
+
+    bool result = false;
+
+    if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
+        // fall back to parsing the value as a numeric bool ("0"/"1")
+        return (GetParamAsInt(param, def) != 0);
+    }
+
+    return result;
+}
+
+bool CNNLayer::GetParamAsBool(const char* param) const {
+    std::string val = GetParamAsString(param);
+    std::string loweredCaseValue;
+    std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
+        return static_cast<char>(std::tolower(value));
+    });
+
+    bool result = false;
+
+    if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
+        // fall back to parsing the value as a numeric bool ("0"/"1")
+        return (GetParamAsInt(param) != 0);
+    }
+
+    return result;
+}
+
+std::string CNNLayer::GetParamAsString(const char* param, const char* def) const {
+    auto it = params.find(param);
+    if (it == params.end() || it->second.empty()) {
+        return def;
+    }
+    return (*it).second;
+}
+
+bool CNNLayer::CheckParamPresence(const char* param) const {
+    return params.find(param) != params.end();
+}
+
+std::string CNNLayer::GetParamAsString(const char* param) const {
+    auto it = params.find(param);
+    if (it == params.end()) {
+        THROW_IE_EXCEPTION << "No such parameter name '" << param << "' for layer " << name;
+    }
+    return (*it).second;
+}
+
+std::vector<std::string> CNNLayer::GetParamAsStrings(const char* param, std::vector<std::string> def) const {
+    std::string vals = GetParamAsString(param, "");
+    std::vector<std::string> result;
+    std::istringstream stream(vals);
+    std::string str;
+    if (vals.empty()) return def;
+    while (getline(stream, str, ',')) {
+        try {
+            result.push_back(str);
+        } catch (...) {
+            THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ".";
+        }
+    }
+    return result;
+}
 
 CNNLayer::~CNNLayer() {}
 WeightableLayer::~WeightableLayer() {}
@@ -68,6 +352,7 @@ TopKLayer::~TopKLayer() {}
 UniqueLayer::~UniqueLayer() {}
 NonMaxSuppressionLayer::~NonMaxSuppressionLayer() {}
 ScatterUpdateLayer::~ScatterUpdateLayer() {}
+ScatterElementsUpdateLayer::~ScatterElementsUpdateLayer() {}
 ExperimentalDetectronPriorGridGeneratorLayer::~ExperimentalDetectronPriorGridGeneratorLayer() {}
 ExperimentalDetectronGenerateProposalsSingleImageLayer::~ExperimentalDetectronGenerateProposalsSingleImageLayer() {}
 ExperimentalDetectronTopKROIs::~ExperimentalDetectronTopKROIs() {}
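For orientation, a minimal standalone sketch of the parsing convention the GetParamAs* accessors above implement: list parameters are stored as single comma-separated strings and converted token by token. The helper name parseInts and the error text are illustrative, not part of the Inference Engine API.

#include <cstdio>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

// Split a comma-separated IR attribute and convert each token, mirroring
// the loop in CNNLayer::GetParamAsInts; a failed token aborts with an error.
std::vector<int> parseInts(const std::string& value) {
    std::vector<int> result;
    std::istringstream stream(value);
    std::string token;
    while (std::getline(stream, token, ',')) {
        try {
            result.push_back(std::stoi(token));
        } catch (...) {
            throw std::runtime_error("Cannot parse '" + token + "' as int");
        }
    }
    return result;
}

int main() {
    for (int v : parseInts("1,2,8")) std::printf("%d ", v);  // prints: 1 2 8
}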
diff --git a/inference-engine/src/legacy_api/src/ie_network.cpp b/inference-engine/src/legacy_api/src/ie_network.cpp
deleted file mode 100644 (file)
index e18761a..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <ie_network.hpp>
-#include <map>
-#include <memory>
-#include <string>
-
-using namespace InferenceEngine;
-
-IE_SUPPRESS_DEPRECATED_START
-
-PortData::PortData() {
-    createData({});
-}
-
-PortData::PortData(const SizeVector& shape, const Precision& precision) {
-    createData({precision, shape, TensorDesc::getLayoutByDims(shape)});
-}
-
-const Blob::Ptr& PortData::getData() const {
-    return data;
-}
-
-void PortData::setData(const Blob::Ptr& data) {
-    this->data = data;
-}
-
-const std::map<std::string, Parameter>& PortData::getParameters() const noexcept {
-    return parameters;
-}
-
-void PortData::createData(const TensorDesc& desc) {
-    switch (desc.getPrecision()) {
-    case Precision::UNSPECIFIED:
-        data = std::make_shared<InferenceEngine::TBlob<uint8_t>>(desc);
-        break;
-    case Precision::FP32:
-        data = make_shared_blob<PrecisionTrait<Precision::FP32>::value_type>(desc);
-        break;
-    case Precision::FP16:
-        data = make_shared_blob<PrecisionTrait<Precision::FP16>::value_type>(desc);
-        break;
-    case Precision::Q78:
-        data = make_shared_blob<PrecisionTrait<Precision::Q78>::value_type>(desc);
-        break;
-    case Precision::I16:
-        data = make_shared_blob<PrecisionTrait<Precision::I16>::value_type>(desc);
-        break;
-    case Precision::U8:
-        data = make_shared_blob<PrecisionTrait<Precision::U8>::value_type>(desc);
-        break;
-    case Precision::I8:
-        data = make_shared_blob<PrecisionTrait<Precision::I8>::value_type>(desc);
-        break;
-    case Precision::U16:
-        data = make_shared_blob<PrecisionTrait<Precision::U16>::value_type>(desc);
-        break;
-    case Precision::I32:
-        data = make_shared_blob<PrecisionTrait<Precision::I32>::value_type>(desc);
-        break;
-    default:
-        THROW_IE_EXCEPTION << "Unsupported precisions!";
-    }
-}
-
-void PortData::setShape(const SizeVector& shape) {
-    TensorDesc desc = data->getTensorDesc();
-    if (desc.getDims() == shape) return;
-    if (data->cbuffer() != nullptr) {
-        THROW_IE_EXCEPTION << "Cannot change shape for allocated data!";
-    }
-    createData({desc.getPrecision(), shape, TensorDesc::getLayoutByDims(shape)});
-}
-
-Port::Port() {
-    data = std::make_shared<PortData>();
-}
-
-Port::Port(const SizeVector& shapes, const Precision& precision) {
-    data = std::make_shared<PortData>(shapes, precision);
-}
-Port::Port(const Port& port) {
-    parameters = port.parameters;
-    data = port.data;
-}
-
-bool Port::operator==(const Port& rhs) const {
-    return parameters == rhs.parameters && data == rhs.data;
-}
-
-bool Port::operator!=(const Port& rhs) const {
-    return !(rhs == *this);
-}
-
-const SizeVector& Port::shape() const noexcept {
-    return data->getData()->getTensorDesc().getDims();
-}
-
-void Port::setShape(const SizeVector& shape) {
-    data->setShape(shape);
-}
-
-const std::map<std::string, Parameter>& Port::getParameters() const noexcept {
-    return parameters;
-}
-
-void Port::setParameters(const std::map<std::string, Parameter>& params) noexcept {
-    parameters = params;
-}
-
-void Port::setParameter(const std::string& name, const Parameter& param) {
-    parameters[name] = param;
-}
-
-const PortData::Ptr& Port::getData() const noexcept {
-    return data;
-}
-
-void Port::setData(const PortData::Ptr& data) {
-    if (!data) return;
-    this->data = data;
-}
-
-IE_SUPPRESS_DEPRECATED_END
index 8fb75b4..d90af89 100644 (file)
@@ -81,6 +81,7 @@ CNNLayerPtr clonelayer(const CNNLayer& source) {
                                    &layerCloneImpl<ExperimentalDetectronGenerateProposalsSingleImageLayer>,
                                    &layerCloneImpl<ExperimentalDetectronPriorGridGeneratorLayer>,
                                    &layerCloneImpl<ScatterUpdateLayer>,
+                                   &layerCloneImpl<ScatterElementsUpdateLayer>,
                                    &layerCloneImpl<NonMaxSuppressionLayer>,
                                    &layerCloneImpl<SelectLayer>,
                                    &layerCloneImpl<BatchNormalizationLayer>,
@@ -175,7 +176,15 @@ std::shared_ptr<ICNNNetwork> cloneNetwork(const ICNNNetwork& network) {
 
     return cloneNet(network);
 }
-details::CNNNetworkImplPtr cloneNet(const ICNNNetwork& network) {
+
+details::CNNNetworkImplPtr cloneNet(const ICNNNetwork& origin_network) {
+    std::shared_ptr<ICNNNetwork> clonedNetwork;
+    // If the network is backed by an nGraph function, clone it first so the
+    // conversion below runs on a copy rather than on the original function
+    if (origin_network.getFunction()) {
+        clonedNetwork = cloneNetwork(origin_network);
+    }
+    const ICNNNetwork& network = (clonedNetwork) ? *clonedNetwork : origin_network;
+
     std::vector<CNNLayerPtr> layers;
     details::CNNNetworkIterator i(&network);
     while (i != details::CNNNetworkIterator()) {
index 8dd5294..1380a35 100644 (file)
@@ -249,6 +249,7 @@ REG_SHAPE_INFER_FOR_TYPE(TopKShapeProp, TopK);
 REG_SHAPE_INFER_FOR_TYPE(UniqueShapeProp, Unique);
 REG_SHAPE_INFER_FOR_TYPE(NMSShapeProp, NonMaxSuppression);
 REG_SHAPE_INFER_FOR_TYPE(ScatterUpdateShapeProp, ScatterUpdate);
+REG_SHAPE_INFER_FOR_TYPE(ScatterElementsUpdateShapeProp, ScatterElementsUpdate);
 
 }  // namespace ShapeInfer
 }  // namespace InferenceEngine
index 04b7f6a..12fa9ea 100644 (file)
@@ -33,5 +33,24 @@ public:
     }
 };
 
+/**
+ * @brief Implementation of shape inference for the ScatterElementsUpdate layer
+ */
+class ScatterElementsUpdateShapeProp : public BuiltInShapeInferImpl {
+public:
+    explicit ScatterElementsUpdateShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
+
+    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
+                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
+        LayerParams lp {};
+        ScatterElementsUpdateLayer scatterElementsUpdateLayer(lp);
+        scatterElementsUpdateLayer.params = params;
+        scatterElementsUpdateLayer.type = _type;
+        validate(&scatterElementsUpdateLayer, inBlobs, params, blobs);
+
+        outShapes = {inShapes[0]};
+    }
+};
+
 }  // namespace ShapeInfer
 }  // namespace InferenceEngine
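A small sketch of the rule ScatterElementsUpdateShapeProp encodes above: the layer writes updates into a copy of its first (data) input, so the output shape is exactly that input's shape and the other inputs never affect it. Names below are illustrative, not the plugin's types.

#include <cassert>
#include <cstddef>
#include <vector>

using Shape = std::vector<size_t>;

// Output shape of ScatterElementsUpdate mirrors the data (first) input.
Shape scatterElementsUpdateShape(const std::vector<Shape>& inShapes) {
    return inShapes.at(0);
}

int main() {
    Shape out = scatterElementsUpdateShape({{2, 3, 4}, {2, 1, 4}, {2, 1, 4}});
    assert((out == Shape{2, 3, 4}));
}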
index 2513b4a..e6ed8b4 100644 (file)
@@ -8,7 +8,6 @@
 #include <ie_layers.h>
 
 #include <blob_factory.hpp>
-#include <builders/ie_split_layer.hpp>
 #include <functional>
 #include <graph_tools.hpp>
 #include <map>
@@ -28,12 +27,6 @@ using namespace InferenceEngine;
 using namespace InferenceEngine::details;
 using namespace ShapeInfer;
 
-IE_SUPPRESS_DEPRECATED_START
-
-Reshaper::Reshaper(Builder::Network* network): network(network) {}
-
-IE_SUPPRESS_DEPRECATED_END
-
 inline static std::vector<CNNLayerPtr> SortTopologicallyStartsFrom(const std::vector<DataPtr>& inputs) {
     std::vector<CNNLayerPtr> all_layers;
     CNNNetForestDFS(
@@ -46,7 +39,7 @@ inline static std::vector<CNNLayerPtr> SortTopologicallyStartsFrom(const std::ve
     return all_layers;
 }
 
-Reshaper::Reshaper(std::vector<DataPtr> insDatas, const LauncherCreator::Ptr& launcherCreator): network(nullptr) {
+Reshaper::Reshaper(std::vector<DataPtr> insDatas, const LauncherCreator::Ptr& launcherCreator) {
     auto builtIn = std::make_shared<BuiltInShapeInferHolder>();
     _allTypes = getTypeNamesFromExtension(builtIn);
     _extensions.push_back(builtIn);
@@ -67,7 +60,7 @@ Reshaper::Reshaper(std::vector<DataPtr> insDatas, const LauncherCreator::Ptr& la
     }
 }
 
-Reshaper::Reshaper(ICNNNetwork& network, const LauncherCreator::Ptr& launcherCreator): network(nullptr) {
+Reshaper::Reshaper(ICNNNetwork& network, const LauncherCreator::Ptr& launcherCreator) {
     auto builtIn = std::make_shared<BuiltInShapeInferHolder>();
     _allTypes = getTypeNamesFromExtension(builtIn);
     _extensions.push_back(builtIn);
@@ -96,14 +89,8 @@ Reshaper::Reshaper(ICNNNetwork& network, const LauncherCreator::Ptr& launcherCre
 }
 
 void Reshaper::AddExtension(const IShapeInferExtensionPtr& extension) {
-    if (!extension) THROW_IE_EXCEPTION << "Failed to add empty shape infer extension";
-
-    if (network) {
-        IE_SUPPRESS_DEPRECATED_START
-        network->getContext().addExtension(extension);
-        IE_SUPPRESS_DEPRECATED_END
-        return;
-    }
+    if (!extension)
+        THROW_IE_EXCEPTION << "Failed to add empty shape infer extension";
 
     auto newLayerTypes = getTypeNamesFromExtension(extension);
     std::string badLayerTypes;
@@ -159,10 +146,6 @@ ReshapeLauncher::Ptr Reshaper::getLauncherByLayerName(const std::string& layerNa
 }
 
 StatusCode Reshaper::run(const std::map<std::string, SizeVector>& inputShapes, ResponseDesc* resp) {
-    if (network) {
-        return networkShapeInfer(inputShapes, resp);
-    }
-
     // WA: In another case we should change the registration logic of shape implementations
     static std::mutex reshapeMutex;
     {
@@ -252,154 +235,6 @@ SizeVector Reshaper::getResultShapeFor(DataPtr& data, ResponseDesc* resp) {
     return foundLauncher->getShapeByName(data->getName());
 }
 
-StatusCode Reshaper::networkShapeInfer(const std::map<std::string, SizeVector>& inputShapes, ResponseDesc* resp) {
-    if (!network) return DescriptionBuffer(GENERAL_ERROR, resp) << "Cannot infer shapes! Network is not loaded.";
-
-    IE_SUPPRESS_DEPRECATED_START
-
-    std::vector<Builder::Layer> propagatedLayers;
-    Builder::Network propagatedNetwork(*network);
-
-    // Set new input shapes
-    for (auto& layer : propagatedNetwork) {
-        if (inputShapes.find(layer->getName()) == inputShapes.end() ||
-            details::CaselessEq<std::string>()(layer->getType(), "Const"))
-            continue;
-
-        if (layer->getOutputPorts().size() != 1)
-            return DescriptionBuffer(GENERAL_ERROR, resp)
-                   << "Cannot infer shapes! Input layers can have only one output port.";
-
-        layer->getOutputPorts()[0].setShape(inputShapes.find(layer->getName())->second);
-    }
-
-    std::map<idx_t, std::map<std::string, std::string>> preparedParams;
-    // Prepare params for split layer
-    for (auto& layer : propagatedNetwork) {
-        if ((layer->getType() == "Reshape" || layer->getType() == "Flatten") && layer->getInputPorts().size() != 2 &&
-            !layer->getInputPorts()[0].shape().empty() &&
-            layer->getParameters().find("axis") != layer->getParameters().end() &&
-            (layer->getParameters().find("dim") == layer->getParameters().end() ||
-             layer->getParameters().at("dim").as<std::vector<int>>().empty())) {
-            auto inputShape = layer->getInputPorts()[0].shape();
-            size_t inputShapeTotal =
-                std::accumulate(inputShape.begin(), inputShape.end(), 1lu, std::multiplies<size_t>());
-            std::vector<int> dim;
-            size_t axis = layer->getParameters().at("axis");
-            for (size_t i = 0; i < axis; i++) {
-                dim.emplace_back(inputShape[i]);
-                inputShapeTotal /= inputShape[i];
-            }
-            if (dim.size() < inputShape.size()) dim.emplace_back(inputShapeTotal);
-            layer->getParameters()["dim"] = dim;
-        }
-
-        std::map<std::string, std::string> params =
-            InferenceEngine::Builder::convertParameters2Strings(layer->getParameters());
-        if (layer->getType() == "Split") {
-            Builder::SplitLayer splitLayer(layer);
-            std::vector<size_t> sizes;
-            size_t axisSize = splitLayer.getInputPort().shape()[splitLayer.getAxis()];
-            size_t uninitOuts(0);
-            for (const auto& port : layer->getOutputPorts()) {
-                if (port.shape().empty()) {
-                    sizes.push_back(0);
-                    uninitOuts++;
-                } else if (port.shape().size() <= splitLayer.getAxis()) {
-                    THROW_IE_EXCEPTION << "Incorrect output shapes in Split layer " << layer->getName();
-                } else {
-                    sizes.push_back(port.shape()[splitLayer.getAxis()]);
-                    axisSize -= port.shape()[splitLayer.getAxis()];
-                }
-            }
-
-            if ((axisSize && !uninitOuts) || (axisSize && uninitOuts && axisSize % uninitOuts))
-                THROW_IE_EXCEPTION << "Incorrect output shapes in Split layer " << layer->getName();
-
-            size_t commonSize = uninitOuts != 0 ? axisSize / uninitOuts : 0;
-            for (size_t i = 0; i < sizes.size() && commonSize; i++) {
-                if (!sizes[i]) sizes[i] = commonSize;
-            }
-
-            std::string out_sizes;
-            for (const auto& size : sizes) {
-                if (!out_sizes.empty()) out_sizes += ",";
-                out_sizes += std::to_string(size);
-            }
-            if (!out_sizes.empty()) params["out_sizes"] = out_sizes;
-        }
-
-        preparedParams[layer->getId()] = params;
-    }
-
-    // Try to propagate shapes
-    for (auto& layer : propagatedNetwork) {
-        // constant layer does not change during the shape inference and also the Const blob always has C layout and
-        // doesn't know its real shape, so don't run shape propagation for it
-        if (details::CaselessEq<std::string>()(layer->getType(), "Const")) continue;
-        const auto impl = network->getContext().getShapeInferImpl(layer->getType());
-        if (!impl)
-            return DescriptionBuffer(NOT_FOUND, resp)
-                   << "Cannot infer shapes! Shape infer implementation was not found for type " << layer->getType()
-                   << ".";
-        std::vector<SizeVector> inShapes;
-        std::vector<SizeVector> outShapes;
-        std::map<std::string, std::string> params;
-        std::map<std::string, Blob::Ptr> blobs;
-
-        std::vector<Blob::CPtr> inBlobs;
-        for (const auto& inPort : layer->getInputPorts().empty() ? layer->getOutputPorts() : layer->getInputPorts()) {
-            if (inPort.getParameters().find("type") == inPort.getParameters().end()) {
-                inBlobs.push_back(inPort.getData()->getData());
-            }
-        }
-        params = preparedParams[layer->getId()];
-
-        for (const auto& port : layer->getInputPorts()) {
-            if (port.getParameters().find("type") == port.getParameters().end() ||
-                port.getData()->getData()->cbuffer() == nullptr)
-                continue;
-            blobs[port.getParameters().at("type")] = port.getData()->getData();
-        }
-        for (const auto& it : layer->getParameters()) {
-            if (!it.second.is<Blob::CPtr>()) continue;
-            blobs[it.first] = std::const_pointer_cast<Blob>(it.second.as<Blob::CPtr>());
-        }
-
-        StatusCode sts = impl->inferShapes(inBlobs, params, blobs, outShapes, resp);
-        if (sts != OK) return sts;
-
-        if (outShapes.size() != layer->getOutputPorts().size())
-            return DescriptionBuffer(GENERAL_ERROR, resp) << "Cannot infer shapes! The number of output shapes is not "
-                                                             "equal the number of output ports for layer "
-                                                          << layer->getName();
-
-        for (size_t i = 0; i < outShapes.size(); i++) {
-            layer->getOutputPorts()[i].setShape(outShapes[i]);
-        }
-        for (const auto& connection : propagatedNetwork.getLayerConnections(layer->getId())) {
-            if (connection.from().layerId() != layer->getId()) continue;
-            auto nextLayer = propagatedNetwork.getLayer(connection.to().layerId());
-            nextLayer->getInputPorts()[connection.to().portId()].setShape(outShapes[connection.from().portId()]);
-        }
-    }
-
-    // Apply new shapes
-    for (auto& layer : *network) {
-        const auto& propagatedLayer = propagatedNetwork.getLayer(layer->getId());
-        for (size_t i = 0; i < layer->getInputPorts().size(); i++) {
-            layer->getInputPorts()[i].setShape(propagatedLayer->getInputPorts()[i].shape());
-        }
-        for (size_t i = 0; i < layer->getOutputPorts().size(); i++) {
-            layer->getOutputPorts()[i].setShape(propagatedLayer->getOutputPorts()[i].shape());
-        }
-    }
-
-    IE_SUPPRESS_DEPRECATED_END
-
-    return OK;
-}
-
 caseless_set<std::string> Reshaper::getTypeNamesFromExtension(const IShapeInferExtensionPtr& extension) {
     char** types = nullptr;
     unsigned int size = 0;
index c463a1c..b9d9045 100644 (file)
@@ -6,8 +6,6 @@
 
 #include <ie_layers.h>
 
-#include <builders/ie_network_builder.hpp>
-#include <ie_context.hpp>
 #include <list>
 #include <map>
 #include <memory>
@@ -65,12 +63,6 @@ public:
     explicit Reshaper(std::vector<DataPtr> inputs,
                       const LauncherCreator::Ptr& launcherCreator = std::make_shared<LauncherCreator>());
 
-    IE_SUPPRESS_DEPRECATED_START
-
-    explicit Reshaper(Builder::Network* network);
-
-    IE_SUPPRESS_DEPRECATED_END
-
     virtual ~Reshaper() = default;
 
     /**
@@ -107,8 +99,6 @@ public:
 private:
     ReshapeLauncher::Ptr getLauncherByLayerName(const std::string& layerName) const;
 
-    StatusCode networkShapeInfer(const std::map<std::string, SizeVector>& inputShapes, ResponseDesc* resp);
-
     InferenceEngine::details::caseless_set<std::string> getTypeNamesFromExtension(
         const IShapeInferExtensionPtr& extension);
 
@@ -117,10 +107,6 @@ private:
     std::vector<CNNLayerPtr> _allSortedLayers {};
     std::set<CNNLayerPtr> _inputLayers {};
     InferenceEngine::details::caseless_set<std::string> _allTypes;
-
-    IE_SUPPRESS_DEPRECATED_START
-    Builder::Network* network;
-    IE_SUPPRESS_DEPRECATED_END
 };
 
 }  // namespace ShapeInfer
index 52183e8..40abb8c 100644 (file)
@@ -44,6 +44,7 @@ set(LAYERS
     ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_tile_node.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_mvn_node.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_resample_node.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_normalize_node.cpp
 
     ${CMAKE_CURRENT_SOURCE_DIR}/nodes/batch_to_space.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/nodes/broadcast.cpp
@@ -91,7 +92,6 @@ set(LAYERS
     ${CMAKE_CURRENT_SOURCE_DIR}/nodes/unique.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/nodes/unsqueeze.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/nodes/common/softmax.cpp
-    ${CMAKE_CURRENT_SOURCE_DIR}/nodes/normalize.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/nodes/interp.cpp
 )
 
index c592ed4..0e883de 100644 (file)
@@ -248,6 +248,10 @@ void MKLDNNGraph::Replicate(const ICNNNetwork &network, const MKLDNNExtensionMan
         graphNodes.push_back(node);
         layer2node[layer] = node;
 
+        if (layer->params.count("originalLayersNames")) {
+            node->originalLayers = layer->params["originalLayersNames"];
+        }
+
         for (int port = 0; port < layer->insData.size(); port++) {
             auto data = layer->insData[port].lock();
             auto parent_layer = data->getCreatorLayer().lock();
@@ -354,7 +358,10 @@ void MKLDNNGraph::InitGraph() {
         auto nodeType = graphNode->getType();
         if (nodeType == Reorder || nodeType == Output) continue;
 
-        graphNode->addOriginalLayer(graphNode->getCnnLayer());
+        if (graphNode->getOriginalLayers().empty()) {
+            graphNode->addOriginalLayer(graphNode->getCnnLayer());
+        }
+
         if (graphNode->getFusedWith().size() || graphNode->getMergeWith().size()) {
             // Original layer names
             std::vector<MKLDNNNodePtr> internal = graphNode->getFusedWith();
@@ -419,6 +426,7 @@ void MKLDNNGraph::InitDescriptors() {
         node->getSupportedDescriptors();
 
         node->initSupportedPrimitiveDescriptors();
+        node->filterSupportedPrimitiveDescriptors();
     }
 
     for (auto &node : graphNodes) {
index 8d72160..8343523 100644 (file)
@@ -132,6 +132,9 @@ void MKLDNNGraphOptimizer::ApplyCommonGraphOptimizations(MKLDNNGraph &graph) {
     FuseResampleAndSimpleOperation(graph);
     graph.RemoveDroppedNodes();
 
+    FuseNormalizeAndSimpleOperation(graph);
+    graph.RemoveDroppedNodes();
+
     FuseEltwiseAndSimple(graph);
     graph.RemoveDroppedNodes();
 
@@ -1692,6 +1695,95 @@ void MKLDNNGraphOptimizer::FuseResampleAndSimpleOperation(MKLDNNGraph &graph) {
     }
 }
 
+void MKLDNNGraphOptimizer::FuseNormalizeAndSimpleOperation(MKLDNNGraph &graph) {
+    auto isOneOf = [&](mkldnn::algorithm alg, std::vector<mkldnn::algorithm> algs) {
+        for (auto a : algs) {
+            if (alg == a) {
+                return true;
+            }
+        }
+        return false;
+    };
+
+    auto removeEdge = [](MKLDNNGraph &graph, MKLDNNEdgePtr& edge) {
+        auto& edges = graph.GetEdges();
+        for (auto it = edges.begin(); it != edges.end(); it++) {
+            if ((*it) == edge) {
+                edges.erase(it);
+                return;
+            }
+        }
+    };
+
+    auto& graphNodes = graph.GetNodes();
+
+    auto isSuitableParentNode = [](MKLDNNNodePtr node) {
+        bool isSuitableNormalize = node->getType() == Normalize;
+
+        if (isSuitableNormalize) {
+            return node->getChildEdges().size() == 1;
+        } else {
+            return false;
+        }
+    };
+
+    auto isSuitableChildNode = [&](MKLDNNNodePtr node) {
+        if (!node->getCnnLayer())
+            return false;
+
+        if (node->getType() == Quantize) {
+            auto* quantizeNode = dynamic_cast<MKLDNNQuantizeNode*>(node.get());
+            if (quantizeNode == nullptr)
+                THROW_IE_EXCEPTION << "Cannot get quantize layer " << node->getName();
+            return !quantizeNode->isBinarization();
+        } else if (node->getType() == Depthwise) {
+            auto* depthwiseNode = dynamic_cast<MKLDNNDepthwiseNode*>(node.get());
+            if (depthwiseNode == nullptr)
+                THROW_IE_EXCEPTION << "Cannot get depthwise layer " << node->getName();
+            return ((depthwiseNode->getAlgorithm() == mkldnn::algorithm::depthwise_scale_shift && depthwiseNode->isWithBiases()) ||
+                    (depthwiseNode->getAlgorithm() == mkldnn::algorithm::depthwise_prelu));
+        } else if (node->getType() == Activation) {
+            auto* activationNode = dynamic_cast<MKLDNNActivationNode*>(node.get());
+            if (activationNode == nullptr)
+                THROW_IE_EXCEPTION << "Cannot get activation layer " << node->getName();
+            return isOneOf(activationNode->getAlgorithm(), {eltwise_relu, eltwise_gelu, eltwise_elu, eltwise_logistic,
+                eltwise_bounded_relu, eltwise_clamp, eltwise_tanh, eltwise_swish, eltwise_linear, eltwise_abs,
+                eltwise_square, eltwise_sqrt});
+        }
+        return false;
+    };
+
+    auto parent = graphNodes.begin();
+    while (parent != graphNodes.end()) {
+        auto parentNode = *parent;
+        if (!isSuitableParentNode(parentNode)) {
+            parent++;
+            continue;
+        }
+
+        auto childNode = parentNode->getChildEdgeAt(0)->getChild();
+        if (!isSuitableChildNode(childNode)) {
+            parent++;
+            continue;
+        }
+
+        parentNode->fuseWith(childNode);
+
+        if (childNode->getType() == Quantize) {
+            auto parentEdges = childNode->parentEdges;
+            for (auto &parentEdge : parentEdges) {
+                auto p_edge = parentEdge.lock();
+                if (p_edge->getParent()->getType() == Normalize)
+                    continue;
+
+                removeEdge(graph, p_edge);
+            }
+        }
+
+        graph.DropNode(childNode);
+    }
+}
+
 void MKLDNNGraphOptimizer::FuseEltwiseAndSimple(MKLDNNGraph &graph) {
     auto isOneOf = [&](mkldnn::algorithm alg, std::vector<mkldnn::algorithm> algs) {
         for (auto a : algs) {
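FuseNormalizeAndSimpleOperation follows the same walk as the neighbouring Fuse* passes: find a parent of the target type with exactly one consumer, verify the consumer is fusable, record the fusion, then drop the consumer. A minimal standalone sketch of that pattern on a toy graph; all types and names here are illustrative, not the MKLDNN plugin's.

#include <memory>
#include <string>
#include <vector>

struct Node {
    std::string type;
    std::vector<std::shared_ptr<Node>> children;
    std::vector<std::string> fusedWith;
};

// Fuse each single-consumer parent of parentType with a fusable child,
// then splice the child out of the graph (edge bookkeeping elided).
void fuseSingleConsumer(std::vector<std::shared_ptr<Node>>& nodes,
                        const std::string& parentType) {
    for (auto& parent : nodes) {
        if (parent->type != parentType || parent->children.size() != 1)
            continue;
        auto child = parent->children[0];
        if (child->type != "Activation" && child->type != "Quantize")
            continue;
        parent->fusedWith.push_back(child->type);
        parent->children = child->children;
    }
}

int main() {
    auto act = std::make_shared<Node>(Node{"Activation", {}, {}});
    auto norm = std::make_shared<Node>(Node{"Normalize", {act}, {}});
    std::vector<std::shared_ptr<Node>> graph{norm, act};
    fuseSingleConsumer(graph, "Normalize");  // norm->fusedWith == {"Activation"}
}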
index 9b41c9c..a577f02 100644 (file)
@@ -43,6 +43,7 @@ private:
 #endif
     void FuseMVNAndSimpleOperation(MKLDNNGraph &graph);
     void FuseResampleAndSimpleOperation(MKLDNNGraph &graph);
+    void FuseNormalizeAndSimpleOperation(MKLDNNGraph &graph);
     void RemoveIdentityOperator(MKLDNNGraph& graph);
 
     void RemoveIOScaleShifts(MKLDNNGraph& graph);
index 91daf28..4517d47 100644 (file)
@@ -55,6 +55,28 @@ void MKLDNNPlugin::MKLDNNInferRequest::pushInput(const std::string& inputName, I
     graph->PushInputData(inputName, inputBlob);
 }
 
+namespace {
+
+template <typename T>
+void copyToFloat(float* dst, const InferenceEngine::Blob* src) {
+    if (!dst) {
+        return;
+    }
+    const InferenceEngine::TBlob<T>* t_blob = dynamic_cast<const InferenceEngine::TBlob<T>*>(src);
+    if (t_blob == nullptr) {
+        THROW_IE_EXCEPTION << "input type is " << src->getTensorDesc().getPrecision() << " but input is not "
+                           << typeid(T).name();
+    }
+
+    const T* srcPtr = t_blob->readOnly();
+    if (srcPtr == nullptr) {
+        THROW_IE_EXCEPTION << "Input data was not allocated.";
+    }
+    for (size_t i = 0; i < t_blob->size(); i++) dst[i] = srcPtr[i];
+}
+
+}  // namespace
+
 void MKLDNNPlugin::MKLDNNInferRequest::InferImpl() {
     IE_PROFILING_AUTO_SCOPE_TASK(profilingTask)
     graph = execNetwork->_graphs.local().get();
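The anonymous-namespace copyToFloat above replaces the deprecated InferenceEngine::copyToFloat used before this change. A standalone analogue of the widening copy it performs; names are illustrative, and the real helper additionally checks the Blob's runtime type via dynamic_cast and throws on a mismatch or unallocated data.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Widen an integral buffer to float, element by element.
template <typename T>
void widenToFloat(float* dst, const T* src, size_t n) {
    if (!dst || !src) return;
    for (size_t i = 0; i < n; i++) dst[i] = static_cast<float>(src[i]);
}

int main() {
    std::vector<uint8_t> in{0, 128, 255};
    std::vector<float> out(in.size());
    widenToFloat(out.data(), in.data(), in.size());
    std::printf("%.0f %.0f %.0f\n", out[0], out[1], out[2]);  // 0 128 255
}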
@@ -94,9 +116,7 @@ void MKLDNNPlugin::MKLDNNInferRequest::InferImpl() {
                     in_f = dynamic_cast<InferenceEngine::TBlob<float> *>(iconv.get());
                     if (in_f == nullptr)
                         THROW_IE_EXCEPTION << "Cannot get TBlob";
-                    IE_SUPPRESS_DEPRECATED_START
-                    InferenceEngine::copyToFloat<uint16_t>(in_f->data(), input.second.get());
-                    IE_SUPPRESS_DEPRECATED_END
+                    copyToFloat<uint16_t>(in_f->data(), input.second.get());
                     pushInput<float>(input.first, iconv);
                     break;
                 case InferenceEngine::Precision::I16:
@@ -110,9 +130,7 @@ void MKLDNNPlugin::MKLDNNInferRequest::InferImpl() {
                         in_f = dynamic_cast<InferenceEngine::TBlob<float> *>(iconv.get());
                         if (in_f == nullptr)
                             THROW_IE_EXCEPTION << "Cannot get TBlob";
-                        IE_SUPPRESS_DEPRECATED_START
-                        InferenceEngine::copyToFloat<int16_t>(in_f->data(), input.second.get());
-                        IE_SUPPRESS_DEPRECATED_END
+                        copyToFloat<int16_t>(in_f->data(), input.second.get());
                         pushInput<float>(input.first, iconv);
                     } else {
                         // Instead we can send I16 directly
@@ -120,6 +138,7 @@ void MKLDNNPlugin::MKLDNNInferRequest::InferImpl() {
                     }
                     break;
                 case InferenceEngine::Precision::U8:
+                case InferenceEngine::Precision::BOOL:
                     if (graph->hasMeanImageFor(input.first)) {
                         // If a mean image exists, we convert the blob and send FP32
                         iconv = InferenceEngine::make_shared_blob<float>({InferenceEngine::Precision::FP32,
@@ -130,9 +149,7 @@ void MKLDNNPlugin::MKLDNNInferRequest::InferImpl() {
                         in_f = dynamic_cast<InferenceEngine::TBlob<float> *>(iconv.get());
                         if (in_f == nullptr)
                             THROW_IE_EXCEPTION << "Cannot get TBlob";
-                        IE_SUPPRESS_DEPRECATED_START
-                        InferenceEngine::copyToFloat<uint8_t>(in_f->data(), input.second.get());
-                        IE_SUPPRESS_DEPRECATED_END
+                        copyToFloat<uint8_t>(in_f->data(), input.second.get());
                         pushInput<float>(input.first, iconv);
                     } else {
                         // Instead we can send I8 directly
index e1c0920..7256367 100644 (file)
 #include <nodes/mkldnn_def_conv_node.h>
 #include <nodes/mkldnn_mvn_node.h>
 #include <nodes/mkldnn_resample_node.h>
+#include <nodes/mkldnn_normalize_node.h>
 #include <nodes/mkldnn_tensoriterator_node.h>
 #include <mkldnn_types.h>
 #include "mkldnn_extension_utils.h"
 #include "mkldnn_plugin.h"
 #include "ie_memcpy.h"
+#include "mkldnn_debug.h"
 
 using namespace mkldnn;
 using namespace MKLDNNPlugin;
@@ -112,6 +114,7 @@ static const InferenceEngine::details::caseless_unordered_map<std::string, Type>
         { "Convert", Convert },
         { "MVN", MVN},
         { "Resample", Resample},
+        { "Normalize", Normalize},
 };
 
 Type TypeFromName(const std::string type) {
@@ -166,6 +169,24 @@ MKLDNNNode::MKLDNNNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::
                 THROW_IE_EXCEPTION << "Unsupported CPU implementation " << str << " for node " << getName();
         }
     }
+    if (layer->params.find("InputMemoryFormats") != layer->params.end()) {
+        std::istringstream stream(layer->params["InputMemoryFormats"]);
+        std::string str;
+        while (getline(stream, str, ',')) {
+            if (str.substr(0, 4) != "cpu:")
+                continue;
+            inputMemoryFormatsFilter.push_back(mkldnn_str2fmt(str.substr(4, str.size()).c_str()));
+        }
+    }
+    if (layer->params.find("OutputMemoryFormats") != layer->params.end()) {
+        std::istringstream stream(layer->params["OutputMemoryFormats"]);
+        std::string str;
+        while (getline(stream, str, ',')) {
+            if (str.substr(0, 4) != "cpu:")
+                continue;
+            outputMemoryFormatsFilter.push_back(mkldnn_str2fmt(str.substr(4, str.size()).c_str()));
+        }
+    }
 }
 
 void MKLDNNNode::addEdge(const MKLDNNEdgeWeakPtr& edge) {
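The two blocks added to the MKLDNNNode constructor above read optional InputMemoryFormats/OutputMemoryFormats layer parameters as comma-separated, cpu:-prefixed format names. A standalone sketch of just that tokenizing step; parseFormatFilter is an illustrative name, and where the real code maps each stripped name to an mkldnn format via mkldnn_str2fmt, this sketch keeps the string.

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Keep only the "cpu:"-prefixed entries of a comma-separated filter, stripped.
std::vector<std::string> parseFormatFilter(const std::string& value) {
    std::vector<std::string> filter;
    std::istringstream stream(value);
    std::string token;
    while (std::getline(stream, token, ',')) {
        if (token.compare(0, 4, "cpu:") == 0)
            filter.push_back(token.substr(4));
    }
    return filter;
}

int main() {
    for (const auto& f : parseFormatFilter("cpu:nChw16c,gpu:x,cpu:nchw"))
        std::cout << f << '\n';  // nChw16c, then nchw
}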
@@ -533,6 +554,32 @@ void MKLDNNNode::initSupportedPrimitiveDescriptors() {
     }
 }
 
+void MKLDNNNode::filterSupportedPrimitiveDescriptors() {
+    if (!inputMemoryFormatsFilter.empty() || !outputMemoryFormatsFilter.empty()) {
+        auto itpd = supportedPrimitiveDescriptors.begin();
+        while (itpd != supportedPrimitiveDescriptors.end()) {
+            const auto &config = itpd->getConfig();
+            if (inputMemoryFormatsFilter.size() > config.inConfs.size() || outputMemoryFormatsFilter.size() > config.outConfs.size())
+                THROW_IE_EXCEPTION << "Incorrect number of input or output memory formats";
+
+            bool isSuitableDesc = true;
+            for (int i = 0; i < inputMemoryFormatsFilter.size(); i++) {
+                if (inputMemoryFormatsFilter[i] != MKLDNNMemoryDesc(config.inConfs[i].desc).getFormat())
+                    isSuitableDesc = false;
+            }
+            for (int i = 0; i < outputMemoryFormatsFilter.size(); i++) {
+                if (outputMemoryFormatsFilter[i] != MKLDNNMemoryDesc(config.outConfs[i].desc).getFormat())
+                    isSuitableDesc = false;
+            }
+            if (!isSuitableDesc) {
+                // vector::erase invalidates itpd; continue from its return value
+                itpd = supportedPrimitiveDescriptors.erase(itpd);
+            } else {
+                itpd++;
+            }
+        }
+    }
+}
+
 void MKLDNNNode::initDescriptor(const InferenceEngine::LayerConfig &config) {
     auto* selectedPD = getSelectedPrimitiveDescriptor();
     if (!selectedPD) {
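Both filter methods above drop entries from a std::vector while iterating, which only works when the loop continues from the iterator that erase() returns. A minimal standalone sketch of that idiom; eraseIfNot is an illustrative name.

#include <cassert>
#include <vector>

// vector::erase invalidates the erased iterator and returns the next one,
// so the loop must resume from its return value.
template <typename T, typename Pred>
void eraseIfNot(std::vector<T>& v, Pred keep) {
    auto it = v.begin();
    while (it != v.end()) {
        if (!keep(*it))
            it = v.erase(it);
        else
            ++it;
    }
}

int main() {
    std::vector<int> v{1, 2, 3, 4};
    eraseIfNot(v, [](int x) { return x % 2 == 0; });
    assert((v == std::vector<int>{2, 4}));
}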
index 2fab65e..403f89b 100644 (file)
@@ -72,7 +72,8 @@ enum Type {
     TensorIterator,
     Convert,
     MVN,
-    Resample
+    Resample,
+    Normalize
 };
 
 Type TypeFromName(const std::string type);
@@ -153,6 +154,8 @@ static std::string NameFromType(Type type) {
             return "Convert";
         case Resample:
             return "Resample";
+        case Normalize:
+            return "Normalize";
         default:
             return "Unknown";
     }
@@ -164,9 +167,9 @@ public:
         implementationType = type;
     }
 
-    PrimitiveDescInfo(const InferenceEngine::LayerConfig conf, impl_desc_type type, std::vector<mkldnn::memory::format> outFmt): config(conf) {
+    PrimitiveDescInfo(const InferenceEngine::LayerConfig conf, impl_desc_type type, std::vector<mkldnn::memory::format> outFmts): config(conf) {
         implementationType = type;
-        outputLayouts = outFmt;
+        outputLayouts = outFmts;
     }
 
     PrimitiveDescInfo(const InferenceEngine::LayerConfig conf, impl_desc_type type, mkldnn::memory::format outFmt): config(conf) {
@@ -329,6 +332,13 @@ public:
     void resolveNotAllocatedEdges();
     virtual void execute(mkldnn::stream strm);
     virtual void initSupportedPrimitiveDescriptors();
+
+    /**
+     * @brief Filters supportedPrimitiveDescriptors according to the input layouts specified in inputMemoryFormatsFilter
+     * and output layouts specified in outputMemoryFormatsFilter
+     */
+    virtual void filterSupportedPrimitiveDescriptors();
+
     virtual void createPrimitive() = 0;
 
     virtual void selectOptimalPrimitiveDescriptor();
@@ -465,6 +475,8 @@ protected:
     std::vector <MKLDNNNodePtr> fusedWith;
     std::vector <MKLDNNNodePtr> mergedWith;
     std::vector <impl_desc_type> implPriorities;
+    std::vector <mkldnn_memory_format_t> inputMemoryFormatsFilter;
+    std::vector <mkldnn_memory_format_t> outputMemoryFormatsFilter;
 
     std::string originalLayers;  // contains names of the original layers separated by comma
 
index df73e8f..f97d71f 100644 (file)
@@ -16,6 +16,7 @@
 #include <generic_ie.hpp>
 
 #include "convert_function_to_cnn_network.hpp"
+#include <transformations/common_optimizations/common_optimizations.hpp>
 #include <transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.hpp>
 #include <transformations/convert_opset2_to_opset1/convert_opset2_to_opset1.hpp>
 #include <ngraph/opsets/opset1.hpp>
@@ -68,7 +69,8 @@ Engine::LoadExeNetworkImpl(const ICore * /*core*/, const InferenceEngine::ICNNNe
             input_precision != InferenceEngine::Precision::U16 &&
             input_precision != InferenceEngine::Precision::I16 &&
             input_precision != InferenceEngine::Precision::I8 &&
-            input_precision != InferenceEngine::Precision::U8) {
+            input_precision != InferenceEngine::Precision::U8 &&
+            input_precision != InferenceEngine::Precision::BOOL) {
             THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str
                                << "Input image format " << input_precision << " is not supported yet...";
         }
@@ -84,25 +86,23 @@ Engine::LoadExeNetworkImpl(const ICore * /*core*/, const InferenceEngine::ICNNNe
         conf.batchLimit = static_cast<int>(network.getBatchSize());
     }
 
-    std::shared_ptr<ICNNNetwork> clonedNetwork(nullptr);
+    std::shared_ptr<ICNNNetwork> clonedNetwork = cloneNetwork(network);
 
-    if (network.getFunction()) {
+    if (clonedNetwork->getFunction()) {
         const auto transformations_callback = [](const std::shared_ptr<const ::ngraph::Node> &node) -> bool {
             return std::dynamic_pointer_cast<const ::ngraph::opset2::Gelu>(node) ||
                 std::dynamic_pointer_cast<const ::ngraph::opset2::BatchToSpace>(node) ||
                 std::dynamic_pointer_cast<const ::ngraph::opset2::SpaceToBatch>(node);
         };
-        CNNNetwork net(network.getFunction());
-        auto nGraphFunc = net.getFunction();
+        auto nGraphFunc = clonedNetwork->getFunction();
         // Disable shape inference (WA for generic operations)
         ::ngraph::op::GenericIE::DisableReshape noReshape(nGraphFunc);
 
         // Note: instead of running all Conversion Transformations you can make up your own transformation pipeline
+        ngraph::pass::CommonOptimizations().run_on_function(nGraphFunc);
         ngraph::pass::ConvertOpSet2ToOpSet1(transformations_callback).run_on_function(nGraphFunc);
         ngraph::pass::ConvertOpSet1ToLegacy(transformations_callback).run_on_function(nGraphFunc);
-        clonedNetwork = InferenceEngine::details::convertFunctionToICNNNetwork(nGraphFunc, network);
-    } else {
-        clonedNetwork = cloneNet(network);
+        clonedNetwork = InferenceEngine::details::convertFunctionToICNNNetwork(nGraphFunc, *clonedNetwork);
     }
 
     auto implNetwork = std::dynamic_pointer_cast<details::CNNNetworkImpl>(clonedNetwork);
index 805014b..860041a 100644 (file)
@@ -37,7 +37,7 @@ void MKLDNNBatchNormalizationNode::getSupportedDescriptors() {
     if (bnLayer->_weights == nullptr || bnLayer->_biases == nullptr) {
         THROW_IE_EXCEPTION << "Weights/biases are empty for layer: " << bnLayer->name
                            << " used in MKLDNN node: " << getName() << "\n"
-                           << "Use ReadWeights and SetWeights methods of InferenceEngine::CNNNetReader"
+                           << "Use the second argument of InferenceEngine::Core::ReadNetwork"
                            << " to load them from .bin part of the IR";
     }
 
index 58b60af..3b83233 100644 (file)
@@ -854,6 +854,39 @@ void MKLDNNConvolutionNode::initDescriptor(const InferenceEngine::LayerConfig& c
     selectedPD->getConfig() = rightConfig;
 }
 
+void MKLDNNConvolutionNode::filterSupportedPrimitiveDescriptors() {
+    MKLDNNNode::filterSupportedPrimitiveDescriptors();
+    // We also need to filter descs in Convolution node
+    filterSupportedDescriptors();
+}
+
+void MKLDNNConvolutionNode::filterSupportedDescriptors() {
+    if (!inputMemoryFormatsFilter.empty() || !outputMemoryFormatsFilter.empty()) {
+        if (inputMemoryFormatsFilter.size() > 1 || outputMemoryFormatsFilter.size() > 1) {
+            THROW_IE_EXCEPTION << "Incorrect number of input or output memory formats for Convolution node";
+        }
+        auto itd = descs.begin();
+        while (itd != descs.end()) {
+            bool isSuitableDesc = true;
+            if (!inputMemoryFormatsFilter.empty()) {
+                auto src_fmt = std::shared_ptr<mkldnn::convolution_forward::desc>(*itd)->data.src_desc.format;
+                if (src_fmt != inputMemoryFormatsFilter[0])
+                    isSuitableDesc = false;
+            }
+            if (!outputMemoryFormatsFilter.empty()) {
+                auto dst_fmt = std::shared_ptr<mkldnn::convolution_forward::desc>(*itd)->data.dst_desc.format;
+                if (dst_fmt != outputMemoryFormatsFilter[0])
+                    isSuitableDesc = false;
+            }
+            if (!isSuitableDesc) {
+                // vector::erase invalidates itd; continue from its return value
+                itd = descs.erase(itd);
+            } else {
+                itd++;
+            }
+        }
+    }
+}
+
 MKLDNNMemoryDesc MKLDNNConvolutionNode::getSrcMemDesc(mkldnn::primitive_desc_iterator &primitive_desc_it, size_t idx) {
     InferenceEngine::TensorDesc desc = idx > 0 ? MKLDNNMemoryDesc(primitive_desc_it.weights_primitive_desc(idx - 1).desc())
                                                : MKLDNNMemoryDesc(primitive_desc_it.src_primitive_desc(idx).desc());
index 232803b..9289eff 100644 (file)
@@ -25,6 +25,8 @@ public:
     void initDescriptor(const InferenceEngine::LayerConfig& config) override;
     void createPrimitive() override;
     void initSupportedPrimitiveDescriptors() override;
+    void filterSupportedPrimitiveDescriptors() override;
+    void filterSupportedDescriptors();
     bool created() const override;
     bool canBeInPlace() const override {
         return false;
index 350d717..ffc22b3 100644 (file)
@@ -48,7 +48,7 @@ void MKLDNNDeconvolutionNode::getSupportedDescriptors() {
     if (deconvLayer->_weights == nullptr) {
         THROW_IE_EXCEPTION << "Weights are empty for layer: " << deconvLayer->name
                            << " used in MKLDNN node: " << getName() << "\n"
-                           << "Use ReadWeights and SetWeights methods of InferenceEngine::CNNNetReader"
+                           << "Use the second argument of InferenceEngine::Core::ReadNetwork"
                            << " to load them from .bin part of the IR";
     }
     withGroups = (deconvLayer->_group > 1);
index 9748ae6..650909d 100644 (file)
@@ -79,7 +79,7 @@ void MKLDNNFullyConnectedNode::getSupportedDescriptors() {
         auto weightsDataType = MKLDNNExtensionUtils::IEPrecisionToDataType(getCnnLayer()->insData[1].lock()->getPrecision());
 
         // TODO(amalyse) what are the cases when we have non-i8 weights and have to override the precisions?
-        if ((inputDataType != memory::u8 || weightsDataType != memory::s8) && inputDataType != memory::bf16) {
+        if (((inputDataType != memory::u8 && inputDataType != memory::s8) || weightsDataType != memory::s8) && inputDataType != memory::bf16) {
             inputDataType = memory::f32;
             outputDataType = memory::f32;
         }
@@ -91,7 +91,7 @@ void MKLDNNFullyConnectedNode::getSupportedDescriptors() {
     if (fcLayer->_weights == nullptr && baseInputsNumber == 1) {
         THROW_IE_EXCEPTION << "Weights are empty for layer: " << fcLayer->name
                            << " used in MKLDNN node: " << getName() << "\n"
-                           << "Use ReadWeights and SetWeights methods of InferenceEngine::CNNNetReader"
+                           << "Use the second argument of InferenceEngine::Core::ReadNetwork"
                            << " to load them from .bin part of the IR";
     }
 
index fe8e0e1..11224c8 100644 (file)
@@ -440,6 +440,10 @@ void MKLDNNMVNNode::getSupportedDescriptors() {
     if (!descs.empty())
         return;
 
+    const auto& numOfDims = getParentEdgeAt(0)->getDims().ndims();
+    if (numOfDims < 1 || numOfDims > 5)
+        THROW_IE_EXCEPTION << "MVN layer with name '" << getCnnLayer()->name << "' doesn't support input with size of dimensions: " << numOfDims;
+
     auto * mvnLayer = dynamic_cast<MVNLayer*>(getCnnLayer().get());
     if (mvnLayer == nullptr)
         THROW_IE_EXCEPTION << "Cannot convert MVN layer.";
@@ -667,7 +671,7 @@ void MKLDNNMVNNode::execute(mkldnn::stream strm) {
     auto src_data = reinterpret_cast<const float *>(srcMemPtr->GetData());
     auto dst_data = reinterpret_cast<float *>(dstMemPtr->GetData());
 
-    if (layout == NCHW || layout == NCDHW) {
+    if (layout == C || layout == NC || layout == CHW || layout == NCHW || layout == NCDHW) {
         mvn_pln(src_data, dst_data, getParentEdgeAt(0)->getDesc().getDims());
     } else {
         if (output_prec == Precision::U8) {
@@ -710,6 +714,19 @@ void MKLDNNMVNNode::execute(mkldnn::stream strm) {
     }
 }
 
+std::tuple<size_t, size_t, size_t, size_t, size_t> MKLDNNMVNNode::get5dShapes(const SizeVector& dims) {
+    std::tuple<size_t, size_t, size_t, size_t, size_t> shapes;
+    switch (dims.size()) {
+        case 1 : { shapes = std::make_tuple(1, dims[0], 1, 1, 1); break; }
+        case 2 : { shapes = std::make_tuple(dims[0], dims[1], 1, 1, 1); break; }
+        case 3 : { shapes = std::make_tuple(dims[0], dims[1], 1, dims[2], 1); break; }
+        case 4 : { shapes = std::make_tuple(dims[0], dims[1], 1, dims[2], dims[3]); break; }
+        case 5 : { shapes = std::make_tuple(dims[0], dims[1], dims[2], dims[3], dims[4]); break; }
+        default : { THROW_IE_EXCEPTION << "MVN layer with name '" << getCnnLayer()->name << "' doesn't support planar layout with rank: " << dims.size(); }
+    }
+    return shapes;
+}
+
 void MKLDNNMVNNode::mvn_pln(const float* src_data, float* dst_data, const SizeVector& dims) {
     size_t blk_size = 1;  // blk size in vmm
     if (mayiuse(cpu::avx512_common)) {
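For reference, the dims-to-(N, C, D, H, W) mapping that get5dShapes above implements, worked through per rank; the example shapes are illustrative.

//   {24}             -> (1, 24, 1, 1, 1)      rank 1: a bare channel vector
//   {8, 3}           -> (8, 3, 1, 1, 1)       rank 2: NC
//   {8, 3, 224}      -> (8, 3, 1, 224, 1)     rank 3: the spatial dim lands in H
//   {8, 3, 224, 224} -> (8, 3, 1, 224, 224)   rank 4: NCHW with depth 1
//   {8, 3, 2, 4, 4}  -> (8, 3, 2, 4, 4)       rank 5: NCDHW passes through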
@@ -721,11 +738,8 @@ void MKLDNNMVNNode::mvn_pln(const float* src_data, float* dst_data, const SizeVe
     }
 
     size_t dims_size = dims.size();
-    size_t N = (dims_size > 0) ? dims[0] : 1lu;
-    size_t C = (dims_size > 1) ? dims[1] : 1lu;
-    size_t D = (dims_size > 4) ? dims[dims_size - 3] : 1lu;
-    size_t H = (dims_size > 3) ? dims[dims_size - 2] : 1lu;
-    size_t W = (dims_size > 2) ? dims[dims_size - 1] : 1lu;
+    size_t N = 0; size_t C = 0; size_t D = 0; size_t H = 0; size_t W = 0;
+    std::tie(N, C, D, H, W) = get5dShapes(dims);
 
     size_t C1 = H * W;
     size_t C2 = C1 * D;
index 6f4e122..7d2a9c4 100644 (file)
@@ -9,6 +9,7 @@
 #include <string>
 #include <memory>
 #include <vector>
+#include <tuple>
 
 namespace MKLDNNPlugin {
 
@@ -87,6 +88,8 @@ private:
 
     void setPostOps(mkldnn::primitive_attr &attr, bool initWeights = false);
 
+    std::tuple<size_t, size_t, size_t, size_t, size_t> get5dShapes(const InferenceEngine::SizeVector& dims);
+
     bool across_channels = false;
     bool normalize_variance = true;
     float eps = 1e-9f;
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_normalize_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_normalize_node.cpp
new file mode 100644 (file)
index 0000000..9b106b4
--- /dev/null
@@ -0,0 +1,1492 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "mkldnn_quantize_node.h"
+#include "mkldnn_depthwise_node.h"
+#include "mkldnn_activation_node.h"
+#include <mkldnn_extension_utils.h>
+#include <ie_layers_internal.hpp>
+#include "ie_parallel.hpp"
+#include "jit_uni_eltwise.hpp"
+#include "jit_uni_depthwise.hpp"
+#include "jit_uni_quantization.hpp"
+#include "bf16transformer.h"
+
+#include "mkldnn_normalize_node.h"
+
+using namespace mkldnn;
+using namespace MKLDNNPlugin;
+using namespace InferenceEngine;
+using namespace mkldnn::impl;
+using namespace mkldnn::impl::cpu;
+using namespace mkldnn::impl::utils;
+using namespace Xbyak;
+
+#define GET_OFF(field) offsetof(jit_normalize_call_args, field)
+
+template <cpu_isa_t isa>
+struct jit_uni_normalize_modulo_kernel_f32 : public jit_uni_normalize_modulo_kernel, public jit_generator {
+    DECLARE_CPU_JIT_AUX_FUNCTIONS(jit_uni_normalize_modulo_kernel_f32)
+
+    jit_uni_normalize_modulo_kernel_f32(jit_normalize_config_params jcp) : jit_uni_normalize_modulo_kernel(jcp), jit_generator() {
+        this->preamble();
+        mov(reg_src, ptr[reg_params + GET_OFF(src)]);
+        mov(reg_modulo, ptr[reg_params + GET_OFF(modulo)]);
+        mov(reg_work_amount, ptr[reg_params + GET_OFF(work_amount)]);
+        mov(reg_src_stride, ptr[reg_params + GET_OFF(src_stride)]);
+
+        Xbyak::Label modulo_loop_label;
+        Xbyak::Label modulo_loop_end_label;
+
+        uni_vpxor(vmm_sqr_sum, vmm_sqr_sum, vmm_sqr_sum);
+        L(modulo_loop_label);
+        {
+            cmp(reg_work_amount, 0);
+            jle(modulo_loop_end_label, T_NEAR);
+
+            load_vector(vmm_val, ptr[reg_src], jcp_.src_dt);
+            uni_vfmadd231ps(vmm_sqr_sum, vmm_val, vmm_val);
+            if (isa == cpu::sse42 && jcp_.is_blk) {
+                int sse42_offset = 4;
+                load_vector(vmm_val, ptr[reg_src + sse42_offset * jcp_.src_data_size], jcp_.src_dt);
+                uni_vfmadd231ps(vmm_sqr_sum, vmm_val, vmm_val);
+            }
+
+            add(reg_src, reg_src_stride);
+            sub(reg_work_amount, 1);
+
+            jmp(modulo_loop_label, T_NEAR);
+        }
+        L(modulo_loop_end_label);
+
+        if (jcp_.is_nchw && !jcp_.across_spatial) {
+            uni_vmovups(ptr[reg_modulo], vmm_sqr_sum);
+        } else {
+            // hsum+store
+            if (isa == cpu::sse42) {
+                hsum_store(vmm_sqr_sum);
+            } else if (isa == cpu::avx2) {
+                Xbyak::Ymm ymm_sqr_sum = Xbyak::Ymm(vmm_sqr_sum.getIdx());
+                vextractf128(xmm_aux1, ymm_sqr_sum, 0);
+                vextractf128(xmm_aux2, ymm_sqr_sum, 1);
+                addps(xmm_aux1, xmm_aux2);
+                hsum_store(xmm_aux1);
+            } else {
+                Xbyak::Zmm zmm_sqr_sum = Xbyak::Zmm(vmm_sqr_sum.getIdx());
+                vextractf32x4(xmm_aux1, zmm_sqr_sum, 0);
+                vextractf32x4(xmm_aux2, zmm_sqr_sum, 1);
+                addps(xmm_aux1, xmm_aux2);
+                vextractf32x4(xmm_aux2, zmm_sqr_sum, 2);
+                vextractf32x4(xmm_aux3, zmm_sqr_sum, 3);
+                addps(xmm_aux2, xmm_aux3);
+                addps(xmm_aux1, xmm_aux2);
+                hsum_store(xmm_aux1);
+            }
+        }
+
+        this->postamble();
+        ker_ = (decltype(ker_)) this->getCode();
+    }
+
+private:
+    using Vmm = typename conditional3<isa == cpu::sse42, Xbyak::Xmm, isa == cpu::avx2,
+            Xbyak::Ymm, Xbyak::Zmm>::type;
+    size_t vlen = cpu_isa_traits<isa>::vlen;
+
+    Xbyak::Reg64 reg_src = r8;
+    Xbyak::Reg64 reg_work_amount = r9;
+    Xbyak::Reg64 reg_src_stride = r10;
+    Xbyak::Reg64 reg_modulo = rbp;
+    Xbyak::Reg64 reg_params = abi_param1;
+
+    Vmm vmm_val = Vmm(0);
+    Vmm vmm_sqr_sum = Vmm(1);
+    Xbyak::Xmm xmm_aux1 = Xbyak::Xmm(2);
+    Xbyak::Xmm xmm_aux2 = Xbyak::Xmm(3);
+    Xbyak::Xmm xmm_aux3 = Xbyak::Xmm(4);
+
+    inline void hsum_store(Xbyak::Xmm xmm_sqr_sum) {
+        movshdup(xmm_aux3, xmm_sqr_sum);  //  sqr_sum:1,2,3,4; aux3:2,2,4,4
+        addps(xmm_sqr_sum, xmm_aux3);     //  sqr_sum:1+2,2+2,3+4,4+4
+        movhlps(xmm_aux3, xmm_sqr_sum);   //  aux3:3+4,4+4,4,4
+        addps(xmm_sqr_sum, xmm_aux3);     //  sqr_sum:1+2+3+4,...
+        movss(ptr[reg_modulo], xmm_sqr_sum);
+    }
+
+    inline void load_vector(Vmm vmm_src, const Xbyak::Address &op, memory::data_type src_dt) {
+        switch (src_dt) {
+            case memory::f32:
+            case memory::s32:
+                uni_vmovups(vmm_src, op);
+                break;
+            case memory::s8:
+                uni_vpmovsxbd(vmm_src, op);
+                break;
+            case memory::u8:
+                uni_vpmovzxbd(vmm_src, op);
+                break;
+            default:
+                assert(!"unknown src_dt");
+        }
+
+        if (src_dt != memory::f32)
+            uni_vcvtdq2ps(vmm_src, vmm_src);
+    }
+};
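+// For reference, a scalar sketch of what the modulo kernel above computes,
+// assuming a contiguous f32 source (stride and blocked-layout handling elided):
+//
+//     float sqr_sum = 0.f;
+//     for (size_t i = 0; i < work_amount; ++i)
+//         sqr_sum += src[i] * src[i];   // uni_vfmadd231ps accumulates per lane
+//     *modulo = sqr_sum;                // hsum_store folds the lanes, then stores
+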
+
+// dst = src * modulo_inv * scale
+template <cpu_isa_t isa>
+struct jit_uni_normalize_kernel_f32 : public jit_uni_normalize_kernel, public jit_generator {
+    DECLARE_CPU_JIT_AUX_FUNCTIONS(jit_uni_normalize_kernel_f32)
+
+    explicit jit_uni_normalize_kernel_f32(jit_normalize_config_params jcp, const mkldnn_primitive_attr &attr)
+    : jit_uni_normalize_kernel(jcp, attr), jit_generator() {
+        const auto &p = attr_.post_ops_;
+        for (int i = 0; i < p.len_; i++) {
+            auto &post_op = p.entry_[i];
+            if (post_op.is_eltwise()) {
+                eltwise_injectors.push_back(std::make_shared<jit_uni_eltwise_injector_f32<isa>>(
+                        this, post_op.eltwise.alg, post_op.eltwise.alpha, post_op.eltwise.beta));
+            } else if (post_op.is_depthwise()) {
+                depthwise_injectors.push_back(std::make_shared<jit_uni_depthwise_injector_f32<isa>>(
+                        this, post_op.depthwise.alg));
+            } else if (post_op.is_quantization()) {
+                quantization_injectors.push_back(std::make_shared<jit_uni_quantization_injector_f32<isa>>(
+                        this, post_op, vmm_d_weights, vmm_d_bias, reg_d_weights, reg_d_bias));
+            }
+        }
+
+        this->preamble();
+
+        mov(reg_src, ptr[reg_params + GET_OFF(src)]);
+        mov(reg_dst, ptr[reg_params + GET_OFF(dst)]);
+        mov(reg_modulo, ptr[reg_params + GET_OFF(modulo)]);
+        mov(reg_weights, ptr[reg_params + GET_OFF(weights)]);
+        mov(reg_fused_factor, ptr[reg_params + GET_OFF(fused_factor)]);
+        mov(reg_work_amount, ptr[reg_params + GET_OFF(work_amount)]);
+        if (attr_.post_ops_.len_ != 0)
+            mov(reg_oc_off, ptr[reg_params + GET_OFF(oc_off)]);
+        if (isa == avx512_common)
+            uni_vpxor(vmm_zero, vmm_zero, vmm_zero);
+
+        if (jcp_.is_nchw) {
+            normalize_nchw();
+        } else if (jcp_.is_blk) {
+            normalize_blk();
+        } else if (jcp_.is_nhwc) {
+            normalize_nhwc();
+        }
+
+        this->postamble();
+
+        for (auto& inj : eltwise_injectors)
+            inj->prepare_table();
+
+        ker_ = (decltype(ker_)) this->getCode();
+    }
+
+private:
+    using Vmm = typename conditional3<isa == cpu::sse42, Xbyak::Xmm, isa == cpu::avx2,
+            Xbyak::Ymm, Xbyak::Zmm>::type;
+    size_t vlen = cpu_isa_traits<isa>::vlen;
+
+    Xbyak::Reg64 reg_src = r8;
+    Xbyak::Reg64 reg_dst = r9;
+    Xbyak::Reg64 reg_modulo = r10;
+    Xbyak::Reg64 reg_weights = r11;
+    Xbyak::Reg64 reg_fused_factor = r12;
+    Xbyak::Reg64 reg_work_amount = r15;
+    Xbyak::Reg64 reg_params = abi_param1;
+
+    Reg8 reg_tmp_8 = r14b;
+    Reg32 reg_tmp_32 = r14d;
+    Reg64 reg_tmp_64 = r14;
+
+    Xbyak::Reg64 reg_oc_off = rax;
+    Xbyak::Reg64 reg_d_weights = rbx;
+    Xbyak::Reg64 reg_d_bias = rdx;
+
+    Vmm vmm_val = Vmm(0);
+    Xmm xmm_val = Xmm(0);
+    Vmm vmm_scale = Vmm(1);
+    Xmm xmm_scale = Xmm(1);
+    Vmm vmm_modulo = Vmm(2);
+    Xmm xmm_modulo = Xmm(2);
+    Vmm vmm_fused_factor = Vmm(3);
+    Xmm xmm_fused_factor = Xmm(3);
+    Vmm vmm_fused_factor2 = Vmm(4);
+    Xmm xmm_fused_factor2 = Xmm(4);
+
+    Vmm vmm_d_weights = Vmm(5);
+    Vmm vmm_d_bias = Vmm(6);
+    Vmm vmm_zero = Vmm(7);
+
+    std::vector<std::shared_ptr<jit_uni_eltwise_injector_f32<isa>>> eltwise_injectors;
+    std::vector<std::shared_ptr<jit_uni_depthwise_injector_f32<isa>>> depthwise_injectors;
+    std::vector<std::shared_ptr<jit_uni_quantization_injector_f32<isa>>> quantization_injectors;
+
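+    // NCHW: with across_spatial a single pre-fused factor is broadcast once; otherwise
+    // either a per-pixel fused factor (channel_shared) or the per-pixel modulo times a
+    // per-channel weight is applied.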
+    inline void normalize_nchw() {
+        if (jcp_.across_spatial) {
+            uni_vbroadcastss(vmm_fused_factor, ptr[reg_fused_factor]);  // one fused factor whether channel_shared is true or false.
+        } else {
+            if (!jcp_.channel_shared) {
+                uni_vbroadcastss(vmm_scale, ptr[reg_weights]);
+            }
+        }
+
+        Xbyak::Label main_loop_label;
+        Xbyak::Label main_loop_end_label;
+        Xbyak::Label tail_loop_label;
+        Xbyak::Label tail_loop_end_label;
+
+        int step = vlen / sizeof(float);
+        L(main_loop_label);
+        {
+            cmp(reg_work_amount, step);
+            jl(main_loop_end_label, T_NEAR);
+
+            load_vector(vmm_val, ptr[reg_src], jcp_.src_dt);
+            if (jcp_.across_spatial) {
+                uni_vmulps(vmm_val, vmm_val, vmm_fused_factor);
+            } else {
+                if (jcp_.channel_shared) {
+                    uni_vmovups(vmm_fused_factor, ptr[reg_fused_factor]);
+                    uni_vmulps(vmm_val, vmm_val, vmm_fused_factor);
+                    add(reg_fused_factor, vlen);
+                } else {
+                    uni_vmovups(vmm_modulo, ptr[reg_modulo]);  // modulo: ld dynamic
+                    uni_vmulps(vmm_val, vmm_val, vmm_modulo);
+                    uni_vmulps(vmm_val, vmm_val, vmm_scale);    // weight: bc once
+                    add(reg_modulo, vlen);
+                }
+            }
+            if (attr_.post_ops_.len_ != 0) {
+                apply_post_ops(jcp_.dst_dt, 1);
+            }
+            store_vector(ptr[reg_dst], vmm_val, jcp_.dst_dt);
+
+            add(reg_src, step * jcp_.src_data_size);
+            add(reg_dst, step * jcp_.dst_data_size);
+            sub(reg_work_amount, step);
+
+            jmp(main_loop_label, T_NEAR);
+        }
+        L(main_loop_end_label);
+
+        step = 1;
+        L(tail_loop_label);
+        {
+            cmp(reg_work_amount, 1);
+            jl(tail_loop_end_label, T_NEAR);
+
+            load_scalar(xmm_val, ptr[reg_src], jcp_.src_dt);
+            if (jcp_.across_spatial) {
+                uni_vmulps(xmm_val, xmm_val, xmm_fused_factor);
+            } else {
+                if (jcp_.channel_shared) {
+                    load_scalar(xmm_fused_factor, ptr[reg_fused_factor], memory::f32);
+                    uni_vmulps(xmm_val, xmm_val, xmm_fused_factor);
+                    add(reg_fused_factor, step * sizeof(float));
+                } else {
+                    load_scalar(xmm_modulo, ptr[reg_modulo], memory::f32);
+                    uni_vmulps(xmm_val, xmm_val, xmm_modulo);
+                    uni_vmulps(xmm_val, xmm_val, xmm_scale);
+                    add(reg_modulo, step * sizeof(float));
+                }
+            }
+            if (attr_.post_ops_.len_ != 0) {
+                apply_post_ops(jcp_.dst_dt, 1);  // vector and broadcast
+            }
+            store_scalar(ptr[reg_dst], xmm_val, jcp_.dst_dt);
+
+            add(reg_src, step * jcp_.src_data_size);
+            add(reg_dst, step * jcp_.dst_data_size);
+            sub(reg_work_amount, step);
+
+            jmp(tail_loop_label, T_NEAR);
+        }
+        L(tail_loop_end_label);
+    }
+
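+    // NHWC: channel_shared broadcasts one fused factor; otherwise across_spatial loads
+    // a per-channel fused factor, and the per-pixel case multiplies per-channel weights
+    // by a broadcast modulo.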
+    inline void normalize_nhwc() {
+        if (jcp_.channel_shared) {
+            uni_vbroadcastss(vmm_fused_factor, ptr[reg_fused_factor]);
+        } else {
+            if (!jcp_.across_spatial) {
+                uni_vbroadcastss(vmm_modulo, ptr[reg_modulo]);
+            }
+        }
+
+        Xbyak::Label main_loop_label;
+        Xbyak::Label main_loop_end_label;
+        Xbyak::Label tail_loop_label;
+        Xbyak::Label tail_loop_end_label;
+
+        int step = vlen / sizeof(float);
+        L(main_loop_label);
+        {
+            cmp(reg_work_amount, step);
+            jl(main_loop_end_label, T_NEAR);
+
+            load_vector(vmm_val, ptr[reg_src], jcp_.src_dt);
+            if (jcp_.channel_shared) {
+                uni_vmulps(vmm_val, vmm_val, vmm_fused_factor);
+            } else {
+                if (jcp_.across_spatial) {
+                    uni_vmovups(vmm_fused_factor, ptr[reg_fused_factor]);
+                    uni_vmulps(vmm_val, vmm_val, vmm_fused_factor);
+                    add(reg_fused_factor, vlen);
+                } else {
+                    uni_vmovups(vmm_scale, ptr[reg_weights]);
+                    uni_vmulps(vmm_val, vmm_val, vmm_scale);
+                    uni_vmulps(vmm_val, vmm_val, vmm_modulo);
+                    add(reg_weights, vlen);
+                }
+            }
+            if (attr_.post_ops_.len_ != 0) {
+                apply_post_ops(jcp_.dst_dt, 0);
+                add(reg_oc_off, vlen);  // out channel offset of fused ops weights in bytes
+            }
+            store_vector(ptr[reg_dst], vmm_val, jcp_.dst_dt);
+
+            add(reg_src, step * jcp_.src_data_size);
+            add(reg_dst, step * jcp_.dst_data_size);
+            sub(reg_work_amount, step);
+
+            jmp(main_loop_label, T_NEAR);
+        }
+        L(main_loop_end_label);
+
+        step = 1;
+        L(tail_loop_label);
+        {
+            cmp(reg_work_amount, 1);
+            jl(tail_loop_end_label, T_NEAR);
+
+            load_scalar(xmm_val, ptr[reg_src], jcp_.src_dt);
+            if (jcp_.channel_shared) {
+                uni_vmulps(xmm_val, xmm_val, xmm_fused_factor);
+            } else {
+                if (jcp_.across_spatial) {
+                    load_scalar(xmm_fused_factor, ptr[reg_fused_factor], memory::f32);
+                    uni_vmulps(xmm_val, xmm_val, xmm_fused_factor);
+                    add(reg_fused_factor, step * sizeof(float));
+                } else {
+                    load_scalar(xmm_scale, ptr[reg_weights], memory::f32);
+                    uni_vmulps(xmm_val, xmm_val, xmm_scale);
+                    uni_vmulps(xmm_val, xmm_val, xmm_modulo);
+                    add(reg_weights, step * sizeof(float));
+                }
+            }
+            if (attr_.post_ops_.len_ != 0) {
+                apply_post_ops(jcp_.dst_dt, 0);
+                add(reg_oc_off, step * sizeof(float));
+            }
+            store_scalar(ptr[reg_dst], xmm_val, jcp_.dst_dt);
+
+            add(reg_src, step * jcp_.src_data_size);
+            add(reg_dst, step * jcp_.dst_data_size);
+            sub(reg_work_amount, step);
+
+            jmp(tail_loop_label, T_NEAR);
+        }
+        L(tail_loop_end_label);
+    }
+
+    // tails with padding are processed as a full vector for normalize.
+    inline void normalize_blk() {
+        size_t blk_size = 0;
+        size_t simd_w = 0;
+        if (isa == cpu::avx512_common) {
+            blk_size = simd_w = 16;
+        } else if (isa == cpu::avx2) {
+            blk_size = simd_w = 8;
+        } else {
+            blk_size = 8;
+            simd_w = 4;
+        }
+        bool is_sse42 = (isa == cpu::sse42);
+
+        if (jcp_.across_spatial) {
+            if (jcp_.channel_shared) {
+                uni_vbroadcastss(vmm_fused_factor, ptr[reg_fused_factor]);
+            } else {
+                uni_vmovups(vmm_fused_factor, ptr[reg_fused_factor]);
+                if (is_sse42) {
+                    uni_vmovups(vmm_fused_factor2, ptr[reg_fused_factor + simd_w * sizeof(float)]);
+                }
+            }
+
+            Xbyak::Label norm_loop_label;
+            Xbyak::Label norm_loop_end_label;
+
+            L(norm_loop_label);
+            {
+                cmp(reg_work_amount, 0);
+                jle(norm_loop_end_label, T_NEAR);
+
+                load_vector(vmm_val, ptr[reg_src], jcp_.src_dt);
+                uni_vmulps(vmm_val, vmm_val, vmm_fused_factor);
+
+                if (attr_.post_ops_.len_ != 0) {
+                    apply_post_ops(jcp_.dst_dt, 0);
+                }
+                store_vector(ptr[reg_dst], vmm_val, jcp_.dst_dt);
+
+                if (is_sse42) {
+                    int sse42_offset = 4;
+                    load_vector(vmm_val, ptr[reg_src + sse42_offset * jcp_.src_data_size], jcp_.src_dt);
+                    if (jcp_.channel_shared) {
+                        uni_vmulps(vmm_val, vmm_val, vmm_fused_factor);  // bc once
+                    } else {
+                        uni_vmulps(vmm_val, vmm_val, vmm_fused_factor2);  // ld once
+                    }
+                    if (attr_.post_ops_.len_ != 0) {
+                        add(reg_oc_off, sse42_offset * sizeof(float));
+                        apply_post_ops(jcp_.dst_dt, 0);
+                        sub(reg_oc_off, sse42_offset * sizeof(float));
+                    }
+                    store_vector(ptr[reg_dst + sse42_offset * jcp_.dst_data_size], vmm_val, jcp_.dst_dt);
+                }
+                add(reg_src, blk_size * jcp_.src_data_size);
+                add(reg_dst, blk_size * jcp_.dst_data_size);
+
+                sub(reg_work_amount, 1);
+                jmp(norm_loop_label, T_NEAR);
+            }
+            L(norm_loop_end_label);
+        } else {  // across_spatial is false
+            if (jcp_.channel_shared) {
+                uni_vbroadcastss(vmm_fused_factor, ptr[reg_fused_factor]);
+            } else {
+                uni_vbroadcastss(vmm_modulo, ptr[reg_modulo]);
+            }
+            size_t src_stride = jcp_.w * jcp_.h * blk_size * jcp_.src_data_size;
+            size_t dst_stride = jcp_.w * jcp_.h * blk_size * jcp_.dst_data_size;
+
+            Xbyak::Label norm_loop_label;
+            Xbyak::Label norm_loop_end_label;
+
+            L(norm_loop_label);
+            {
+                cmp(reg_work_amount, 0);
+                jle(norm_loop_end_label, T_NEAR);
+
+                load_vector(vmm_val, ptr[reg_src], jcp_.src_dt);
+                if (jcp_.channel_shared) {
+                    uni_vmulps(vmm_val, vmm_val, vmm_fused_factor);
+                } else {
+                    uni_vmovups(vmm_scale, ptr[reg_weights]);
+                    uni_vmulps(vmm_val, vmm_val, vmm_scale);
+                    uni_vmulps(vmm_val, vmm_val, vmm_modulo);
+                    add(reg_weights, vlen);
+                }
+                if (attr_.post_ops_.len_ != 0) {
+                    apply_post_ops(jcp_.dst_dt, 0);
+                    add(reg_oc_off, vlen);  // vlen depends on isa
+                }
+                store_vector(ptr[reg_dst], vmm_val, jcp_.dst_dt);
+
+                if (is_sse42) {
+                    int sse42_offset = 4;
+                    load_vector(vmm_val, ptr[reg_src + sse42_offset * jcp_.src_data_size], jcp_.src_dt);
+                    if (jcp_.channel_shared) {
+                        uni_vmulps(vmm_val, vmm_val, vmm_fused_factor);  // bc once
+                    } else {
+                        uni_vmovups(vmm_scale, ptr[reg_weights]);  // ld dynamic
+                        uni_vmulps(vmm_val, vmm_val, vmm_scale);
+                        uni_vmulps(vmm_val, vmm_val, vmm_modulo);  // bc once
+                        add(reg_weights, vlen);  // 4 * sizeof(float)
+                    }
+                    if (attr_.post_ops_.len_ != 0) {
+                        apply_post_ops(jcp_.dst_dt, 0);
+                        add(reg_oc_off, vlen);  // vlen depends on isa
+                    }
+                    store_vector(ptr[reg_dst + sse42_offset * jcp_.dst_data_size], vmm_val, jcp_.dst_dt);
+                }
+                add(reg_src, src_stride);
+                add(reg_dst, dst_stride);
+
+                sub(reg_work_amount, 1);
+                jmp(norm_loop_label, T_NEAR);
+            }
+            L(norm_loop_end_label);
+        }
+    }
+
+    inline void load_vector(Vmm vmm_src, const Xbyak::Address &op, memory::data_type src_dt) {
+        switch (src_dt) {
+            case memory::f32:
+            case memory::s32:
+                uni_vmovups(vmm_src, op);
+                break;
+            case memory::s8:
+                uni_vpmovsxbd(vmm_src, op);
+                break;
+            case memory::u8:
+                uni_vpmovzxbd(vmm_src, op);
+                break;
+            default:
+                assert(!"unknown src_dt");
+        }
+
+        if (src_dt != memory::f32)
+            uni_vcvtdq2ps(vmm_src, vmm_src);
+    }
+
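+    // Load a single src_dt element into xmm (via reg_tmp for s8/u8) and convert it to f32.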
+    inline void load_scalar(Xmm xmm_src, const Xbyak::Address &op, memory::data_type src_dt) {
+        switch (src_dt) {
+            case memory::f32:
+            case memory::s32:
+                movss(xmm_src, op);
+                break;
+            case memory::s8:
+                movsx(reg_tmp_32, op);
+                movq(xmm_src, reg_tmp_64);
+                break;
+            case memory::u8:
+                movzx(reg_tmp_32, op);
+                movq(xmm_src, reg_tmp_64);
+                break;
+            default:
+                assert(!"unknown src_dt");
+        }
+
+        if (src_dt != data_type::f32) {
+            uni_vcvtdq2ps(xmm_src, xmm_src);
+        }
+    }
+
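+    // Convert the f32 vector back to dst_dt and store it; s8/u8 go through saturating packs.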
+    inline void store_vector(const Xbyak::Address &op, Vmm vmm_dst, memory::data_type dst_dt) {
+        Ymm ymm_dst = Ymm(vmm_dst.getIdx());
+        Xmm xmm_dst = Xmm(vmm_dst.getIdx());
+
+        if (dst_dt == memory::f32) {
+            uni_vmovups(op, vmm_dst);
+        } else if (dst_dt == memory::u8) {
+            uni_vcvtps2dq(vmm_dst, vmm_dst);
+            if (isa == cpu::avx512_common) {
+                vpmaxsd(vmm_dst, vmm_dst, vmm_zero);
+                vpmovusdb(op, vmm_dst);
+            } else {
+                uni_vpackusdw(vmm_dst, vmm_dst, vmm_dst);
+                if (isa != cpu::sse42)
+                    vpermq(ymm_dst, ymm_dst, 0x08);
+                uni_vpackuswb(vmm_dst, vmm_dst, vmm_dst);
+                if (isa != cpu::sse42)
+                    vmovq(op, xmm_dst);
+                else
+                    movd(op, xmm_dst);
+            }
+        } else if (dst_dt == memory::s8) {
+            uni_vcvtps2dq(vmm_dst, vmm_dst);
+            if (isa == cpu::avx512_common) {
+                vpmovsdb(op, vmm_dst);
+            } else {
+                uni_vpackssdw(vmm_dst, vmm_dst, vmm_dst);
+                if (isa != cpu::sse42)
+                    vpermq(ymm_dst, ymm_dst, 0x08);
+                uni_vpacksswb(vmm_dst, vmm_dst, vmm_dst);
+                if (isa != cpu::sse42)
+                    vmovq(op, xmm_dst);
+                else
+                    movd(op, xmm_dst);
+            }
+        }
+    }
+
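+    // Convert and store one dst_dt element; s8/u8 are packed and written through reg_tmp.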
+    inline void store_scalar(const Xbyak::Address &op, Xmm xmm_dst, memory::data_type dst_dt) {
+        if (dst_dt != data_type::f32) {
+            uni_vcvtps2dq(xmm_dst, xmm_dst);
+        }
+
+        switch (dst_dt) {
+            case memory::f32:
+            case memory::s32:
+                movss(op, xmm_dst);
+                break;
+            case memory::s8:
+                uni_vpackssdw(xmm_dst, xmm_dst, xmm_dst);
+                uni_vpacksswb(xmm_dst, xmm_dst, xmm_dst);
+                movq(reg_tmp_64, xmm_dst);
+                mov(op, reg_tmp_8);
+                break;
+            case memory::u8:
+                uni_vpackusdw(xmm_dst, xmm_dst, xmm_dst);
+                uni_vpackuswb(xmm_dst, xmm_dst, xmm_dst);
+                movq(reg_tmp_64, xmm_dst);
+                mov(op, reg_tmp_8);
+                break;
+            default:
+                assert(!"unknown dst_dt");
+        }
+    }
+
+    // scalar: load the scalar into xmm, process it on xmm with padded params, store xmm back as a scalar.
+    // is_broadcast: broadcast the param for depthwise and quantization post-ops when fusing with a plain layout.
+    void apply_post_ops(memory::data_type dst_dt, bool is_broadcast) {
+        const auto &p = attr_.post_ops_;
+        int eltwise_inj_idx = 0;
+        int depthwise_inj_idx = 0;
+        int quantization_inj_idx = 0;
+        for (int i = 0; i < p.len_; i++) {
+            auto& post_op = p.entry_[i];
+            if (post_op.is_eltwise()) {
+                eltwise_injectors[eltwise_inj_idx]->compute_vector_range(vmm_val.getIdx(), vmm_val.getIdx() + 1);
+                eltwise_inj_idx++;
+            } else if (post_op.is_depthwise()) {
+                mov(reg_d_weights, reinterpret_cast<size_t>(post_op.depthwise.weights_data));
+                mov(reg_d_bias, reinterpret_cast<size_t>(post_op.depthwise.biases_data));
+                add(reg_d_weights, reg_oc_off);
+                add(reg_d_bias, reg_oc_off);
+                // weights and biases are padded, so a scalar is processed as a vector.
+                depthwise_injectors[depthwise_inj_idx]->compute_vector_range(vmm_val.getIdx(), vmm_val.getIdx() + 1, reg_d_weights, reg_d_bias, is_broadcast);
+                depthwise_inj_idx++;
+            } else if (post_op.is_quantization()) {
+                bool do_dequantization = post_op.quantization.alg == alg_kind::quantization_quantize_dequantize;
+                bool do_rounding = do_dequantization || dst_dt == memory::f32 || i != p.len_ - 1;
+
+                int s_idx = vmm_val.getIdx();
+
+                quantization_injectors[quantization_inj_idx]->init_crop_ptrs(reg_oc_off);
+                quantization_injectors[quantization_inj_idx]->compute_crop(s_idx, s_idx + 1, 0, 0, is_broadcast);
+
+                quantization_injectors[quantization_inj_idx]->init_input_scale_shift_ptrs(reg_oc_off);
+                quantization_injectors[quantization_inj_idx]->compute_input_scale_shift(s_idx, s_idx + 1, 0, do_rounding, 0, is_broadcast);
+
+                if (do_dequantization) {
+                    quantization_injectors[quantization_inj_idx]->init_output_scale_shift_ptrs(reg_oc_off);
+                    quantization_injectors[quantization_inj_idx]->compute_output_scale_shift(s_idx, s_idx + 1, 0, 0, is_broadcast);
+                }
+
+                quantization_inj_idx++;
+            }
+        }
+    }
+};
+
+MKLDNNNormalizeNode::MKLDNNNormalizeNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng, int socket)
+        : MKLDNNNode(layer, eng, socket) {}
+
+void MKLDNNNormalizeNode::getSupportedDescriptors() {
+    if (!descs.empty())
+        return;
+
+    if (getParentEdges().size() != 1)
+        THROW_IE_EXCEPTION << "Incorrect number of input edges for layer " << getName();
+    if (getChildEdges().empty())
+        THROW_IE_EXCEPTION << "Incorrect number of output edges for layer " << getName();
+
+    if (getParentEdgeAt(0)->getDims().ndims() > 4 || getParentEdgeAt(0)->getDims().ndims() < 2) {
+        THROW_IE_EXCEPTION << "Normalize supports only 2D to 4D blobs!";
+    }
+
+    auto *layer = getCnnLayer().get();
+    if (layer == nullptr)
+        THROW_IE_EXCEPTION << "Cannot get Normalize layer.";
+    across_spatial = layer->GetParamAsBool("across_spatial", false);
+    channel_shared = layer->GetParamAsBool("channel_shared", false);
+    eps = layer->GetParamAsFloat("eps");
+
+    MemoryBlob::Ptr tweights = as<MemoryBlob>(layer->blobs.at("weights"));
+    if (!tweights) {
+        THROW_IE_EXCEPTION << "Weights are not initialized or cannot be cast to MemoryBlob for layer Normalize with name '"
+            << layer->name << "'";
+    }
+    weights_prec = tweights->getTensorDesc().getPrecision();
+
+    if (weights_prec == Precision::FP32) {
+        weights_blob = tweights;
+    } else if (weights_prec == Precision::BF16) {
+        MKLDNNPlugin::BF16Transformer transformer;
+        weights_blob = transformer.convertBF16ToFloat(tweights);
+    } else {
+        // Unknown or unsupported data type, report an error
+        THROW_IE_EXCEPTION << "Weights for layer Normalize with name '" << layer->name <<
+            "' have unsupported data type " << tweights->getTensorDesc().getPrecision();
+    }
+}
+
+void MKLDNNNormalizeNode::initSupportedPrimitiveDescriptors() {
+    if (!supportedPrimitiveDescriptors.empty())
+        return;
+
+    setPostOps(attr, true);
+
+    Precision inputPrecision = getCnnLayer()->insData[0].lock()->getPrecision();
+    Precision outputPrecision = getCnnLayer()->outData[0]->getPrecision();
+
+    if (!fusedWith.empty()) {
+        auto lastFusedLayer = fusedWith[fusedWith.size() - 1].get()->getCnnLayer();
+        if (lastFusedLayer) {
+            outputPrecision = lastFusedLayer->outData[0]->getPrecision();
+        }
+    }
+
+    auto isOneOf = [&](InferenceEngine::Precision precision, std::vector<InferenceEngine::Precision> precisions) {
+        for (auto p : precisions) {
+            if (precision == p) {
+                return true;
+            }
+        }
+        return false;
+    };
+    if (!isOneOf(inputPrecision, {Precision::FP32, Precision::I8, Precision::U8})) {
+        THROW_IE_EXCEPTION << "Unsupported input precision. " << getName();
+    }
+    if (!isOneOf(outputPrecision, {Precision::FP32, Precision::I8, Precision::U8})) {
+        THROW_IE_EXCEPTION << "Unsupported output precision. " << getName();
+    }
+    if (!isOneOf(weights_prec, {Precision::FP32, Precision::BF16})) {
+        THROW_IE_EXCEPTION << "Unsupported weights precision. " << getName();
+    }
+
+    auto inputDataType = MKLDNNExtensionUtils::IEPrecisionToDataType(inputPrecision);
+    auto outputDataType = MKLDNNExtensionUtils::IEPrecisionToDataType(outputPrecision);
+    auto weightsDataType = MKLDNNExtensionUtils::IEPrecisionToDataType(weights_prec);
+
+    input_prec = inputPrecision;
+    output_prec = outputPrecision;
+    src_data_size = MKLDNNExtensionUtils::sizeOfDataType(inputDataType);
+    dst_data_size = MKLDNNExtensionUtils::sizeOfDataType(outputDataType);
+    weights_data_size = MKLDNNExtensionUtils::sizeOfDataType(weightsDataType);
+
+    bool canBeInplace = src_data_size == dst_data_size && getParentEdgeAt(0)->getParent()->getChildEdges().size() == 1;
+
+    InferenceEngine::LayerConfig config;
+    config.dynBatchSupport = false;
+    config.inConfs.resize(1);
+    config.outConfs.resize(1);
+    config.inConfs[0].constant = false;
+    config.outConfs[0].constant = false;
+    config.inConfs[0].inPlace = -1;
+    config.outConfs[0].inPlace = canBeInplace ? 0 : -1;
+
+    auto pushDesc = [&](memory::format format) {
+        config.inConfs[0].desc = MKLDNNMemoryDesc(getParentEdgeAt(0)->getDims(), inputDataType, format);
+        config.outConfs[0].desc = MKLDNNMemoryDesc(getParentEdgeAt(0)->getDims(), outputDataType, format);
+        supportedPrimitiveDescriptors.push_back({config, impl_desc_type::unknown, format});
+    };
+
+    // only the plain layout is supported without sse42
+    if (getParentEdgeAt(0)->getDims().ndims() == 4) {
+        if (mayiuse(cpu::sse42)) {
+            pushDesc(memory::nhwc);
+            if (mayiuse(cpu::avx512_common)) {
+                pushDesc(memory::nChw16c);
+            } else {
+                pushDesc(memory::nChw8c);
+            }
+        }
+    }
+    if (canBeInplace)
+        config.inConfs[0].inPlace = 0;
+    pushDesc(MKLDNNMemory::GetPlainFormat(getChildEdgeAt(0)->getDims()));
+}
+
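+// Translate the fused quantize/depthwise/activation nodes into an mkldnn post-ops chain;
+// when initWeights is set, depthwise weights/biases are copied into padded f32 blobs.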
+void MKLDNNNormalizeNode::setPostOps(mkldnn::primitive_attr &attr, bool initWeights) {
+    int blob_idx = 0;
+    mkldnn::post_ops ops;
+
+    for (auto &node : fusedWith) {
+        auto* quantizeNode = dynamic_cast<MKLDNNQuantizeNode *>(node.get());
+        if (quantizeNode) {
+            quantizeNode->appendPostOps(ops);
+            continue;
+        }
+
+        auto* depthwiseNode = dynamic_cast<MKLDNNDepthwiseNode *>(node.get());
+        if (depthwiseNode) {
+            if (initWeights) {
+                auto* depthwiseLayer = reinterpret_cast<WeightableLayer*>(depthwiseNode->getCnnLayer().get());
+                MKLDNNDims depthwiseDims({static_cast<ptrdiff_t>(rnd_up(getParentEdgeAt(0)->getDims()[1], 16))});
+
+                PostOpsIntBlobMemory.push_back(MKLDNNMemoryPtr(new MKLDNNMemory(getEngine())));
+                PostOpsIntBlobMemory[blob_idx]->Create(depthwiseDims, memory::data_type::f32, memory::format::x);
+
+                PostOpsIntBlobMemory[blob_idx]->SetData(memory::data_type::f32, memory::x,
+                                                        depthwiseLayer->_weights->buffer(),
+                                                        depthwiseLayer->_weights->size() *
+                                                        MKLDNNExtensionUtils::sizeOfDataType(memory::data_type::f32));
+
+                if (depthwiseNode->isBroadcast()) {
+                    float broadcastValue = static_cast<float *>(PostOpsIntBlobMemory[blob_idx]->GetData())[0];
+                    for (int i = 1; i < PostOpsIntBlobMemory[blob_idx]->GetPrimitiveDescriptor().desc().data.dims[0]; i++) {
+                        static_cast<float *>(PostOpsIntBlobMemory[blob_idx]->GetData())[i] = broadcastValue;
+                    }
+                }
+
+                if (depthwiseNode->getAlgorithm() == depthwise_scale_shift) {
+                    PostOpsIntBlobMemory.push_back(MKLDNNMemoryPtr(new MKLDNNMemory(getEngine())));
+                    PostOpsIntBlobMemory[blob_idx + 1]->Create(depthwiseDims, memory::data_type::f32,
+                                                               memory::format::x);
+                    PostOpsIntBlobMemory[blob_idx + 1]->SetData(memory::data_type::f32, memory::x,
+                                                                depthwiseLayer->_biases->buffer(),
+                                                                depthwiseLayer->_biases->size() *
+                                                                MKLDNNExtensionUtils::sizeOfDataType(memory::data_type::f32));
+
+                    if (depthwiseNode->isBroadcast()) {
+                        float broadcastValue = static_cast<float *>(PostOpsIntBlobMemory[blob_idx + 1]->GetData())[0];
+                        for (int i = 1; i < PostOpsIntBlobMemory[blob_idx + 1]->GetPrimitiveDescriptor().desc().data.dims[0]; i++) {
+                            static_cast<float *>(PostOpsIntBlobMemory[blob_idx + 1]->GetData())[i] = broadcastValue;
+                        }
+                    }
+
+                    ops.append_depthwise(depthwiseNode->getAlgorithm(),
+                                         (const float *) PostOpsIntBlobMemory[blob_idx]->GetData(),
+                                         (const float *) PostOpsIntBlobMemory[blob_idx + 1]->GetData());
+
+                    blob_idx += 2;
+                } else {
+                    ops.append_depthwise(depthwiseNode->getAlgorithm(),
+                                         (const float *) PostOpsIntBlobMemory[blob_idx]->GetData(),
+                                         nullptr);
+
+                    blob_idx += 1;
+                }
+            } else {
+                ops.append_depthwise(depthwiseNode->getAlgorithm(),
+                                     nullptr,
+                                     nullptr);
+            }
+
+            continue;
+        }
+
+        auto* activationNode = dynamic_cast<MKLDNNActivationNode *>(node.get());
+        if (activationNode) {
+            ops.append_eltwise(1.0, activationNode->getAlgorithm(), activationNode->getAlpha(), activationNode->getBeta());
+
+            continue;
+        }
+
+        THROW_IE_EXCEPTION << "Fusing of " << NameFromType(node->getType()) << " operation to " << NameFromType(this->getType()) << " node is not implemented";
+    }
+
+    attr.set_post_ops(ops);
+}
+
+void MKLDNNNormalizeNode::createPrimitive() {
+    auto& dstMemPtr = getChildEdgeAt(0)->getMemoryPtr();
+    auto& srcMemPtr = getParentEdgeAt(0)->getMemoryPtr();
+    if (!dstMemPtr || !dstMemPtr->GetPrimitivePtr())
+        THROW_IE_EXCEPTION << "Destination memory was not allocated.";
+    if (!srcMemPtr || !srcMemPtr->GetPrimitivePtr())
+        THROW_IE_EXCEPTION << "Input memory was not allocated.";
+    if (getSelectedPrimitiveDescriptor() == nullptr)
+        THROW_IE_EXCEPTION << "Preferable primitive descriptor is not set.";
+
+    auto selectedPD = getSelectedPrimitiveDescriptor();
+    Layout selected_layout = selectedPD->getConfig().inConfs[0].desc.getLayout();
+    auto jcp = jit_normalize_config_params();
+    jcp.src_dt = MKLDNNExtensionUtils::IEPrecisionToDataType(selectedPD->getConfig().inConfs[0].desc.getPrecision());
+    jcp.dst_dt = MKLDNNExtensionUtils::IEPrecisionToDataType(selectedPD->getConfig().outConfs[0].desc.getPrecision());
+    jcp.src_data_size = MKLDNNExtensionUtils::sizeOfDataType(jcp.src_dt);
+    jcp.dst_data_size = MKLDNNExtensionUtils::sizeOfDataType(jcp.dst_dt);
+    jcp.is_nchw = selected_layout == MKLDNNMemory::GetPlainLayout(getChildEdgeAt(0)->getDims());
+    jcp.is_nhwc = selected_layout == Layout::NHWC;
+    jcp.is_blk = selected_layout == Layout::BLOCKED;
+    jcp.across_spatial = across_spatial;
+    jcp.channel_shared = channel_shared;
+    auto dims = getParentEdgeAt(0)->getDesc().getDims();
+    size_t dims_size = dims.size();
+    jcp.n = (dims_size > 0) ? dims[0] : 1lu;
+    jcp.c = (dims_size > 1) ? dims[1] : 1lu;
+    jcp.h = (dims_size > 2) ? dims[2] : 1lu;
+    jcp.w = (dims_size > 3) ? dims[3] : 1lu;
+
+    if (mayiuse(cpu::avx512_common)) {
+        normalize_modulo_kernel.reset(new jit_uni_normalize_modulo_kernel_f32<cpu::avx512_common>(jcp));
+        normalize_kernel.reset(new jit_uni_normalize_kernel_f32<cpu::avx512_common>(jcp, *attr.get()));
+    } else if (mayiuse(cpu::avx2)) {
+        normalize_modulo_kernel.reset(new jit_uni_normalize_modulo_kernel_f32<cpu::avx2>(jcp));
+        normalize_kernel.reset(new jit_uni_normalize_kernel_f32<cpu::avx2>(jcp, *attr.get()));
+    } else if (mayiuse(cpu::sse42)) {
+        normalize_modulo_kernel.reset(new jit_uni_normalize_modulo_kernel_f32<cpu::sse42>(jcp));
+        normalize_kernel.reset(new jit_uni_normalize_kernel_f32<cpu::sse42>(jcp, *attr.get()));
+    }
+
+    const auto &p = (*attr.get()).post_ops_;
+    for (int i = 0; i < p.len_; i++) {
+        auto &post_op = p.entry_[i];
+        if (post_op.is_eltwise()) {
+            eltwise_injectors_ref.push_back(std::make_shared<ref_eltwise_scalar_fwd_t>(
+                post_op.eltwise.alg, post_op.eltwise.alpha, post_op.eltwise.beta));
+        } else if (post_op.is_depthwise()) {
+            depthwise_injectors_ref.push_back(std::make_shared<ref_depthwise_scalar_fwd_t>(
+                    post_op.depthwise.alg));
+        }
+    }
+}
+
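+// Dispatch on input/output precision and run the templated normalize_function.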
+void MKLDNNNormalizeNode::execute(mkldnn::stream strm) {
+    auto &srcMemPtr = getParentEdgeAt(0)->getMemoryPtr();
+    auto &dstMemPtr = getChildEdgeAt(0)->getMemoryPtr();
+    const uint8_t *src_ptr = reinterpret_cast<const uint8_t*>(srcMemPtr->GetData()) +
+            srcMemPtr->GetDescriptor().data.layout_desc.blocking.offset_padding *
+            MKLDNNExtensionUtils::sizeOfDataType(mkldnn::memory::data_type(srcMemPtr->GetDescriptor().data.data_type));
+    uint8_t *dst_ptr = reinterpret_cast<uint8_t*>(dstMemPtr->GetData()) +
+            dstMemPtr->GetDescriptor().data.layout_desc.blocking.offset_padding *
+            MKLDNNExtensionUtils::sizeOfDataType(mkldnn::memory::data_type(dstMemPtr->GetDescriptor().data.data_type));
+
+    auto dims = getParentEdgeAt(0)->getDesc().getDims();
+
+    if (output_prec == Precision::U8) {
+        auto dst_data = reinterpret_cast<uint8_t *>(dst_ptr);
+        if (input_prec == Precision::U8) {
+            auto src_data = reinterpret_cast<const uint8_t *>(src_ptr);
+            normalize_function<uint8_t, uint8_t>(src_data, dst_data, dims);
+        } else if (input_prec == Precision::I8) {
+            auto src_data = reinterpret_cast<const int8_t *>(src_ptr);
+            normalize_function<int8_t, uint8_t>(src_data, dst_data, dims);
+        } else if (input_prec == Precision::FP32) {
+            auto src_data = reinterpret_cast<const float *>(src_ptr);
+            normalize_function<float, uint8_t>(src_data, dst_data, dims);
+        }
+    } else if (output_prec == Precision::I8) {
+        auto dst_data = reinterpret_cast<int8_t *>(dst_ptr);
+        if (input_prec == Precision::U8) {
+            auto src_data = reinterpret_cast<const uint8_t *>(src_ptr);
+            normalize_function<uint8_t, int8_t>(src_data, dst_data, dims);
+        } else if (input_prec == Precision::I8) {
+            auto src_data = reinterpret_cast<const int8_t *>(src_ptr);
+            normalize_function<int8_t, int8_t>(src_data, dst_data, dims);
+        } else if (input_prec == Precision::FP32) {
+            auto src_data = reinterpret_cast<const float *>(src_ptr);
+            normalize_function<float, int8_t>(src_data, dst_data, dims);
+        }
+    } else if (output_prec == Precision::FP32) {
+        auto dst_data = reinterpret_cast<float *>(dst_ptr);
+        if (input_prec == Precision::U8) {
+            auto src_data = reinterpret_cast<const uint8_t *>(src_ptr);
+            normalize_function<uint8_t, float>(src_data, dst_data, dims);
+        } else if (input_prec == Precision::I8) {
+            auto src_data = reinterpret_cast<const int8_t *>(src_ptr);
+            normalize_function<int8_t, float>(src_data, dst_data, dims);
+        } else if (input_prec == Precision::FP32) {
+            auto src_data = reinterpret_cast<const float *>(src_ptr);
+            normalize_function<float, float>(src_data, dst_data, dims);
+        }
+    }
+}
+
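+// Host-side NCHW path: dst = src * weight / (sqrt(sum(src^2)) + eps); the sum runs over
+// C*H*W per batch when across_spatial, otherwise over C per spatial position.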
+template <typename in_data_t, typename out_data_t>
+void MKLDNNNormalizeNode::normalize_nchw(const in_data_t* src_data, out_data_t* dst_data, const InferenceEngine::SizeVector& dims) {
+    size_t blk_size = 1;  // elt in vmm
+    if (mayiuse(cpu::avx512_common)) {
+        blk_size = 16;
+    } else if (mayiuse(cpu::avx2)) {
+        blk_size = 8;
+    } else if (mayiuse(cpu::sse42)) {
+        blk_size = 4;
+    }
+
+    size_t dims_size = dims.size();
+    size_t W = (dims_size > 3) ? dims[3] : 1lu;
+    size_t H = (dims_size > 2) ? dims[2] : 1lu;
+    size_t C = (dims_size > 1) ? dims[1] : 1lu;
+    size_t B = (dims_size > 0) ? dims[0] : 1lu;
+    float *weights = weights_blob->buffer().as<float *>();
+
+    for (size_t b = 0lu; b < B; b++) {
+        const in_data_t *src_data_b = src_data + b * C * H * W;
+        out_data_t *dst_data_b = dst_data + b * C * H * W;
+        if (across_spatial) {
+            // modulo
+            float addition_identity = 0.0f;
+            float modulo = 0.0f;
+            modulo = parallel_sum(C, addition_identity, [&](int ic) -> float {
+                const in_data_t *src_data_bc = src_data_b + ic * H * W;
+                float modulo_kernel = 0.0f;
+                float modulo_tail = 0.0f;
+                size_t tail_start = 0;
+
+                auto arg = jit_normalize_call_args();
+                arg.src = src_data_bc;
+                arg.modulo = static_cast<float*>(&modulo_kernel);
+                arg.src_stride = blk_size * sizeof(in_data_t);
+                arg.work_amount = (W * H) / blk_size;
+                (*normalize_modulo_kernel)(&arg);
+
+                tail_start = (W * H / blk_size) * blk_size;
+
+                // tail
+                for (size_t tail = tail_start; tail < H * W; tail++) {
+                    modulo_tail += src_data_bc[tail] * src_data_bc[tail];
+                }
+                return modulo_kernel + modulo_tail;
+            });
+
+            modulo = std::sqrt(modulo);
+            float modulo_inv = 1.0f / (modulo + eps);
+
+            // normalize
+            parallel_for(C, [&](size_t ic) {
+                const in_data_t *src_data_bc = src_data_b + ic * H * W;
+                out_data_t *dst_data_bc = dst_data_b + ic * H * W;
+                float fused_weight_modulo = channel_shared ? (weights[0] * modulo_inv) : (weights[ic] * modulo_inv);
+                auto arg = jit_normalize_call_args();
+                arg.src = src_data_bc;
+                arg.dst = dst_data_bc;
+                arg.fused_factor = static_cast<float*>(&fused_weight_modulo);  // broadcast once
+                arg.oc_off = ic * sizeof(float);
+                arg.work_amount = static_cast<size_t>(W * H);
+                (*normalize_kernel)(&arg);
+            });
+        } else {  // across_spatial: false
+            // moduloM
+            std::vector<float> moduloM(H * W, 0.f);
+            size_t blocks_num = div_up(H * W, blk_size);
+            parallel_for(blocks_num, [&](size_t ib) {
+                const in_data_t *src_data_b_ib = src_data_b + ib * blk_size;
+                size_t min_cb = (std::min)(blk_size, (H * W) - (ib * blk_size));
+                if (min_cb == blk_size) {
+                    auto arg = jit_normalize_call_args();
+                    arg.src = src_data_b_ib;
+                    arg.modulo = static_cast<float*>(&moduloM[ib * blk_size]);
+                    arg.src_stride = W * H * sizeof(in_data_t);
+                    arg.work_amount = C;
+                    (*normalize_modulo_kernel)(&arg);
+                } else {
+                    for (size_t c = 0; c < C; c++) {
+                        const in_data_t *src_data_b_ib_c = src_data_b_ib + W * H * c;
+                        for (size_t blk = 0; blk < min_cb; blk++) {
+                            moduloM[ib * blk_size + blk] += src_data_b_ib_c[blk] * src_data_b_ib_c[blk];
+                        }
+                    }
+                }
+            });
+
+            for (size_t m = 0; m < H * W; m++) {
+                moduloM[m] = 1.0f / (std::sqrt(moduloM[m]) + eps);
+                if (channel_shared)
+                    moduloM[m] = moduloM[m] * weights[0];
+            }
+
+            // normalize
+            parallel_for(C, [&](size_t ic) {
+                const in_data_t *src_data_bc = src_data_b + ic * H * W;
+                out_data_t *dst_data_bc = dst_data_b + ic * H * W;
+                auto arg = jit_normalize_call_args();
+                arg.src = src_data_bc;
+                arg.dst = dst_data_bc;
+                if (channel_shared) {
+                    arg.fused_factor = static_cast<float*>(&moduloM[0]);  // ld dynamic
+                } else {
+                    arg.modulo = static_cast<float*>(&moduloM[0]);    // ld dynamic
+                    arg.weights = static_cast<float*>(&weights[ic]);  // bc once
+                }
+                arg.oc_off = ic * sizeof(float);
+                arg.work_amount = static_cast<size_t>(W * H);
+                (*normalize_kernel)(&arg);
+            });
+        }
+    }
+}
+
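+// Scalar reference implementation of the NCHW path, used on machines without sse42.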
+template <typename in_data_t, typename out_data_t>
+void MKLDNNNormalizeNode::normalize_nchw_ref(const in_data_t* src_data, out_data_t* dst_data, const InferenceEngine::SizeVector& dims) {
+    size_t dims_size = dims.size();
+    size_t W = (dims_size > 3) ? dims[3] : 1lu;
+    size_t H = (dims_size > 2) ? dims[2] : 1lu;
+    size_t C = (dims_size > 1) ? dims[1] : 1lu;
+    size_t B = (dims_size > 0) ? dims[0] : 1lu;
+    float *weights = weights_blob->buffer().as<float *>();
+
+    for (size_t b = 0lu; b < B; b++) {
+        const in_data_t *src_data_b = src_data + b * C * H * W;
+        out_data_t *dst_data_b = dst_data + b * C * H * W;
+        if (across_spatial) {
+            // modulo
+            float addition_identity = 0.0f;
+            float modulo = 0.0f;
+            modulo = parallel_sum(C, addition_identity, [&](int ic) -> float {
+                const in_data_t *src_data_bc = src_data_b + ic * H * W;
+                float modulo_c = 0.0f;
+                for (size_t m = 0; m < H * W; m++) {
+                    modulo_c += src_data_bc[m] * src_data_bc[m];
+                }
+                return modulo_c;
+            });
+
+            modulo = std::sqrt(modulo);
+            float modulo_inv = 1.0f / (modulo + eps);
+
+            // normalize
+            parallel_for(C, [&](size_t ic) {
+                const in_data_t *src_data_bc = src_data_b + ic * H * W;
+                out_data_t *dst_data_bc = dst_data_b + ic * H * W;
+                float fused_weight_modulo = channel_shared ? (weights[0] * modulo_inv) : (weights[ic] * modulo_inv);
+                for (size_t m = 0; m < W * H; m++) {
+                    float dst_value = src_data_bc[m] * fused_weight_modulo;
+                    apply_post_ops_scalar(dst_value, ic);
+                    if (output_prec == Precision::U8) {
+                        dst_data_bc[m] = (dst_value >= 0) ? dst_value : 0;
+                    } else {
+                        dst_data_bc[m] = dst_value;
+                    }
+                }
+            });
+        } else {  // across_spatial: false
+            // moduloM
+            std::vector<float> moduloM(H * W, 0.f);
+            parallel_for(H, [&](size_t ih) {
+                size_t offset_h = ih * W;
+                const in_data_t *src_data_b_ih = src_data_b + offset_h;
+                for (size_t c = 0; c < C; c++) {
+                    const in_data_t *src_data_b_ih_c = src_data_b_ih + W * H * c;
+                    for (size_t w = 0; w < W; w++) {
+                        moduloM[offset_h + w] += src_data_b_ih_c[w] * src_data_b_ih_c[w];
+                    }
+                }
+            });
+
+            for (size_t m = 0; m < H * W; m++) {
+                moduloM[m] = 1.0f / (std::sqrt(moduloM[m]) + eps);
+                if (channel_shared)
+                    moduloM[m] = moduloM[m] * weights[0];
+            }
+
+            // normalize
+            parallel_for(C, [&](size_t ic) {
+                const in_data_t *src_data_bc = src_data_b + ic * H * W;
+                out_data_t *dst_data_bc = dst_data_b + ic * H * W;
+                for (size_t m = 0; m < W * H; m++) {
+                    float dst_value = channel_shared ? src_data_bc[m] * moduloM[m] :
+                                      src_data_bc[m] * moduloM[m] * weights[ic];
+                    apply_post_ops_scalar(dst_value, ic);
+                    if (output_prec == Precision::U8) {
+                        dst_data_bc[m] = (dst_value >= 0) ? dst_value : 0;
+                    } else {
+                        dst_data_bc[m] = dst_value;
+                    }
+                }
+            });
+        }
+    }
+}
+
+template <typename in_data_t, typename out_data_t>
+void MKLDNNNormalizeNode::normalize_nhwc(const in_data_t* src_data, out_data_t* dst_data, const InferenceEngine::SizeVector& dims) {
+    size_t blk_size = 1;  // elt in vmm
+    if (mayiuse(cpu::avx512_common)) {
+        blk_size = 16;
+    } else if (mayiuse(cpu::avx2)) {
+        blk_size = 8;
+    } else if (mayiuse(cpu::sse42)) {
+        blk_size = 4;
+    }
+
+    size_t dims_size = dims.size();
+    size_t W = (dims_size > 3) ? dims[3] : 1lu;
+    size_t H = (dims_size > 2) ? dims[2] : 1lu;
+    size_t C = (dims_size > 1) ? dims[1] : 1lu;
+    size_t B = (dims_size > 0) ? dims[0] : 1lu;
+    float *weights = weights_blob->buffer().as<float *>();
+
+    for (size_t b = 0lu; b < B; b++) {
+        const in_data_t *src_data_b = src_data + b * C * H * W;
+        out_data_t *dst_data_b = dst_data + b * C * H * W;
+        if (across_spatial) {
+            // modulo
+            float addition_identity = 0;
+            float modulo = 0.0f;
+            modulo = parallel_sum(H, addition_identity, [&](int ih) -> float {
+                size_t tail_start = 0;
+                const in_data_t *src_data_bh = src_data_b + ih * C * W;
+                float modulo_kernel = 0.f;
+                float modulo_tail = 0.f;
+
+                auto arg = jit_normalize_call_args();
+                arg.src = src_data_bh;
+                arg.modulo = static_cast<float*>(&modulo_kernel);
+                arg.src_stride = blk_size * sizeof(in_data_t);
+                arg.work_amount = (C * W) / blk_size;
+                (*normalize_modulo_kernel)(&arg);
+
+                tail_start = (C * W / blk_size) * blk_size;
+
+                // tail
+                for (size_t tail = tail_start; tail < C * W; tail++) {
+                    modulo_tail += src_data_bh[tail] * src_data_bh[tail];
+                }
+                return modulo_kernel + modulo_tail;
+            });
+            modulo = std::sqrt(modulo);
+            float modulo_inv = 1.0f / (modulo + eps);
+
+            // normalize
+            if (channel_shared) {
+                float fused_weight_modulo = weights[0] * modulo_inv;
+                parallel_for2d(H, W, [&](int ih, int iw) {
+                    const in_data_t *src_data_bhw = src_data_b + ih * C * W + iw * C;
+                    out_data_t *dst_data_bhw = dst_data_b + ih * C * W + iw * C;
+                    auto arg = jit_normalize_call_args();
+                    arg.src = src_data_bhw;
+                    arg.dst = dst_data_bhw;
+                    arg.fused_factor = static_cast<float*>(&fused_weight_modulo);  // bc static
+                    arg.oc_off = 0;
+                    arg.work_amount = static_cast<size_t>(C);
+                    (*normalize_kernel)(&arg);
+                });
+            } else {  // channel_shared=false
+                std::vector<float> fused_weight_modulo(C);
+                for (size_t c = 0; c < C; c++) {
+                    fused_weight_modulo[c] = weights[c] * modulo_inv;
+                }
+                parallel_for2d(H, W, [&](int ih, int iw) {
+                    const in_data_t *src_data_bhw = src_data_b + ih * C * W + iw * C;
+                    out_data_t *dst_data_bhw = dst_data_b + ih * C * W + iw * C;
+                    auto arg = jit_normalize_call_args();
+                    arg.src = src_data_bhw;
+                    arg.dst = dst_data_bhw;
+                    arg.fused_factor = static_cast<float *>(&fused_weight_modulo[0]);  // ld dynamic
+                    arg.oc_off = 0;
+                    arg.work_amount = static_cast<size_t>(C);
+                    (*normalize_kernel)(&arg);
+                });
+            }
+        } else {  // for across_spatial=false
+            parallel_for2d(H, W, [&](int ih, int iw) {
+                // modulo
+                float modulo = 0.f;
+                const in_data_t *src_data_bhw = src_data_b + ih * C * W + iw * C;
+                out_data_t *dst_data_bhw = dst_data_b + ih * C * W + iw * C;
+                auto arg = jit_normalize_call_args();
+                arg.src = src_data_bhw;
+                arg.modulo = static_cast<float*>(&modulo);
+                arg.src_stride = blk_size * sizeof(in_data_t);
+                arg.work_amount = C / blk_size;
+                (*normalize_modulo_kernel)(&arg);
+
+                size_t tail_start = (C / blk_size) * blk_size;
+
+                // for tail
+                for (size_t c = tail_start; c < C; c++) {
+                    modulo += src_data_bhw[c] * src_data_bhw[c];
+                }
+
+                modulo = std::sqrt(modulo);
+                float modulo_inv = 1.0f / (modulo + eps);
+
+                // normalize
+                arg.dst = dst_data_bhw;
+                float fused_weight_modulo = 0;
+                if (channel_shared) {
+                    fused_weight_modulo = modulo_inv * weights[0];
+                    arg.fused_factor = static_cast<float*>(&fused_weight_modulo);  // bc static
+                } else {
+                    arg.modulo = static_cast<float*>(&modulo_inv);  // bc static
+                    arg.weights = static_cast<float*>(&weights[0]); // ld dynamic
+                }
+                arg.work_amount = C;
+                arg.oc_off = 0;
+                (*normalize_kernel)(&arg);
+            });
+        }
+    }
+}
+
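+// Blocked layout (nChw8c/nChw16c): channels are grouped into blocks of blk_size with zero
+// padding in the last block; padded weights let tails go through the vector kernel.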
+template <typename in_data_t, typename out_data_t>
+void MKLDNNNormalizeNode::normalize_blk(const in_data_t* src_data, out_data_t* dst_data, const InferenceEngine::SizeVector& dims) {
+    size_t blk_size = 1;  // channel blk for memory layout
+    if (mayiuse(cpu::avx512_common)) {
+        blk_size = 16;
+    } else if (mayiuse(cpu::avx2)) {
+        blk_size = 8;
+    } else if (mayiuse(cpu::sse42)) {
+        blk_size = 8;
+    }
+
+    size_t dims_size = dims.size();
+    size_t W = (dims_size > 3) ? dims[3] : 1lu;
+    size_t H = (dims_size > 2) ? dims[2] : 1lu;
+    size_t C = (dims_size > 1) ? dims[1] : 1lu;
+    size_t B = (dims_size > 0) ? dims[0] : 1lu;
+    float *weights = weights_blob->buffer().as<float *>();
+
+    size_t CB = div_up(C, blk_size);
+
+    // normalize for tails: data and norm weights are padded, so tails are processed as full vectors;
+    // post ops for tails: post-ops params are padded as well.
+    std::vector<float> weights_padding(CB * blk_size);
+    if (!channel_shared) {
+        memcpy(static_cast<float*>(&weights_padding[0]), weights, C * sizeof(float));
+    }
+
+    for (size_t b = 0lu; b < B; b++) {
+        const in_data_t *src_data_b = src_data + b * CB * H * W * blk_size;
+        out_data_t *dst_data_b = dst_data + b * CB * H * W * blk_size;
+        if (across_spatial) {
+            // modulo
+            float modulo = 0.0f;
+            float addition_identity = 0.0f;
+            modulo = parallel_sum2d(CB, H, addition_identity, [&](size_t cb, size_t h) -> float {
+                // handle W * blk_size data
+                const in_data_t *src_data_b_cb_h = src_data_b + cb * H * W * blk_size + h * W * blk_size;
+                size_t min_cb = (std::min)(blk_size, C - cb * blk_size);
+                float modulo_w_blk = 0.0f;
+                if (min_cb == blk_size) {
+                    auto arg = jit_normalize_call_args();
+                    arg.src = src_data_b_cb_h;
+                    arg.modulo = static_cast<float*>(&modulo_w_blk);
+                    arg.src_stride = blk_size * sizeof(in_data_t);
+                    arg.work_amount = W;
+                    (*normalize_modulo_kernel)(&arg);
+                } else {
+                    for (size_t w = 0; w < W; w++) {
+                        const in_data_t *src_data_b_cb_h_w = src_data_b_cb_h + w * blk_size;
+                        for (size_t c = 0; c < min_cb; c++) {
+                            modulo_w_blk += src_data_b_cb_h_w[c] * src_data_b_cb_h_w[c];
+                        }
+                    }
+                }
+                return modulo_w_blk;
+            });
+
+            modulo = std::sqrt(modulo);
+            float modulo_inv = 1.0f / (modulo + eps);
+
+            // normalize
+            if (channel_shared) {
+                float fused_weight_modulo = weights[0] * modulo_inv;
+                parallel_for2d(CB, H, [&](size_t cb, size_t h) {
+                    const in_data_t *src_data_b_cb_h = src_data_b + cb * H * W * blk_size + h * W * blk_size;
+                    out_data_t *dst_data_b_cb_h = dst_data_b + cb * H * W * blk_size + h * W * blk_size;
+                    auto arg = jit_normalize_call_args();
+                    arg.src = src_data_b_cb_h;
+                    arg.dst = dst_data_b_cb_h;
+                    arg.fused_factor = static_cast<float*>(&fused_weight_modulo);  // broadcast once
+                    arg.work_amount = static_cast<size_t>(W);
+                    arg.oc_off = cb * blk_size * sizeof(float);
+                    (*normalize_kernel)(&arg);
+                });
+            } else {
+                for (size_t c = 0; c < C; c++) {
+                    weights_padding[c] = weights_padding[c] * modulo_inv;
+                }
+                parallel_for2d(CB, H, [&](size_t cb, size_t h) {
+                    const in_data_t *src_data_b_cb_h = src_data_b + cb * H * W * blk_size + h * W * blk_size;
+                    out_data_t *dst_data_b_cb_h = dst_data_b + cb * H * W * blk_size + h * W * blk_size;
+                    auto arg = jit_normalize_call_args();
+                    arg.src = src_data_b_cb_h;
+                    arg.dst = dst_data_b_cb_h;
+                    arg.fused_factor = static_cast<float*>(&weights_padding[cb * blk_size]);  // load once
+                    arg.work_amount = static_cast<size_t>(W);
+                    arg.oc_off = cb * blk_size  * sizeof(float);
+                    (*normalize_kernel)(&arg);
+                });
+            }
+        } else {  // across_spatial: false
+            parallel_for2d(H, W, [&](size_t ih, size_t iw) {
+                // modulo
+                float modulo = 0.0f;
+                const in_data_t *src_data_bhw = src_data_b + ih * W * blk_size + iw * blk_size;
+                out_data_t *dst_data_bhw = dst_data_b + ih * W * blk_size + iw * blk_size;
+                auto arg = jit_normalize_call_args();
+                arg.src = src_data_bhw;
+                arg.modulo = static_cast<float*>(&modulo);
+                arg.src_stride = blk_size * W * H * sizeof(in_data_t);
+                arg.work_amount = C / blk_size;  // CB or CB-1
+                (*normalize_modulo_kernel)(&arg);
+                // for tail
+                size_t padding = CB * blk_size - C;
+                if (padding > 0) {
+                    size_t tail = blk_size - padding;
+                    const in_data_t *src_data_bhw_lastCB = src_data_bhw + (CB - 1) * blk_size * W * H;
+                    for (size_t c = 0; c < tail; c++) {
+                        modulo += src_data_bhw_lastCB[c] * src_data_bhw_lastCB[c];
+                    }
+                }
+
+                modulo = std::sqrt(modulo);
+                float modulo_inv = 1.0f / (modulo + eps);
+
+                // normalize
+                arg.dst = dst_data_bhw;
+                float fused_weight_modulo = 0;
+                if (channel_shared) {
+                    fused_weight_modulo = weights[0] * modulo_inv;
+                    arg.fused_factor = static_cast<float*>(&fused_weight_modulo);  // broadcast
+                } else {
+                    arg.weights = static_cast<float*>(&weights_padding[0]);  // load
+                    arg.modulo = static_cast<float*>(&modulo_inv);  // broadcast
+                }
+                arg.work_amount = CB;
+                arg.oc_off = 0;
+                (*normalize_kernel)(&arg);
+            });
+        }
+    }
+}
+
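+// Layout dispatch: the JIT paths below require SSE4.2 and successfully
+// compiled kernels; without them only the plain-layout reference path runs.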
+template <typename in_data_t, typename out_data_t>
+void MKLDNNNormalizeNode::normalize_function(const in_data_t* src_data, out_data_t* dst_data, const InferenceEngine::SizeVector& dims) {
+    auto selectedPD = getSelectedPrimitiveDescriptor();
+    Layout selected_layout = selectedPD->getConfig().inConfs[0].desc.getLayout();
+    if (mayiuse(cpu::sse42) && normalize_modulo_kernel && normalize_kernel) {
+        if (selected_layout == MKLDNNMemory::GetPlainLayout(getChildEdgeAt(0)->getDims())) {
+            normalize_nchw(src_data, dst_data, dims);
+        } else if (selected_layout == Layout::NHWC) {
+            normalize_nhwc(src_data, dst_data, dims);
+        } else if (selected_layout == Layout::BLOCKED) {
+            normalize_blk(src_data, dst_data, dims);
+        } else {
+            THROW_IE_EXCEPTION << "The selected layout is not supported.";
+        }
+    } else {
+        if (selected_layout == MKLDNNMemory::GetPlainLayout(getChildEdgeAt(0)->getDims())) {
+            normalize_nchw_ref(src_data, dst_data, dims);
+        } else {
+            THROW_IE_EXCEPTION << "Only support plain layout on machine w/o sse42.";
+        }
+    }
+}
+
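+// Scalar reference for the fused post-op chain, mirroring the JIT injectors:
+// eltwise and depthwise ops apply in order; a quantization post-op clamps to
+// [crop_low, crop_high], applies the input scale/shift, optionally rounds,
+// and, for quantize-dequantize, applies the output scale/shift.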
+inline void MKLDNNNormalizeNode::apply_post_ops_scalar(float &dst_value, int index_c) {
+    const auto &p = (*attr.get()).post_ops_;
+    int eltwise_inj_idx = 0;
+    int depthwise_inj_idx = 0;
+    for (int i = 0; i < p.len_; i++) {
+        auto &post_op = p.entry_[i];
+        if (post_op.is_eltwise()) {
+            dst_value = eltwise_injectors_ref[eltwise_inj_idx]->compute_scalar(dst_value);
+            eltwise_inj_idx++;
+        } else if (post_op.is_depthwise()) {
+            auto depthwise_weights = post_op.depthwise.weights_data + index_c;
+            auto depthwise_bias = post_op.depthwise.biases_data + index_c;
+            dst_value = depthwise_injectors_ref[depthwise_inj_idx]->compute_scalar(dst_value, depthwise_weights, depthwise_bias);
+            depthwise_inj_idx++;
+        } else if (post_op.is_quantization()) {
+            bool do_dequantization = post_op.quantization.alg == alg_kind::quantization_quantize_dequantize;
+            bool do_rounding = do_dequantization || output_prec == Precision::FP32 || i != p.len_ - 1;
+
+            auto quant = post_op.quantization;
+
+            float crop_low = quant.crop_low_data->shifts_[quant.crop_low_data->count_ == 1 ? 0 : index_c];
+            float crop_high = quant.crop_high_data->shifts_[quant.crop_high_data->count_ == 1 ? 0 : index_c];
+            float input_scale = quant.input_scale_data->scales_[quant.input_scale_data->count_ == 1 ? 0 : index_c];
+            float input_shift = quant.input_shift_data->shifts_[quant.input_shift_data->count_ == 1 ? 0 : index_c];
+
+            dst_value = nstl::min(crop_high, nstl::max(crop_low, dst_value));
+            dst_value = dst_value * input_scale + input_shift;
+
+            if (do_rounding) {
+                dst_value = roundf(dst_value);
+            }
+
+            if (do_dequantization) {
+                float output_scale = quant.output_scale_data->scales_[quant.output_scale_data->count_ == 1 ? 0 : index_c];
+                float output_shift = quant.output_shift_data->shifts_[quant.output_shift_data->count_ == 1 ? 0 : index_c];
+                dst_value = dst_value * output_scale + output_shift;
+            }
+        }
+    }
+}
+
+bool MKLDNNNormalizeNode::created() const {
+    return getType() == Normalize;
+}
+
+REG_MKLDNN_PRIM_FOR(MKLDNNNormalizeNode, Normalize);
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_normalize_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_normalize_node.h
new file mode 100644
index 0000000..e11c806
--- /dev/null
@@ -0,0 +1,121 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "ref_eltwise.hpp"
+#include "ref_depthwise.hpp"
+
+using namespace InferenceEngine;
+
+namespace MKLDNNPlugin {
+
+struct jit_normalize_config_params {
+    bool is_nchw;
+    bool is_nhwc;
+    bool is_blk;
+    bool across_spatial;
+    bool channel_shared;
+    mkldnn::memory::data_type src_dt;
+    mkldnn::memory::data_type dst_dt;
+    int src_data_size;
+    int dst_data_size;
+    size_t n, c, h, w;
+};
+
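+// Arguments passed to the JIT kernels. The callers use two conventions for
+// scaling: either fused_factor points to weight * (1 / norm) values already
+// folded together (a single broadcast value or a per-channel array), or
+// weights points to the raw per-channel scales and modulo to the broadcast
+// 1 / norm value.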
+struct jit_normalize_call_args {
+    const void *src;
+    void *dst;
+    const float *weights;
+    const float *modulo;
+    const float *fused_factor;
+    size_t src_stride;
+    size_t dst_stride;
+    size_t work_amount;
+    size_t oc_off;
+};
+
+struct jit_uni_normalize_modulo_kernel {
+    void (*ker_)(const jit_normalize_call_args *);
+
+    void operator()(const jit_normalize_call_args *args) {
+        assert(ker_);
+        ker_(args);
+    }
+
+    jit_uni_normalize_modulo_kernel(jit_normalize_config_params jcp) : ker_(nullptr), jcp_(jcp) {}
+    virtual ~jit_uni_normalize_modulo_kernel() {}
+
+    jit_normalize_config_params jcp_;
+};
+
+struct jit_uni_normalize_kernel {
+    void (*ker_)(const jit_normalize_call_args *);
+
+    void operator()(const jit_normalize_call_args *args) {
+        assert(ker_);
+        ker_(args);
+    }
+
+    explicit jit_uni_normalize_kernel(jit_normalize_config_params jcp, const mkldnn_primitive_attr &attr) : ker_(nullptr), jcp_(jcp), attr_(attr) {}
+    virtual ~jit_uni_normalize_kernel() {}
+
+    jit_normalize_config_params jcp_;
+    const mkldnn_primitive_attr &attr_;
+};
+
+class MKLDNNNormalizeNode : public MKLDNNNode {
+public:
+    MKLDNNNormalizeNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng, int socket);
+    ~MKLDNNNormalizeNode() override = default;
+
+    void getSupportedDescriptors() override;
+    void initSupportedPrimitiveDescriptors() override;
+    void createPrimitive() override;
+    bool created() const override;
+    void execute(mkldnn::stream strm) override;
+    bool canBeInPlace() const override {
+        return false;
+    }
+
+private:
+    template <typename in_data_t, typename out_data_t>
+    void normalize_nchw(const in_data_t* src_data, out_data_t* dst_data, const InferenceEngine::SizeVector& dims);
+
+    template <typename in_data_t, typename out_data_t>
+    void normalize_nchw_ref(const in_data_t* src_data, out_data_t* dst_data, const InferenceEngine::SizeVector& dims);
+
+    template <typename in_data_t, typename out_data_t>
+    void normalize_nhwc(const in_data_t* src_data, out_data_t* dst_data, const InferenceEngine::SizeVector& dims);
+
+    template <typename in_data_t, typename out_data_t>
+    void normalize_blk(const in_data_t* src_data, out_data_t* dst_data, const InferenceEngine::SizeVector& dims);
+
+    void setPostOps(mkldnn::primitive_attr &attr, bool initWeights = false);
+    inline void apply_post_ops_scalar(float &dst_value, int index_c);
+
+    template <typename in_data_t, typename out_data_t>
+    void normalize_function(const in_data_t* src_data, out_data_t* dst_data, const InferenceEngine::SizeVector& dims);
+
+    MemoryBlob::Ptr weights_blob;
+    bool across_spatial = true;
+    bool channel_shared = true;
+    float eps = 1e-10f;
+
+    InferenceEngine::Precision input_prec, output_prec, weights_prec;
+    size_t src_data_size, dst_data_size, weights_data_size;
+
+    mkldnn::primitive_attr attr;
+
+    std::vector<MKLDNNMemoryPtr> PostOpsIntBlobMemory;
+
+    std::shared_ptr<jit_uni_normalize_modulo_kernel> normalize_modulo_kernel;
+    std::shared_ptr<jit_uni_normalize_kernel> normalize_kernel;
+
+    std::vector<std::shared_ptr<mkldnn::impl::cpu::ref_eltwise_scalar_fwd_t>> eltwise_injectors_ref;
+    std::vector<std::shared_ptr<mkldnn::impl::cpu::ref_depthwise_scalar_fwd_t>> depthwise_injectors_ref;
+};
+
+}  // namespace MKLDNNPlugin
+
index 2ae0dc5..5c222a0 100644
@@ -373,41 +373,22 @@ void MKLDNNQuantizeNode::createPrimitive() {
     } else if (levels != 2) {
         auto prim_desc = createPrimitiveDescriptor<quantization_forward::primitive_desc, quantization_forward::desc>();
 
-        if (cropLow.size() == 1)
-            cropLow.resize(axisSize, cropLow[0]);
-        auto cropLowDataMem = std::make_shared<MKLDNNMemory>(getEngine());
-        cropLowDataMem->Create(weightsDataDesc, &cropLow[0]);
-        internalBlobMemory.push_back(cropLowDataMem);
-
-        if (cropHigh.size() == 1)
-            cropHigh.resize(axisSize, cropHigh[0]);
-        auto cropHighDataMem = std::make_shared<MKLDNNMemory>(getEngine());
-        cropHighDataMem->Create(weightsDataDesc, &cropHigh[0]);
-        internalBlobMemory.push_back(cropHighDataMem);
-
-        if (inputScale.size() == 1)
-            inputScale.resize(axisSize, inputScale[0]);
-        auto inputScaleDataMem = std::make_shared<MKLDNNMemory>(getEngine());
-        inputScaleDataMem->Create(weightsDataDesc, &inputScale[0]);
-        internalBlobMemory.push_back(inputScaleDataMem);
-
-        if (inputShift.size() == 1)
-            inputShift.resize(axisSize, inputShift[0]);
-        auto inputShiftDataMem = std::make_shared<MKLDNNMemory>(getEngine());
-        inputShiftDataMem->Create(weightsDataDesc, &inputShift[0]);
-        internalBlobMemory.push_back(inputShiftDataMem);
-
-        if (outputScale.size() == 1)
-            outputScale.resize(axisSize, outputScale[0]);
-        auto outputScaleDataMem = std::make_shared<MKLDNNMemory>(getEngine());
-        outputScaleDataMem->Create(weightsDataDesc, &outputScale[0]);
-        internalBlobMemory.push_back(outputScaleDataMem);
-
-        if (outputShift.size() == 1)
-            outputShift.resize(axisSize, outputShift[0]);
-        auto outputShiftDataMem = std::make_shared<MKLDNNMemory>(getEngine());
-        outputShiftDataMem->Create(weightsDataDesc, &outputShift[0]);
-        internalBlobMemory.push_back(outputShiftDataMem);
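+        // Broadcasts a per-tensor value to the padded axis size (or pads a
+        // per-channel vector) and wraps it in an internal MKLDNN memory blob.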
+        auto pushInternalBlob = [&](std::vector<float>& data) {
+            if (data.size() == 1)
+                data.resize(axisPaddedSize, data[0]);
+            else
+                data.resize(axisPaddedSize);
+            auto memory = std::make_shared<MKLDNNMemory>(getEngine());
+            memory->Create(weightsDataDesc, &data[0]);
+            internalBlobMemory.push_back(memory);
+        };
+
+        pushInternalBlob(cropLow);
+        pushInternalBlob(cropHigh);
+        pushInternalBlob(inputScale);
+        pushInternalBlob(inputShift);
+        pushInternalBlob(outputScale);
+        pushInternalBlob(outputShift);
 
         prim.reset(new quantization_forward(prim_desc, getParentEdgeAt(0)->getMemory().GetPrimitive(),
                                             internalBlobMemory[0]->GetPrimitive(),
index 7e8b94d..1ca7e28 100644
@@ -38,11 +38,20 @@ public:
     const std::vector<float>& getCropHigh() const { return cropHigh; }
     const std::vector<float>& getInputScale() const { return inputScale; }
     const std::vector<float>& getInputShift() const { return inputShift; }
+    const std::vector<float>& getOutputScale() const { return outputScale; }
+    const std::vector<float>& getOutputShift() const { return outputShift; }
 
     void setCropLow(std::vector<float> newCropLow) { cropLow = std::move(newCropLow); }
     void setCropHigh(std::vector<float> newCropHigh) { cropHigh = std::move(newCropHigh); }
     void setInputScale(std::vector<float> newInputScale) { inputScale = std::move(newInputScale); }
     void setInputShift(std::vector<float> newInputShift) { inputShift = std::move(newInputShift); }
+    void setOutputScale(std::vector<float> newOutputScale) { outputScale = std::move(newOutputScale); }
+    void setOutputShift(std::vector<float> newOutputShift) { outputShift = std::move(newOutputShift); }
+
+    const bool isInputLowBroadcast() const { return isInputLowBroadcasted; }
+    const bool isInputHighBroadcast() const { return isInputHighBroadcasted; }
+    const bool isOutputLowBroadcast() const { return isOutputLowBroadcasted; }
+    const bool isOutputHighBroadcast() const { return isOutputHighBroadcasted; }
 
     InferenceEngine::Precision getInputPrecision() const { return inputPrecision; }
     InferenceEngine::Precision getOutputPrecision() const { return outputPrecision; }
index 764aae8..9d7c188 100644
@@ -148,6 +148,9 @@ struct jit_uni_resample_nearest_kernel_f32 : public jit_uni_resample_nearest_ker
 
         this->postamble();
 
+        for (auto& inj : eltwise_injectors)
+            inj->prepare_table();
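+        // (the injectors' lookup tables must be emitted before getCode() finalizes the kernel)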
+
         ker_ = (decltype(ker_)) this->getCode();
     }
 
@@ -587,16 +590,16 @@ void MKLDNNResampleNode::NearestNeighbor_PLN(const float *in_ptr_, float *out_pt
                                           float fx, float fy, float fz, int OD, int OH, int OW) {
     std::vector<int> index_buffer(OD * OH * OW);
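+    // Nearest-neighbor source index now uses the asymmetric mapping
+    // floor(out_coord * scale) instead of round(out_coord * scale + scale/2 - 0.5).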
     for (int oz = 0; oz < OD; oz++) {
-        float iz = oz * fz + fz / 2.0f - 0.5f;
-        int iz_offset = static_cast<int>(round(iz)) * IH * IW;
+        float iz = oz * fz;
+        int iz_offset = static_cast<int>(std::floor(iz)) * IH * IW;
         int oz_offset = oz * OH * OW;
         for (int oy = 0; oy < OH; oy++) {
-            float iy = oy * fy + fy / 2.0f - 0.5f;
-            int iy_offset = static_cast<int>(round(iy)) * IW + iz_offset;
+            float iy = oy * fy;
+            int iy_offset = static_cast<int>(std::floor(iy)) * IW + iz_offset;
             int oy_offset = oy * OW + oz_offset;
             for (int ox = 0; ox < OW; ox++) {
-                float ix = ox * fx + fx / 2.0f - 0.5f;
-                int ix_index = static_cast<int>(round(ix)) + iy_offset;
+                float ix = ox * fx;
+                int ix_index = static_cast<int>(std::floor(ix)) + iy_offset;
                 index_buffer[oy_offset + ox] = ix_index;
             }
         }
@@ -642,16 +645,16 @@ void MKLDNNResampleNode::NearestNeighbor_BLK(const in_data_t *in_ptr_, out_data_
     std::vector<int> index_h(OH);
     std::vector<int> index_w(OW);
     for (int oz = 0; oz < OD; oz++) {
-        float iz = oz * fz + fz / 2.0f - 0.5f;
-        index_d[oz] = static_cast<int>(round(iz));
+        float iz = oz * fz;
+        index_d[oz] = static_cast<int>(std::floor(iz));
     }
     for (int oy = 0; oy < OH; oy++) {
-        float iy = oy * fy + fy / 2.0f - 0.5f;
-        index_h[oy] = static_cast<int>(round(iy));
+        float iy = oy * fy;
+        index_h[oy] = static_cast<int>(std::floor(iy));
     }
     for (int ox = 0; ox < OW; ox++) {
-        float ix = ox * fx + fx / 2.0f - 0.5f;
-        index_w[ox] = static_cast<int>(round(ix));
+        float ix = ox * fx;
+        index_w[ox] = static_cast<int>(std::floor(ix));
     }
 
     Layout layout = getParentEdgeAt(0)->getDesc().getLayout();
index 289658d..df0210f 100644
@@ -27,14 +27,15 @@ void MKLDNNReshapeNode::initSupportedPrimitiveDescriptors() {
         return;
 
     InferenceEngine::Precision precision = getCnnLayer()->insData[0].lock()->getPrecision();
-    if (precision != InferenceEngine::Precision::FP32)
-        precision = InferenceEngine::Precision::FP32;
     auto inputDataType = MKLDNNExtensionUtils::IEPrecisionToDataType(precision);
     precision = getCnnLayer()->outData[0]->getPrecision();
-    if (precision != InferenceEngine::Precision::FP32)
-        precision = InferenceEngine::Precision::FP32;
     auto outputDataType = MKLDNNExtensionUtils::IEPrecisionToDataType(precision);
 
+    // Current reshape implementation is simple memory reinterpret,
+    // same precision on input and output is required
+    if (inputDataType != outputDataType)
+        inputDataType = outputDataType;
+
     auto& inDims = getParentEdgeAt(0)->getDims();
     auto& outDims = getChildEdgeAt(0)->getDims();
     memory::format outFormat = MKLDNNMemory::GetPlainFormat(outDims);
diff --git a/inference-engine/src/mkldnn_plugin/nodes/normalize.cpp b/inference-engine/src/mkldnn_plugin/nodes/normalize.cpp
deleted file mode 100644
index 5ef5eef..0000000
+++ /dev/null
@@ -1,541 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "base.hpp"
-#include "list.hpp"
-
-#include "jit_generator.hpp"
-#include <algorithm>
-#include <cmath>
-#include <ie_parallel.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-#include "bf16transformer.h"
-
-using namespace mkldnn::impl::cpu;
-using namespace mkldnn::impl::utils;
-
-namespace InferenceEngine {
-namespace Extensions {
-namespace Cpu {
-
-#define GET_OFF(field) offsetof(jit_args_normalize, field)
-
-struct jit_args_normalize {
-    float *src;
-    float *dst;
-    float *weights;
-    float *eps;
-    float *sqr_sums;
-    float *sqrt_sum;
-    size_t stride;
-    size_t work_amount;
-};
-//////////////////////////////////////////////////////////////////////////////
-struct jit_uni_normalize_per_spatial_kernel {
-    void (*ker_)(const jit_args_normalize *);
-
-    void operator()(const jit_args_normalize *args) {
-        assert(ker_);
-        ker_(args);
-    }
-
-    explicit jit_uni_normalize_per_spatial_kernel(bool channel_shared) : ker_(nullptr) {
-        is_channel_shared = channel_shared;
-    }
-    virtual ~jit_uni_normalize_per_spatial_kernel() {}
-    bool is_channel_shared = true;
-};
-
-struct jit_uni_normalize_across_spatial_kernel {
-    void (*ker_)(const jit_args_normalize *);
-
-    void operator()(const jit_args_normalize *args) {
-        assert(ker_);
-        ker_(args);
-    }
-
-    explicit jit_uni_normalize_across_spatial_kernel(bool channel_shared) : ker_(nullptr) {
-        is_channel_shared = channel_shared;
-    }
-    virtual ~jit_uni_normalize_across_spatial_kernel() {}
-    bool is_channel_shared = true;
-};
-
-struct jit_uni_sqr_sum_kernel {
-    void (*ker_)(const jit_args_normalize *);
-
-    void operator()(const jit_args_normalize *args) {
-        assert(ker_);
-        ker_(args);
-    }
-
-    jit_uni_sqr_sum_kernel() : ker_(nullptr) {}
-    virtual ~jit_uni_sqr_sum_kernel() {}
-};
-
-/////////////////////////////////////////////////////////////////////////////
-template <cpu_isa_t isa>
-struct jit_uni_normalize_across_spatial_kernel_f32
-        : public jit_uni_normalize_across_spatial_kernel,
-          public jit_generator {
-    DECLARE_CPU_JIT_AUX_FUNCTIONS(jit_uni_normalize_across_spatial_kernel_f32)
-
-    explicit jit_uni_normalize_across_spatial_kernel_f32(bool channel_shared)
-        : jit_uni_normalize_across_spatial_kernel(channel_shared), jit_generator() {
-        this->preamble();
-        mov(reg_src, ptr[reg_params + GET_OFF(src)]);
-        mov(reg_dst, ptr[reg_params + GET_OFF(dst)]);
-        mov(reg_sqrt_sum, ptr[reg_params + GET_OFF(sqrt_sum)]);
-        mov(reg_weights, ptr[reg_params + GET_OFF(weights)]);
-        mov(reg_work_amount, ptr[reg_params + GET_OFF(work_amount)]);
-        mov(reg_stride, ptr[reg_params + GET_OFF(stride)]);
-
-        Xbyak::Label div_scale_loop_label;
-        Xbyak::Label div_scale_loop_end_label;
-        uni_vbroadcastss(vmm_norm, ptr[reg_sqrt_sum]);
-
-        uni_vbroadcastss(vmm_scale, ptr[reg_weights]);
-
-        L(div_scale_loop_label);
-        {
-            cmp(reg_work_amount, 0);
-            jle(div_scale_loop_end_label, T_NEAR);
-
-            uni_vmovups(vmm_val, ptr[reg_src]);
-            uni_vdivps(vmm_val, vmm_val, vmm_norm);
-            uni_vmulps(vmm_val, vmm_val, vmm_scale);
-            uni_vmovups(ptr[reg_dst], vmm_val);
-
-            add(reg_src, reg_stride);
-            add(reg_dst, reg_stride);
-            sub(reg_work_amount, 1);
-
-            jmp(div_scale_loop_label, T_NEAR);
-        }
-        L(div_scale_loop_end_label);
-        this->postamble();
-        ker_ = (decltype(ker_)) this->getCode();
-    }
-
-private:
-    using Vmm = typename conditional3<isa == sse42, Xbyak::Xmm, isa == avx2,
-            Xbyak::Ymm, Xbyak::Zmm>::type;
-    size_t vlen = cpu_isa_traits<isa>::vlen;
-
-    Xbyak::Reg64 reg_src = r8;
-    Xbyak::Reg64 reg_dst = r9;
-    Xbyak::Reg64 reg_sqrt_sum = r10;
-    Xbyak::Reg64 reg_weights = r11;
-    Xbyak::Reg64 reg_work_amount = r12;
-    Xbyak::Reg64 reg_stride = r13;
-    Xbyak::Reg64 reg_params = abi_param1;
-
-    Vmm vmm_val = Vmm(0);
-    Vmm vmm_scale = Vmm(1);
-    Vmm vmm_norm = Vmm(2);
-};
-
-template <cpu_isa_t isa>
-struct jit_uni_sqr_sum_kernel_f32 : public jit_uni_sqr_sum_kernel,
-                                    public jit_generator {
-    DECLARE_CPU_JIT_AUX_FUNCTIONS(jit_uni_sqr_sum_kernel_f32)
-
-    jit_uni_sqr_sum_kernel_f32() : jit_uni_sqr_sum_kernel(), jit_generator() {
-        this->preamble();
-        mov(reg_src, ptr[reg_params + GET_OFF(src)]);
-        mov(reg_sqr_sums, ptr[reg_params + GET_OFF(sqr_sums)]);
-        mov(reg_work_amount, ptr[reg_params + GET_OFF(work_amount)]);
-        mov(reg_stride, ptr[reg_params + GET_OFF(stride)]);
-
-        Xbyak::Label sqr_sum_loop_label;
-        Xbyak::Label sqr_sum_loop_end_label;
-
-        uni_vpxor(vmm_sqr_sum, vmm_sqr_sum, vmm_sqr_sum);
-        L(sqr_sum_loop_label);
-        {
-            cmp(reg_work_amount, 0);
-            jle(sqr_sum_loop_end_label, T_NEAR);
-
-            uni_vmovups(vmm_val, ptr[reg_src]);
-            uni_vfmadd231ps(vmm_sqr_sum, vmm_val, vmm_val);
-
-            add(reg_src, reg_stride);
-            sub(reg_work_amount, 1);
-
-            jmp(sqr_sum_loop_label, T_NEAR);
-        }
-        L(sqr_sum_loop_end_label);
-        // hsum+store
-        if (isa == sse42) {
-            hsum_store(vmm_sqr_sum);
-        } else if (isa == avx2) {
-            Xbyak::Ymm ymm_sqr_sum = Xbyak::Ymm(vmm_sqr_sum.getIdx());
-            vextractf128(xmm_aux1, ymm_sqr_sum, 0);
-            vextractf128(xmm_aux2, ymm_sqr_sum, 1);
-            addps(xmm_aux1, xmm_aux2);
-            hsum_store(xmm_aux1);
-        } else {
-            Xbyak::Zmm zmm_sqr_sum = Xbyak::Zmm(vmm_sqr_sum.getIdx());
-            vextractf32x4(xmm_aux1, zmm_sqr_sum, 0);
-            vextractf32x4(xmm_aux2, zmm_sqr_sum, 1);
-            addps(xmm_aux1, xmm_aux2);
-            vextractf32x4(xmm_aux2, zmm_sqr_sum, 2);
-            vextractf32x4(xmm_aux3, zmm_sqr_sum, 3);
-            addps(xmm_aux2, xmm_aux3);
-            addps(xmm_aux1, xmm_aux2);
-            hsum_store(xmm_aux1);
-        }
-        this->postamble();
-        ker_ = (decltype(ker_)) this->getCode();
-    }
-
-private:
-    using Vmm = typename conditional3<isa == sse42, Xbyak::Xmm, isa == avx2,
-            Xbyak::Ymm, Xbyak::Zmm>::type;
-    size_t vlen = cpu_isa_traits<isa>::vlen;
-
-    Xbyak::Reg64 reg_src = r8;
-    Xbyak::Reg64 reg_work_amount = r9;
-    Xbyak::Reg64 reg_stride = r10;
-    Xbyak::Reg64 reg_sqr_sums = rbp;
-    Xbyak::Reg64 reg_params = abi_param1;
-
-    Vmm vmm_val = Vmm(0);
-    Vmm vmm_sqr_sum = Vmm(1);
-    Xbyak::Xmm xmm_aux1 = Xbyak::Xmm(2);
-    Xbyak::Xmm xmm_aux2 = Xbyak::Xmm(3);
-    Xbyak::Xmm xmm_aux3 = Xbyak::Xmm(4);
-
-    void hsum_store(Xbyak::Xmm xmm_sqr_sum) {
-        movshdup(xmm_aux3, xmm_sqr_sum);  //  sqrt_sum:1,2,3,4; aux3:2,2,4,4
-        addps(xmm_sqr_sum, xmm_aux3);     //  sqrt_sum:1+2,2+2,3+4,4+4
-        movhlps(xmm_aux3, xmm_sqr_sum);   //  aux3:3+4,4+4,4,4
-        addps(xmm_sqr_sum, xmm_aux3);     //  sqrt_sum:1+2+3+4,...
-        movss(ptr[reg_sqr_sums], xmm_sqr_sum);
-    }
-};
-
-template <cpu_isa_t isa>
-struct jit_uni_normalize_per_spatial_kernel_f32
-        : public jit_uni_normalize_per_spatial_kernel,
-          public jit_generator {
-    DECLARE_CPU_JIT_AUX_FUNCTIONS(jit_uni_normalize_per_spatial_kernel_f32)
-
-    explicit jit_uni_normalize_per_spatial_kernel_f32(bool channel_shared)
-        : jit_uni_normalize_per_spatial_kernel(channel_shared), jit_generator() {
-        this->preamble();
-
-        mov(reg_src, ptr[reg_params + GET_OFF(src)]);
-        mov(reg_dst, ptr[reg_params + GET_OFF(dst)]);
-        mov(reg_weights, ptr[reg_params + GET_OFF(weights)]);
-        mov(reg_eps, ptr[reg_params + GET_OFF(eps)]);
-        mov(reg_stride, ptr[reg_params + GET_OFF(stride)]);
-        mov(reg_work_amount, ptr[reg_params + GET_OFF(work_amount)]);
-
-        Xbyak::Label norm2_loop_label;
-        Xbyak::Label norm2_loop_end_label;
-        Xbyak::Label div_loop_label;
-        Xbyak::Label div_loop_end_label;
-
-        mov(aux_reg_work_amount, reg_work_amount);
-        mov(aux_reg_src, reg_src);
-        uni_vpxor(vmm_sqrt_sum, vmm_sqrt_sum, vmm_sqrt_sum);
-        uni_vbroadcastss(vmm_eps, ptr[reg_eps]);
-        uni_vaddps(vmm_sqrt_sum, vmm_sqrt_sum, vmm_eps);
-
-        L(norm2_loop_label);
-        {
-            cmp(aux_reg_work_amount, 0);
-            jle(norm2_loop_end_label, T_NEAR);
-
-            uni_vmovups(vmm_val, ptr[aux_reg_src]);
-            uni_vfmadd231ps(vmm_sqrt_sum, vmm_val, vmm_val);
-
-            add(aux_reg_src, reg_stride);
-            sub(aux_reg_work_amount, 1);
-
-            jmp(norm2_loop_label, T_NEAR);
-        }
-
-        L(norm2_loop_end_label);
-
-        uni_vsqrtps(vmm_sqrt_sum, vmm_sqrt_sum);
-
-        mov(aux_reg_work_amount, reg_work_amount);
-        mov(aux_reg_src, reg_src);
-        if (is_channel_shared) {
-            uni_vbroadcastss(vmm_scale, ptr[reg_weights]);
-        }
-        L(div_loop_label);
-        {
-            cmp(aux_reg_work_amount, 0);
-            jle(div_loop_end_label, T_NEAR);
-
-            uni_vmovups(vmm_val, ptr[aux_reg_src]);
-
-            uni_vdivps(vmm_val, vmm_val, vmm_sqrt_sum);
-
-            if (!is_channel_shared) {
-                uni_vbroadcastss(vmm_scale, ptr[reg_weights]);
-                add(reg_weights, 1*sizeof(float));
-            }
-            uni_vmulps(vmm_val, vmm_val, vmm_scale);
-
-            uni_vmovups(ptr[reg_dst], vmm_val);
-
-            add(aux_reg_src, reg_stride);
-            add(reg_dst, reg_stride);
-            sub(aux_reg_work_amount, 1);
-
-            jmp(div_loop_label, T_NEAR);
-        }
-        L(div_loop_end_label);
-
-        this->postamble();
-
-        ker_ = (decltype(ker_)) this->getCode();
-    }
-
-private:
-    using Vmm = typename conditional3<isa == sse42, Xbyak::Xmm, isa == avx2,
-            Xbyak::Ymm, Xbyak::Zmm>::type;
-    size_t vlen = cpu_isa_traits<isa>::vlen;
-
-    Xbyak::Reg64 reg_src = r8;
-    Xbyak::Reg64 aux_reg_src = r9;
-    Xbyak::Reg64 reg_dst = r10;
-    Xbyak::Reg64 reg_weights = r11;
-    Xbyak::Reg64 reg_work_amount = r12;
-    Xbyak::Reg64 aux_reg_work_amount = r13;
-    Xbyak::Reg64 reg_stride = r14;
-    Xbyak::Reg64 reg_eps = r15;
-    Xbyak::Reg64 reg_params = abi_param1;
-
-    Vmm vmm_val = Vmm(0);
-    Vmm vmm_sqrt_sum = Vmm(1);
-    Vmm vmm_scale = Vmm(2);
-    Vmm vmm_eps = Vmm(3);
-};
-
-/////////////////////////////////////////////////////////////////////////////
-class NormalizeImpl : public ExtLayerBase {
-public:
-    explicit NormalizeImpl(const CNNLayer* layer) {
-        try {
-            if (layer->insData.size() != 1 || layer->outData.size() != 1)
-                THROW_IE_EXCEPTION << "Incorrect number of input/output edges!";
-
-            if (layer->insData[0].lock()->getTensorDesc().getDims().size() < 2 ||
-                layer->insData[0].lock()->getTensorDesc().getDims().size() > 4) {
-                THROW_IE_EXCEPTION << "Normalize supports from 2D to 4D blobs!";
-            }
-
-            MemoryBlob::Ptr tweights = as<MemoryBlob>(layer->blobs.at("weights"));
-            if (!tweights) {
-                THROW_IE_EXCEPTION << layer->name << "Weights are not initialized or cannot be casted to MemoryBlob for layer Normalize with name '"
-                    << layer->name << "'";
-            }
-
-            if (tweights->getTensorDesc().getPrecision() == Precision::FP32) {
-                weights = tweights;
-            } else if (tweights->getTensorDesc().getPrecision() == Precision::BF16) {
-                MKLDNNPlugin::BF16Transformer transformer;
-                weights = transformer.convertBF16ToFloat(tweights);
-            } else {
-                // Unknown non supported data type, return an error
-                THROW_IE_EXCEPTION << layer->name << "Weights for layer Normalize wiht name '" << layer->name <<
-                    "' has unsupported data type " << tweights->getTensorDesc().getPrecision();
-            }
-            across_spatial = layer->GetParamAsBool("across_spatial", false);
-            channel_shared = layer->GetParamAsBool("channel_shared", false);
-            eps = layer->GetParamAsFloat("eps");
-
-            block_size = 1;
-            if (across_spatial) {
-                if (mayiuse(avx512_common)) {
-                    normalize_across_spatial_kernel.reset(
-                            new jit_uni_normalize_across_spatial_kernel_f32<avx512_common>(channel_shared));
-                    sqr_sum_kernel.reset(
-                            new jit_uni_sqr_sum_kernel_f32<avx512_common>());
-                    block_size = 16;
-                } else if (mayiuse(avx2)) {
-                    normalize_across_spatial_kernel.reset(
-                            new jit_uni_normalize_across_spatial_kernel_f32<avx2>(channel_shared));
-                    sqr_sum_kernel.reset(
-                            new jit_uni_sqr_sum_kernel_f32<avx2>());
-                    block_size = 8;
-                } else if (mayiuse(sse42)) {
-                    normalize_across_spatial_kernel.reset(
-                            new jit_uni_normalize_across_spatial_kernel_f32<sse42>(channel_shared));
-                    sqr_sum_kernel.reset(
-                            new jit_uni_sqr_sum_kernel_f32<sse42>());
-                    block_size = 4;
-                }
-            } else {
-                if (mayiuse(avx512_common)) {
-                    normalize_per_spatial_kernel.reset(
-                            new jit_uni_normalize_per_spatial_kernel_f32<avx512_common>(channel_shared));
-                    block_size = 16;
-                } else if (mayiuse(avx2)) {
-                    normalize_per_spatial_kernel.reset(
-                            new jit_uni_normalize_per_spatial_kernel_f32<avx2>(channel_shared));
-                    block_size = 8;
-                } else if (mayiuse(sse42)) {
-                    normalize_per_spatial_kernel.reset(
-                            new jit_uni_normalize_per_spatial_kernel_f32<sse42>(channel_shared));
-                    block_size = 4;
-                }
-            }
-
-            addConfig(layer, { { ConfLayout::PLN, false, 0 } }, { { ConfLayout::PLN, false, 0 } }, true);
-        } catch (InferenceEngine::details::InferenceEngineException &ex) {
-            errorMsg = ex.what();
-        }
-    }
-
-    StatusCode execute(std::vector<Blob::Ptr> &inputs,
-            std::vector<Blob::Ptr> &outputs,
-            ResponseDesc *resp) noexcept override {
-        auto *src_data = inputs[0]->cbuffer().as<float *>();
-        auto *dst_data = outputs[0]->buffer().as<float *>();
-        float *scl = weights->buffer().as<float *>();
-
-        int W = (inputs[0]->getTensorDesc().getDims().size() > 3)
-                ? inputs[0]->getTensorDesc().getDims()[3]
-                : 1;
-        int H = (inputs[0]->getTensorDesc().getDims().size() > 2)
-                ? inputs[0]->getTensorDesc().getDims()[2]
-                : 1;
-        int C = (inputs[0]->getTensorDesc().getDims().size() > 1)
-                ? inputs[0]->getTensorDesc().getDims()[1]
-                : 1;
-        int B = (inputs[0]->getTensorDesc().getDims().size() > 0)
-                ? inputs[0]->getTensorDesc().getDims()[0]
-                : 1;
-
-        for (int b = 0; b < B; b++) {
-            float *src_data_b = src_data + b * C * H * W;
-            float *dst_data_b = dst_data + b * C * H * W;
-            if (across_spatial) {
-                int tail_start_sqr_sum = 0;
-                float addition_identity_value = 0;
-                float sqrt_sum_kernel = 0;
-                float sqrt_sum_tail = 0;
-                if (sqr_sum_kernel) {
-                    size_t advance = (H * W / block_size) * block_size;
-                    sqrt_sum_kernel = parallel_sum(C, addition_identity_value, [&](int ic) -> float {
-                        float sqr_sum_value = 0;
-                        auto arg = jit_args_normalize();
-                        arg.src = src_data_b + ic * advance;
-                        arg.sqr_sums = static_cast<float*>(&sqr_sum_value);
-                        arg.stride = block_size * sizeof(float);
-                        arg.work_amount = H * W / block_size;
-                        (*sqr_sum_kernel)(&arg);
-                        return sqr_sum_value;
-                    });
-                    tail_start_sqr_sum = advance * C;
-                }
-                //  all or rest for sqr_sum
-                int tail_num_sqr_sum = H * W * C - tail_start_sqr_sum;
-                sqrt_sum_tail = parallel_sum(tail_num_sqr_sum, addition_identity_value, [&](int in) -> float {
-                    return src_data_b[tail_start_sqr_sum + in] * src_data_b[tail_start_sqr_sum + in];
-                });
-                float sqrt_sum = sqrt_sum_kernel + sqrt_sum_tail + eps;
-                sqrt_sum = std::sqrt(sqrt_sum);
-
-                int tail_start_across_spatial = 0;
-                if (normalize_across_spatial_kernel) {
-                    tail_start_across_spatial = (H * W / block_size) * block_size;
-                    parallel_for(C, [&](int ic) {  //  parallel for each channel, element*scl/sqrt_sum
-                        auto arg = jit_args_normalize();
-                        arg.src = src_data_b + ic * H * W;
-                        arg.dst = dst_data_b + ic * H * W;
-                        arg.weights = channel_shared ? scl : &scl[ic];
-                        arg.sqrt_sum = &sqrt_sum;
-                        arg.stride = block_size*sizeof(float);
-                        arg.work_amount = H * W / block_size;
-
-                        (*normalize_across_spatial_kernel)(&arg);
-                        //  rest for this channel
-                        for (int tail = tail_start_across_spatial; tail < H * W; tail++) {
-                            dst_data_b[ic * H * W + tail] = src_data_b[ic * H * W + tail] / sqrt_sum;
-                            dst_data_b[ic * H * W + tail] = channel_shared
-                                    ? dst_data_b[ic * H * W + tail] * scl[0]
-                                    : dst_data_b[ic * H * W + tail] * scl[ic];
-                        }
-                    });
-                } else {
-                    for (int c = 0; c < C; c++) {
-                        int hw = 0;
-                        float s = channel_shared ? scl[0] : scl[c];
-                        for (; hw < H * W; hw++) {
-                            dst_data_b[c * H * W + hw]
-                                    = (src_data_b[c * H * W + hw] / sqrt_sum) * s;
-                        }
-                    }
-                }
-            } else {
-                int tail_start_per_spatial = 0;
-                if (normalize_per_spatial_kernel) {
-                    int blocks_num = H * W / block_size;
-                    parallel_for(blocks_num, [&](int ib) {
-                        auto arg = jit_args_normalize();
-
-                        arg.src = src_data_b + ib * block_size;
-                        arg.dst = dst_data_b + ib * block_size;
-                        arg.weights = scl;
-                        arg.eps = &eps;
-                        arg.stride = static_cast<size_t>((size_t)(H) * W * sizeof(float));
-                        arg.work_amount = static_cast<size_t>(C);
-
-                        (*normalize_per_spatial_kernel)(&arg);
-                    });
-                    tail_start_per_spatial = (H * W / block_size) * block_size;
-                }
-                parallel_for(H * W - tail_start_per_spatial, [&](int i) {
-                    int offset = i + tail_start_per_spatial;
-
-                    float norm = eps;
-                    for (int c = 0; c < C; c++) {
-                        const float *src_data_b_c = src_data_b + c * W * H;
-                        norm += src_data_b_c[offset] * src_data_b_c[offset];
-                    }
-
-                    norm = std::sqrt(norm);
-
-                    for (int c = 0; c < C; c++) {
-                        const float *src_data_b_c = src_data_b + c * W * H;
-                        float *dst_data_b_c = dst_data_b + c * W * H;
-
-                        dst_data_b_c[offset] = channel_shared
-                                ? (src_data_b_c[offset] / norm * scl[0])
-                                : (src_data_b_c[offset] / norm * scl[c]);
-                    }
-                });
-            }
-        }
-
-        return OK;
-    }
-
-private:
-    int block_size;
-    std::shared_ptr<jit_uni_normalize_per_spatial_kernel> normalize_per_spatial_kernel;
-    std::shared_ptr<jit_uni_normalize_across_spatial_kernel> normalize_across_spatial_kernel;
-    std::shared_ptr<jit_uni_sqr_sum_kernel> sqr_sum_kernel;
-
-    MemoryBlob::Ptr weights;
-    bool across_spatial = true;
-    bool channel_shared = true;
-    float eps = 1e-10f;
-};
-
-REG_FACTORY_FOR(ImplFactory<NormalizeImpl>, Normalize);
-
-}  // namespace Cpu
-}  // namespace Extensions
-}  // namespace InferenceEngine
index 119fc4c..35c606b 100644
@@ -5,11 +5,8 @@
 #include "list.hpp"
 #include "base.hpp"
 
-#include <cmath>
 #include <string>
 #include <vector>
+#include <numeric>     // std::accumulate
+#include <functional>  // std::multiplies
-#include <cassert>
-#include <algorithm>
 #include "ie_parallel.hpp"
 
 namespace InferenceEngine {
@@ -17,132 +14,212 @@ namespace Extensions {
 namespace Cpu {
 
 class SelectImpl: public ExtLayerBase {
-    enum {condition, then_, else_, numOfInputs};
+    enum { CONDITION, THEN, ELSE, numOfInputs };
+    enum { N, C, D, H, W, numOfDims };
+
+    std::string broadcast;
+    std::vector<size_t> resDims;
+    std::vector<size_t> resOffset;
+    std::vector<size_t> condOffset;
+    std::vector<size_t> thenOffset;
+    std::vector<size_t> elseOffset;
 
 public:
     explicit SelectImpl(const CNNLayer* layer) {
         try {
-            if (numOfInputs != layer->insData.size() || 1 != layer->outData.size()) {
-                THROW_IE_EXCEPTION << layer->name << " Incorrect number of input/output edges!";
-            }
+            if (layer->insData.size() != numOfInputs || layer->outData.size() != 1)
+                THROW_IE_EXCEPTION << "Select layer with name '" << layer->name << "' has incorrect number of input/output edges!";
+
+            broadcast = layer->GetParamAsString("auto_broadcast", "numpy");
+
+            if (layer->insData[THEN].lock()->getTensorDesc().getPrecision() != layer->insData[ELSE].lock()->getTensorDesc().getPrecision())
+                THROW_IE_EXCEPTION << "Select layer with name '" << layer->name << "' has different precisions on 'Then' and 'Else' inputs";
+
+            const auto& conditionPrecision = layer->insData[CONDITION].lock()->getTensorDesc().getPrecision();
+            if (conditionPrecision != Precision::BOOL && conditionPrecision != Precision::I32  && conditionPrecision != Precision::U8)
+                THROW_IE_EXCEPTION << "Select layer with name '" << layer->name << "' has unsupported precision: " << conditionPrecision
+                                                                                                                << " on 'Condition' input";
+
+            const auto& inputPrecisionSize = layer->insData[THEN].lock()->getTensorDesc().getPrecision().size();
+            if (inputPrecisionSize != 1 && inputPrecisionSize != 2 && inputPrecisionSize != 4 && inputPrecisionSize != 8)
+                THROW_IE_EXCEPTION << "Select layer with name '" << layer->name << "' has unsupported precision: " <<
+                                                        layer->insData[THEN].lock()->getTensorDesc().getPrecision() << " on 'Then' and 'Else' inputs";
+
+            const auto &conditionShapes = layer->insData[CONDITION].lock()->getTensorDesc().getDims();
+            const auto &thenShapes = layer->insData[THEN].lock()->getTensorDesc().getDims();
+            const auto &elseShapes = layer->insData[ELSE].lock()->getTensorDesc().getDims();
+            const auto &outputShapes = layer->outData[0]->getTensorDesc().getDims();
+
+            if (broadcast != "none" && broadcast != "numpy")
+                THROW_IE_EXCEPTION << "Select layer with name '" << layer->name << "' has unsupported broadcast type: " << broadcast;
 
-            auto conditionPrecision = layer->insData[condition].lock()->getTensorDesc().getPrecision();
+            if (broadcast == "none" && ((conditionShapes != outputShapes) || (thenShapes != outputShapes) || (elseShapes != outputShapes)))
+                THROW_IE_EXCEPTION << "Select layer with name '" << layer->name << "' and auto_broadcast='none' has input shapes mismatch";
 
-            if (Precision::I32 != conditionPrecision
-                && Precision::FP32 != conditionPrecision
-                && Precision::U8 != conditionPrecision) {
-                THROW_IE_EXCEPTION << layer->name << " Incorrect condition tensor precision: " << conditionPrecision
-                << ". Should be I32, U8 or FP32";
+            if (broadcast == "numpy") {
+                if (outputShapes.size() < conditionShapes.size() || outputShapes.size() < thenShapes.size() || outputShapes.size() < elseShapes.size())
+                    THROW_IE_EXCEPTION << "Select layer with name '" << layer->name << "' and auto_broadcast='numpy' has incompatible input and output shapes";
+
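+                // Right-aligned shape comparison: each trailing input dimension
+                // must match the corresponding output dimension or be 1.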
+                for (int condIt = conditionShapes.size() - 1, outIt = outputShapes.size() - 1; condIt >= 0; condIt--, outIt--)
+                        if (conditionShapes[condIt] != outputShapes[outIt] && conditionShapes[condIt] != 1)
+                            THROW_IE_EXCEPTION << "Select layer with name '" << layer->name
+                                                                        << "' and auto_broadcast='numpy' has incompatible 'Condition' input and output shapes";
+
+                for (int thenIt = thenShapes.size() - 1, outIt = outputShapes.size() - 1; thenIt >= 0; thenIt--, outIt--)
+                        if (thenShapes[thenIt] != outputShapes[outIt] && thenShapes[thenIt] != 1)
+                            THROW_IE_EXCEPTION << "Select layer with name '" << layer->name
+                                                                            << "' and auto_broadcast='numpy' has incompatible 'Then' input and output shapes";
+
+
+                for (int elseIt = elseShapes.size() - 1, outIt = outputShapes.size() - 1; elseIt >= 0; elseIt--, outIt--)
+                        if (elseShapes[elseIt] != outputShapes[outIt] && elseShapes[elseIt] != 1)
+                            THROW_IE_EXCEPTION << "Select layer with name '" << layer->name
+                                                                             << "' and auto_broadcast='numpy' has incompatible 'Else' input and output shapes";
             }
 
-            addConfig(layer, {{ConfLayout::PLN, false},
-                              {ConfLayout::PLN, false},
-                              {ConfLayout::PLN, false}},
-                             {{ConfLayout::PLN, false}});
-        } catch (InferenceEngine::details::InferenceEngineException &ex) {
-            errorMsg = ex.what();
-        }
-    }
+            resDims.resize(numOfDims, 1);
+            std::copy(std::begin(outputShapes), std::end(outputShapes), std::begin(resDims) + (numOfDims - outputShapes.size()));
+            if (broadcast == "numpy") {
+                calcOutOffset(resOffset, resDims);
 
-    template <typename COND_T, typename DATA_T>
-    void execute_impl(std::vector<Blob::Ptr>& inputs, Blob::Ptr& output) noexcept {
-        auto *conditionData = inputs[condition]->cbuffer().as<const COND_T*>();
-        auto *thenData = inputs[then_]->cbuffer().as<const DATA_T*>();
-        auto *elseData = inputs[else_]->cbuffer().as<const DATA_T*>();
-
-        auto *dstData = output->cbuffer().as<DATA_T *>();
-        enum {N, C, H, W, Dims};
-        int dim[Dims] = {1, 1, 1, 1};
-        int cdim[Dims] = {1, 1, 1, 1};
-
-        SizeVector dims = inputs[then_]->getTensorDesc().getDims();
-        std::copy(std::begin(dims), std::end(dims), std::begin(dim) + (Dims - dims.size()));
-
-        SizeVector cDims = inputs[condition]->getTensorDesc().getDims();
-        std::copy(std::begin(cDims), std::end(cDims), std::begin(cdim) + (Dims - cDims.size()));
-
-        parallel_for3d(dim[N], dim[H], dim[W], [&](int b, int h, int w) {
-            for (int c = 0; c < dim[C]; c++) {
-                dstData[b*dim[C]*dim[H]*dim[W] + c*dim[H]*dim[W] + h*dim[W] + w]
-                        = conditionData[(b % cdim[N])*cdim[C]*cdim[H]*cdim[W] +
-                                        (c % cdim[C])*cdim[H]*cdim[W] +
-                                        (h % cdim[H])*cdim[W] +
-                                        (w % cdim[W])]
-                          ?      thenData[b*dim[C]*dim[H]*dim[W] + c*dim[H]*dim[W] + h*dim[W] + w]
-                          :      elseData[b*dim[C]*dim[H]*dim[W] + c*dim[H]*dim[W] + h*dim[W] + w];
+                std::vector<size_t> condDims(numOfDims, 1);
+                std::copy(std::begin(conditionShapes), std::end(conditionShapes), std::begin(condDims) + (numOfDims - conditionShapes.size()));
+                calcInOffset(condOffset, condDims, resDims);
+
+                std::vector<size_t> thenDims(numOfDims, 1);
+                std::copy(std::begin(thenShapes), std::end(thenShapes), std::begin(thenDims) + (numOfDims - thenShapes.size()));
+                calcInOffset(thenOffset, thenDims, resDims);
+
+                std::vector<size_t> elseDims(numOfDims, 1);
+                std::copy(std::begin(elseShapes), std::end(elseShapes), std::begin(elseDims) + (numOfDims - elseShapes.size()));
+                calcInOffset(elseOffset, elseDims, resDims);
             }
-        });
-    }
 
-    StatusCode execute(std::vector<Blob::Ptr>& inputs, std::vector<Blob::Ptr>& outputs, ResponseDesc *resp) noexcept override {
-        auto &outputData = outputs[0];
+            LayerConfig config;
+            for (size_t i = 0; i < numOfInputs; i++) {
+                DataConfig inConfig;
+                inConfig.inPlace = -1;
+                inConfig.constant = false;
 
-        auto cond_precision = inputs[condition]->getTensorDesc().getPrecision();
-        auto data_precision = inputs[then_]->getTensorDesc().getPrecision();
+                Precision inPrecision = layer->insData[i].lock()->getTensorDesc().getPrecision();
+                const SizeVector& inDims = layer->insData[i].lock()->getTensorDesc().getDims();
+                inConfig.desc = TensorDesc(inPrecision, inDims, InferenceEngine::TensorDesc::getLayoutByDims(inDims));
 
-        auto compare = getPrecisionMask(cond_precision, data_precision);
-        switch (compare) {
-            /* 64 bit data type */
-            case getPrecisionMask(Precision::I32, Precision::I64):
-                execute_impl<int32_t, int64_t>(inputs, outputData);
-                break;
-            case getPrecisionMask(Precision::U8, Precision::I64):
-                execute_impl<uint8_t, int64_t>(inputs, outputData);
-                break;
-            case getPrecisionMask(Precision::I32, Precision::U64):
-                execute_impl<int32_t, uint64_t>(inputs, outputData);
-                break;
-            case getPrecisionMask(Precision::U8, Precision::U64):
-                execute_impl<uint8_t , uint64_t>(inputs, outputData);
-                break;
+                config.inConfs.push_back(inConfig);
+            }
 
-            /* 32 bit data type */
-            case getPrecisionMask(Precision::I32, Precision::FP32):
-            case getPrecisionMask(Precision::I32, Precision::I32):
-                execute_impl<int32_t , int32_t>(inputs, outputData);
-                break;
-            case getPrecisionMask(Precision::U8, Precision::FP32):
-            case getPrecisionMask(Precision::U8, Precision::I32):
-                execute_impl<uint8_t , int32_t>(inputs, outputData);
-                break;
+            DataConfig outConfig;
+            outConfig.inPlace = -1;
+            outConfig.constant = false;
+            Precision outPrecision = layer->insData[THEN].lock()->getTensorDesc().getPrecision();
+            const SizeVector& outDims = layer->outData[0]->getTensorDesc().getDims();
+            outConfig.desc = TensorDesc(outPrecision, outDims, InferenceEngine::TensorDesc::getLayoutByDims(outDims));
+            config.outConfs.push_back(outConfig);
 
-            /* 16 bit data type */
-            case getPrecisionMask(Precision::I32, Precision::FP16):
-            case getPrecisionMask(Precision::I32, Precision::Q78):
-            case getPrecisionMask(Precision::I32, Precision::I16):
-            case getPrecisionMask(Precision::I32, Precision::U16):
-                execute_impl<int32_t , int16_t>(inputs, outputData);
-                break;
-            case getPrecisionMask(Precision::U8, Precision::FP16):
-            case getPrecisionMask(Precision::U8, Precision::Q78):
-            case getPrecisionMask(Precision::U8, Precision::I16):
-            case getPrecisionMask(Precision::U8, Precision::U16):
-                execute_impl<uint8_t , int16_t>(inputs, outputData);
-                break;
+            config.dynBatchSupport = false;
+            confs.push_back(config);
+        } catch (InferenceEngine::details::InferenceEngineException &ex) {
+            errorMsg = ex.what();
+        }
+    }
 
-            /* 8 bit data type */
-            case getPrecisionMask(Precision::I32, Precision::I8):
-            case getPrecisionMask(Precision::I32, Precision::U8):
-                execute_impl<int32_t , int8_t>(inputs, outputData);
+    StatusCode execute(std::vector<Blob::Ptr>& inputs, std::vector<Blob::Ptr>& outputs, ResponseDesc *resp) noexcept override {
+        auto &outputData = outputs[0];
+        const size_t condPrecSize = inputs[CONDITION]->getTensorDesc().getPrecision().size();
+        const size_t inputsPrecSize = inputs[THEN]->getTensorDesc().getPrecision().size();
+
+        switch (condPrecSize) {
+            case 1: {
+                switch (inputsPrecSize) {
+                    case 1: { execute_impl<uint8_t, uint8_t>(inputs, outputData); break; }
+                    case 2: { execute_impl<uint8_t, uint16_t>(inputs, outputData); break; }
+                    case 4: { execute_impl<uint8_t, uint32_t>(inputs, outputData); break; }
+                    case 8: { execute_impl<uint8_t, uint64_t>(inputs, outputData); break; }
+                    default: {
+                        if (resp) {
+                            std::string errorMsg = "Select layer doesn't support 'Then' and 'Else' inputs' precision: "
+                                                                                        + std::string(inputs[THEN]->getTensorDesc().getPrecision().name());
+                            errorMsg.copy(resp->msg, sizeof(resp->msg) - 1);
+                        }
+                        return GENERAL_ERROR;
+                    }
+                }
                 break;
-            case getPrecisionMask(Precision::U8, Precision::I8):
-            case getPrecisionMask(Precision::U8, Precision::U8):
-                execute_impl<uint8_t , int8_t>(inputs, outputData);
+            }
+            case 4: {
+                switch (inputsPrecSize) {
+                    case 1: { execute_impl<int32_t, uint8_t>(inputs, outputData); break; }
+                    case 2: { execute_impl<int32_t, uint16_t>(inputs, outputData); break; }
+                    case 4: { execute_impl<int32_t, uint32_t>(inputs, outputData); break; }
+                    case 8: { execute_impl<int32_t, uint64_t>(inputs, outputData); break; }
+                    default: {
+                        if (resp) {
+                            std::string errorMsg = "Select layer doesn't support 'Then' and 'Else' inputs' precision: "
+                                                                                        + std::string(inputs[THEN]->getTensorDesc().getPrecision().name());
+                            errorMsg.copy(resp->msg, sizeof(resp->msg) - 1);
+                        }
+                        return GENERAL_ERROR;
+                    }
+                }
                 break;
-
-            default:
+            }
+            default: {
                 if (resp) {
-                    std::string errorMsg = "Incorrect Reduce layer type";
-                    errorMsg.copy(resp->msg, sizeof(resp->msg) - 1);
+                    std::string errorMsg = "Select layer doesn't support 'Condition' inputs' precision: "
+                                                                                    + std::string(inputs[CONDITION]->getTensorDesc().getPrecision().name());
+                        errorMsg.copy(resp->msg, sizeof(resp->msg) - 1);
                 }
                 return GENERAL_ERROR;
+            }
         }
 
-
         return OK;
     }
-};
 
+private:
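+    // Row-major strides: offset[i] is the product of all dims to the right of i.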
+    void calcOutOffset(std::vector<size_t>& offset, const std::vector<size_t>& dims) {
+        offset.resize(numOfDims);
+        int k = 1;
+        for (int i = dims.size() - 1; i >= 0; i--) {
+            offset[i] = k;
+            k *= dims[i];
+        }
+    }
+
+    void calcInOffset(std::vector<size_t>& offset, const std::vector<size_t>& inDims, const std::vector<size_t>& outDims) {
+        offset.resize(numOfDims);
+        int k = 1;
+        for (int i = inDims.size() - 1; i >= 0; i--) {
+            offset[i] = (inDims[i] == outDims[i]) ? k : 0;
+            k *= inDims[i];
+        }
+    }
+
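+    // calcInOffset zeroes the stride of every broadcast dimension, so the
+    // indexing below re-reads the same input element along those dimensions.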
+    template <typename COND_T, typename DATA_T>
+    void execute_impl(std::vector<Blob::Ptr>& inputs, Blob::Ptr& output) noexcept {
+        auto *conditionData = inputs[CONDITION]->cbuffer().as<const COND_T *>() + inputs[CONDITION]->getTensorDesc().getBlockingDesc().getOffsetPadding();
+        auto *thenData = inputs[THEN]->cbuffer().as<const DATA_T *>() + inputs[THEN]->getTensorDesc().getBlockingDesc().getOffsetPadding();
+        auto *elseData = inputs[ELSE]->cbuffer().as<const DATA_T *>() + inputs[ELSE]->getTensorDesc().getBlockingDesc().getOffsetPadding();
+        auto *dstData = output->buffer().as<DATA_T *>() + output->getTensorDesc().getBlockingDesc().getOffsetPadding();
+
+        if (broadcast == "none") {
+            size_t dstDataSize = std::accumulate(begin(resDims), end(resDims), static_cast<size_t>(1), std::multiplies<size_t>());
+            parallel_for(dstDataSize, [&](size_t i) {
+                dstData[i] = conditionData[i] ? thenData[i] : elseData[i];
+            });
+        } else {
+            parallel_for4d(resDims[N], resDims[C], resDims[D], resDims[H], [&](int b, int c, int d, int h) {
+                for (int w = 0; w < resDims[W]; w++) {
+                    size_t indexOut = b * resOffset[N] + c * resOffset[C] + d * resOffset[D] + h * resOffset[H] + w * resOffset[W];
+                    size_t indexCond = b * condOffset[N] + c * condOffset[C] + d * condOffset[D] + h * condOffset[H] + w * condOffset[W];
+                    size_t indexThen = b * thenOffset[N] + c * thenOffset[C] + d * thenOffset[D] + h * thenOffset[H] + w * thenOffset[W];
+                    size_t indexElse = b * elseOffset[N] + c * elseOffset[C] + d * elseOffset[D] + h * elseOffset[H] + w * elseOffset[W];
+                    dstData[indexOut] = conditionData[indexCond] ? thenData[indexThen] : elseData[indexElse];
+                }
+            });
+        }
+    }
+};
 
 REG_FACTORY_FOR(ImplFactory<SelectImpl>, Select);
 }  // namespace Cpu
index 375c211..f9eea40 100644
@@ -85,7 +85,7 @@ public:
     /**
      * @brief Wraps original method
      * IInferencePlugin::LoadNetwork
-     * @param network - a network object acquired from CNNNetReader
+     * @param network - a network object acquired from InferenceEngine::Core::ReadNetwork
      * @param config string-string map of config parameters relevant only for this load operation
      * @param context - a pointer to plugin context derived from RemoteContext class used to
      *        execute the network
index 10e381c..37f89da 100644
@@ -7,7 +7,6 @@
 #include <cpp_interfaces/interface/ie_imemory_state_internal.hpp>
 #include <ie_iinfer_request.hpp>
 #include <ie_parameter.hpp>
-#include <ie_primitive_info.hpp>
 #include <map>
 #include <memory>
 #include <string>
index 539c35a..bf80615 100644
 
 
 #include <ie_iextension.h>
-#include <cpp/ie_executable_network.hpp>
 #include <ie_input_info.hpp>
 #include <ie_icnn_network.hpp>
 #include <ie_icore.hpp>
+#include <ie_plugin.hpp>
 #include <ie_iexecutable_network.hpp>
 #include <ie_remote_context.hpp>
 
@@ -118,7 +118,7 @@ public:
      * @brief Creates an executable network from a parsed network object; users can create as many networks as they need
      * and use them simultaneously (up to the limitation of the HW resources)
      * @param executableNetwork - a reference to a shared ptr of the returned network interface
-     * @param network - a network object acquired from CNNNetReader
+     * @param network - a network object acquired from InferenceEngine::Core::ReadNetwork
      * @param config string-string map of config parameters relevant only for this load operation
      */
     virtual void LoadNetwork(IExecutableNetwork::Ptr& executableNetwork, const ICNNNetwork& network,
@@ -126,7 +126,7 @@ public:
 
     /**
      * @brief Creates an executable network from a network object, on the specified remote context
-     * @param network - a network object acquired from CNNNetReader
+     * @param network - a network object acquired from InferenceEngine::Core::ReadNetwork
      * @param config string-string map of config parameters relevant only for this load operation
      * @param context - a pointer to plugin context derived from RemoteContext class used to
      *        execute the network
index d37b17c..40dcf88 100644 (file)
@@ -13,8 +13,8 @@
 #include <memory>
 #include <string>
 
+#include <ie_plugin_ptr.hpp>
 #include "threading/ie_itask_executor.hpp"
-#include "ie_plugin_ptr.hpp"
 
 namespace InferenceEngine {
 
@@ -39,6 +39,23 @@ public:
     virtual InferenceEnginePluginPtr GetPluginByName(const std::string& deviceName) const = 0;
 
     /**
+     * @brief Reads an IR from a model string and a weights blob
+     * @param model string with the model in IR format
+     * @param weights shared pointer to a constant blob with weights
+     * @return CNNNetwork
+     */
+    virtual CNNNetwork ReadNetwork(const std::string& model, const Blob::CPtr& weights) const = 0;
+
+    /**
+     * @brief Reads IR xml and bin files
+     * @param modelPath path to the IR xml file
+     * @param binPath path to the bin file; if the path is empty, a bin file with the same name as the xml is tried,
+     * and if no such file is found, the IR is loaded without weights.
+     * @return CNNNetwork
+     */
+    virtual CNNNetwork ReadNetwork(const std::string& modelPath, const std::string& binPath) const = 0;
+
+    /**
      * @brief Default virtual destructor
      */
     virtual ~ICore() = default;
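
These ICore methods back the public InferenceEngine::Core::ReadNetwork API referenced throughout this patch. A minimal usage sketch of the public counterpart, assuming a valid IR pair model.xml / model.bin exists on disk:

    #include <inference_engine.hpp>
    #include <iostream>

    int main() {
        InferenceEngine::Core core;
        // With an empty second argument, a bin file with the same base
        // name as the xml would be tried instead.
        InferenceEngine::CNNNetwork network = core.ReadNetwork("model.xml", "model.bin");
        std::cout << network.getName() << std::endl;
        return 0;
    }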
index 41427d1..c0e4cbb 100644 (file)
@@ -52,6 +52,13 @@ INFERENCE_ENGINE_API_CPP(int) getNumberOfCPUCores();
 INFERENCE_ENGINE_API_CPP(bool) with_cpu_x86_sse42();
 
 /**
+ * @brief      Checks whether CPU supports AVX capability
+ * @ingroup    ie_dev_api_system_conf
+ * @return     `True` if AVX instructions are available, `false` otherwise
+ */
+INFERENCE_ENGINE_API_CPP(bool) with_cpu_x86_avx();
+
+/**
  * @brief      Checks whether CPU supports AVX2 capability
  * @ingroup    ie_dev_api_system_conf
 * @return     `True` if AVX2 instructions are available, `false` otherwise
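
These predicates are meant for runtime kernel dispatch inside plugins. A small sketch, assuming the dev-API header ie_system_conf.h is on the include path:

    #include <iostream>
    #include <ie_system_conf.h>

    int main() {
        using namespace InferenceEngine;
        if (with_cpu_x86_avx2())
            std::cout << "dispatching the AVX2 kernel\n";
        else if (with_cpu_x86_avx())
            std::cout << "dispatching the AVX kernel\n";   // the predicate added above
        else
            std::cout << "dispatching the generic kernel\n";
        return 0;
    }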
diff --git a/inference-engine/src/transformations/include/transformations/common_optimizations/common_optimizations.hpp b/inference-engine/src/transformations/include/transformations/common_optimizations/common_optimizations.hpp
new file mode 100644 (file)
index 0000000..776d42d
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <vector>
+#include <memory>
+
+#include <ie_api.h>
+
+#include <ngraph/pass/graph_rewrite.hpp>
+
+#include "transformations/utils/pass_param.hpp"
+
+namespace ngraph {
+namespace pass {
+
+class INFERENCE_ENGINE_API_CLASS(CommonOptimizations);
+
+}  // namespace pass
+}  // namespace ngraph
+
+class ngraph::pass::CommonOptimizations: public ngraph::pass::FunctionPass {
+public:
+    explicit CommonOptimizations() : FunctionPass() {}
+
+    bool run_on_function(std::shared_ptr<ngraph::Function> f) override;
+};
diff --git a/inference-engine/src/transformations/include/transformations/common_optimizations/common_optimizations_tbl.hpp b/inference-engine/src/transformations/include/transformations/common_optimizations/common_optimizations_tbl.hpp
new file mode 100644 (file)
index 0000000..53103c4
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#ifndef NGRAPH_PASS
+#warning "NGRAPH_PASS is not defined"
+#define NGRAPH_PASS(A, B)
+#endif
+
+// To register a new pass you need to define NGRAPH_PASS
+// Usage example:
+//   ngraph::pass::Manager pm;
+//   #define NGRAPH_PASS(NAME, NAMESPACE)   pm.register_pass<NAMESPACE::NAME>();
+//   #include <transformations/common_optimizations/common_optimizations_tbl.hpp>
+//   #undef NGRAPH_PASS
+
+NGRAPH_PASS(NopElimination, ::ngraph::pass)
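
This table header follows the classic X-macro pattern: the includer defines NGRAPH_PASS, includes the table, and every row expands into one registration call (as common_optimizations.cpp below does). A minimal self-contained sketch of the same mechanism, with illustrative names only:

    #include <iostream>
    #include <string>
    #include <vector>

    // Inline stand-in for a *_tbl.hpp file: one row per pass.
    #define PASS_TABLE(X) \
        X(NopElimination)  \
        X(ConstantFolding)

    struct Manager {
        std::vector<std::string> passes;
        void register_pass(const std::string& name) { passes.push_back(name); }
    };

    int main() {
        Manager pm;
    #define REGISTER(NAME) pm.register_pass(#NAME);  // one registration per table row
        PASS_TABLE(REGISTER)
    #undef REGISTER
        for (const auto& p : pm.passes) std::cout << p << '\n';
    }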
index 58d1a6a..3090d2b 100644 (file)
@@ -21,6 +21,7 @@
 #include "ngraph_ops/convolution_ie.hpp"
 #include "ngraph_ops/deconvolution_ie.hpp"
 #include "ngraph/op/fused/group_conv.hpp"
+#include "ngraph/rt_info.hpp"
 
 #include <ngraph/pass/graph_rewrite.hpp>
 
@@ -136,6 +137,7 @@ ngraph::graph_rewrite_callback ngraph::pass::ConvFusion::get_callback() {
             return false;
         }
 
+        ngraph::copy_runtime_info({m_conv, eltwise}, new_conv);
         new_conv->set_friendly_name(m.get_match_root()->get_friendly_name());
         ngraph::replace_node(m.get_match_root(), new_conv);
         return true;
index 942ee7c..adc6d47 100644 (file)
@@ -12,6 +12,7 @@
 #include <ngraph/pass/graph_rewrite.hpp>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 #include "ngraph_ops/scaleshift.hpp"
 #include "ngraph_ops/eltwise.hpp"
@@ -57,7 +58,8 @@ bool convert_to_eltwise(std::shared_ptr<T> & node,
 
     auto eltwise = std::make_shared<ngraph::op::Eltwise>(data1, data2, et);
     eltwise->set_friendly_name(node->get_friendly_name());
-    ngraph::replace_node(node, std::dynamic_pointer_cast<ngraph::Node>(eltwise));
+    ngraph::copy_runtime_info(node, eltwise);
+    ngraph::replace_node(node, eltwise);
     return true;
 }
 
@@ -147,7 +149,8 @@ ngraph::graph_rewrite_callback get_callback() {
             }
 
             scaleshift->set_friendly_name(lin_op->get_friendly_name());
-            ngraph::replace_node(m.get_match_root(), std::dynamic_pointer_cast<ngraph::Node>(scaleshift));
+            ngraph::copy_runtime_info(m.get_match_root(), scaleshift);
+            ngraph::replace_node(m.get_match_root(), scaleshift);
         } else {
             float value;
             if (!ngraph::op::util::get_single_value(const_node, value)) {
@@ -164,6 +167,7 @@ ngraph::graph_rewrite_callback get_callback() {
                 return false;
             }
             power->set_friendly_name(lin_op->get_friendly_name());
+            ngraph::copy_runtime_info(m.get_match_root(), power);
             ngraph::replace_node(m.get_match_root(), power);
         }
 
index 5abf05b..8fc8839 100644 (file)
@@ -14,6 +14,7 @@
 //   #include <transformations/transformations_tbl.hpp>
 //   #undef NGRAPH_PASS
 
+NGRAPH_PASS(InitNodeInfo, ::ngraph::pass)
 NGRAPH_PASS(ConvertPriorBox, ::ngraph::pass)
 NGRAPH_PASS(ConstantFolding, ::ngraph::pass)
 NGRAPH_PASS(ConvertReduceToPooling, ::ngraph::pass)
index 51f1cae..2d49f83 100644 (file)
@@ -21,6 +21,7 @@
 
 #include <ngraph/opsets/opset1.hpp>
 #include <ngraph/pass/graph_rewrite.hpp>
+#include <ngraph/rt_info.hpp>
 
 namespace ngraph {
 namespace pass {
@@ -81,17 +82,24 @@ private:
                 return false;
             }
 
+            NodeVector new_ops;
+
             auto new_bias = std::make_shared<opset1::Add>(m_fc->input(2).get_source_output(), m_bias);
+            new_ops.push_back(new_bias);
             std::shared_ptr<Node> final_bias = new_bias;
             if (new_bias->get_shape().size() >= 2) {
                 final_bias = std::make_shared<opset1::Reshape>(final_bias, opset1::Constant::create(element::i64, Shape{1}, {-1}), true);
+                new_ops.push_back(final_bias);
             }
 
             auto new_fc = std::make_shared<op::FullyConnected>(m_fc->input(0).get_source_output(),
                                                                m_fc->input(1).get_source_output(),
                                                                final_bias,
                                                                m_fc->get_shape());
+            new_ops.push_back(new_fc);
+
             new_fc->set_friendly_name(add->get_friendly_name());
+            ngraph::copy_runtime_info({m_fc, add}, new_ops);
             ngraph::replace_node(add, new_fc);
             return true;
         };
index 7840479..e347556 100644 (file)
 #include <ngraph_ops/fully_connected.hpp>
 #include <ngraph/builder/make_constant.hpp>
 #include <ngraph/graph_util.hpp>
-#include <ngraph/op/add.hpp>
 #include <ngraph/pattern/matcher.hpp>
 #include <ngraph/pattern/op/label.hpp>
 #include <ngraph/pattern/op/skip.hpp>
 #include <ngraph/util.hpp>
 #include <ngraph/ngraph.hpp>
+#include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 #include <ngraph/pass/graph_rewrite.hpp>
 #include <transformations/utils/utils.hpp>
@@ -46,7 +47,7 @@ private:
         auto input = std::make_shared<pattern::op::Label>(element::f32, Shape{2, 4});
 
         auto reshape_shape = std::make_shared<pattern::op::Label>(element::i64, Shape{4});
-        auto reshape = std::make_shared<ngraph::op::v1::Reshape>(input, reshape_shape, true);
+        auto reshape = std::make_shared<ngraph::opset1::Reshape>(input, reshape_shape, true);
 
         auto weights = std::make_shared<pattern::op::Label>(element::f32, Shape{2, 4});
         auto biases = std::make_shared<pattern::op::Label>(element::f32, Shape{2});
@@ -58,7 +59,7 @@ private:
                 return false;
             }
 
-            auto reshape = std::dynamic_pointer_cast<ngraph::op::v1::Reshape>(fc->input_value(0).get_node_shared_ptr());
+            auto reshape = std::dynamic_pointer_cast<ngraph::opset1::Reshape>(fc->input_value(0).get_node_shared_ptr());
             if (!reshape) {
                 return false;
             }
@@ -82,6 +83,7 @@ private:
                                                                fc->get_shape());
 
             new_fc->set_friendly_name(fc->get_friendly_name());
+            ngraph::copy_runtime_info({reshape, fc}, new_fc);
             ngraph::replace_node(fc, new_fc);
             return true;
         };
diff --git a/inference-engine/src/transformations/include/transformations/convert_opset2_to_opset1/convert_opset2_to_opset1_tbl.hpp b/inference-engine/src/transformations/include/transformations/convert_opset2_to_opset1/convert_opset2_to_opset1_tbl.hpp
new file mode 100644 (file)
index 0000000..d67b909
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#ifndef NGRAPH_PASS
+#warning "NGRAPH_PASS is not defined"
+#define NGRAPH_PASS(A, B)
+#endif
+
+// To register a new pass you need to define NGRAPH_PASS
+// Usage example:
+//   ngraph::pass::Manager pm;
+//   #define NGRAPH_PASS(NAME, NAMESPACE)   pm.register_pass<NAMESPACE::NAME>();
+//   #include <transformations/convert_opset2_to_opset1/convert_opset2_to_opset1_tbl.hpp>
+//   #undef NGRAPH_PASS
+
+NGRAPH_PASS(ConvertGELU, ::ngraph::pass)
+NGRAPH_PASS(ConvertSpaceToBatch, ::ngraph::pass)
+NGRAPH_PASS(ConvertBatchToSpace, ::ngraph::pass)
index 4b6a0f9..dc404ff 100644 (file)
@@ -165,10 +165,12 @@ void ngraph::pass::ConvertReduceToPooling::convert_reduce_to_pooling() {
              *
              *  Note: some of reshape nodes can be optimized if they do nothing.
              */
+            NodeVector new_ops;
 
             if (!shape_begin.empty() && shape_begin != input->output(0).get_shape()) {
                 input = std::make_shared<ngraph::opset1::Reshape>(input, opset1::Constant::create(element::i64, Shape{shape_begin.size()}, shape_begin), true);
                 input->set_friendly_name(reduce->get_friendly_name() + "/reshape_begin");
+                new_ops.push_back(input);
             }
 
             if (std::is_same<T, ngraph::opset1::ReduceMean>()) {
@@ -181,6 +183,7 @@ void ngraph::pass::ConvertReduceToPooling::convert_reduce_to_pooling() {
                                                                   op::RoundingType::FLOOR);
 
                 input->set_friendly_name(reduce->get_friendly_name() + "/pool");
+                new_ops.push_back(input);
             } else if (std::is_same<T, ngraph::opset1::ReduceMax>()) {
                 input = std::make_shared<ngraph::opset1::MaxPool>(input,
                                                                   strides,
@@ -190,6 +193,7 @@ void ngraph::pass::ConvertReduceToPooling::convert_reduce_to_pooling() {
                                                                   op::RoundingType::FLOOR);
 
                 input->set_friendly_name(reduce->get_friendly_name() + "/pool");
+                new_ops.push_back(input);
             } else if (std::is_same<T, ngraph::opset1::ReduceSum>()) {
                 input = std::make_shared<ngraph::opset1::AvgPool>(input,
                                                                   strides,
@@ -200,19 +204,22 @@ void ngraph::pass::ConvertReduceToPooling::convert_reduce_to_pooling() {
                                                                   op::RoundingType::FLOOR);
 
                 input->set_friendly_name(reduce->get_friendly_name() + "/pool");
+                new_ops.push_back(input);
 
                 input = std::make_shared<ngraph::opset1::Multiply>(input,
                         opset1::Constant::create(reduce->input(0).get_element_type(), Shape{1}, {reduction_dims_count}));
                 input->set_friendly_name(reduce->get_friendly_name() + "/mul");
+                new_ops.push_back(input);
             } else {
                 return false;
             }
 
             if (!shape_end.empty() && shape_end != input->output(0).get_shape()) {
                 input = std::make_shared<ngraph::opset1::Reshape>(input, opset1::Constant::create(element::i64, Shape{shape_end.size()}, shape_end), true);
+                new_ops.push_back(input);
             }
             input->set_friendly_name(reduce->get_friendly_name());
-
+            copy_runtime_info(reduce, new_ops);
             replace_node(reduce, input);
             return true;
         };
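
In the ReduceSum branch above the reduction is restored from the average by a Multiply with the element count, since sum = mean * N over the pooled window. A scalar check of that equivalence (arbitrary values):

    #include <cassert>

    int main() {
        const float v[4] = {1.f, 2.f, 3.f, 6.f};  // one pooled window
        float sum = 0.f;
        for (float x : v) sum += x;
        const float mean = sum / 4.f;             // what AvgPool produces
        assert(mean * 4.f == sum);                // Multiply by N restores ReduceSum
        return 0;
    }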
diff --git a/inference-engine/src/transformations/include/transformations/init_node_info.hpp b/inference-engine/src/transformations/include/transformations/init_node_info.hpp
new file mode 100644 (file)
index 0000000..2ca55cf
--- /dev/null
@@ -0,0 +1,37 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <vector>
+#include <memory>
+
+#include <ie_api.h>
+
+#include <ngraph/pass/graph_rewrite.hpp>
+
+namespace ngraph {
+namespace pass {
+
+class INFERENCE_ENGINE_API_CLASS(InitNodeInfo);
+
+}  // namespace pass
+}  // namespace ngraph
+
+/*
+ * Description:
+ *     InitNodeInfo transformation helps to set runtime info attributes in a single place.
+ *     Every runtime info attribute that needs to be initialized should be registered
+ *     in the run_on_function method. Also do not forget to override the init method for each
+ *     registered attribute.
+ *     This transformation should be called first in the transformation pipeline. If an attribute
+ *     was already set, initialization is skipped for that node.
+ */
+
+class ngraph::pass::InitNodeInfo: public ngraph::pass::FunctionPass {
+public:
+    InitNodeInfo() : FunctionPass() {}
+
+    bool run_on_function(std::shared_ptr<ngraph::Function> f) override;
+};
index f3dc736..70d2329 100644 (file)
 #include <ngraph/ngraph.hpp>
 
 #include "ngraph/pattern/matcher.hpp"
-#include "ngraph/op/broadcast.hpp"
-#include "ngraph/op/experimental/dyn_broadcast.hpp"
-#include "ngraph/op/fused/conv_fused.hpp"
-#include "ngraph/op/reshape.hpp"
-#include "ngraph/op/add.hpp"
 
-#include "ngraph/op/fused/group_conv.hpp"
+#include <ngraph/opsets/opset1.hpp>
 
 #include "ngraph/op/util/binary_elementwise_arithmetic.hpp"
 
@@ -36,8 +31,8 @@ class INFERENCE_ENGINE_API_CLASS(MulAddFusion);
 class ngraph::pass::MulAddFusion: public ngraph::pass::GraphRewrite {
 public:
     MulAddFusion() : GraphRewrite() {
-        mul_add_fusion<op::v1::Multiply>();
-        mul_add_fusion<op::v1::Add>();
+        mul_add_fusion<opset1::Multiply>();
+        mul_add_fusion<opset1::Add>();
     }
 
 private:
@@ -74,8 +69,8 @@ bool fusion(std::shared_ptr<T> m_eltwise) {
 
     std::shared_ptr<op::Op> eltwise, add, mul;
     std::shared_ptr<Node> constant, constant1, constant2;
-    std::tie(add, constant1) = parse_eltwise_inputs<op::v1::Add, Node>(m_eltwise);
-    std::tie(mul, constant2) = parse_eltwise_inputs<op::v1::Multiply, Node>(m_eltwise);
+    std::tie(add, constant1) = parse_eltwise_inputs<opset1::Add, Node>(m_eltwise);
+    std::tie(mul, constant2) = parse_eltwise_inputs<opset1::Multiply, Node>(m_eltwise);
 
     if (add && add->output(0).get_target_inputs().size() != 1) {
         return false;
@@ -101,14 +96,10 @@ bool fusion(std::shared_ptr<T> m_eltwise) {
 
         // Mul->Mul => Mul, Add->Add => Add
         if (std::dynamic_pointer_cast<T>(eltwise) && std::dynamic_pointer_cast<T>(m_eltwise)) {
-            auto new_eltwise = std::make_shared<T>(
-                    res.first,
-                    std::make_shared<T>(
-                            constant,
-                            res.second,  // constant
-                            op::AutoBroadcastType::NUMPY),
-                    op::AutoBroadcastType::NUMPY);
+            auto new_const = std::make_shared<T>(constant, res.second);
+            auto new_eltwise = std::make_shared<T>(res.first, new_const);
 
+            copy_runtime_info(m_eltwise, {new_const, new_eltwise});
             replace_node(m_eltwise, new_eltwise);
             new_eltwise->set_op_annotations(std::make_shared<op::util::EltwiseAttrs>(m_attrs));
             new_eltwise->set_friendly_name(m_eltwise->get_friendly_name());
@@ -116,13 +107,12 @@ bool fusion(std::shared_ptr<T> m_eltwise) {
         }
 
         // Add->Mul => Mul->Add
-        if (std::dynamic_pointer_cast<op::v1::Add>(eltwise) && std::dynamic_pointer_cast<op::v1::Multiply>(m_eltwise)) {
-            auto new_mul = std::make_shared<op::v1::Multiply>(res.first, constant);
-            auto new_add = std::make_shared<op::v1::Add> (new_mul,
-                                                          std::make_shared<op::v1::Multiply>(
-                                                          constant,
-                                                          res.second));
+        if (std::dynamic_pointer_cast<opset1::Add>(eltwise) && std::dynamic_pointer_cast<opset1::Multiply>(m_eltwise)) {
+            auto new_mul = std::make_shared<opset1::Multiply>(res.first, constant);
+            auto new_const = std::make_shared<opset1::Multiply>(constant, res.second);
+            auto new_add = std::make_shared<opset1::Add> (new_mul, new_const);
 
+            copy_runtime_info(m_eltwise, {new_mul, new_const, new_add});
             replace_node(m_eltwise, new_add);
 
             // We need to preserve op annotations and namings
@@ -144,7 +134,7 @@ void ngraph::pass::MulAddFusion::mul_add_fusion() {
     auto eltwise = std::make_shared<T>(input1, input2);
 
     ngraph::graph_rewrite_callback callback = [&](ngraph::pattern::Matcher &m) {
-        static_assert(std::is_same<T, op::v1::Add>() || std::is_same<T, op::v1::Multiply>(),
+        static_assert(std::is_same<T, opset1::Add>() || std::is_same<T, opset1::Multiply>(),
                       "Unsupported template parameter. Only Add or Multiply allowed!");
 
         if (auto m_eltwise = std::dynamic_pointer_cast<T>(m.get_match_root())) {
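
The Add->Mul => Mul->Add rewrite above is plain distributivity: (x + c1) * c2 == x * c2 + (c1 * c2), which lets the two constants be pre-multiplied into new_const. A scalar check with arbitrary, exactly representable values:

    #include <cassert>

    int main() {
        const float x = 3.5f, c1 = 2.0f, c2 = -4.0f;
        const float add_mul = (x + c1) * c2;        // original Add -> Mul chain
        const float mul_add = x * c2 + (c1 * c2);   // rewritten Mul -> Add chain
        assert(add_mul == mul_add);                 // exact for these values
        return 0;
    }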
diff --git a/inference-engine/src/transformations/include/transformations/rt_info/fused_names_attribute.hpp b/inference-engine/src/transformations/include/transformations/rt_info/fused_names_attribute.hpp
new file mode 100644 (file)
index 0000000..1a9d711
--- /dev/null
@@ -0,0 +1,65 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <assert.h>
+#include <functional>
+#include <memory>
+#include <string>
+#include <set>
+
+#include <ngraph/node.hpp>
+#include <ngraph/variant.hpp>
+#include <ie_api.h>
+
+
+namespace ngraph {
+
+/*
+ * Description:
+ *     FusedNames class represents a runtime info attribute that stores
+ *     all operation names that were fully or partially fused into the node
+ */
+
+class FusedNames {
+private:
+    std::set<std::string> fused_names;
+
+public:
+    FusedNames() = default;
+
+    explicit FusedNames(const std::string &name) {
+        fused_names.insert(name);
+    }
+
+    // This method unites the current set of already fused names with another FusedNames object
+    void fuseWith(const FusedNames &names);
+
+    // returns a string with operation names separated by commas in alphabetical order
+    std::string getNames() const;
+
+    // returns a vector of fused names sorted in alphabetical order
+    std::vector<std::string> getVectorNames() const;
+};
+
+template<>
+class VariantWrapper<FusedNames> : public VariantImpl<FusedNames> {
+public:
+    static constexpr VariantTypeInfo type_info{"Variant::RuntimeAttribute::FusedNames", 0};
+
+    const VariantTypeInfo &get_type_info() const override {
+        return type_info;
+    }
+
+    VariantWrapper(const value_type &value) : VariantImpl<value_type>(value) {}
+
+    std::shared_ptr<ngraph::Variant> merge(const ngraph::NodeVector & nodes) override;
+
+    std::shared_ptr<ngraph::Variant> init(const std::shared_ptr<ngraph::Node> & node) override;
+};
+
+INFERENCE_ENGINE_API_CPP(std::string) getFusedNames(const std::shared_ptr<ngraph::Node> & node);
+
+INFERENCE_ENGINE_API_CPP(std::vector<std::string>) getFusedNamesVector(const std::shared_ptr<ngraph::Node> & node);
+
+}  // namespace ngraph
\ No newline at end of file
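
A behavioural model of the attribute above: fuseWith is a set union, and since std::set iterates in sorted order, getNames naturally yields the comma-separated names alphabetically. This is a sketch of the intended semantics, not the actual implementation:

    #include <iostream>
    #include <set>
    #include <string>

    struct FusedNamesModel {
        std::set<std::string> names;  // kept sorted alphabetically by std::set
        void fuseWith(const FusedNamesModel& other) {
            names.insert(other.names.begin(), other.names.end());
        }
        std::string getNames() const {
            std::string out;
            for (const auto& n : names)
                out += (out.empty() ? "" : ",") + n;
            return out;
        }
    };

    int main() {
        FusedNamesModel a{{"relu1"}}, b{{"conv1"}};
        a.fuseWith(b);                      // union of both name sets
        std::cout << a.getNames() << '\n';  // conv1,relu1
    }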
diff --git a/inference-engine/src/transformations/include/transformations/utils/pass_manager.hpp b/inference-engine/src/transformations/include/transformations/utils/pass_manager.hpp
deleted file mode 100644 (file)
index 434ba22..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (C) 2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <transformations/utils/pass_param.hpp>
-
-#include <ngraph/pass/manager.hpp>
-
-
-namespace ngraph {
-namespace pass {
-
-class INFERENCE_ENGINE_API_CLASS(ConversionPassManager);
-
-}  // namespace pass
-}  // namespace ngraph
-
-
-class ngraph::pass::ConversionPassManager : public ::ngraph::pass::Manager, public ::ngraph::pass::PassParam {
-public:
-    explicit ConversionPassManager(const PassParam::param_callback & callback = PassParam::getDefaultCallback())
-            : Manager(), PassParam(callback) {
-        register_conversion_passes();
-    }
-
-private:
-    void register_conversion_passes();
-};
index d20c6ee..c5b7b96 100644 (file)
@@ -8,7 +8,7 @@
 #include <vector>
 
 #include <ngraph/opsets/opset1.hpp>
-
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::BatchNormDecomposition::batch_norm_decomposition() {
     Shape shape{2, 2, 1, 1};
@@ -43,19 +43,12 @@ void ngraph::pass::BatchNormDecomposition::batch_norm_decomposition() {
         //  scale = 1. / np.sqrt(variance + eps)
         //  shift = (mean * (-1)) * scale
         auto input_type = m_input->get_element_type();
-        auto scale = make_shared<ngraph::opset1::Divide>(
-                opset1::Constant::create(input_type, Shape{}, {1}),
-                make_shared<opset1::Power>(
-                        make_shared<opset1::Add>(
-                                m_var,
-                                opset1::Constant::create(input_type, Shape{}, {m_bn->get_eps_value()})),
-                        opset1::Constant::create(input_type, Shape{}, {0.5})));
-
-        auto shift = make_shared<opset1::Multiply>(
-                scale,
-                make_shared<opset1::Multiply>(
-                        m_mean,
-                        opset1::Constant::create(m_input->get_element_type(), Shape{}, {-1})));
+        auto scale_add = make_shared<opset1::Add>(m_var, opset1::Constant::create(input_type, Shape{}, {m_bn->get_eps_value()}));
+        auto scale_power = make_shared<opset1::Power>(scale_add, opset1::Constant::create(input_type, Shape{}, {0.5}));
+        auto scale = make_shared<ngraph::opset1::Divide>(opset1::Constant::create(input_type, Shape{}, {1}), scale_power);
+
+        auto shift_mul = make_shared<opset1::Multiply>(m_mean, opset1::Constant::create(m_input->get_element_type(), Shape{}, {-1}));
+        auto shift = make_shared<opset1::Multiply>(scale, shift_mul);
 
         // Expand Scale, Shift, Gamma and Beta to be aligned with layout
         size_t dims_to_add = m_input->get_shape().size() - 2;
@@ -76,18 +69,17 @@ void ngraph::pass::BatchNormDecomposition::batch_norm_decomposition() {
         auto shift_aligned = make_shared<opset1::Reshape>(shift, opset1::Constant::create(element::i64, Shape{shift_shape.size()}, shift_shape), true);
 
         // Connect: Mul(input, scale)->Add(mul, shift)->Mul(add, gamma)->Add(mul, beta)
-        auto result = make_shared<opset1::Add>(
-                make_shared<opset1::Multiply>(
-                        make_shared<opset1::Add>(
-                                make_shared<opset1::Multiply>(
-                                        m_input,
-                                        scale_aligned),
-                                shift_aligned),
-                        gamma_aligned),
-                beta_aligned);
-
-        result->set_friendly_name(m_bn->get_friendly_name());
-        replace_node(m_bn, result);
+        auto mul1 = std::make_shared<opset1::Multiply>(m_input, scale_aligned);
+        auto add1 = std::make_shared<opset1::Add>(mul1, shift_aligned);
+        auto mul2 = std::make_shared<opset1::Multiply>(add1, gamma_aligned);
+        auto add2 = std::make_shared<opset1::Add>(mul2, beta_aligned);
+
+        add2->set_friendly_name(m_bn->get_friendly_name());
+
+        copy_runtime_info(m_bn, {mul1, add1, mul2, add2,
+                                 gamma_aligned, beta_aligned, scale_aligned, shift_aligned,
+                                 scale_add, scale_power, scale, shift_mul, shift});
+        replace_node(m_bn, add2);
 
         return true;
     };
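
Numerically the decomposition is unchanged batch norm: y = gamma * (x - mean) / sqrt(var + eps) + beta becomes (x * scale + shift) * gamma + beta with scale = 1 / sqrt(var + eps) and shift = -mean * scale, exactly the chain built above. A scalar sanity check with arbitrary values:

    #include <cassert>
    #include <cmath>

    int main() {
        const double x = 0.7, mean = 0.2, var = 4.0, eps = 1e-5;
        const double gamma = 1.5, beta = -0.3;

        const double reference = gamma * (x - mean) / std::sqrt(var + eps) + beta;

        const double scale = 1.0 / std::sqrt(var + eps);  // Divide(1, Power(var + eps, 0.5))
        const double shift = -mean * scale;               // Multiply(scale, Multiply(mean, -1))
        const double decomposed = (x * scale + shift) * gamma + beta;

        assert(std::abs(reference - decomposed) < 1e-12);
        return 0;
    }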
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/common_optimizations.cpp
new file mode 100644 (file)
index 0000000..f791470
--- /dev/null
@@ -0,0 +1,21 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "transformations/common_optimizations/common_optimizations.hpp"
+
+#include <memory>
+
+#include <ngraph/pass/manager.hpp>
+#include <ngraph/pass/nop_elimination.hpp>
+
+bool ngraph::pass::CommonOptimizations::run_on_function(std::shared_ptr<ngraph::Function> f) {
+    ngraph::pass::Manager CommonOptimizations;
+
+#define NGRAPH_PASS(NAME, NAMESPACE) CommonOptimizations.register_pass<NAMESPACE::NAME>();
+#include <transformations/common_optimizations/common_optimizations_tbl.hpp>
+#undef NGRAPH_PASS
+
+    CommonOptimizations.run_passes(f);
+    return true;
+}
index ba7ddb0..9fc17ce 100644 (file)
@@ -8,6 +8,7 @@
 #include <vector>
 
 #include <ngraph/opsets/opset2.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertBatchToSpace::convert_batch_to_space() {
     auto input0 = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
@@ -69,6 +70,9 @@ void ngraph::pass::ConvertBatchToSpace::convert_batch_to_space_ie_side() {
         if (squeezed_shape.size() > block_values.size()) {
             return false;
         }
+
+        NodeVector new_ops;
+
         std::shared_ptr<Node> flat_node = data.get_node_shared_ptr();
         for (size_t block_idx = 1; block_idx < block_values.size(); ++block_idx) {
             dispersed_shape[0] = block_values[block_idx];
@@ -78,6 +82,7 @@ void ngraph::pass::ConvertBatchToSpace::convert_batch_to_space_ie_side() {
             const bool special_zero = false;
             flat_node = std::make_shared<ngraph::op::v1::Reshape>(flat_node, out_pattern_1, special_zero)
                     ->add_provenance_group_members_above({data});
+            new_ops.push_back(flat_node);
 
             size_t val = 1;
             for (size_t axis_idx = 0; axis_idx <= block_values.size(); ++axis_idx) {
@@ -95,6 +100,7 @@ void ngraph::pass::ConvertBatchToSpace::convert_batch_to_space_ie_side() {
                                          std::vector<int64_t>(axes_order.begin(), axes_order.end()));
             flat_node = std::make_shared<ngraph::opset1::Transpose>(flat_node, axes_order_const)
                     ->add_provenance_group_members_above({flat_node});
+            new_ops.push_back(flat_node);
 
             squeezed_shape[0] = dispersed_shape[1];
             squeezed_shape[block_idx] *= block_values[block_idx];
@@ -103,6 +109,7 @@ void ngraph::pass::ConvertBatchToSpace::convert_batch_to_space_ie_side() {
                     op::Constant::create(element::i64, Shape{squeezed_shape.size()}, squeezed_shape);
             flat_node = std::make_shared<ngraph::op::v1::Reshape>(flat_node, out_pattern_2, special_zero)
                     ->add_provenance_group_members_above({data});
+            new_ops.push_back(flat_node);
         }
 
         std::vector<int64_t> upperbounds_values;
@@ -117,8 +124,10 @@ void ngraph::pass::ConvertBatchToSpace::convert_batch_to_space_ie_side() {
         std::vector<int64_t> end_mask(data_shape.size(), 0);
         flat_node = std::make_shared<op::v1::StridedSlice>(
                 flat_node, crops_begin_const, upperbounds, begin_mask, end_mask);
+        new_ops.push_back(flat_node);
 
         flat_node->set_friendly_name(batch_to_space->get_friendly_name());
+        ngraph::copy_runtime_info(batch_to_space, flat_node);
         ngraph::replace_node(batch_to_space, flat_node);
         return true;
     };
index e4a3b3f..b303fca 100644 (file)
@@ -8,6 +8,7 @@
 #include <vector>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertBroadcastToTiles::convert_broadcast_to_tiles() {
     auto weights = std::make_shared<pattern::op::Label>(element::f32, Shape {1});
@@ -34,6 +35,8 @@ void ngraph::pass::ConvertBroadcastToTiles::convert_broadcast_to_tiles() {
 
         auto last_node = std::dynamic_pointer_cast<ngraph::Node>(data_node);
 
+        NodeVector new_ops;
+
         // In case if input_shape and output_shape differ we insert Reshape to align shapes
         if (input_shape.size() != dims_count) {
             if (input_shape.size() > dims_count) {
@@ -57,6 +60,7 @@ void ngraph::pass::ConvertBroadcastToTiles::convert_broadcast_to_tiles() {
             }
             auto shape_const = std::make_shared<ngraph::opset1::Constant>(element::i64, Shape {shape.size()}, shape);
             auto reshape = std::make_shared<ngraph::opset1::Reshape>(data_node, shape_const, true);
+            new_ops.push_back(reshape);
             last_node = std::dynamic_pointer_cast<ngraph::Node>(reshape);
             input_shape = shape;
         }
@@ -80,11 +84,12 @@ void ngraph::pass::ConvertBroadcastToTiles::convert_broadcast_to_tiles() {
 
         auto const_node = std::make_shared<ngraph::opset1::Constant>(element::i64, Shape {dims_count}, dims);
         auto tile = std::make_shared<ngraph::opset1::Tile>(last_node, const_node);
+        new_ops.push_back(tile);
         tile->set_friendly_name(broadcast->get_friendly_name());
 
         last_node = std::dynamic_pointer_cast<ngraph::Node>(tile);
-
-        ngraph::replace_node(m.get_match_root(), last_node);
+        ngraph::copy_runtime_info(broadcast, new_ops);
+        ngraph::replace_node(broadcast, last_node);
         return true;
     };
 
index 8dc9800..aea1555 100644 (file)
@@ -8,6 +8,7 @@
 #include <vector>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertDepthToSpace::convert_depth_to_space() {
     auto input0 = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
@@ -92,6 +93,7 @@ void ngraph::pass::ConvertDepthToSpace::convert_depth_to_space() {
         auto transpose = std::make_shared<ngraph::opset1::Transpose>(reshape_begin, create_constant(order));
         auto reshape_end = std::make_shared<ngraph::opset1::Reshape>(transpose, create_constant(shape_end), true);
         reshape_end->set_friendly_name(dts_node->get_friendly_name());
+        ngraph::copy_runtime_info(dts_node, {reshape_begin, transpose, reshape_end});
         ngraph::replace_node(dts_node, reshape_end);
         return true;
     };
index 0c10208..38cabe6 100644 (file)
@@ -8,6 +8,7 @@
 #include <vector>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertDivide::convert_divide() {
     auto input0 = std::make_shared<pattern::op::Label>(element::i64, Shape{1, 1, 1, 1});
@@ -26,7 +27,7 @@ void ngraph::pass::ConvertDivide::convert_divide() {
         auto mul = std::make_shared<ngraph::opset1::Multiply>(div->input(0).get_source_output(), pow);
 
         mul->set_friendly_name(div->get_friendly_name());
-
+        ngraph::copy_runtime_info(div, {pow, mul});
         ngraph::replace_node(div, mul);
         return true;
     };
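
The rewrite expresses division through a reciprocal, so a / b becomes Multiply(a, Power(b, -1)). A scalar check (with a tolerance, since the reciprocal path may differ from direct division in the last bit):

    #include <cassert>
    #include <cmath>

    int main() {
        const float a = 3.0f, b = 4.0f;
        const float reciprocal = std::pow(b, -1.0f);  // Power(b, -1)
        assert(std::fabs(a / b - a * reciprocal) < 1e-6f);
        return 0;
    }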
index 5254566..5e82d76 100644 (file)
@@ -8,6 +8,7 @@
 #include <vector>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertMinimum::convert_minimum() {
     auto input0 = std::make_shared<pattern::op::Label>(element::i64, Shape{1, 1, 1, 1});
@@ -36,7 +37,7 @@ void ngraph::pass::ConvertMinimum::convert_minimum() {
         auto neg_2 = std::make_shared<ngraph::opset1::Multiply>(max, opset1::Constant::create(max->get_element_type(), Shape{1}, {-1}));
 
         neg_2->set_friendly_name(minimum->get_friendly_name());
-
+        ngraph::copy_runtime_info(minimum, {neg_0, neg_1, max, neg_2});
         ngraph::replace_node(minimum, neg_2);
         return true;
     };
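
The chain of Multiply and Maximum nodes above encodes the identity min(a, b) == -max(-a, -b). A one-line check:

    #include <algorithm>
    #include <cassert>

    int main() {
        const float a = 2.5f, b = -1.0f;
        assert(std::min(a, b) == -std::max(-a, -b));  // both sides equal -1
        return 0;
    }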
index 9a2852b..fb0d3f0 100644 (file)
@@ -8,6 +8,7 @@
 #include <vector>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertMod::convert_mod() {
     auto input0 = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
@@ -19,9 +20,27 @@ void ngraph::pass::ConvertMod::convert_mod() {
         if (!mod) {
             return false;
         }
-        auto last_node = mod->decompose_op()[0];
-        last_node->set_friendly_name(mod->get_friendly_name());
-        ngraph::replace_node(mod, last_node);
+
+        const auto dividend = std::make_shared<opset1::Abs>(mod->input_value(0));
+        const auto dividend_sign = std::make_shared<opset1::Sign>(mod->input_value(0));
+        const auto dividend_et = dividend->get_element_type();
+        const auto divisor = std::make_shared<opset1::Abs>(mod->input_value(1));
+
+        // truncated(a / b)
+        auto div = std::make_shared<opset1::Divide>(dividend, divisor);
+        auto convert_to_i64 = std::make_shared<opset1::Convert>(div, ngraph::element::i64);
+        auto convert = std::make_shared<opset1::Convert>(convert_to_i64, dividend_et);
+        // truncated(a / b) * b
+        auto multiplication = std::make_shared<opset1::Multiply>(convert, divisor);
+        // a mod b = a - truncated(a / b) * b
+        auto sub = std::make_shared<opset1::Subtract>(dividend, multiplication);
+
+        // apply sign of dividend
+        auto mul = std::make_shared<opset1::Multiply>(dividend_sign, sub);
+
+        mul->set_friendly_name(mod->get_friendly_name());
+        ngraph::copy_runtime_info(mod, {dividend, dividend_sign, divisor, div, convert_to_i64, convert, multiplication, sub, mul});
+        ngraph::replace_node(mod, mul);
         return true;
     };
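
The expansion implements truncated modulo, as the inline comments state: a mod b = sign(a) * (|a| - trunc(|a| / |b|) * |b|), with the Convert round trip to i64 doing the truncation. A scalar check against std::fmod, which uses the same convention:

    #include <cassert>
    #include <cmath>

    double mod_expanded(double a, double b) {
        const double dividend = std::fabs(a);        // Abs(a)
        const double divisor  = std::fabs(b);        // Abs(b)
        const double truncated =                     // Convert(Divide(...)) to i64 and back
            static_cast<double>(static_cast<long long>(dividend / divisor));
        const double sign = (a < 0) ? -1.0 : 1.0;    // Sign(a); a == 0 gives a zero result anyway
        return sign * (dividend - truncated * divisor);
    }

    int main() {
        assert(mod_expanded(7.0, 3.0) == std::fmod(7.0, 3.0));    //  1
        assert(mod_expanded(-7.0, 3.0) == std::fmod(-7.0, 3.0));  // -1
        return 0;
    }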
 
index edb4678..9eb4956 100644 (file)
@@ -8,10 +8,11 @@
 #include <vector>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertNegative::convert_negative() {
     auto input = std::make_shared<pattern::op::Label>(element::f32, Shape{1});
-    auto neg = std::make_shared<ngraph::op::Negative>(input);
+    auto neg = std::make_shared<ngraph::opset1::Negative>(input);
 
     ngraph::graph_rewrite_callback callback = [](pattern::Matcher& m) {
         auto neg = std::dynamic_pointer_cast<ngraph::opset1::Negative> (m.get_match_root());
@@ -22,7 +23,7 @@ void ngraph::pass::ConvertNegative::convert_negative() {
         auto mul = std::make_shared<ngraph::opset1::Multiply>(neg->input(0).get_source_output(),
                                                               opset1::Constant::create(neg->get_element_type(), Shape{1}, {-1}));
         mul->set_friendly_name(neg->get_friendly_name());
-
+        ngraph::copy_runtime_info(neg, mul);
         ngraph::replace_node(neg, mul);
         return true;
     };
index d3830fe..4f3c6f2 100644 (file)
@@ -8,6 +8,7 @@
 #include <vector>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 #include <ngraph_ops/convolution_ie.hpp>
 #include <ngraph_ops/deconvolution_ie.hpp>
@@ -37,6 +38,7 @@ void ngraph::pass::ConvertConvolutions::convert_convolution() {
                                                                    conv->output(0).get_shape(),
                                                                    1 /* groups */,
                                                                    conv->get_auto_pad());
+        ngraph::copy_runtime_info(conv, conv_ie);
         conv_ie->set_friendly_name(conv->get_friendly_name());
         ngraph::replace_node(conv, conv_ie);
         return true;
@@ -86,7 +88,9 @@ void ngraph::pass::ConvertConvolutions::convert_group_convolution() {
                                                                    gconv->output(0).get_shape(),
                                                                    group,
                                                                    gconv->get_auto_pad());
+        conv_ie->get_rt_info() = gconv->get_rt_info();
         conv_ie->set_friendly_name(gconv->get_friendly_name());
+        ngraph::copy_runtime_info(gconv, conv_ie);
         ngraph::replace_node(gconv, conv_ie);
         return true;
     };
@@ -130,6 +134,7 @@ void ngraph::pass::ConvertConvolutions::convert_convolution_backprop_data() {
                                                                        1 /* groups */,
                                                                        deconv->get_auto_pad());
         deconv_ie->set_friendly_name(deconv->get_friendly_name());
+        ngraph::copy_runtime_info(deconv, deconv_ie);
         ngraph::replace_node(deconv, deconv_ie);
         return true;
     };
@@ -187,6 +192,7 @@ void ngraph::pass::ConvertConvolutions::convert_group_convolution_backprop_data(
                                                                      group,
                                                                      gconv->get_auto_pad());
         conv_ie->set_friendly_name(gconv->get_friendly_name());
+        ngraph::copy_runtime_info(gconv, conv_ie);
         ngraph::replace_node(gconv, conv_ie);
         return true;
     };
index de312e1..1982a92 100644 (file)
@@ -8,6 +8,7 @@
 #include <vector>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertGatherToGatherIE::convert_gather_to_gather_ie() {
     auto input_0 = std::make_shared<pattern::op::Label>(element::f32, Shape{1});
@@ -28,6 +29,9 @@ void ngraph::pass::ConvertGatherToGatherIE::convert_gather_to_gather_ie() {
         }
         auto axis = axes_constant->get_vector<int64_t>()[0];
 
+        // vector of newly created nGraph operations
+        NodeVector new_ops;
+
         // if the input with indices is scalar we need to unsqueeze it to 1D so plugins which do not support 0D can
         // execute this layer. Then we need to squeeze the axis dimension to restore original shape of gather output
         auto indices = gather->input(1).get_source_output();
@@ -38,18 +42,25 @@ void ngraph::pass::ConvertGatherToGatherIE::convert_gather_to_gather_ie() {
             gather_output_shape.insert(gather_output_shape.begin() + axis, 1);
             indices = std::make_shared<ngraph::opset1::Unsqueeze>(indices.get_node_shared_ptr(),
                                                                   opset1::Constant::create(element::i64, Shape{1}, {0}));
+            new_ops.push_back(indices.get_node_shared_ptr());
         }
         auto gather_ie = std::make_shared<ngraph::op::GatherIE>(gather->input(0).get_source_output(),
                                                                 indices,
                                                                 axis,
                                                                 gather_output_shape);
+        new_ops.push_back(gather_ie);
+
         if (squeeze_gather_output) {
             auto sq = std::make_shared<ngraph::opset1::Squeeze>(gather_ie,
-                                                            op::Constant::create(element::i64, Shape{1}, {axis}));
+                                                                op::Constant::create(element::i64, Shape{1}, {axis}));
             sq->set_friendly_name(gather->get_friendly_name());
+            new_ops.push_back(sq);
+
+            ngraph::copy_runtime_info(gather, new_ops);
             ngraph::replace_node(gather, sq);
         } else {
             gather_ie->set_friendly_name(gather->get_friendly_name());
+            ngraph::copy_runtime_info(gather, new_ops);
             ngraph::replace_node(gather, gather_ie);
         }
         return true;
@@ -58,4 +69,3 @@ void ngraph::pass::ConvertGatherToGatherIE::convert_gather_to_gather_ie() {
     auto m1 = std::make_shared<ngraph::pattern::Matcher>(gather, "ConvertGatherToGatherIE");
     this->add_matcher(m1, callback, PassProperty::CHANGE_DYNAMIC_STATE);
 }
-
index 14ca019..a8db453 100644 (file)
@@ -8,6 +8,7 @@
 #include <vector>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertGatherTreeToGatherTreeIE::convert() {
     auto input0 = std::make_shared<pattern::op::Label>(element::i64, Shape{1, 1, 1});
@@ -27,6 +28,7 @@ void ngraph::pass::ConvertGatherTreeToGatherTreeIE::convert() {
         auto gt_ie = std::make_shared<ngraph::op::GatherTreeIE>(gt->input_value(0), gt->input_value(1), gt->input_value(2), reshape);
 
         gt_ie->set_friendly_name(gt->get_friendly_name());
+        ngraph::copy_runtime_info(gt, {reshape, gt_ie});
         ngraph::replace_node(gt, gt_ie);
         return true;
     };
index f29b7ab..dc2b9f2 100644 (file)
@@ -8,6 +8,7 @@
 #include <transformations/utils/utils.hpp>
 
 #include <ngraph/ngraph.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertGELU::convert_gelu() {
     auto input = std::make_shared<pattern::op::Label>(element::f32, Shape{});
@@ -29,6 +30,7 @@ void ngraph::pass::ConvertGELU::convert_gelu() {
         auto res = std::make_shared<ngraph::opset1::Multiply>(mul, add);
 
         res->set_friendly_name(gelu->get_friendly_name());
+        ngraph::copy_runtime_info(gelu, {mul, sq2, div, erf, add, res});
         ngraph::replace_node(gelu, res);
         return true;
     };
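
The nodes built above follow the exact GELU definition: GELU(x) = 0.5 * x * (1 + erf(x / sqrt(2))). A scalar check of the expanded formula (GELU(0.5) is approximately 0.3457):

    #include <cassert>
    #include <cmath>

    int main() {
        const double x = 0.5;
        const double gelu = 0.5 * x * (1.0 + std::erf(x / std::sqrt(2.0)));
        assert(gelu > 0.345 && gelu < 0.346);
        return 0;
    }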
index af8b7fc..726498a 100644 (file)
@@ -8,6 +8,8 @@
 #include <vector>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
+
 #include <transformations/utils/utils.hpp>
 #include <ngraph_ops/hard_sigmoid_ie.hpp>
 
@@ -44,7 +46,8 @@ void ngraph::pass::ConvertHardSigmoidToHardSigmoidIE::convert_hard_sigmoid() {
                                                                              beta_value);
 
         hard_sigmoid_ie->set_friendly_name(hard_sigmoid->get_friendly_name());
-        ngraph::replace_node(m.get_match_root(), hard_sigmoid_ie);
+        ngraph::copy_runtime_info(hard_sigmoid, hard_sigmoid_ie);
+        ngraph::replace_node(hard_sigmoid, hard_sigmoid_ie);
         return true;
     };
 
index a4a4b8b..2cc5e76 100644 (file)
@@ -11,6 +11,7 @@
 #include <map>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 #include <ngraph_ops/interp.hpp>
 
@@ -55,8 +56,10 @@ void ngraph::pass::ConvertInterpolateToInterpOrResample::convert_interpolate_to_
             attrs.antialias = interpolate_attrs.antialias;
 
             auto interp = std::make_shared<ngraph::op::Interp>(data_node, attrs);
-            interp->set_friendly_name(m.get_match_root()->get_friendly_name());
-            ngraph::replace_node(m.get_match_root(), std::dynamic_pointer_cast<ngraph::Node>(interp));
+            interp->set_friendly_name(interpolate->get_friendly_name());
+
+            ngraph::copy_runtime_info(interpolate, interp);
+            ngraph::replace_node(interpolate, interp);
         } else if (interpolate_attrs.pads_begin[0] == 0 && interpolate_attrs.pads_end[0] == 0 && !interpolate_attrs.align_corners) {
             auto attrs = ngraph::op::ResampleIEAttrs();
             attrs.mode = interpolate_mode;
@@ -121,8 +124,9 @@ void ngraph::pass::ConvertInterpolateToInterpOrResample::convert_interpolate_to_
                 resample = std::make_shared<ngraph::op::ResampleV2>(data_node, constant, attrs);
             }
 
-            resample->set_friendly_name(m.get_match_root()->get_friendly_name());
-            ngraph::replace_node(m.get_match_root(), resample);
+            resample->set_friendly_name(interpolate->get_friendly_name());
+            ngraph::copy_runtime_info(interpolate, resample);
+            ngraph::replace_node(interpolate, resample);
         } else {
             return false;
         }
index fad65c4..22b8dea 100644 (file)
@@ -9,6 +9,7 @@
 #include <string>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 #include <ngraph_ops/lrn_ie.hpp>
 
@@ -56,7 +57,8 @@ void ngraph::pass::ConvertLRNToLRNIE::convert_lrn() {
                                                             region);
 
         lrn_ie->set_friendly_name(lrn->get_friendly_name());
-        ngraph::replace_node(m.get_match_root(), lrn_ie);
+        ngraph::copy_runtime_info(lrn, lrn_ie);
+        ngraph::replace_node(lrn, lrn_ie);
         return true;
     };
 
index 00cebe1..e976afd 100644 (file)
@@ -8,6 +8,7 @@
 #include <vector>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 #include <ngraph_ops/lstm_cell_ie.hpp>
 
@@ -53,7 +54,8 @@ void ngraph::pass::ConvertLSTMCellToLSTMCellIE::convert_lstm_cell() {
                                                                       lstm_cell->get_output_shape(1));
 
         lstm_cell_ie->set_friendly_name(lstm_cell->get_friendly_name());
-        ngraph::replace_node(m.get_match_root(), lstm_cell_ie);
+        ngraph::copy_runtime_info(lstm_cell, {concat, lstm_cell_ie});
+        ngraph::replace_node(lstm_cell, lstm_cell_ie);
         return true;
     };
 
index 13838cf..966375d 100644 (file)
@@ -12,6 +12,7 @@
 #include <numeric>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 #include <ngraph_ops/fully_connected.hpp>
 #include <transformations/utils/utils.hpp>
@@ -96,6 +97,9 @@ void ngraph::pass::ConvertMatMulToFCorGemm::convert_matmul() {
         // fc_input_b updated.
         auto fc_input_a = input_a, fc_input_b = input_b;
 
+        // vector of new nGraph operations
+        NodeVector new_ops;
+
         // Check that if second inputs is Constant operation and it's shape without ones dimensions has length <= 2
         // we replace MatMul with FullyConnected operation.
         // Otherwise we replace MatMul with Gemm.
@@ -120,17 +124,20 @@ void ngraph::pass::ConvertMatMulToFCorGemm::convert_matmul() {
             // Weights normalization
             if (!matmul->get_transpose_b()) {
                 fc_input_b = create_transpose(fc_input_b, matmul->get_friendly_name() + "/transpose_b");
+                new_ops.push_back(fc_input_b.get_node_shared_ptr());
             }
 
             if (shape_b.size() != 2) {
                 auto reshape_shape =
                         opset1::Constant::create<int64_t>(element::i64, Shape {2}, {-1ll, static_cast<int64_t>(K)});
                 fc_input_b = std::make_shared<opset1::Reshape>(fc_input_b, reshape_shape, true);
+                new_ops.push_back(fc_input_b.get_node_shared_ptr());
             }
 
             // Input normalization
             if (matmul->get_transpose_a()) {
                 fc_input_a = create_transpose(fc_input_a, matmul->get_friendly_name() + "/transpose_a");
+                new_ops.push_back(fc_input_a.get_node_shared_ptr());
             }
 
             // Create FullyConnected
@@ -139,7 +146,9 @@ void ngraph::pass::ConvertMatMulToFCorGemm::convert_matmul() {
 
             auto fc = std::make_shared<op::FullyConnected>(fc_input_a, fc_input_b, fc_bias, output_shape);
             fc->set_friendly_name(matmul->get_friendly_name());
+            new_ops.push_back(fc);
 
+            ngraph::copy_runtime_info(matmul, new_ops);
             ngraph::replace_node(matmul, fc);
         } else {
             // WA for IE that Gemm must have inputs with the same length.
@@ -148,6 +157,7 @@ void ngraph::pass::ConvertMatMulToFCorGemm::convert_matmul() {
                 Shape reshape_shape(shape_b.size() - shape_a.size(), 1);
                 reshape_shape.insert(reshape_shape.end(), shape_a.begin(), shape_a.end());
                 fc_input_a = op::util::reshapeTo(fc_input_a, reshape_shape);
+                new_ops.push_back(fc_input_a.get_node_shared_ptr());
             } else if (shape_b.size() < shape_a.size()) {
                 // Reshape second input (fc_input_b)
                 Shape reshape_shape;
@@ -162,20 +172,25 @@ void ngraph::pass::ConvertMatMulToFCorGemm::convert_matmul() {
                     reshape_shape.insert(reshape_shape.end(), shape_b.begin(), shape_b.end());
                 }
                 fc_input_b = op::util::reshapeTo(fc_input_b, reshape_shape);
+                new_ops.push_back(fc_input_b.get_node_shared_ptr());
             }
 
             auto gemm = std::make_shared<opset1::MatMul>(fc_input_a, fc_input_b, matmul->get_transpose_a(), matmul->get_transpose_b());
+            new_ops.push_back(gemm);
 
             if (gemm->get_shape() != output_shape) {
                 // This case is possible only when second input had exactly 1 dimension (that is not supported by GEMM operation)
                 // and for this case we have to reshape second input to first but this affects output shape (additional dimensions)
                 // So to preserve output shape we insert additional reshape operation
                 auto reshape_output = op::util::reshapeTo(gemm, output_shape);
+                new_ops.push_back(reshape_output);
                 gemm->set_friendly_name(matmul->get_friendly_name() + "/gemm");
                 reshape_output->set_friendly_name(matmul->get_friendly_name());
+                ngraph::copy_runtime_info(matmul, new_ops);
                 ngraph::replace_node(matmul, reshape_output);
             } else {
                 gemm->set_friendly_name(matmul->get_friendly_name());
+                ngraph::copy_runtime_info(matmul, new_ops);
                 ngraph::replace_node(matmul, gemm);
             }
         }
index a74a0cf..4bb60be 100644 (file)
@@ -9,6 +9,7 @@
 #include <algorithm>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 #include "transformations/utils/utils.hpp"
 
@@ -133,16 +134,28 @@ void ngraph::pass::ConvertMulAddToScaleShiftOrPower::convert_mul_add_to_scaleshi
 
         // TODO: in case if scale and shift constants has equal values the best way is to convert them to Power
         if (res1 == CONVERSION_RESULT::SCALE_SHIFT || res2 == CONVERSION_RESULT::SCALE_SHIFT) {
+            NodeVector new_ops;
+
             auto weights_in = ngraph::op::util::normalize_constant(const_weights_node, add_node->get_shape());
             auto biases_in = ngraph::op::util::normalize_constant(const_bias_node, add_node->get_shape());
-            if (res1 == CONVERSION_RESULT::POWER)
+            new_ops.push_back(weights_in);
+            new_ops.push_back(biases_in);
+
+            if (res1 == CONVERSION_RESULT::POWER) {
                 weights_in = ngraph::op::util::broadcastTo(weights_in, biases_in->get_shape());
-            if (res2 == CONVERSION_RESULT::POWER)
+                new_ops.push_back(weights_in);
+            }
+            if (res2 == CONVERSION_RESULT::POWER) {
                 biases_in = ngraph::op::util::broadcastTo(biases_in, weights_in->get_shape());
+                new_ops.push_back(biases_in);
+            }
 
             auto scaleshift = std::make_shared<ngraph::op::ScaleShiftIE>(data_node, weights_in, biases_in);
+            new_ops.push_back(scaleshift);
+
             scaleshift->set_friendly_name(add_node->get_friendly_name());
-            ngraph::replace_node(m.get_match_root(), std::dynamic_pointer_cast<Node>(scaleshift));
+            ngraph::copy_runtime_info({mul_node, add_node}, new_ops);
+            ngraph::replace_node(m.get_match_root(), scaleshift);
         } else {
             float scale = 0.f, shift = 0.f;
             if (!op::util::get_single_value(const_weights_node, scale)) {
@@ -154,6 +167,7 @@ void ngraph::pass::ConvertMulAddToScaleShiftOrPower::convert_mul_add_to_scaleshi
 
             auto power = std::make_shared<ngraph::op::PowerIE>(data_node, 1., scale, shift);
             power->set_friendly_name(add_node->get_friendly_name());
+            ngraph::copy_runtime_info({mul_node, add_node}, power);
             ngraph::replace_node(m.get_match_root(), power);
         }
 
index 2e42041..ffc60c5 100644 (file)
@@ -10,6 +10,7 @@
 #include <ngraph/opsets/opset1.hpp>
 
 #include <ngraph_ops/nms_ie.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertNMSToNMSIE::convert_nms_to_nms_ie() {
     auto input_0 = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 4});
@@ -30,23 +31,29 @@ void ngraph::pass::ConvertNMSToNMSIE::convert_nms_to_nms_ie() {
             return false;
         }
 
+        // vector of new nGraph operations
+        NodeVector new_ops;
+
         auto new_max_per_class = nms->input(2).get_source_output();
         if (nms->input(2).get_shape().empty()) {
             new_max_per_class = std::make_shared<ngraph::op::Unsqueeze>(
                     nms->input(2).get_source_output().get_node_shared_ptr(),
                     opset1::Constant::create(element::i64, Shape{1}, {0}));
+            new_ops.push_back(new_max_per_class.get_node_shared_ptr());
         }
         auto new_iou_threshold = nms->input(3).get_source_output();
         if (nms->input(3).get_shape().empty()) {
             new_iou_threshold = std::make_shared<ngraph::op::Unsqueeze>(
                     nms->input(3).get_source_output().get_node_shared_ptr(),
                     opset1::Constant::create(element::i64, Shape{1}, {0}));
+            new_ops.push_back(new_iou_threshold.get_node_shared_ptr());
         }
         auto new_score_threshold = nms->input(4).get_source_output();
         if (nms->input(4).get_shape().empty()) {
             new_score_threshold = std::make_shared<ngraph::op::Unsqueeze>(
                     nms->input(4).get_source_output().get_node_shared_ptr(),
                     opset1::Constant::create(element::i64, Shape{1}, {0}));
+            new_ops.push_back(new_score_threshold.get_node_shared_ptr());
         }
         int center_point_box = 0;
         switch (nms->get_box_encoding()) {
@@ -68,8 +75,10 @@ void ngraph::pass::ConvertNMSToNMSIE::convert_nms_to_nms_ie() {
                                                                          nms->output(0).get_shape(),
                                                                          center_point_box,
                                                                          nms->get_sort_result_descending());
+        new_ops.push_back(new_nms);
         new_nms->set_friendly_name(nms->get_friendly_name());
-        ngraph::replace_node(m.get_match_root(), new_nms);
+        ngraph::copy_runtime_info(nms, new_ops);
+        ngraph::replace_node(nms, new_nms);
         return true;
     };
 
index 41b2d5c..52bee83 100644 (file)
@@ -8,6 +8,7 @@
 #include <vector>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 #include "ngraph_ops/normalize_ie.hpp"
 
@@ -61,8 +62,9 @@ void ngraph::pass::ConvertNormalizeL2WithMulToNormalizeIE::convert_normalize_l2_
                                                                        across_spatial,
                                                                        channel_shared);
 
-        normalize_ie->set_friendly_name(m.get_match_root()->get_friendly_name());
-        ngraph::replace_node(m.get_match_root(), normalize_ie);
+        normalize_ie->set_friendly_name(normalize->get_friendly_name());
+        ngraph::copy_runtime_info(normalize, normalize_ie);
+        ngraph::replace_node(normalize, normalize_ie);
         return true;
     };
 
@@ -95,8 +97,9 @@ void ngraph::pass::ConvertNormalizeL2ToNormalizeIE::convert_normalize_l2() {
                                                                        across_channels,
                                                                        channel_shared);
 
-        normalize_ie->set_friendly_name(m.get_match_root()->get_friendly_name());
-        ngraph::replace_node(m.get_match_root(), normalize_ie);
+        normalize_ie->set_friendly_name(normalize->get_friendly_name());
+        ngraph::copy_runtime_info(normalize, normalize_ie);
+        ngraph::replace_node(normalize, normalize_ie);
         return true;
     };
 
index a7ac6c4..4119455 100644 (file)
 
 #include <ngraph_ops/onehot_ie.hpp>
 #include <transformations/utils/utils.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertOneHotToOneHotIE::convert_one_hot() {
     auto input = std::make_shared<pattern::op::Label>(element::i32, Shape{1, 1, 1, 1});
     auto depth = std::make_shared<pattern::op::Label>(element::i64, Shape{});
     auto on_value = std::make_shared<pattern::op::Label>(element::f32, Shape{});
     auto off_value = std::make_shared<pattern::op::Label>(element::f32, Shape{});
-    auto one_hot = std::make_shared<ngraph::op::v1::OneHot>(input, depth, on_value, off_value, 1);
+    auto one_hot = std::make_shared<ngraph::opset1::OneHot>(input, depth, on_value, off_value, 1);
 
     ngraph::graph_rewrite_callback callback = [=](pattern::Matcher& m) {
-        auto one_hot = std::dynamic_pointer_cast<ngraph::op::v1::OneHot> (m.get_match_root());
+        auto one_hot = std::dynamic_pointer_cast<ngraph::opset1::OneHot> (m.get_match_root());
         if (!one_hot) {
             return false;
         }
 
         element::Type output_type = is_f16 ? element::f16 : element::f32;
 
-        const auto depth_node = std::dynamic_pointer_cast<ngraph::op::Constant>(one_hot->get_inputs()[1].get_output().get_node());
-        const auto on_value_node = std::dynamic_pointer_cast<ngraph::op::Constant>(one_hot->get_inputs()[2].get_output().get_node());
-        const auto off_value_node = std::dynamic_pointer_cast<ngraph::op::Constant>(one_hot->get_inputs()[3].get_output().get_node());
+        const auto depth_node = std::dynamic_pointer_cast<ngraph::opset1::Constant>(one_hot->get_inputs()[1].get_output().get_node());
+        const auto on_value_node = std::dynamic_pointer_cast<ngraph::opset1::Constant>(one_hot->get_inputs()[2].get_output().get_node());
+        const auto off_value_node = std::dynamic_pointer_cast<ngraph::opset1::Constant>(one_hot->get_inputs()[3].get_output().get_node());
 
         // can be converted only if the depth and on/off value inputs are constants
         if (depth_node == nullptr || on_value_node == nullptr || off_value_node == nullptr) return false;
@@ -44,10 +45,12 @@ void ngraph::pass::ConvertOneHotToOneHotIE::convert_one_hot() {
 
         // insert a Convert layer to cast the output to the data type defined by the on/off values
         if (on_value_node->get_element_type() != output_type) {
-            auto convert = std::make_shared<ngraph::op::Convert>(one_hot_ie, on_value_node->get_element_type());
+            auto convert = std::make_shared<ngraph::opset1::Convert>(one_hot_ie, on_value_node->get_element_type());
             convert->set_friendly_name(one_hot->get_friendly_name() + "/Convert");
+            ngraph::copy_runtime_info(one_hot, {one_hot_ie, convert});
             ngraph::replace_node(m.get_match_root(), convert);
         } else {
+            ngraph::copy_runtime_info(one_hot, one_hot_ie);
             ngraph::replace_node(m.get_match_root(), one_hot_ie);
         }
 
index c5f9cca..09857f0 100644 (file)
@@ -3,11 +3,71 @@
 //
 
 #include "transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.hpp"
-#include <transformations/utils/pass_manager.hpp>
+
+#include <transformations/constant_eltwise_reduction.hpp>
+#include <transformations/convert_broadcast_to_tiles.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_convolutions.hpp>
+#include <transformations/convert_divide.hpp>
+#include <transformations/convert_mod.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_gather_to_gather_ie.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_gathertree_to_gathertree_ie.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_interpolate_to_interp_or_resample.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_lrn_to_lrn_ie.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_lstm_cell_to_lstm_cell_ie.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_matmul_to_fc_or_gemm.hpp>
+#include <transformations/convert_minimum_to_power_and_max.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_mul_add_to_scaleshift_or_power.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_mul_or_add_finally.hpp>
+#include <transformations/convert_negative.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_nms_to_nms_ie.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_normalizel2_to_normalize_ie.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_one_hot_to_one_hot_ie.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_pad_to_pad_ie.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_sqrt_to_power_ie.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_power_to_power_ie.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_prelu_to_relu_ie.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_proposal_to_proposal_ie.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_prior_to_ie_prior.hpp>
+#include <transformations/convert_reduce_to_pooling.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_strided_slice_to_crop.hpp>
+#include <transformations/convert_subtract.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_selu_to_selu_ie.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_tile_to_ie_tile.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_topk_to_topk_ie.hpp>
+#include <transformations/convert_depth_to_space.hpp>
+#include <transformations/convert_space_to_depth.hpp>
+#include <transformations/batch_norm_decomposition.hpp>
+#include <transformations/convert_opset1_to_legacy/conv_bias_fusion.hpp>
+#include <transformations/convert_opset1_to_legacy/fc_bias_fusion.hpp>
+#include <transformations/mul_add_squence_fusion.hpp>
+#include <transformations/mul_add_verification.hpp>
+#include <transformations/convert_opset1_to_legacy/reshape_fc_fusion.hpp>
+#include <transformations/convert_opset1_to_legacy/reshape_1d_convolutions.hpp>
+#include <transformations/convert_opset1_to_legacy/reshape_fully_connected.hpp>
+#include <transformations/pull_transpose_through_fq.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_strided_slice_to_strided_slice_ie.hpp>
+#include <transformations/convert_opset1_to_legacy/convert_hard_sigmoid_to_hard_sigmoid_ie.hpp>
+#include <transformations/init_node_info.hpp>
+
+#include <ngraph/pass/constant_folding.hpp>
+#include <ngraph/pass/manager.hpp>
+
 #include <memory>
+#include <vector>
 
 bool ngraph::pass::ConvertOpSet1ToLegacy::run_on_function(std::shared_ptr<ngraph::Function> f) {
-    auto pm = ngraph::pass::ConversionPassManager(transformation_callback);
-    pm.run_passes(f);
+    ngraph::pass::Manager OpSet1ToLegacy;
+    std::vector<std::shared_ptr<ngraph::pass::PassBase> > transforms;
+
+#define NGRAPH_PASS(NAME, NAMESPACE) transforms.push_back(OpSet1ToLegacy.register_pass<NAMESPACE::NAME>());
+#include <transformations/convert_opset1_to_legacy/convert_opset1_to_legacy_tbl.hpp>
+#undef NGRAPH_PASS
+
+    for (auto & t : transforms) {
+        if (auto t_param = std::dynamic_pointer_cast<PassParam>(t)) {
+            t_param->setCallback(transformation_callback);
+        }
+    }
+    OpSet1ToLegacy.run_passes(f);
     return true;
 }
\ No newline at end of file
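
run_on_function is now driven by an X-macro table: every NGRAPH_PASS(NAME, NAMESPACE) entry in convert_opset1_to_legacy_tbl.hpp expands into a register_pass call on the manager, after which each registered pass deriving from PassParam receives the transformation callback. A hedged sketch of the expansion (the entry is illustrative; the real list lives in the tbl.hpp header):

    // A table line such as
    //     NGRAPH_PASS(ConvertReduceToPooling, ::ngraph::pass)
    // expands, with the macro defined above, to:
    transforms.push_back(OpSet1ToLegacy.register_pass<::ngraph::pass::ConvertReduceToPooling>());

Keeping the list in a shared header lets ConvertOpSet2ToOpSet1 below reuse the same mechanism with its own table.
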
index 60a7033..b2a4915 100644 (file)
@@ -8,6 +8,7 @@
 #include <vector>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertPadToPadIE::convert_pad() {
     auto input_0 = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
@@ -28,7 +29,8 @@ void ngraph::pass::ConvertPadToPadIE::convert_pad() {
         if (pad_ie == nullptr)
             return false;
         pad_ie->set_friendly_name(pad->get_friendly_name());
-        ngraph::replace_node(m.get_match_root(), pad_ie);
+        ngraph::copy_runtime_info(pad, pad_ie);
+        ngraph::replace_node(pad, pad_ie);
         return true;
     };
 
index c4b82e2..5f98b4d 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <ngraph_ops/power.hpp>
 #include <transformations/utils/utils.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertPowerToPowerIE::convert_power() {
     auto input_0 = std::make_shared<pattern::op::Label>(element::f32, Shape{1});
@@ -32,7 +33,8 @@ void ngraph::pass::ConvertPowerToPowerIE::convert_power() {
 
             auto power_ie = std::make_shared<ngraph::op::PowerIE>(power->input(0).get_source_output(), value, 1, 0);
             power_ie->set_friendly_name(power->get_friendly_name());
-            ngraph::replace_node(m.get_match_root(), power_ie);
+            ngraph::copy_runtime_info(power, power_ie);
+            ngraph::replace_node(power, power_ie);
             return true;
         }
         return false;
index 8967733..b50dd36 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <ngraph_ops/relu_ie.hpp>
 #include <transformations/utils/utils.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertPReLUToReLUIE::convert_prelu() {
     auto input_0 = std::make_shared<pattern::op::Label>(element::f32, Shape{1});
@@ -32,7 +33,8 @@ void ngraph::pass::ConvertPReLUToReLUIE::convert_prelu() {
 
             auto relu_ie = std::make_shared<ngraph::op::ReLUIE>(prelu->input(0).get_source_output(), value);
             relu_ie->set_friendly_name(prelu->get_friendly_name());
-            ngraph::replace_node(m.get_match_root(), relu_ie);
+            ngraph::copy_runtime_info(prelu, relu_ie);
+            ngraph::replace_node(prelu, relu_ie);
             return true;
         }
         return false;
index faa19e0..01954ec 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <ngraph_ops/prior_box_ie.hpp>
 #include <ngraph_ops/prior_box_clustered_ie.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertPriorBox::convert_prior_box() {
     auto data = std::make_shared<pattern::op::Label>(element::i64, Shape{1, 1, 1, 1});
@@ -41,6 +42,10 @@ void ngraph::pass::ConvertPriorBox::convert_prior_box() {
         if (!prior_box_node) {
             return false;
         }
+
+        // vector of nGraph nodes that will be replaced
+        ngraph::NodeVector ops_to_replace{unsqueeze, prior_box_node};
+
         std::shared_ptr<Node> input_1(prior_box_node->input_value(0).get_node_shared_ptr());
         std::shared_ptr<Node> input_2(prior_box_node->input_value(1).get_node_shared_ptr());
 
@@ -48,6 +53,8 @@ void ngraph::pass::ConvertPriorBox::convert_prior_box() {
         auto convert2 = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_2);
 
         if (convert1 && convert2) {
+            ops_to_replace.push_back(convert1);
+            ops_to_replace.push_back(convert2);
             input_1 = convert1->input_value(0).get_node_shared_ptr();
             input_2 = convert2->input_value(0).get_node_shared_ptr();
         }
@@ -59,6 +66,9 @@ void ngraph::pass::ConvertPriorBox::convert_prior_box() {
             return false;
         }
 
+        ops_to_replace.push_back(strided_slice1);
+        ops_to_replace.push_back(strided_slice2);
+
         //  Check that StridedSlice1 cuts H,W dims for PriorBox
         auto begin = std::dynamic_pointer_cast<ngraph::opset1::Constant> (strided_slice1->input_value(1).get_node_shared_ptr());
         auto end = std::dynamic_pointer_cast<ngraph::opset1::Constant> (strided_slice1->input_value(2).get_node_shared_ptr());
@@ -92,6 +102,8 @@ void ngraph::pass::ConvertPriorBox::convert_prior_box() {
         convert2 = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_2);
 
         if (convert1 && convert2) {
+            ops_to_replace.push_back(convert1);
+            ops_to_replace.push_back(convert2);
             input_1 = convert1->input_value(0).get_node_shared_ptr();
             input_2 = convert2->input_value(0).get_node_shared_ptr();
         }
@@ -103,10 +115,18 @@ void ngraph::pass::ConvertPriorBox::convert_prior_box() {
             return false;
         }
 
+        ops_to_replace.push_back(shape_of1);
+        ops_to_replace.push_back(shape_of2);
+
         auto prior_box_ie = std::make_shared<ngraph::op::PriorBoxIE> (shape_of1->input_value(0),
                                                                       shape_of2->input_value(0),
                                                                       prior_box_node->get_attrs());
+
         prior_box_ie->set_friendly_name(unsqueeze->get_friendly_name());
+
+        // Nodes passed to ngraph::copy_runtime_info must be in topological order
+        std::reverse(ops_to_replace.begin(), ops_to_replace.end());
+        ngraph::copy_runtime_info(ops_to_replace, prior_box_ie);
         ngraph::replace_node(m.get_match_root(), prior_box_ie);
         return true;
     };
@@ -143,6 +163,9 @@ void ngraph::pass::ConvertPriorBox::convert_prior_box_clustered() {
             return false;
         }
 
+        // vector of nGraph nodes that will be replaced
+        ngraph::NodeVector ops_to_replace{unsqueeze, prior_box_node};
+
         std::shared_ptr<Node> input_1(prior_box_node->input_value(0).get_node_shared_ptr());
         std::shared_ptr<Node> input_2(prior_box_node->input_value(1).get_node_shared_ptr());
 
@@ -150,6 +173,8 @@ void ngraph::pass::ConvertPriorBox::convert_prior_box_clustered() {
         auto convert2 = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_2);
 
         if (convert1 && convert2) {
+            ops_to_replace.push_back(convert1);
+            ops_to_replace.push_back(convert2);
             input_1 = convert1->input_value(0).get_node_shared_ptr();
             input_2 = convert2->input_value(0).get_node_shared_ptr();
         }
@@ -161,6 +186,9 @@ void ngraph::pass::ConvertPriorBox::convert_prior_box_clustered() {
             return false;
         }
 
+        ops_to_replace.push_back(strided_slice1);
+        ops_to_replace.push_back(strided_slice2);
+
         //  Check that StridedSlice1 cuts H,W dims for PriorBox
         auto begin = std::dynamic_pointer_cast<ngraph::opset1::Constant> (strided_slice1->get_argument(1));
         auto end = std::dynamic_pointer_cast<ngraph::opset1::Constant> (strided_slice1->get_argument(2));
@@ -194,6 +222,8 @@ void ngraph::pass::ConvertPriorBox::convert_prior_box_clustered() {
         convert2 = std::dynamic_pointer_cast<ngraph::opset1::Convert> (input_2);
 
         if (convert1 && convert2) {
+            ops_to_replace.push_back(convert1);
+            ops_to_replace.push_back(convert2);
             input_1 = convert1->input_value(0).get_node_shared_ptr();
             input_2 = convert2->input_value(0).get_node_shared_ptr();
         }
@@ -205,11 +235,18 @@ void ngraph::pass::ConvertPriorBox::convert_prior_box_clustered() {
             return false;
         }
 
+        ops_to_replace.push_back(shape_of1);
+        ops_to_replace.push_back(shape_of2);
+
         auto prior_box_ie = std::make_shared<ngraph::op::PriorBoxClusteredIE> (shape_of1->get_argument(0),
                                                                                shape_of2->get_argument(0),
                                                                                prior_box_node->get_attrs());
         prior_box_ie->set_friendly_name(unsqueeze->get_friendly_name());
-        ngraph::replace_node(m.get_match_root(), prior_box_ie);
+
+        // Nodes passed to ngraph::copy_runtime_info must be in topological order
+        std::reverse(ops_to_replace.begin(), ops_to_replace.end());
+        ngraph::copy_runtime_info(ops_to_replace, prior_box_ie);
+        ngraph::replace_node(unsqueeze, prior_box_ie);
         return true;
     };
 
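The matcher walks this pattern from its output (the Unsqueeze) back toward the inputs (the ShapeOf nodes), so ops_to_replace is filled in reverse topological order; the std::reverse above restores input-first order before the merged rt_info is attached to prior_box_ie. In outline, with the intermediate pushes elided:

    ngraph::NodeVector ops_to_replace{unsqueeze, prior_box_node};
    // ... Converts, StridedSlices and ShapeOfs are appended while
    //     walking up the match ...
    std::reverse(ops_to_replace.begin(), ops_to_replace.end());  // needs <algorithm>
    ngraph::copy_runtime_info(ops_to_replace, prior_box_ie);
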
index 57e745f..72de4e4 100644 (file)
@@ -10,6 +10,7 @@
 #include <ngraph/opsets/opset1.hpp>
 
 #include <ngraph_ops/proposal_ie.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertProposalToProposalIE::convert_proposal() {
     auto input_0 = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
@@ -29,10 +30,14 @@ void ngraph::pass::ConvertProposalToProposalIE::convert_proposal() {
 
         Output<Node> last;
 
+        ngraph::NodeVector ops_to_replace, new_ops;
+        ops_to_replace.push_back(proposal);
+
         if (auto reshape = std::dynamic_pointer_cast<opset1::Reshape>(proposal->input_value(2).get_node_shared_ptr())) {
             auto input_shape = reshape->get_input_shape(0);
             if (input_shape.size() == 2) {
                 last = reshape->input_value(0);
+                ops_to_replace.push_back(reshape);
             }
         }
 
@@ -40,15 +45,18 @@ void ngraph::pass::ConvertProposalToProposalIE::convert_proposal() {
             std::vector<int64_t> dims{1, -1};
             auto const_shape = std::make_shared<ngraph::opset1::Constant>(element::i64, Shape{2}, dims);
             last = std::make_shared<ngraph::opset1::Reshape>(proposal->input_value(2), const_shape, true);
+            new_ops.push_back(last.get_node_shared_ptr());
         }
 
         auto proposal_ie = std::make_shared<ngraph::op::ProposalIE> (proposal->input_value(0),
                                                                      proposal->input_value(1),
                                                                      last,
                                                                      proposal->get_attrs());
+        new_ops.push_back(proposal_ie);
 
         proposal_ie->set_friendly_name(proposal->get_friendly_name());
-        ngraph::replace_node(m.get_match_root(), proposal_ie);
+        ngraph::copy_runtime_info(ops_to_replace, new_ops);
+        ngraph::replace_node(proposal, proposal_ie);
         return true;
     };
 
index 7ed78e0..189a61a 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <ngraph_ops/selu_ie.hpp>
 #include <transformations/utils/utils.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertSeluToSeluIE::convert_selu() {
     auto input_0 = std::make_shared<pattern::op::Label>(element::f32, Shape{1});
@@ -41,6 +42,7 @@ void ngraph::pass::ConvertSeluToSeluIE::convert_selu() {
 
         auto selu_ie = std::make_shared<ngraph::op::SeluIE>(selu->input(0).get_source_output(), alpha, gamma);
         selu_ie->set_friendly_name(selu->get_friendly_name());
+        ngraph::copy_runtime_info(selu, selu_ie);
         ngraph::replace_node(selu, selu_ie);
         return true;
     };
index e09476e..c83b15f 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <ngraph_ops/power.hpp>
 #include <transformations/utils/utils.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertSqrtToPowerIE::convert_sqrt() {
     auto input_0 = std::make_shared<pattern::op::Label>(element::f32, Shape{1});
@@ -24,7 +25,8 @@ void ngraph::pass::ConvertSqrtToPowerIE::convert_sqrt() {
         }
         auto power_ie = std::make_shared<ngraph::op::PowerIE>(sqrt->input(0).get_source_output(), 0.5f, 1, 0);
         power_ie->set_friendly_name(sqrt->get_friendly_name());
-        ngraph::replace_node(m.get_match_root(), power_ie);
+        ngraph::copy_runtime_info(sqrt, power_ie);
+        ngraph::replace_node(sqrt, power_ie);
         return true;
     };
 
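Several conversions in this patch lower onto the legacy Power primitive. Assuming the usual Inference Engine Power semantics, y = (shift + scale * x) ^ power, which matches the constructor argument order (input, power, scale, shift) used here:

    // Sqrt(x)          == (0 + 1*x)^0.5
    auto as_sqrt   = std::make_shared<ngraph::op::PowerIE>(input, 0.5f, 1, 0);
    // scale*x + shift  == (shift + scale*x)^1, the Mul+Add fallback earlier
    auto as_affine = std::make_shared<ngraph::op::PowerIE>(input, 1., scale, shift);

Here input, scale and shift are illustrative placeholders, not names from the patch.
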
index 2f08aca..2ff6131 100644 (file)
@@ -11,6 +11,7 @@
 #include <ngraph/opsets/opset1.hpp>
 
 #include <ngraph_ops/crop_ie.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertStridedSliceToCrop::convert_strided_slice_to_crop() {
     auto data = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
@@ -190,7 +191,7 @@ void ngraph::pass::ConvertStridedSliceToCrop::convert_strided_slice_to_crop() {
             return false;
         }
 
-        // NODES
+        NodeVector new_ops;
 
         // Reshape in case of new axis
         if (!new_axis_mask.empty()) {
@@ -198,11 +199,13 @@ void ngraph::pass::ConvertStridedSliceToCrop::convert_strided_slice_to_crop() {
                                                                     ngraph::Shape{reshape_pattern.size()}, reshape_pattern);
             data_node = std::make_shared<ngraph::opset1::Reshape>(data_node, new_shape, true);
             data_node->set_friendly_name(slice->get_friendly_name() + "/Reshape_before");
+            new_ops.push_back(data_node);
         }
 
         // Crop
         data_node = std::make_shared<ngraph::op::CropIE> (data_node, axes, dim, offset);
         data_node->set_friendly_name(slice->get_friendly_name());
+        new_ops.push_back(data_node);
 
         auto crop_data_node = data_node;
 
@@ -213,9 +216,11 @@ void ngraph::pass::ConvertStridedSliceToCrop::convert_strided_slice_to_crop() {
             data_node = std::make_shared<ngraph::opset1::Reshape>(data_node, new_shape, true);
             crop_data_node->set_friendly_name(slice->get_friendly_name() + "/Crop");
             data_node->set_friendly_name(slice->get_friendly_name());
+            new_ops.push_back(data_node);
         }
 
-        ngraph::replace_node(m.get_match_root(), std::dynamic_pointer_cast<ngraph::Node>(data_node));
+        ngraph::copy_runtime_info(slice, new_ops);
+        ngraph::replace_node(slice, data_node);
         return true;
     };
 
index 07dc909..938efdf 100644 (file)
@@ -10,6 +10,7 @@
 #include <ngraph/opsets/opset1.hpp>
 
 #include <ngraph_ops/strided_slice_ie.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertStridedSliceToStridedSliceIE::convert_strided_slice_to_strided_slice_ie() {
     auto data = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
@@ -54,7 +55,8 @@ void ngraph::pass::ConvertStridedSliceToStridedSliceIE::convert_strided_slice_to
                                                                              output_shape);
         strided_slice_ie->set_friendly_name(strided_slice->get_friendly_name());
 
-        ngraph::replace_node(m.get_match_root(), strided_slice_ie);
+        ngraph::copy_runtime_info(strided_slice, {converted_begin, converted_end, converted_stride, strided_slice_ie});
+        ngraph::replace_node(strided_slice, strided_slice_ie);
         return true;
     };
 
index 5a77b7a..965bb67 100644 (file)
@@ -10,6 +10,7 @@
 #include <ngraph/opsets/opset1.hpp>
 
 #include <ngraph_ops/tile_ie.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertTileToIETile::convert_tile() {
     auto data = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
@@ -64,6 +65,8 @@ void ngraph::pass::ConvertTileToIETile::convert_tile() {
             friendly_name += ":";
         }
 
+        NodeVector new_ops;
+
         auto tiles_it = tiles.rbegin();
         while (tiles_it != tiles.rend()) {
             int64_t tile_dim = *tiles_it;
@@ -73,12 +76,15 @@ void ngraph::pass::ConvertTileToIETile::convert_tile() {
                 friendly_name += "_" + std::to_string(cur_dim_id);
 
                 last_node = std::dynamic_pointer_cast<ngraph::Node>(ie_tile);
+                new_ops.push_back(last_node);
             }
             --cur_dim_id;
             ++tiles_it;
         }
 
-        ngraph::replace_node(m.get_match_root(), last_node);
+        last_node->set_friendly_name(tile->get_friendly_name());
+        ngraph::copy_runtime_info(tile, new_ops);
+        ngraph::replace_node(tile, last_node);
         return true;
     };
 
index be94add..b2e4e3d 100644 (file)
@@ -11,6 +11,7 @@
 #include <ngraph/opsets/opset1.hpp>
 
 #include <ngraph_ops/topk_ie.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertTopKToTopKIE::convert_topk_to_topk_ie() {
     auto input_0 = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
@@ -57,7 +58,8 @@ void ngraph::pass::ConvertTopKToTopKIE::convert_topk_to_topk_ie() {
         auto new_topk = std::make_shared<ngraph::op::TopKIE>(topk->input(0).get_source_output(), unsqueezed_k, topk->get_axis(), mode,
                                                              sort_type, topk->output(0).get_shape());
         new_topk->set_friendly_name(topk->get_friendly_name());
-        ngraph::replace_node(m.get_match_root(), new_topk);
+        ngraph::copy_runtime_info(topk, {unsqueezed_k, new_topk});
+        ngraph::replace_node(topk, new_topk);
         return true;
     };
 
index 1e1a0a8..cfafbce 100644 (file)
@@ -8,6 +8,7 @@
 #include <vector>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 #include "ngraph_ops/convolution_ie.hpp"
 #include "transformations/utils/utils.hpp"
@@ -55,13 +56,17 @@ void ngraph::pass::Reshape1DConvolutions::reshape_convolutions() {
         new_pads_begin.insert(new_pads_begin.begin(), 0);
         new_pad_end.insert(new_pad_end.begin(), 0);
 
+        NodeVector new_ops;
+
         auto reshape_begin = op::util::reshapeTo(conv->input_value(0), new_input_shape);
         reshape_begin->set_friendly_name(conv->get_friendly_name() + "/reshape_begin");
+        new_ops.push_back(reshape_begin);
 
         auto create_convolution = [&](const Output<Node> & input) -> std::shared_ptr<Node> {
             Shape new_weights_shape(conv->input_value(1).get_shape());
             new_weights_shape.insert(new_weights_shape.begin() + 2, 1);
             auto weights = op::util::reshapeTo(conv->input_value(1), new_weights_shape);
+            new_ops.push_back(weights);
             if (conv->inputs().size() == 2) {
                 return std::make_shared<op::ConvolutionIE>(input,
                                                            weights,
@@ -88,10 +93,13 @@ void ngraph::pass::Reshape1DConvolutions::reshape_convolutions() {
 
         auto new_conv = create_convolution(reshape_begin);
         new_conv->set_friendly_name(conv->get_friendly_name() + "/new");
+        new_ops.push_back(new_conv);
 
         auto reshape_end = op::util::reshapeTo(new_conv, output_shape);
         reshape_end->set_friendly_name(conv->get_friendly_name());
+        new_ops.push_back(reshape_end);
 
+        ngraph::copy_runtime_info(conv, new_ops);
         ngraph::replace_node(conv, reshape_end);
         return true;
     };
index b540e56..c43fa65 100644 (file)
@@ -8,6 +8,7 @@
 #include <vector>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 #include "ngraph_ops/fully_connected.hpp"
 #include "transformations/utils/utils.hpp"
@@ -31,9 +32,12 @@ void ngraph::pass::ReshapeFullyConnected::reshape_fully_connected() {
             return false;
         }
 
+        NodeVector new_ops;
+
         std::vector<int64_t> reshape_shape{-1, static_cast<int64_t>(input_shape.back())};
         auto reshape = std::make_shared<opset1::Reshape>(fc->input_value(0),
                                                          opset1::Constant::create(element::i64, Shape{2}, reshape_shape), true);
+        new_ops.push_back(reshape);
 
         reshape->set_friendly_name(fc->get_friendly_name() + "/Reshape");
 
@@ -47,14 +51,18 @@ void ngraph::pass::ReshapeFullyConnected::reshape_fully_connected() {
                                                            fc->input_value(1),
                                                            fc->input_value(2),
                                                            output_shape_new);
+        new_ops.push_back(fc_new);
 
         if (output_shape != output_shape_new) {
             auto reshape_output = op::util::reshapeTo(fc_new, output_shape);
+            new_ops.push_back(reshape_output);
             reshape_output->set_friendly_name(fc->get_friendly_name());
             fc->set_friendly_name(fc->get_friendly_name() + "/FC");
+            ngraph::copy_runtime_info(fc, new_ops);
             ngraph::replace_node(fc, reshape_output);
         } else {
             fc_new->set_friendly_name(fc->get_friendly_name());
+            ngraph::copy_runtime_info(fc, new_ops);
             ngraph::replace_node(fc, fc_new);
         }
 
index b4029b6..72b7668 100644 (file)
@@ -3,23 +3,29 @@
 //
 
 #include "transformations/convert_opset2_to_opset1/convert_opset2_to_opset1.hpp"
+
 #include "transformations/convert_gelu.hpp"
 #include "transformations/convert_batch_to_space.hpp"
 #include "transformations/convert_space_to_batch.hpp"
-#include <transformations/utils/pass_manager.hpp>
+
 #include <memory>
+#include <vector>
+
+#include <ngraph/pass/manager.hpp>
 
 bool ngraph::pass::ConvertOpSet2ToOpSet1::run_on_function(std::shared_ptr<ngraph::Function> f) {
-    auto convert_gelu = ConvertGELU();
-    convert_gelu.setCallback(transformation_callback);
-    convert_gelu.run_on_function(f);
+    ngraph::pass::Manager OpSet2ToOpSet1;
+    std::vector<std::shared_ptr<ngraph::pass::PassBase> > transforms;
 
-    auto convert_space_to_batch = ConvertSpaceToBatch();
-    convert_space_to_batch.setCallback(transformation_callback);
-    convert_space_to_batch.run_on_function(f);
+#define NGRAPH_PASS(NAME, NAMESPACE) transforms.push_back(OpSet2ToOpSet1.register_pass<NAMESPACE::NAME>());
+#include <transformations/convert_opset2_to_opset1/convert_opset2_to_opset1_tbl.hpp>
+#undef NGRAPH_PASS
 
-    auto convert_batch_to_space = ConvertBatchToSpace();
-    convert_batch_to_space.setCallback(transformation_callback);
-    convert_batch_to_space.run_on_function(f);
+    for (auto & t : transforms) {
+        if (auto t_param = std::dynamic_pointer_cast<PassParam>(t)) {
+            t_param->setCallback(transformation_callback);
+        }
+    }
+    OpSet2ToOpSet1.run_passes(f);
     return true;
 }
\ No newline at end of file
index aac74af..0c42fe6 100644 (file)
@@ -8,6 +8,7 @@
 #include <vector>
 
 #include <ngraph/opsets/opset2.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertSpaceToBatch::convert_space_to_batch() {
     auto input0 = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
@@ -62,8 +63,11 @@ void ngraph::pass::ConvertSpaceToBatch::convert_space_to_batch_by_elements() {
         std::vector<int64_t> block_values;
         block_values = block_const->cast_vector<int64_t>();
 
+        NodeVector new_ops;
+
         std::shared_ptr<Node> flat_node = data.get_node_shared_ptr();
-        flat_node = std::make_shared<op::v1::Pad>(flat_node, pads_begin_const, pads_end_const, ngraph::op::PadMode::CONSTANT);
+        flat_node = std::make_shared<opset2::Pad>(flat_node, pads_begin_const, pads_end_const, ngraph::op::PadMode::CONSTANT);
+        new_ops.push_back(flat_node);
         auto out_shape = flat_node->get_shape();
 
         std::vector<int64_t> dispersed_shape(block_values.size() + 1);
@@ -93,6 +97,7 @@ void ngraph::pass::ConvertSpaceToBatch::convert_space_to_batch_by_elements() {
                     op::Constant::create(element::i64, Shape{dispersed_shape.size()}, dispersed_shape);
             const bool special_zero = false;
             flat_node = std::make_shared<ngraph::op::v1::Reshape>(flat_node, out_pattern_1, special_zero);
+            new_ops.push_back(flat_node);
 
             const auto axes_order_const =
                     op::Constant::create(element::i64,
@@ -100,16 +105,18 @@ void ngraph::pass::ConvertSpaceToBatch::convert_space_to_batch_by_elements() {
                                          std::vector<int64_t>(axes_order.begin(), axes_order.end()));
             flat_node = std::make_shared<ngraph::opset1::Transpose>(flat_node, axes_order_const)
                     ->add_provenance_group_members_above({flat_node});
-
+            new_ops.push_back(flat_node);
             squeezed_shape[0] *= block_values[block_idx];
             squeezed_shape[block_idx] /= block_values[block_idx];
             const auto out_pattern_2 =
                     op::Constant::create(element::i64, Shape{squeezed_shape.size()}, squeezed_shape);
             flat_node = std::make_shared<ngraph::op::v1::Reshape>(flat_node, out_pattern_2, special_zero)
                     ->add_provenance_group_members_above({data});
+            new_ops.push_back(flat_node);
         }
 
         flat_node->set_friendly_name(space_to_batch->get_friendly_name());
+        ngraph::copy_runtime_info(space_to_batch, new_ops);
         ngraph::replace_node(space_to_batch, flat_node);
         return true;
     };
index 009c630..f4ff7ec 100644 (file)
@@ -8,6 +8,7 @@
 #include <vector>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertSpaceToDepth::convert() {
     auto input0 = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
@@ -81,6 +82,7 @@ void ngraph::pass::ConvertSpaceToDepth::convert() {
         auto transpose = std::make_shared<ngraph::opset1::Transpose>(reshape_begin, create_constant(order));
         auto reshape_end = std::make_shared<ngraph::opset1::Reshape>(transpose, create_constant(shape_end), true);
         reshape_end->set_friendly_name(std_node->get_friendly_name());
+        ngraph::copy_runtime_info(std_node, {reshape_begin, transpose, reshape_end});
         ngraph::replace_node(std_node, reshape_end);
         return true;
     };
index 29fdb7d..427f85d 100644 (file)
@@ -8,6 +8,7 @@
 #include <vector>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::ConvertSubtract::convert_subtract() {
     auto input0 = std::make_shared<pattern::op::Label>(element::i64, Shape{1, 1, 1, 1});
@@ -26,7 +27,7 @@ void ngraph::pass::ConvertSubtract::convert_subtract() {
         auto add = std::make_shared<ngraph::opset1::Add>(sub->input(0).get_source_output(), neg);
 
         add->set_friendly_name(sub->get_friendly_name());
-
+        ngraph::copy_runtime_info(sub, {neg, add});
         ngraph::replace_node(sub, add);
         return true;
     };
diff --git a/inference-engine/src/transformations/src/transformations/init_node_info.cpp b/inference-engine/src/transformations/src/transformations/init_node_info.cpp
new file mode 100644 (file)
index 0000000..2984c62
--- /dev/null
@@ -0,0 +1,30 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "transformations/init_node_info.hpp"
+#include "transformations/rt_info/fused_names_attribute.hpp"
+
+#include <memory>
+#include <vector>
+
+#include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
+
+bool ngraph::pass::InitNodeInfo::run_on_function(std::shared_ptr<ngraph::Function> f) {
+    std::vector<std::shared_ptr<Variant> > attributes {
+        std::make_shared<VariantWrapper<FusedNames> >(FusedNames())
+    };
+
+    for (auto & node : f->get_ops()) {
+        auto & rtInfo = node->get_rt_info();
+        for (auto & attr : attributes) {
+            // Skip initialization if attribute has been already set
+            if (rtInfo.count(attr->get_type_info().name)) continue;
+            if (auto init_attr = attr->init(node)) {
+                rtInfo[attr->get_type_info().name] = init_attr;
+            }
+        }
+    }
+    return false;
+}
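
InitNodeInfo seeds every node's rt_info map, keyed by the variant's type_info.name, with a FusedNames attribute holding the node's own friendly name; it returns false, presumably signalling that only metadata changed, not the graph structure. Reading the attribute back looks roughly like this, using only types introduced by this patch:

    using FusedNamesWrapper = ngraph::VariantWrapper<ngraph::FusedNames>;

    auto& rt_info = node->get_rt_info();
    auto it = rt_info.find(FusedNamesWrapper::type_info.name);
    if (it != rt_info.end()) {
        auto attr = std::dynamic_pointer_cast<FusedNamesWrapper>(it->second);
        std::string names = attr->get().getNames();  // e.g. "conv1,relu1"
    }
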
index f03c05c..ea5efa1 100644 (file)
@@ -8,6 +8,7 @@
 #include <vector>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/rt_info.hpp>
 
 void ngraph::pass::PullTransposeThroughFQUp::pull_transpose_through_fq() {
     auto data1 = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
@@ -39,6 +40,7 @@ void ngraph::pass::PullTransposeThroughFQUp::pull_transpose_through_fq() {
 
         auto input_shape = fq->input(0).get_source_output().get_shape();
 
+        ngraph::NodeVector new_ops;
         ngraph::OutputVector fq_inputs;
         for (size_t i = 0; i < fq->inputs().size(); ++i) {
             std::shared_ptr<ngraph::Node> fq_input;
@@ -51,13 +53,17 @@ void ngraph::pass::PullTransposeThroughFQUp::pull_transpose_through_fq() {
             if (!unsqueeze_axes.empty()) {
                 fq_input = std::make_shared<ngraph::opset1::Unsqueeze>(fq_input,
                                                                        opset1::Constant::create(element::i64, Shape{unsqueeze_axes.size()}, unsqueeze_axes));
+                new_ops.push_back(fq_input);
             }
             fq_input = transpose->copy_with_new_inputs({fq_input, const_order});
+            ngraph::copy_runtime_info(transpose, fq_input);
             fq_inputs.push_back(fq_input);
         }
 
         auto new_fq = fq->copy_with_new_inputs(fq_inputs);
+        new_ops.push_back(new_fq);
         new_fq->set_friendly_name(fq->get_friendly_name());
+        ngraph::copy_runtime_info({fq, transpose}, new_ops);
         ngraph::replace_node(transpose, new_fq);
 
         return true;
diff --git a/inference-engine/src/transformations/src/transformations/rt_info/fused_names_attribute.cpp b/inference-engine/src/transformations/src/transformations/rt_info/fused_names_attribute.cpp
new file mode 100644 (file)
index 0000000..e625d4c
--- /dev/null
@@ -0,0 +1,79 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <assert.h>
+#include <functional>
+#include <memory>
+#include <iterator>
+#include <ostream>
+
+#include <ngraph/node.hpp>
+#include <ngraph/variant.hpp>
+
+#include "transformations/rt_info/fused_names_attribute.hpp"
+
+namespace ngraph {
+
+constexpr VariantTypeInfo VariantWrapper<FusedNames>::type_info;
+
+std::string FusedNames::getNames() const {
+    std::string res;
+    for (auto &name : fused_names) {
+        res += (res.empty() ? name : "," + name);
+    }
+    return res;
+}
+
+std::vector<std::string> FusedNames::getVectorNames() const {
+    return std::vector<std::string>(fused_names.begin(), fused_names.end());
+}
+
+void FusedNames::fuseWith(const FusedNames &names) {
+    for (auto name : names.fused_names) {
+        fused_names.insert(name);
+    }
+}
+
+std::shared_ptr<ngraph::Variant> VariantWrapper<FusedNames>::merge(const ngraph::NodeVector & nodes) {
+    FusedNames mergedNames;
+    for (auto &node : nodes) {
+        const auto &rtInfo = node->get_rt_info();
+        if (!rtInfo.count(VariantWrapper<FusedNames>::type_info.name)) continue;
+
+        const auto attr = rtInfo.at(VariantWrapper<FusedNames>::type_info.name);
+        if (auto fusedNames = std::dynamic_pointer_cast<VariantWrapper<FusedNames> >(attr)) {
+            mergedNames.fuseWith(fusedNames->get());
+        }
+    }
+    return std::make_shared<VariantWrapper<FusedNames> >(mergedNames);
+}
+
+std::shared_ptr<ngraph::Variant> VariantWrapper<FusedNames>::init(const std::shared_ptr<ngraph::Node> & node) {
+    return std::make_shared<VariantWrapper<FusedNames> > (FusedNames(node->get_friendly_name()));
+}
+
+std::string getFusedNames(const std::shared_ptr<ngraph::Node> &node) {
+    const auto &rtInfo = node->get_rt_info();
+    using FusedNamesWrapper = VariantWrapper<FusedNames>;
+
+    if (!rtInfo.count(FusedNamesWrapper::type_info.name)) return {};
+
+    const auto &attr = rtInfo.at(FusedNamesWrapper::type_info.name);
+    FusedNames fusedNames = as_type_ptr<FusedNamesWrapper>(attr)->get();
+    return fusedNames.getNames();
+}
+
+std::vector<std::string> getFusedNamesVector(const std::shared_ptr<ngraph::Node> &node) {
+    const auto &rtInfo = node->get_rt_info();
+    using FusedNamesWrapper = VariantWrapper<FusedNames>;
+
+    if (!rtInfo.count(FusedNamesWrapper::type_info.name)) return {};
+
+    const auto &attr = rtInfo.at(FusedNamesWrapper::type_info.name);
+    FusedNames fusedNames = as_type_ptr<FusedNamesWrapper>(attr)->get();
+    return fusedNames.getVectorNames();
+}
+
+
+}  // namespace ngraph
\ No newline at end of file
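
merge is the hook that lets copy_runtime_info combine attributes when several source nodes carry FusedNames: the name sets are unioned, so a replacement node remembers every original layer folded into it (that copy_runtime_info invokes merge is an assumption from the attribute's design, not shown in this patch). An illustrative round trip:

    ngraph::FusedNames a("conv1");
    ngraph::FusedNames b("relu1");
    a.fuseWith(b);                      // a now holds {"conv1", "relu1"}
    std::string joined = a.getNames();  // "conv1,relu1"
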
diff --git a/inference-engine/src/transformations/src/transformations/utils/pass_manager.cpp b/inference-engine/src/transformations/src/transformations/utils/pass_manager.cpp
deleted file mode 100644 (file)
index 04e9307..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright (C) 2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "transformations/utils/pass_manager.hpp"
-
-#include <transformations/constant_eltwise_reduction.hpp>
-#include <transformations/convert_broadcast_to_tiles.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_convolutions.hpp>
-#include <transformations/convert_divide.hpp>
-#include <transformations/convert_mod.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_gather_to_gather_ie.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_gathertree_to_gathertree_ie.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_interpolate_to_interp_or_resample.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_lrn_to_lrn_ie.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_lstm_cell_to_lstm_cell_ie.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_matmul_to_fc_or_gemm.hpp>
-#include <transformations/convert_minimum_to_power_and_max.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_mul_add_to_scaleshift_or_power.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_mul_or_add_finally.hpp>
-#include <transformations/convert_negative.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_nms_to_nms_ie.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_normalizel2_to_normalize_ie.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_one_hot_to_one_hot_ie.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_pad_to_pad_ie.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_sqrt_to_power_ie.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_power_to_power_ie.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_prelu_to_relu_ie.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_proposal_to_proposal_ie.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_prior_to_ie_prior.hpp>
-#include <transformations/convert_reduce_to_pooling.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_strided_slice_to_crop.hpp>
-#include <transformations/convert_subtract.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_selu_to_selu_ie.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_tile_to_ie_tile.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_topk_to_topk_ie.hpp>
-#include <transformations/convert_depth_to_space.hpp>
-#include <transformations/convert_space_to_depth.hpp>
-#include <transformations/batch_norm_decomposition.hpp>
-#include <transformations/convert_opset1_to_legacy/conv_bias_fusion.hpp>
-#include <transformations/convert_opset1_to_legacy/fc_bias_fusion.hpp>
-#include <transformations/mul_add_squence_fusion.hpp>
-#include <transformations/mul_add_verification.hpp>
-#include <transformations/convert_opset1_to_legacy/reshape_fc_fusion.hpp>
-#include <transformations/convert_opset1_to_legacy/reshape_1d_convolutions.hpp>
-#include <transformations/convert_opset1_to_legacy/reshape_fully_connected.hpp>
-#include <transformations/pull_transpose_through_fq.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_strided_slice_to_strided_slice_ie.hpp>
-#include <transformations/convert_opset1_to_legacy/convert_hard_sigmoid_to_hard_sigmoid_ie.hpp>
-
-#include <ngraph/pass/constant_folding.hpp>
-
-#include <memory>
-#include <vector>
-
-void ngraph::pass::ConversionPassManager::register_conversion_passes() {
-    std::vector<std::shared_ptr<PassBase> > transforms;
-
-#define NGRAPH_PASS(NAME, NAMESPACE) transforms.push_back(register_pass<NAMESPACE::NAME>());
-#include <transformations/convert_opset1_to_legacy/convert_opset1_to_legacy_tbl.hpp>
-
-#undef NGRAPH_PASS
-
-    for (auto & t : transforms) {
-        if (auto t_param = std::dynamic_pointer_cast<PassParam>(t)) {
-            t_param->setCallback(transformation_callback);
-        }
-    }
-}
index 2052a8b..db848a2 100644 (file)
@@ -20,10 +20,10 @@ if(ENABLE_MYRIAD)
 
     add_subdirectory(myriad_plugin)
 
-    install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/custom_kernels/
-            DESTINATION ${IE_CPACK_LIBRARY_PATH}/vpu_custom_kernels
-            COMPONENT myriad)
     if(DEFINED VPU_CLC_MA2X8X_ROOT)
+        install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/custom_kernels/
+                DESTINATION ${IE_CPACK_LIBRARY_PATH}/vpu_custom_kernels
+                COMPONENT myriad)
         install(DIRECTORY ${VPU_CLC_MA2X8X_ROOT}/
                 DESTINATION deployment_tools/tools/cl_compiler
                 COMPONENT myriad
index 50c8dfc..e34a0e6 100644 (file)
@@ -8,11 +8,12 @@
 
 #include <memory>
 
-namespace ngraph { namespace op {
+namespace ngraph { namespace vpu { namespace op {
 
-class DynamicShapeResolver : public Op {
+class DynamicShapeResolver : public ngraph::op::Op {
 public:
-    static constexpr NodeTypeInfo type_info{"DynamicShapeResolver", 1};
+    static constexpr NodeTypeInfo type_info{"DynamicShapeResolver", 0};
+
     const NodeTypeInfo& get_type_info() const override { return type_info; }
 
     DynamicShapeResolver(const Output<Node>& tensorWithData, const Output<Node>& tensorWithDims);
@@ -25,4 +26,5 @@ public:
 };
 
 }  // namespace op
+}  // namespace vpu
 }  // namespace ngraph
index 8d8dcb5..eb28ba4 100644 (file)
 #include <memory>
 #include <vector>
 
-namespace ngraph {
-namespace op {
+namespace ngraph { namespace vpu { namespace op {
 
-class StaticShapeNonZero : public Op {
+class StaticShapeNonZero : public ngraph::op::Op {
 public:
-    static constexpr NodeTypeInfo type_info{"StaticShapeNonZero", 1};
+    static constexpr NodeTypeInfo type_info{"StaticShapeNonZero", 0};
+
     const NodeTypeInfo& get_type_info() const override { return type_info; }
 
     explicit StaticShapeNonZero(const Output<ngraph::Node>& input);
@@ -26,5 +26,7 @@ public:
 
     bool visit_attributes(ngraph::AttributeVisitor& visitor) override;
 };
+
 }  // namespace op
+}  // namespace vpu
 }  // namespace ngraph
index 855cbcf..0ab93a4 100644 (file)
@@ -9,18 +9,19 @@
 #include <vector>
 #include <memory>
 
-namespace ngraph {
-namespace pass {
+namespace vpu {
 
-class DynamicToStaticShape : public FunctionPass {
-public:
-    DynamicToStaticShape() = default;
+using Transformations = std::unordered_map<ngraph::NodeTypeInfo, std::function<void(std::shared_ptr<ngraph::Node>)>>;
 
-    bool run_on_function(std::shared_ptr<ngraph::Function> function) override;
+class DynamicToStaticShape {
+public:
+    explicit DynamicToStaticShape(const Transformations& specificTransformations = {});
+    void transform(ngraph::Function& function) const;
 
 private:
-    bool validateStaticShapes(std::shared_ptr<ngraph::Function> function) const;
+    Transformations transformations;
 };
 
-}  // namespace pass
-}  // namespace ngraph
+void printTo(std::ostream& stream, const ngraph::NodeTypeInfo& object);
+
+}  // namespace vpu
diff --git a/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_binary_elementwise.hpp b/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_binary_elementwise.hpp
new file mode 100644 (file)
index 0000000..b0ac4d6
--- /dev/null
@@ -0,0 +1,15 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "ngraph/node.hpp"
+
+#include <memory>
+
+namespace vpu {
+
+void dynamicToStaticShapeBinaryEltwise(std::shared_ptr<ngraph::Node> eltwise);
+
+}  // namespace vpu
diff --git a/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_non_max_suppression.hpp b/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_non_max_suppression.hpp
new file mode 100644 (file)
index 0000000..31cf333
--- /dev/null
@@ -0,0 +1,15 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "ngraph/node.hpp"
+
+#include <memory>
+
+namespace vpu {
+
+void dynamicToStaticNonMaxSuppression(std::shared_ptr<ngraph::Node> node);
+
+}  // namespace vpu
index 15a7f53..88e6560 100644 (file)
@@ -4,18 +4,12 @@
 
 #pragma once
 
-#include <ngraph/pass/graph_rewrite.hpp>
+#include "ngraph/node.hpp"
 
-#include <vector>
 #include <memory>
 
-namespace ngraph {
-namespace pass {
+namespace vpu {
 
-class DynamicToStaticShapeNonZero : public GraphRewrite {
-public:
-    DynamicToStaticShapeNonZero();
-};
+void dynamicToStaticShapeNonZero(std::shared_ptr<ngraph::Node> nonZero);
 
-}  // namespace pass
-}  // namespace ngraph
+}  // namespace vpu
diff --git a/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_roialign.hpp b/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_roialign.hpp
new file mode 100644 (file)
index 0000000..f194030
--- /dev/null
@@ -0,0 +1,15 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "ngraph/node.hpp"
+
+#include <memory>
+
+namespace vpu {
+
+void dynamicToStaticShapeROIAlign(std::shared_ptr<ngraph::Node> target);
+
+}  // namespace vpu
diff --git a/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_squeeze.hpp b/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_squeeze.hpp
new file mode 100644 (file)
index 0000000..d856ef7
--- /dev/null
@@ -0,0 +1,15 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "ngraph/node.hpp"
+
+#include <memory>
+
+namespace vpu {
+
+void dynamicToStaticShapeSqueeze(std::shared_ptr<ngraph::Node> target);
+
+}  // namespace vpu
diff --git a/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_transpose.hpp b/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_transpose.hpp
new file mode 100644 (file)
index 0000000..c64bb56
--- /dev/null
@@ -0,0 +1,15 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "ngraph/node.hpp"
+
+#include <memory>
+
+namespace vpu {
+
+void dynamicToStaticShapeTranspose(std::shared_ptr<ngraph::Node> transpose);
+
+}  // namespace vpu
diff --git a/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_unary_elementwise.hpp b/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_unary_elementwise.hpp
new file mode 100644 (file)
index 0000000..ec0ce72
--- /dev/null
@@ -0,0 +1,15 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "ngraph/node.hpp"
+
+#include <memory>
+
+namespace vpu {
+
+void dynamicToStaticUnaryElementwise(std::shared_ptr<ngraph::Node> node);
+
+}  // namespace vpu
diff --git a/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_unsqueeze.hpp b/inference-engine/src/vpu/common/include/vpu/ngraph/transformations/dynamic_to_static_shape_unsqueeze.hpp
new file mode 100644 (file)
index 0000000..37087ce
--- /dev/null
@@ -0,0 +1,15 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "ngraph/node.hpp"
+
+#include <memory>
+
+namespace vpu {
+
+void dynamicToStaticShapeUnsqueeze(std::shared_ptr<ngraph::Node> target);
+
+}  // namespace vpu
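
Together these headers replace the old GraphRewrite-based pass with one free function per operation type, dispatched through the Transformations table of the reworked DynamicToStaticShape class declared earlier in this patch. A hedged usage sketch, where function stands for any std::shared_ptr<ngraph::Function>:

    // Default table (the opset3 types registered in the .cpp below):
    vpu::DynamicToStaticShape().transform(*function);

    // Or restrict the pass to specific operation types:
    vpu::Transformations only_nonzero = {
        {ngraph::opset3::NonZero::type_info, vpu::dynamicToStaticShapeNonZero},
    };
    vpu::DynamicToStaticShape(only_nonzero).transform(*function);
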
index ebe0a62..a33c37e 100644 (file)
@@ -4,7 +4,7 @@
 
 #include "vpu/ngraph/operations/dynamic_shape_resolver.hpp"
 
-namespace ngraph { namespace op {
+namespace ngraph { namespace vpu { namespace op {
 
 constexpr NodeTypeInfo DynamicShapeResolver::type_info;
 
@@ -23,21 +23,24 @@ void DynamicShapeResolver::validate_and_infer_types() {
     NODE_VALIDATION_CHECK(this, get_input_partial_shape(0).is_static(), "(", get_friendly_name(), ") does not support dynamic shape for data tensor");
     NODE_VALIDATION_CHECK(this, get_input_partial_shape(1).is_static(), "(", get_friendly_name(), ") does not support dynamic shape for dims tensor");
 
+    const auto& dataElementType = get_input_element_type(0);
+    NODE_VALIDATION_CHECK(this, dataElementType.is_static(), "(", get_friendly_name(), ") does not support dynamic element type for data tensor");
     const auto& dimsElementType = get_input_element_type(1);
-    NODE_VALIDATION_CHECK(this, dimsElementType.is_integral_number() && dimsElementType.is_static(), "(", get_friendly_name(), ") supports only integral "
-        "number type for dims tensor, but ", dimsElementType, " provided");
+    NODE_VALIDATION_CHECK(this, dimsElementType.is_static() && dimsElementType.compatible(ngraph::element::i64), "(", get_friendly_name(),
+        ") supports only i64 number type for dims tensor, but ", dimsElementType, " provided");
 
     const auto& dataShape = get_input_shape(0);
     const auto& dimsShape = get_input_shape(1);
     NODE_VALIDATION_CHECK(this, dimsShape.size() == 1 && dimsShape.front() == dataShape.size(), "(", get_friendly_name(), ") inputs shapes mismatch: first "
         "input shape = ", dataShape, " second input shape = ", dimsShape, " but ", dataShape, " and ", Shape{dataShape.size()}, " are expected");
 
-    set_output_type(0, get_input_element_type(0), get_input_shape(0));
+    set_output_type(0, dataElementType, dataShape);
 }
 
-bool DynamicShapeResolver::visit_attributes(ngraph::AttributeVisitor& visitor) {
+bool DynamicShapeResolver::visit_attributes(ngraph::AttributeVisitor&) {
     return true;
 }
 
 }  // namespace op
+}  // namespace vpu
 }  // namespace ngraph
index 3400e5b..a694274 100644 (file)
@@ -4,8 +4,7 @@
 
 #include "vpu/ngraph/operations/static_shape_nonzero.hpp"
 
-namespace ngraph {
-namespace op {
+namespace ngraph { namespace vpu { namespace op {
 
 constexpr NodeTypeInfo StaticShapeNonZero::type_info;
 
@@ -45,4 +44,5 @@ bool StaticShapeNonZero::visit_attributes(ngraph::AttributeVisitor& visitor) {
 }
 
 }  // namespace op
+}  // namespace vpu
 }  // namespace ngraph
index e7f92cf..b7d4a8a 100644 (file)
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_unary_elementwise.hpp"
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_roialign.hpp"
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_transpose.hpp"
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_non_max_suppression.hpp"
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_nonzero.hpp"
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_binary_elementwise.hpp"
 #include "vpu/ngraph/transformations/dynamic_to_static_shape.hpp"
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_squeeze.hpp"
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_unsqueeze.hpp"
+#include "vpu/utils/error.hpp"
 
-#include "vpu/ngraph/transformations/dynamic_to_static_shape_nonzero.hpp"
+#include "ngraph/opsets/opset3.hpp"
 
-#include <vpu/utils/error.hpp>
+namespace vpu {
 
-namespace ngraph {
-namespace pass {
+void printTo(std::ostream& stream, const ngraph::NodeTypeInfo& object) {
+    stream << object.name << " ver. " << object.version;
+}
 
-bool DynamicToStaticShape::run_on_function(std::shared_ptr<ngraph::Function> function) {
-    DynamicToStaticShapeNonZero().run_on_function(function);
+namespace {
 
-    return validateStaticShapes(function);
+using namespace ngraph;
+
+bool isDynamic(const Node& node) {
+    const auto& outputs = node.outputs();
+    return std::any_of(outputs.cbegin(), outputs.cend(), [](const Output<const Node>& output) { return output.get_partial_shape().is_dynamic(); });
 }
 
-bool DynamicToStaticShape::validateStaticShapes(std::shared_ptr<ngraph::Function> function) const {
-    function->validate_nodes_and_infer_types();
-
-    for (const auto& node : function->get_ops()) {
-        for (const auto& output : node->get_outputs()) {
-            const auto outputPartialShape = output.get_partial_shape();
-            VPU_THROW_UNLESS(outputPartialShape.is_static(),
-                             "DynamicToStaticShape pass: after all the transformations there is "
-                             "still dynamism in the network. First met node with dynamic output: "
-                             "%s (type: %s)", node->get_friendly_name(), node->get_type_name());
-            return false;
-        }
+bool validateStaticShapes(const ngraph::Function& function) {
+    for (const auto& node : function.get_ordered_ops()) {
+        VPU_THROW_UNLESS(!isDynamic(*node),
+            "DynamicToStaticShape transformation: after all the transformations there is still dynamism in the network."
+            " First met node with dynamic output: {} (type: {})", node->get_friendly_name(), node->get_type_name());
     }
     return true;
 }
 
-}  // namespace pass
-}  // namespace ngraph
+const Transformations& getDefaultTransformations() {
+    static const Transformations transformations = {
+        {ngraph::opset3::Add::type_info, dynamicToStaticShapeBinaryEltwise},
+        {ngraph::opset3::Multiply::type_info, dynamicToStaticShapeBinaryEltwise},
+        {ngraph::opset3::Subtract::type_info, dynamicToStaticShapeBinaryEltwise},
+        {ngraph::opset3::Divide::type_info, dynamicToStaticShapeBinaryEltwise},
+        {ngraph::opset3::Equal::type_info, dynamicToStaticShapeBinaryEltwise},
+        {ngraph::opset3::Power::type_info, dynamicToStaticShapeBinaryEltwise},
+        {ngraph::opset3::NonMaxSuppression::type_info, dynamicToStaticNonMaxSuppression},
+        {ngraph::opset3::NonZero::type_info,   dynamicToStaticShapeNonZero},
+        {ngraph::opset3::Transpose::type_info, dynamicToStaticShapeTranspose},
+        {ngraph::opset3::Convert::type_info,   dynamicToStaticUnaryElementwise},
+        {ngraph::opset3::Clamp::type_info,     dynamicToStaticUnaryElementwise},
+        {ngraph::opset3::Floor::type_info,     dynamicToStaticUnaryElementwise},
+        {ngraph::opset3::Log::type_info,       dynamicToStaticUnaryElementwise},
+        {ngraph::opset3::Relu::type_info,      dynamicToStaticUnaryElementwise},
+        {ngraph::opset3::ScatterUpdate::type_info, dynamicToStaticUnaryElementwise},
+        {ngraph::opset3::Sigmoid::type_info,   dynamicToStaticUnaryElementwise},
+        {ngraph::opset3::Sqrt::type_info,      dynamicToStaticUnaryElementwise},
+        {ngraph::opset3::Squeeze::type_info,   dynamicToStaticShapeSqueeze},
+        {ngraph::opset3::Unsqueeze::type_info, dynamicToStaticShapeUnsqueeze},
+        {ngraph::opset3::ROIAlign::type_info,  dynamicToStaticShapeROIAlign},
+    };
+    return transformations;
+}
+
+std::set<NodeTypeInfo> getSupportedTypes(const Transformations& transformations) {
+    auto supportedTypes = std::set<NodeTypeInfo>{};
+    for (const auto& transformation : transformations) {
+        supportedTypes.insert(transformation.first);
+    }
+    return supportedTypes;
+}
+
+}  // namespace
+
+DynamicToStaticShape::DynamicToStaticShape(const Transformations& specificTransformations)
+    : transformations(specificTransformations.empty() ? getDefaultTransformations() : specificTransformations) {
+    transformations.emplace(ngraph::opset3::Result::type_info, [](const std::shared_ptr<ngraph::Node>&){});
+}
+
+void DynamicToStaticShape::transform(ngraph::Function& function) const {
+    for (const auto& operation : function.get_ordered_ops()) {
+        if (!isDynamic(*operation)) {
+            continue;
+        }
+
+        const auto type = operation->get_type_info();
+        const auto transformation = transformations.find(type);
+        VPU_THROW_UNLESS(transformation != transformations.cend(),
+            "DynamicToStaticShape transformation encountered dynamic node {} of type {}, but only {} types are supported for dynamic nodes",
+            operation->get_friendly_name(), type, getSupportedTypes(transformations));
+        transformation->second(operation);
+    }
+
+    function.validate_nodes_and_infer_types();
+    validateStaticShapes(function);
+}
+
+}  // namespace vpu
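Note: DynamicToStaticShape is now a plain per-node dispatch over get_ordered_ops() rather than an nGraph pass. A minimal calling sketch (hypothetical; it assumes Transformations is the map alias declared in the corresponding header and, as the constructor above shows, that an empty map selects the defaults):

    #include "vpu/ngraph/transformations/dynamic_to_static_shape.hpp"
    #include "ngraph/function.hpp"

    void resolveDynamism(ngraph::Function& function) {
        // An empty transformation map falls back to getDefaultTransformations();
        // transform() throws on a dynamic node of an unsupported type.
        vpu::DynamicToStaticShape(vpu::Transformations{}).transform(function);
    }
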
diff --git a/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_binary_elementwise.cpp b/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_binary_elementwise.cpp
new file mode 100644 (file)
index 0000000..4cc013b
--- /dev/null
@@ -0,0 +1,50 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_binary_elementwise.hpp"
+
+#include "vpu/ngraph/operations/dynamic_shape_resolver.hpp"
+#include <vpu/utils/error.hpp>
+
+#include "ngraph/graph_util.hpp"
+#include "ngraph/opsets/opset3.hpp"
+
+#include <memory>
+
+namespace vpu {
+
+void dynamicToStaticShapeBinaryEltwise(std::shared_ptr<ngraph::Node> eltwise) {
+    const auto lhsRank = eltwise->input_value(0).get_partial_shape().rank();
+    const auto rhsRank = eltwise->input_value(1).get_partial_shape().rank();
+
+    const auto copied = eltwise->copy_with_new_inputs(eltwise->input_values());
+
+    auto shapeToConstant = [&eltwise](const ngraph::Output<ngraph::Node> & output) -> std::shared_ptr<ngraph::opset3::Constant> {
+        VPU_THROW_UNLESS(output.get_partial_shape().is_static(),
+            "DynamicToStaticShape transformation for {} of type {} expects static shape on inputs without DSR",
+            eltwise->get_friendly_name(), eltwise->get_type_info());
+        return ngraph::opset3::Constant::create(ngraph::element::i64, {output.get_shape().size()}, output.get_shape());
+    };
+
+    const auto lhsDSR = ngraph::as_type_ptr<ngraph::vpu::op::DynamicShapeResolver>(eltwise->input_value(0).get_node_shared_ptr());
+    const auto rhsDSR = ngraph::as_type_ptr<ngraph::vpu::op::DynamicShapeResolver>(eltwise->input_value(1).get_node_shared_ptr());
+
+    VPU_THROW_UNLESS(lhsDSR || rhsDSR, "DynamicToStaticShape transformation for {} of type {} expects at least one DSR as input",
+        eltwise->get_friendly_name(), eltwise->get_type_info());
+
+    auto lhsInput = lhsDSR ? lhsDSR->input_value(1) : shapeToConstant(eltwise->input_value(0));
+    auto rhsInput = rhsDSR ? rhsDSR->input_value(1) : shapeToConstant(eltwise->input_value(1));
+
+    const auto diff = std::abs(lhsRank.get_length() - rhsRank.get_length());
+    if (diff) {
+        auto & broadcastInput = lhsRank.get_length() < rhsRank.get_length() ? lhsInput : rhsInput;
+        const auto broadcastConst = ngraph::opset3::Constant::create(broadcastInput.get_element_type(), {static_cast<uint64_t>(diff)}, {1});
+        broadcastInput = std::make_shared<ngraph::opset3::Concat>(ngraph::OutputVector{broadcastConst, broadcastInput}, 0);
+    }
+
+    const auto shape = std::make_shared<ngraph::opset3::Maximum>(lhsInput, rhsInput);
+    ngraph::replace_node(std::move(eltwise), std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(copied, shape));
+}
+
+}  // namespace vpu
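Note: the Concat + Maximum subgraph above reproduces numpy-style broadcasting on the runtime shapes: the shorter shape is left-padded with 1s, then the per-dimension maximum is taken. The same arithmetic on plain values (illustration only, not nGraph code):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // e.g. lhs = {800}, rhs = {8, 1}: pad lhs to {1, 800}, result is {8, 800}
    std::vector<int64_t> broadcastShape(std::vector<int64_t> lhs, std::vector<int64_t> rhs) {
        if (lhs.size() < rhs.size())
            std::swap(lhs, rhs);                              // lhs is the longer shape
        rhs.insert(rhs.begin(), lhs.size() - rhs.size(), 1);  // the Concat with a {1,...} constant
        std::vector<int64_t> result(lhs.size());
        for (std::size_t i = 0; i < lhs.size(); ++i)
            result[i] = std::max(lhs[i], rhs[i]);             // the element-wise Maximum node
        return result;
    }
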
diff --git a/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_non_max_suppression.cpp b/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_non_max_suppression.cpp
new file mode 100644 (file)
index 0000000..2652d99
--- /dev/null
@@ -0,0 +1,48 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_non_max_suppression.hpp"
+
+#include "vpu/ngraph/operations/dynamic_shape_resolver.hpp"
+#include <vpu/utils/error.hpp>
+
+#include "ngraph/graph_util.hpp"
+#include "ngraph/opsets/opset3.hpp"
+
+#include <memory>
+
+namespace vpu {
+
+void dynamicToStaticNonMaxSuppression(std::shared_ptr<ngraph::Node> target) {
+    const auto dsr1 = target->input_value(1).get_node_shared_ptr();
+    VPU_THROW_UNLESS(std::dynamic_pointer_cast<ngraph::vpu::op::DynamicShapeResolver>(dsr1),
+                     "DynamicToStaticShape transformation for {} of type {} expects {} as input with index {}",
+                     target->get_friendly_name(), target->get_type_info(), ngraph::vpu::op::DynamicShapeResolver::type_info, 1);
+
+    const auto scores_shape = dsr1->input(1).get_source_output();
+
+    const auto index_num_classes = ngraph::opset3::Constant::create(ngraph::element::i64, {1}, std::vector<int64_t>{1});
+    const auto axis_num_classes = ngraph::opset3::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{0});
+    const auto num_classes = std::make_shared<ngraph::opset3::Gather>(scores_shape, index_num_classes, axis_num_classes);
+
+    const auto index_num_boxes = ngraph::opset3::Constant::create(ngraph::element::i64, {1}, std::vector<int64_t>{2});
+    const auto axis_num_boxes = ngraph::opset3::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{0});
+    const auto num_boxes = std::make_shared<ngraph::opset3::Gather>(scores_shape, index_num_boxes, axis_num_boxes);
+
+    VPU_THROW_UNLESS(target->inputs().size() > 2,  "DynamicToStaticShape transformation for {} expects at least 3 inputs", target);
+    // Originally the 3rd input is a scalar of any integer type, so we convert and unsqueeze it to 1D
+    const auto max_output_boxes_per_class = std::make_shared<ngraph::opset3::Convert>(std::make_shared<ngraph::opset3::Unsqueeze>(
+            target->input_value(2).get_node_shared_ptr(), ngraph::opset3::Constant::create(ngraph::element::i32, {1}, {0})), scores_shape.get_element_type());
+
+    const auto max_output_boxes_overall = std::make_shared<ngraph::opset3::Multiply>(max_output_boxes_per_class, num_classes);
+    const auto num_selected_boxes = std::make_shared<ngraph::opset3::Minimum>(num_boxes, max_output_boxes_overall);
+
+    const auto triplet_const = ngraph::opset3::Constant::create(scores_shape.get_element_type(), {1}, std::vector<int64_t>{3});
+    const auto output_shape = std::make_shared<ngraph::opset3::Concat>(ngraph::OutputVector{num_selected_boxes, triplet_const}, 0);
+
+    const auto copied = target->clone_with_new_inputs(target->input_values());
+    ngraph::replace_node(target, std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(copied, output_shape));
+}
+
+}  // namespace vpu
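Note: the Gather/Multiply/Minimum chain above computes a static upper bound on the number of selected boxes from the scores shape [num_batches, num_classes, num_boxes]; the output is an [N, 3] tensor of (batch, class, box) triples. The same arithmetic on plain values (illustration only):

    #include <algorithm>
    #include <cstdint>

    // e.g. num_classes = 80, num_boxes = 1000, max_output_boxes_per_class = 10
    // -> min(1000, 10 * 80) = 800 selected boxes, output shape {800, 3}
    int64_t numSelectedBoxes(int64_t num_classes, int64_t num_boxes,
                             int64_t max_output_boxes_per_class) {
        return std::min(num_boxes, max_output_boxes_per_class * num_classes);
    }
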
index 604dd90..570a68b 100644 (file)
@@ -4,45 +4,25 @@
 
 #include "vpu/ngraph/transformations/dynamic_to_static_shape_nonzero.hpp"
 
-#include <vpu/ngraph/operations/static_shape_nonzero.hpp>
-#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+#include "vpu/ngraph/operations/static_shape_nonzero.hpp"
+#include "vpu/ngraph/operations/dynamic_shape_resolver.hpp"
 
-#include <ngraph/opsets/opset3.hpp>
+#include "ngraph/graph_util.hpp"
 
 #include <memory>
 
-namespace ngraph {
-namespace pass {
-
-DynamicToStaticShapeNonZero::DynamicToStaticShapeNonZero() {
-    // We don't set strict_mode when use pattern Matcher,
-    // so we can set any type and shape for input.
-    auto inputWithAnyTypeAndShape = std::make_shared<pattern::op::Label>(
-            element::dynamic, PartialShape{});
-    auto nonZeroPattern = std::make_shared<ngraph::op::NonZero>(inputWithAnyTypeAndShape);
-
-    ngraph::graph_rewrite_callback callback = [](pattern::Matcher& matcher) {
-        const auto nonZero = std::dynamic_pointer_cast<ngraph::opset3::NonZero>(matcher.get_match_root());
-        if (!nonZero) {
-            return false;
-        }
-
-        auto staticShapeNonZero = std::make_shared<ngraph::op::StaticShapeNonZero>(
-                nonZero->input(0).get_source_output());
-        staticShapeNonZero->set_friendly_name(nonZero->get_friendly_name() + "/static_shape");
-
-        auto dynamicShapeResolver = std::make_shared<ngraph::op::DynamicShapeResolver>(
-                staticShapeNonZero->output(0), staticShapeNonZero->output(1));
-        dynamicShapeResolver->set_friendly_name(nonZero->get_friendly_name() + "/resolve_shape");
-
-        ngraph::replace_node(matcher.get_match_root(), dynamicShapeResolver);
-        return true;
-    };
-
-    const auto matcher = std::make_shared<ngraph::pattern::Matcher>(
-            nonZeroPattern, "DynamicToStaticShapeNonZero");
-    this->add_matcher(matcher, callback, PassProperty::CHANGE_DYNAMIC_STATE);
+namespace vpu {
+
+void dynamicToStaticShapeNonZero(std::shared_ptr<ngraph::Node> nonZero) {
+    auto staticShapeNonZero = std::make_shared<ngraph::vpu::op::StaticShapeNonZero>(nonZero->input(0).get_source_output());
+    staticShapeNonZero->set_friendly_name(nonZero->get_friendly_name() + "/static_shape");
+
+    auto dynamicShapeResolver = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(
+        staticShapeNonZero->output(0), staticShapeNonZero->output(1));
+    dynamicShapeResolver->set_friendly_name(nonZero->get_friendly_name() + "/resolve_shape");
+
+    ngraph::replace_node(std::move(nonZero), std::move(dynamicShapeResolver));
 }
 
-}  // namespace pass
-}  // namespace ngraph
+}  // namespace vpu
+
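Note: a minimal driver sketch for the function above (hypothetical wiring; StaticShapeNonZero's output 0 carries the statically shaped indices and output 1 their runtime shape, which is exactly the pair DynamicShapeResolver consumes):

    #include "vpu/ngraph/transformations/dynamic_to_static_shape_nonzero.hpp"
    #include "ngraph/opsets/opset3.hpp"
    #include <memory>

    void rewrite() {
        auto data = std::make_shared<ngraph::opset3::Parameter>(
            ngraph::element::f16, ngraph::Shape{8, 800});
        auto nonZero = std::make_shared<ngraph::opset3::NonZero>(data);
        // NonZero is replaced with StaticShapeNonZero feeding a DynamicShapeResolver
        vpu::dynamicToStaticShapeNonZero(nonZero);
    }
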
diff --git a/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_roialign.cpp b/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_roialign.cpp
new file mode 100644 (file)
index 0000000..9a93ca8
--- /dev/null
@@ -0,0 +1,55 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_roialign.hpp"
+
+#include "vpu/ngraph/operations/dynamic_shape_resolver.hpp"
+#include <vpu/utils/error.hpp>
+
+#include "ngraph/graph_util.hpp"
+#include "ngraph/opsets/opset3.hpp"
+
+#include <memory>
+
+namespace vpu {
+
+void dynamicToStaticShapeROIAlign(std::shared_ptr<ngraph::Node> target) {
+    const auto roi_align = std::dynamic_pointer_cast<ngraph::opset3::ROIAlign>(target);
+    VPU_THROW_UNLESS(roi_align,
+        "dynamicToStaticShapeROIAlign transformation is not applicable for {}, it should be {} instead",
+        target, ngraph::opset3::ROIAlign::type_info);
+
+    auto shapeToConstant = [&roi_align](const ngraph::Output<ngraph::Node> & output) -> std::shared_ptr<ngraph::opset3::Constant> {
+        VPU_THROW_UNLESS(output.get_partial_shape().is_static(),
+                         "DynamicToStaticShape transformation for {} of type {} expects static shape on inputs without DSR",
+                         roi_align->get_friendly_name(), roi_align->get_type_info());
+        return ngraph::opset3::Constant::create(ngraph::element::i64, {output.get_shape().size()}, output.get_shape());
+    };
+
+    const auto dataDSR = ngraph::as_type_ptr<ngraph::vpu::op::DynamicShapeResolver>(roi_align->input_value(0).get_node_shared_ptr());
+    const auto num_roisDSR = ngraph::as_type_ptr<ngraph::vpu::op::DynamicShapeResolver>(roi_align->input_value(2).get_node_shared_ptr());
+
+    VPU_THROW_UNLESS(dataDSR || num_roisDSR, "DynamicToStaticShape transformation for {} of type {} expects at least one DSR as input",
+                     roi_align->get_friendly_name(), roi_align->get_type_info());
+
+    auto input_0_shape = dataDSR ? dataDSR->input_value(1) : shapeToConstant(roi_align->input_value(0));
+    auto num_rois = num_roisDSR ? num_roisDSR->input_value(1) : shapeToConstant(roi_align->input_value(2));
+
+    const auto c_index = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64, ngraph::Shape{1}, std::vector<int64_t>{1});
+    const auto c_axis = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64, ngraph::Shape{1}, std::vector<int64_t>{0});
+    const auto c = std::make_shared<ngraph::opset3::Gather>(input_0_shape, c_index, c_axis);
+
+    const auto pooled_h = std::make_shared<ngraph::opset3::Constant>(
+            input_0_shape.get_element_type(), ngraph::Shape{1}, std::vector<int64_t>{roi_align->get_pooled_h()});
+    const auto pooled_w = std::make_shared<ngraph::opset3::Constant>(
+            input_0_shape.get_element_type(), ngraph::Shape{1}, std::vector<int64_t>{roi_align->get_pooled_w()});
+
+    const auto output_shape = std::make_shared<ngraph::opset3::Concat>(
+            ngraph::OutputVector{num_rois, c, pooled_h, pooled_w}, 0);
+
+    const auto copied = target->clone_with_new_inputs(target->input_values());
+    ngraph::replace_node(target, std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(copied, output_shape));
+}
+
+}  // namespace vpu
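Note: the Gather + Concat subgraph above assembles the static ROIAlign output shape [num_rois, C, pooled_h, pooled_w], taking C from the NCHW data shape and num_rois from the batch-indices input. The same computation on plain values (illustration only):

    #include <cstdint>
    #include <vector>

    // e.g. dataShape = {1, 256, 200, 272}, num_rois = 1000, pooled 7x7
    // -> {1000, 256, 7, 7}
    std::vector<int64_t> roiAlignOutputShape(const std::vector<int64_t>& dataShape,
                                             int64_t num_rois,
                                             int64_t pooled_h, int64_t pooled_w) {
        return {num_rois, dataShape[1], pooled_h, pooled_w};
    }
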
diff --git a/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_squeeze.cpp b/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_squeeze.cpp
new file mode 100644 (file)
index 0000000..a21e6cd
--- /dev/null
@@ -0,0 +1,59 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_squeeze.hpp"
+
+#include "vpu/ngraph/operations/dynamic_shape_resolver.hpp"
+#include <vpu/utils/error.hpp>
+
+#include "ngraph/graph_util.hpp"
+#include "ngraph/ops.hpp"
+#include "ngraph/validation_util.hpp"
+
+#include "ngraph/opsets/opset3.hpp"
+#include <algorithm>
+#include <memory>
+
+namespace vpu {
+
+void dynamicToStaticShapeSqueeze(std::shared_ptr<ngraph::Node> target) {
+    const auto dsr = target->input_value(0).get_node_shared_ptr();
+    VPU_THROW_UNLESS(std::dynamic_pointer_cast<ngraph::vpu::op::DynamicShapeResolver>(dsr),
+        "DynamicToStaticShape transformation for {} of type {} expects {} as input with index {}",
+        target->get_friendly_name(), target->get_type_info(), ngraph::vpu::op::DynamicShapeResolver::type_info, 0);
+
+    const auto axes = std::dynamic_pointer_cast<ngraph::opset3::Constant>(target->input_value(1).get_node_shared_ptr());
+    VPU_THROW_UNLESS(axes, "DynamicToStaticShape transformation for {} of type {} expects {} as input with index {}",
+        target->get_friendly_name(), target->get_type_info(), ngraph::op::Constant::type_info, 1);
+
+    const auto squeeze = std::dynamic_pointer_cast<ngraph::opset3::Squeeze>(target);
+    const auto copied = squeeze->clone_with_new_inputs(target->input_values());
+    const auto shape = dsr->input(1).get_source_output();
+
+    const auto input_rank = squeeze->get_input_partial_shape(0).rank();
+    VPU_THROW_UNLESS(input_rank.is_static(),
+            "DynamicToStaticShape transformation for {} expects static input rank, but it is not", target);
+
+    const auto original_axes = axes->cast_vector<int64_t>();
+    VPU_THROW_UNLESS(!original_axes.empty(),
+            "DynamicToStaticShape transformation for {} does not support default squeezing which may result in rank dynamism", target);
+
+    const auto axes_value = ngraph::normalize_axes(
+            squeeze->description(), original_axes, input_rank);
+    const auto rank_value = input_rank.get_length();
+
+    std::vector<int64_t> indices_vector;
+    for (auto i = 0; i < rank_value; ++i) {
+        if (std::find(axes_value.begin(), axes_value.end(), i) == axes_value.end())
+            indices_vector.push_back(i);
+    }
+    const auto index = std::make_shared<ngraph::opset3::Constant>(
+            ngraph::element::i64, ngraph::Shape{indices_vector.size()}, indices_vector);
+    const auto axis = std::make_shared<ngraph::opset3::Constant>(
+            ngraph::element::i64, ngraph::Shape{1}, std::vector<int64_t>{0});
+    const auto squeeze_output_shape = std::make_shared<ngraph::opset3::Gather>(shape, index, axis);
+    ngraph::replace_node(std::move(target), std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(copied, squeeze_output_shape));
+}
+
+}  // namespace vpu
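Note: the Constant + Gather pair above selects from the runtime shape every axis that survives the Squeeze. The index computation on plain values (illustration only):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // e.g. rank = 4, axes = {0, 2} -> kept indices {1, 3}:
    // runtime shape {1, 8, 1, 800} gathers down to {8, 800}
    std::vector<int64_t> keptAxes(int64_t rank, const std::vector<int64_t>& axes) {
        std::vector<int64_t> indices;
        for (int64_t i = 0; i < rank; ++i)
            if (std::find(axes.begin(), axes.end(), i) == axes.end())
                indices.push_back(i);  // this axis survives the Squeeze
        return indices;
    }
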
diff --git a/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_transpose.cpp b/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_transpose.cpp
new file mode 100644 (file)
index 0000000..3fa3dad
--- /dev/null
@@ -0,0 +1,40 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_transpose.hpp"
+
+#include "vpu/ngraph/operations/dynamic_shape_resolver.hpp"
+#include <vpu/utils/error.hpp>
+
+#include "ngraph/graph_util.hpp"
+#include "ngraph/opsets/opset3.hpp"
+
+#include <memory>
+
+namespace vpu {
+
+void dynamicToStaticShapeTranspose(std::shared_ptr<ngraph::Node> target) {
+    const auto dsr = target->get_argument(0);
+    VPU_THROW_UNLESS(ngraph::as_type_ptr<ngraph::vpu::op::DynamicShapeResolver>(dsr),
+        "DynamicToStaticShape transformation for {} of type {} expects {} as input with index {}",
+        target->get_friendly_name(), target->get_type_info(), ngraph::vpu::op::DynamicShapeResolver::type_info, 0);
+
+    const auto transposition = target->get_argument(1);
+    VPU_THROW_UNLESS(ngraph::as_type_ptr<ngraph::opset3::Constant>(transposition),
+        "DynamicToStaticShape transformation for {] of type {} expects {} as input with index {}",
+        target->get_friendly_name(), target->get_type_info(), ngraph::opset3::Constant::type_info, 1);
+
+    const auto transpose = std::dynamic_pointer_cast<ngraph::opset3::Transpose>(target);
+    const auto copied = transpose->copy_with_new_args(target->get_arguments());
+    const auto shape = dsr->input(1).get_source_output();
+
+    const auto axis = std::make_shared<ngraph::opset3::Constant>(
+        ngraph::element::u64,
+        ngraph::Shape{std::initializer_list<std::size_t>{1}},
+        std::vector<std::size_t>{0});
+    const auto scatterElementsUpdate = std::make_shared<ngraph::opset3::ScatterElementsUpdate>(shape, transposition, shape, axis);
+    ngraph::replace_node(std::move(target), std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(copied, scatterElementsUpdate));
+}
+
+}  // namespace vpu
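Note: a minimal driver sketch for the transformation above (hypothetical wiring; input 0 must already be a DynamicShapeResolver and input 1 a Constant permutation, otherwise the checks above throw):

    #include "vpu/ngraph/operations/dynamic_shape_resolver.hpp"
    #include "vpu/ngraph/transformations/dynamic_to_static_shape_transpose.hpp"
    #include "ngraph/opsets/opset3.hpp"
    #include <memory>
    #include <vector>

    void rewrite() {
        auto data = std::make_shared<ngraph::opset3::Parameter>(
            ngraph::element::f16, ngraph::Shape{8, 800});
        auto shape = std::make_shared<ngraph::opset3::Parameter>(
            ngraph::element::i64, ngraph::Shape{2});
        auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, shape);
        auto permutation = ngraph::opset3::Constant::create(
            ngraph::element::i64, ngraph::Shape{2}, std::vector<int64_t>{1, 0});
        auto transpose = std::make_shared<ngraph::opset3::Transpose>(dsr, permutation);
        // Transpose is re-wrapped in a DSR whose shape input is permuted
        // via the ScatterElementsUpdate built above
        vpu::dynamicToStaticShapeTranspose(transpose);
    }
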
diff --git a/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_unary_elementwise.cpp b/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_unary_elementwise.cpp
new file mode 100644 (file)
index 0000000..b921a61
--- /dev/null
@@ -0,0 +1,28 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_unary_elementwise.hpp"
+
+#include "vpu/ngraph/operations/dynamic_shape_resolver.hpp"
+#include <vpu/utils/error.hpp>
+
+#include "ngraph/graph_util.hpp"
+#include "ngraph/ops.hpp"
+
+#include <memory>
+
+namespace vpu {
+
+void dynamicToStaticUnaryElementwise(std::shared_ptr<ngraph::Node> target) {
+    const auto dsr = target->input_value(0).get_node_shared_ptr();
+    VPU_THROW_UNLESS(ngraph::as_type_ptr<ngraph::vpu::op::DynamicShapeResolver>(dsr),
+                     "DynamicToStaticShape transformation for {} of type {} expects {} as input with index {}",
+                     target->get_friendly_name(), target->get_type_info(), ngraph::vpu::op::DynamicShapeResolver::type_info, 0);
+
+    const auto shape = dsr->input(1).get_source_output();
+    const auto copied = target->clone_with_new_inputs(target->input_values());
+    ngraph::replace_node(target, std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(copied, shape));
+}
+
+}  // namespace vpu
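Note: the unary case needs no shape arithmetic at all: the output shape equals the input shape, so the DSR's shape input is reused verbatim. A minimal driver sketch (hypothetical wiring):

    #include "vpu/ngraph/operations/dynamic_shape_resolver.hpp"
    #include "vpu/ngraph/transformations/dynamic_to_static_shape_unary_elementwise.hpp"
    #include "ngraph/opsets/opset3.hpp"
    #include <memory>

    void rewrite() {
        auto data = std::make_shared<ngraph::opset3::Parameter>(
            ngraph::element::f16, ngraph::Shape{8, 800});
        auto shape = std::make_shared<ngraph::opset3::Parameter>(
            ngraph::element::i64, ngraph::Shape{2});
        auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, shape);
        auto relu = std::make_shared<ngraph::opset3::Relu>(dsr);
        vpu::dynamicToStaticUnaryElementwise(relu);  // Relu gets wrapped in a new DSR(relu, shape)
    }
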
diff --git a/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_unsqueeze.cpp b/inference-engine/src/vpu/common/src/ngraph/transformations/dynamic_to_static_shape_unsqueeze.cpp
new file mode 100644 (file)
index 0000000..0dde63d
--- /dev/null
@@ -0,0 +1,64 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu/ngraph/transformations/dynamic_to_static_shape_unsqueeze.hpp"
+
+#include "vpu/ngraph/operations/dynamic_shape_resolver.hpp"
+#include <vpu/utils/error.hpp>
+
+#include "ngraph/graph_util.hpp"
+#include "ngraph/ops.hpp"
+#include "ngraph/validation_util.hpp"
+
+#include "ngraph/opsets/opset3.hpp"
+#include <algorithm>
+#include <vector>
+#include <memory>
+
+namespace vpu {
+
+void dynamicToStaticShapeUnsqueeze(std::shared_ptr<ngraph::Node> target) {
+    const auto dsr = target->input_value(0).get_node_shared_ptr();
+    VPU_THROW_UNLESS(std::dynamic_pointer_cast<ngraph::vpu::op::DynamicShapeResolver>(dsr),
+        "DynamicToStaticShape transformation for {} of type {} expects {} as input with index {}",
+        target->get_friendly_name(), target->get_type_info(), ngraph::vpu::op::DynamicShapeResolver::type_info, 0);
+
+    const auto axes = std::dynamic_pointer_cast<ngraph::opset3::Constant>(target->input_value(1).get_node_shared_ptr());
+    VPU_THROW_UNLESS(axes, "DynamicToStaticShape transformation for {} of type {} expects {} as input with index {}",
+        target->get_friendly_name(), target->get_type_info(), ngraph::op::Constant::type_info, 1);
+
+    const auto unsqueeze = std::dynamic_pointer_cast<ngraph::opset3::Unsqueeze>(target);
+    const auto copied = unsqueeze->clone_with_new_inputs(target->input_values());
+    const auto shape = dsr->input(1).get_source_output();
+
+    const auto input_rank = unsqueeze->get_input_partial_shape(0).rank();
+    VPU_THROW_UNLESS(input_rank.is_static(), "DynamicToStaticShape transformation for {} expects a static input rank, but the rank is dynamic", target);
+
+    const auto original_axes = axes->cast_vector<int64_t>();
+
+    auto axes_value = ngraph::normalize_axes(
+            unsqueeze->description(), original_axes, input_rank + original_axes.size());
+    std::sort(axes_value.begin(), axes_value.end());
+
+    const auto rank_value = input_rank.get_length();
+
+    ngraph::OutputVector new_shape_dims;
+    if (rank_value) {
+        const auto split_axis = std::make_shared<ngraph::opset3::Constant>(
+                ngraph::element::i64, ngraph::Shape{}, std::vector<int64_t>{0});
+        const auto split = std::make_shared<ngraph::opset3::Split>(shape, split_axis, rank_value);
+        new_shape_dims = split->outputs();
+    }
+    // For the scalar case there is no need to split the shape, as it is empty
+
+    for (const auto & i : axes_value) {
+        const auto new_dim = std::make_shared<ngraph::opset3::Constant>(
+                shape.get_element_type(), ngraph::Shape{1}, std::vector<int64_t>{1});
+        new_shape_dims.insert(new_shape_dims.begin() + i, new_dim);
+    }
+    const auto unsqueeze_output_shape = std::make_shared<ngraph::opset3::Concat>(new_shape_dims, 0);
+    ngraph::replace_node(std::move(target), std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(copied, unsqueeze_output_shape));
+}
+
+}  // namespace vpu
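Note: the Split + Concat subgraph above rebuilds the runtime shape with a constant 1 inserted at every (normalized, sorted) Unsqueeze axis. The same computation on plain values (illustration only):

    #include <cstdint>
    #include <vector>

    // e.g. shape = {8, 800}, sorted axes = {0, 3} -> {1, 8, 800, 1}
    std::vector<int64_t> unsqueezedShape(std::vector<int64_t> shape,
                                         const std::vector<int64_t>& sortedAxes) {
        for (int64_t axis : sortedAxes)
            shape.insert(shape.begin() + axis, 1);  // ascending order keeps indices valid
        return shape;
    }
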
index 35379a1..96df041 100644 (file)
@@ -1,6 +1,6 @@
 #pragma OPENCL EXTENSION cl_khr_fp16 : enable
 
-// Define if runtime supports it. MX runtime is compatible
+// Define if the runtime supports it. The MX runtime is compatible; KMB support is still in progress
 #define USE_MANUAL_DMA 1
 
 #if defined (USE_MANUAL_DMA)
index 16b4dc9..ab595bb 100644 (file)
@@ -1,6 +1,6 @@
 #pragma OPENCL EXTENSION cl_khr_fp16 : enable
 
-// Define if runtime supports it. MX runtime is compatible
+// Define if the runtime supports it. The MX runtime is compatible; KMB support is still in progress
 #define USE_MANUAL_DMA 1
 
 // Set to 1 if only output is zeroed before kernel execution
index cc3847a..76ee331 100644 (file)
@@ -147,6 +147,7 @@ public:
     void parseExpPriorGridGenerator(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const;
     void parseExpGenerateProposals(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const;
     void parseScatterUpdate(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const;
+    void parseScatterElementsUpdate(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const;
     void parseExpTopKROIs(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const;
     void parseNonZero(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const;
     void parseROIAlign(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const;
@@ -161,7 +162,7 @@ public:
     void parseConcat(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const;
     void parseSplit(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const;
     void parseStridedSlice(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const;
-
+    void parseDSR(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const;
 
     //
     // Parser with data sharing
index 66c44d1..08fca0b 100644 (file)
@@ -77,7 +77,7 @@ public:
      * Allocates memory for single data node
      */
     bool allocateData(const Data& data);
-    ShapeLocation allocateConstShape(Data& data);
+    ShapeLocation allocateShape(Data& data);
     void freeData(const Data& data, DeallocationMode mode = DeallocationMode::JustFree);
 
     void selfCheck();
index 75096bf..87d4284 100644 (file)
@@ -99,11 +99,17 @@ VPU_DEFINE_SHARED_PTR_TYPES(StageInput, Edge)
 VPU_DEFINE_HANDLE_TYPES(StageOutput, Edge)
 VPU_DEFINE_SHARED_PTR_TYPES(StageOutput, Edge)
 
+VPU_DEFINE_HANDLE_TYPES(StageDependency, Edge)
+VPU_DEFINE_SHARED_PTR_TYPES(StageDependency, Edge)
+
 VPU_DEFINE_HANDLE_TYPES(StageTempBuffer, Edge)
 VPU_DEFINE_SHARED_PTR_TYPES(StageTempBuffer, Edge)
 
-VPU_DEFINE_HANDLE_TYPES(SharedAllocation, Edge)
-VPU_DEFINE_SHARED_PTR_TYPES(SharedAllocation, Edge)
+VPU_DEFINE_HANDLE_TYPES(DataToDataAllocation, Edge)
+VPU_DEFINE_SHARED_PTR_TYPES(DataToDataAllocation, Edge)
+
+VPU_DEFINE_HANDLE_TYPES(DataToShapeAllocation, Edge)
+VPU_DEFINE_SHARED_PTR_TYPES(DataToShapeAllocation, Edge)
 
 VPU_DEFINE_HANDLE_TYPES(Injection, Edge)
 VPU_DEFINE_SHARED_PTR_TYPES(Injection, Edge)
index e4d656f..b59bbfa 100644 (file)
@@ -117,17 +117,29 @@ class DataNode final :
     VPU_MODEL_ATTRIBUTE(StageOutput, producerEdge, nullptr)
     VPU_MODEL_ATTRIBUTE_PTR_RANGE(StageInputList, consumerEdges)
 
+    VPU_MODEL_ATTRIBUTE_PTR_RANGE(StageDependencyList, dependentStagesEdges)
+
     VPU_MODEL_ATTRIBUTE(StageTempBuffer, tempBufferEdge, nullptr)
 
     /**
      * Parent data edge actually allocates memory
      */
-    VPU_MODEL_ATTRIBUTE(SharedAllocation, parentDataEdge, nullptr)
+    VPU_MODEL_ATTRIBUTE(DataToDataAllocation, parentDataToDataEdge, nullptr)
 
     /**
      * Children data edges uses parent's memory
      */
-    VPU_MODEL_ATTRIBUTE_PTR_RANGE(SharedAllocationList, childDataEdges)
+    VPU_MODEL_ATTRIBUTE_PTR_RANGE(DataToDataAllocationList, childDataToDataEdges)
+
+    /**
+     * Parent data edge actually allocates the memory used as a shape for the current data
+     */
+    VPU_MODEL_ATTRIBUTE(DataToShapeAllocation, parentDataToShapeEdge, nullptr)
+
+    /**
+     * Children data edges use parent's memory as a shape
+     */
+    VPU_MODEL_ATTRIBUTE_PTR_RANGE(DataToShapeAllocationList, childDataToShapeEdges)
 
     //
     // Const data content
@@ -157,7 +169,7 @@ private:
     };
 
     struct ChildDataAccess final {
-        inline auto operator()(const SharedAllocation& edge) const -> decltype(edge->child()) {
+        inline auto operator()(const DataToDataAllocation& edge) const -> decltype(edge->child()) {
             return edge->child();
         }
     };
@@ -182,14 +194,14 @@ public:
     }
 
     inline Data parentData() const {
-        return _parentDataEdge == nullptr ? nullptr : _parentDataEdge->parent();
+        return _parentDataToDataEdge == nullptr ? nullptr : _parentDataToDataEdge->parent();
     }
 
     inline int numChildDatas() const {
-        return _childDataEdges.size();
+        return _childDataToDataEdges.size();
     }
-    inline auto childDatas() const -> decltype(mapRange<ChildDataAccess>(childDataEdges())) {
-        return mapRange<ChildDataAccess>(childDataEdges());
+    inline auto childDatas() const -> decltype(mapRange<ChildDataAccess>(childDataToDataEdges())) {
+        return mapRange<ChildDataAccess>(childDataToDataEdges());
     }
 
     Data getTopParentData() const;
@@ -257,7 +269,9 @@ private:
 private:
     inline DataNode() :
         _consumerEdges(&StageInputEdge::_posInData),
-        _childDataEdges(&SharedAllocationEdge::_posInData),
+        _dependentStagesEdges(&StageDependencyEdge::_posInData),
+        _childDataToDataEdges(&DataToDataAllocationEdge::_posInData),
+        _childDataToShapeEdges(&DataToShapeAllocationEdge::_posInData),
         _posInModel(this) {
     }
 
index f5c3dcb..e0d071a 100644 (file)
@@ -9,11 +9,11 @@
 namespace vpu {
 
 //
-// StageInputEdge
+// Data -> Stage edges.
 //
 
 //
-// Data -> Stage edge.
+// StageInputEdge
 //
 
 class StageInputEdge final :
@@ -38,6 +38,28 @@ private:
 };
 
 //
+// StageDependencyEdge defines that some data must be computed before the stage starts,
+// even though that data is not an input of the stage, e.g. it is used as a shape for the stage output.
+//
+
+class StageDependencyEdge final :
+        public EnableHandle,
+        public EnableCustomAttributes {
+VPU_MODEL_ATTRIBUTE(Data, data, nullptr)
+VPU_MODEL_ATTRIBUTE(Stage, dependentStage, nullptr)
+
+private:
+    StageDependencyEdge() : _posInData(this) {}
+
+private:
+    StageDependencyPtrList::iterator _ptrPosInModel;
+    StageDependencyListNode _posInData;
+
+    friend ModelObj;
+    friend DataNode;
+};
+
+//
 // StageOutputEdge
 //
 
@@ -82,7 +104,7 @@ private:
 };
 
 //
-// SharedAllocationEdge
+// DataToDataAllocationEdge
 //
 
 //
@@ -126,7 +148,7 @@ VPU_DECLARE_ENUM(SharedConnectionMode,
     SINGLE_STAGE,
     SUBGRAPH)
 
-class SharedAllocationEdge final :
+class DataToDataAllocationEdge final :
         public EnableHandle,
         public EnableCustomAttributes {
     VPU_MODEL_ATTRIBUTE(Data, parent, nullptr)
@@ -137,12 +159,37 @@ class SharedAllocationEdge final :
     VPU_MODEL_ATTRIBUTE(SharedConnectionMode, connectionMode, SharedConnectionMode::SINGLE_STAGE);
 
 private:
-    SharedAllocationEdge() : _posInData(this) {}
+    DataToDataAllocationEdge() : _posInData(this) {}
 
 private:
     Model _model;
-    SharedAllocationPtrList::iterator _ptrPosInModel;
-    SharedAllocationListNode _posInData;
+    DataToDataAllocationPtrList::iterator _ptrPosInModel;
+    DataToDataAllocationListNode _posInData;
+
+    friend ModelObj;
+    friend DataNode;
+};
+
+//
+// DataToShapeAllocationEdge
+//
+
+//
+// Data <-> Shape of data edge - shares the data memory of one DataNode as the shape of another DataNode
+//
+
+class DataToShapeAllocationEdge final :
+        public EnableHandle,
+        public EnableCustomAttributes {
+    VPU_MODEL_ATTRIBUTE(Data, parent, nullptr)
+    VPU_MODEL_ATTRIBUTE(Data, child, nullptr)
+
+private:
+    DataToShapeAllocationEdge() : _posInData(this) {}
+
+private:
+    DataToShapeAllocationPtrList::iterator _ptrPosInModel;
+    DataToShapeAllocationListNode _posInData;
 
     friend ModelObj;
     friend DataNode;
index d35b824..ea11234 100644 (file)
@@ -141,6 +141,10 @@ public:
             const Stage& stage,
             const Data& data);
 
+    StageDependency addStageDependency(
+            const Stage& stage,
+            const Data& data);
+
     StageTempBuffer addTempBuffer(
             const Stage& stage,
             const DataDesc& desc);
@@ -192,30 +196,30 @@ public:
     // Data<->Data edges
     //
 
-    class DataEdgeHelper final {
+    class DataToDataEdgeHelper final {
     public:
-        inline DataEdgeHelper(DataEdgeHelper&&) = default;
+        inline DataToDataEdgeHelper(DataToDataEdgeHelper&&) = default;
 
-        DataEdgeHelper(const DataEdgeHelper&) = delete;
-        DataEdgeHelper& operator=(const DataEdgeHelper&) = delete;
-        DataEdgeHelper& operator=(DataEdgeHelper&&) = delete;
+        DataToDataEdgeHelper(const DataToDataEdgeHelper&) = delete;
+        DataToDataEdgeHelper& operator=(const DataToDataEdgeHelper&) = delete;
+        DataToDataEdgeHelper& operator=(DataToDataEdgeHelper&&) = delete;
 
-        ~DataEdgeHelper();
+        ~DataToDataEdgeHelper();
 
-        DataEdgeHelper& parent(const Data& parent);
-        DataEdgeHelper& child(const Data& child);
+        DataToDataEdgeHelper& parent(const Data& parent);
+        DataToDataEdgeHelper& child(const Data& child);
 
-        DataEdgeHelper& mode(SharedDataMode mode);
-        DataEdgeHelper& order(SharedDataOrder order);
+        DataToDataEdgeHelper& mode(SharedDataMode mode);
+        DataToDataEdgeHelper& order(SharedDataOrder order);
 
-        DataEdgeHelper& offset(const DimValues& offset);
+        DataToDataEdgeHelper& offset(const DimValues& offset);
 
-        DataEdgeHelper& connectionMode(SharedConnectionMode);
+        DataToDataEdgeHelper& connectionMode(SharedConnectionMode);
 
-        SharedAllocation done();
+        DataToDataAllocation done();
 
     private:
-        inline explicit DataEdgeHelper(const Model& model) : _model(model) {}
+        inline explicit DataToDataEdgeHelper(const Model& model) : _model(model) {}
 
     private:
         Model _model;
@@ -237,18 +241,22 @@ public:
         friend ModelObj;
     };
 
-    inline DataEdgeHelper connectDatas() {
-        return DataEdgeHelper(this);
+    inline DataToDataEdgeHelper connectDataWithData() {
+        return DataToDataEdgeHelper(this);
     }
 
+    DataToShapeAllocation connectDataWithShape(
+            const Data& parent,
+            const Data& child);
+
     void replaceParentData(
-            const SharedAllocation& edge,
+            const DataToDataAllocation& edge,
             const Data& newParent);
     void replaceChildData(
-            const SharedAllocation& edge,
+            const DataToDataAllocation& edge,
             const Data& newChild);
 
-    void disconnectDatas(const SharedAllocation& edge);
+    void disconnectDatas(const DataToDataAllocation& edge);
 
     //
     // Nodes removal
@@ -309,7 +317,7 @@ private:
             const Stage& parent,
             const Stage& child);
 
-    SharedAllocation connectDatasImpl(
+    DataToDataAllocation connectDataWithDataImpl(
             const Data& parent,
             const Data& child,
             SharedDataMode mode,
@@ -328,7 +336,9 @@ private:
     StageInputPtrList _inEdgePtrList;
     StageOutputPtrList _outEdgePtrList;
     StageTempBufferPtrList _tempBufferEdgePtrList;
-    SharedAllocationPtrList _dataEdgePtrList;
+    DataToDataAllocationPtrList _dataEdgePtrList;
+    DataToShapeAllocationPtrList _shapeEdgePtrList;
+    StageDependencyPtrList _stageDependencyEdgePtrList;
     InjectionPtrList _stageEdgePtrList;
 
     Allocator _allocator;
@@ -339,7 +349,7 @@ private:
     std::function<void(Stage&)> onNewStageCallback = nullptr;
 
     friend class InjectStageHelper;
-    friend class DataEdgeHelper;
+    friend class DataToDataEdgeHelper;
 };
 
 template <class StageImpl>
index 206cd4e..0f6136f 100644 (file)
@@ -165,6 +165,7 @@ VPU_DECLARE_ENUM(StageType,
     ROIAlign = 123,
     ExpGenerateProposals = 124,
     ExpTopKROIs = 125,
+    ScatterElementsUpdate = 126,
 )
 
 //
index b1a2156..419179d 100644 (file)
@@ -275,6 +275,16 @@ public:
             const Data& updates,
             const Data& axis);
 
+    Stage addScatterElementsUpdateStage(
+            const Model& model,
+            const std::string& name,
+            const ie::CNNLayerPtr& layer,
+            const Data& input,
+            const Data& output,
+            const Data& indices,
+            const Data& updates,
+            const Data& axis);
+
     Stage addLoopStartStage(
         const Model& model,
         const std::string& name,
index ec3311e..ca525e0 100644 (file)
@@ -179,8 +179,12 @@ void BackEnd::dumpModelToDot(
                     }
                 }
                 lbl.appendPair("memReqs", data->memReqs());
-                lbl.appendPair("location", data->dataLocation().location);
-                lbl.appendPair("memoryOffset", data->dataLocation().offset);
+                lbl.appendPair("dataLocation", data->dataLocation().location);
+                lbl.appendPair("dataOffset", data->dataLocation().offset);
+                lbl.appendPair("dimsLocation", data->shapeLocation().dimsLocation);
+                lbl.appendPair("dimsOffset", data->shapeLocation().dimsOffset);
+                lbl.appendPair("stridesLocation", data->shapeLocation().stridesLocation);
+                lbl.appendPair("stridesOffset", data->shapeLocation().stridesOffset);
                 if (!data->attrs().empty()) {
                     lbl.appendPair("extraAttrs", data->attrs());
                 }
@@ -308,18 +312,34 @@ void BackEnd::dumpModelToDot(
         }
 
         //
+        // Dump Data->Stage edges
+        //
+
+        for (const auto& data : model->datas()) {
+            for (const auto& dependentStageEdge : data->dependentStagesEdges()) {
+                out.append("%s -> %s [", dataDotName(data), stageDotName(dependentStageEdge->dependentStage()));
+                {
+                    VPU_DOT_IDENT(out);
+
+                    DotLabel lbl("Extra dependency", out);
+                }
+                out.append("];");
+            }
+        }
+
+        //
         // Dump Data<->Data edges
         //
 
         for (const auto& data : model->datas()) {
-            if (auto edge = data->parentDataEdge()) {
+            if (auto edge = data->parentDataToDataEdge()) {
                 out.append("%s -> %s [", dataDotName(edge->child()), dataDotName(edge->parent()));
                 {
                     VPU_DOT_IDENT(out);
 
                     out.append("style=dotted");
 
-                    DotLabel lbl("SharedAllocation", out);
+                    DotLabel lbl("DataToDataAllocation", out);
                     lbl.appendPair("mode", edge->mode());
                     lbl.appendPair("order", edge->order());
                     if (!edge->attrs().empty()) {
@@ -331,6 +351,24 @@ void BackEnd::dumpModelToDot(
         }
 
         //
+        // Dump Data<->Data shape edges
+        //
+
+        for (const auto& data : model->datas()) {
+            if (auto edge = data->parentDataToShapeEdge()) {
+                out.append("%s -> %s [", dataDotName(edge->parent()), dataDotName(edge->child()));
+                {
+                    VPU_DOT_IDENT(out);
+
+                    out.append("style=dotted");
+
+                    DotLabel lbl("DataToShapeAllocation", out);
+                }
+                out.append("];");
+            }
+        }
+
+        //
         // Dump Stage<->Stage edges
         //
 
index d98e6e0..783fd0d 100644 (file)
@@ -75,7 +75,7 @@ int BackEnd::serializeIOInfoSection(
                 data->usage());
         }
 
-        VPU_INTERNAL_CHECK(data->parentDataEdge() == nullptr,
+        VPU_INTERNAL_CHECK(data->parentDataToDataEdge() == nullptr,
             "serializeIOInfoSection failed on {} with usage {}. IO data must have no parentDatas but it does");
 
         VPU_INTERNAL_CHECK(!data->attrs().has("ioIdx"),
@@ -119,7 +119,7 @@ void BackEnd::serializeConstData(const Model& model, const mv_blob_header& blobH
         }
 
         IE_ASSERT(data->producerEdge() == nullptr);
-        IE_ASSERT(data->parentDataEdge() == nullptr);
+        IE_ASSERT(data->parentDataToDataEdge() == nullptr);
         IE_ASSERT(data->numConsumers() != 0);
         IE_ASSERT(data->dataLocation().location == Location::Blob);
 
index c00fc96..a2dc677 100644 (file)
@@ -16,6 +16,7 @@
 #include <graph_tools.hpp>
 
 #include <ngraph/function.hpp>
+#include <ngraph/opsets/opset3.hpp>
 
 #include <vpu/compile_env.hpp>
 
@@ -144,7 +145,7 @@ void FrontEnd::detectNetworkBatch(
             for (const auto& outputHandle : layer->get_outputs()) {
                 for (const auto& inputHandle : outputHandle.get_inputs()) {
                     auto outNode = inputHandle->get_node();
-                    if (std::dynamic_pointer_cast<::ngraph::op::Result>(outNode)) {
+                    if (std::dynamic_pointer_cast<::ngraph::opset3::Result>(outNode)) {
                         continue;
                     }
                     VPU_THROW_FORMAT("Unsupported layer %s configuration : it is not a network output", layer->get_name());
index 03ede02..0902dd0 100644 (file)
@@ -100,9 +100,11 @@ FrontEnd::FrontEnd(StageBuilder::Ptr stageBuilder)
         {"ExperimentalDetectronPriorGridGenerator",            LAYER_PARSER(parseExpPriorGridGenerator)},
         {"ExperimentalDetectronGenerateProposalsSingleImage",  LAYER_PARSER(parseExpGenerateProposals)},
         {"ScatterUpdate",                                      LAYER_PARSER(parseScatterUpdate)},
+        {"ScatterElementsUpdate",                              LAYER_PARSER(parseScatterElementsUpdate)},
         {"ExperimentalDetectronTopKROIs",                      LAYER_PARSER(parseExpTopKROIs)},
         {"StaticShapeNonZero",                                 LAYER_PARSER(parseNonZero)},
         {"ROIAlign",                                           LAYER_PARSER(parseROIAlign)},
+        {"DynamicShapeResolver",                               LAYER_PARSER(parseDSR)},
     }} {}
 
 ModelPtr FrontEnd::buildInitialModel(ie::ICNNNetwork& network) {
index a20de0c..36a9a9e 100644 (file)
@@ -61,7 +61,7 @@ Allocator::Allocator(): _allocatorOfShaves(_cmxMemoryPool) {
 namespace {
 
 void updateChildDataAllocation(const Data& data, int offsetLimitation) {
-    for (const auto& edge : data->childDataEdges()) {
+    for (const auto& edge : data->childDataToDataEdges()) {
         auto parent = edge->parent();
         auto child = edge->child();
 
@@ -107,7 +107,7 @@ bool Allocator::allocateData(const Data& data) {
 
     if (data->usage() == DataUsage::Fake) {
         if (_allocatedData.count(data) == 0) {
-            IE_ASSERT(data->parentDataEdge() == nullptr);
+            IE_ASSERT(data->parentDataToDataEdge() == nullptr);
 
             updateChildDataAllocation(data, 0);
 
@@ -123,7 +123,7 @@ bool Allocator::allocateData(const Data& data) {
 
     if (data->usage() == DataUsage::Input) {
         if (_allocatedData.count(data) == 0) {
-            IE_ASSERT(data->parentDataEdge() == nullptr);
+            IE_ASSERT(data->parentDataToDataEdge() == nullptr);
 
             auto finalByteSize = data->totalByteSize() * _modelBatchSize;
 
@@ -144,7 +144,7 @@ bool Allocator::allocateData(const Data& data) {
 
     if (data->usage() == DataUsage::Output) {
         if (_allocatedData.count(data) == 0) {
-            IE_ASSERT(data->parentDataEdge() == nullptr);
+            IE_ASSERT(data->parentDataToDataEdge() == nullptr);
 
             int finalByteSize = 0;
             if (data->attrs().getOrDefault<bool>("unbatched", false)) {
@@ -170,7 +170,7 @@ bool Allocator::allocateData(const Data& data) {
 
     if (data->usage() == DataUsage::Const) {
         if (_allocatedData.count(data) == 0) {
-            IE_ASSERT(data->parentDataEdge() == nullptr);
+            IE_ASSERT(data->parentDataToDataEdge() == nullptr);
             IE_ASSERT(data->checkStrides(StridesRequirement::compact()));
             IE_ASSERT(data->content() != nullptr);
 
@@ -192,15 +192,21 @@ bool Allocator::allocateData(const Data& data) {
     //
 
     if (data->usage() == DataUsage::Intermediate) {
-        IE_ASSERT(data->producerEdge() != nullptr);
-        IE_ASSERT(data->numConsumers() > 0);
+        VPU_INTERNAL_CHECK(data->producerEdge() != nullptr,
+            "Allocation check failed: data {} with usage {} must have producer, but actually it doesn't",
+            data->name(), data->usage());
+        VPU_INTERNAL_CHECK(!data->consumers().empty() || !data->childDataToShapeEdges().empty() ||
+            !data->dependentStagesEdges().empty(),
+            "Allocation check failed: data {} with usage {} must have at least one data/stage "
+            "depending on it, but it doesn't have either",
+            data->name(), data->usage());
     }
 
     //
     // Allocate parent data if any
     //
 
-    if (auto parentEdge = data->parentDataEdge()) {
+    if (auto parentEdge = data->parentDataToDataEdge()) {
         auto parent = parentEdge->parent();
 
         auto parentMemType = parent->memReqs();
@@ -210,7 +216,7 @@ bool Allocator::allocateData(const Data& data) {
         return allocateData(parent);
     }
 
-    IE_ASSERT(data->parentDataEdge() == nullptr);
+    IE_ASSERT(data->parentDataToDataEdge() == nullptr);
 
     //
     // Check if the data is already allocated
@@ -237,6 +243,7 @@ bool Allocator::allocateData(const Data& data) {
     //
 
     int inUse = 0;
+
     if (data->usage() == DataUsage::Temp) {
         inUse = 1;
     } else {
@@ -245,7 +252,12 @@ bool Allocator::allocateData(const Data& data) {
             return DataLoopStatus::NextChild;
         });
     }
-    IE_ASSERT(inUse >= 1);
+
+    inUse += data->childDataToShapeEdges().size();
+
+    VPU_INTERNAL_CHECK(inUse >= 1,
+        "allocateData failed: data {} with usage {} isn't used by anything",
+        data->name(), data->usage());
 
     auto chunk = allocateMem(memoryType, finalByteSize, inUse);
 
@@ -268,17 +280,27 @@ bool Allocator::allocateData(const Data& data) {
     return chunk->memType == memoryType;
 }
 
-ShapeLocation Allocator::allocateConstShape(Data& data) {
+ShapeLocation Allocator::allocateShape(Data& data) {
     ShapeLocation shapeLocation;
 
-    shapeLocation.dimsLocation = Location::Blob;
-    shapeLocation.stridesLocation = Location::Blob;
-
     const auto dimsByteSize = data->desc().dimsByteSize();
 
-    shapeLocation.dimsOffset = _blobMemOffset;
-    _blobMemOffset += dimsByteSize;
+    if (data->parentDataToShapeEdge()) {
+        // Dims for this data are already allocated, so reuse them
+        const auto& dataLocation = data->parentDataToShapeEdge()->parent()->dataLocation();
 
+        shapeLocation.dimsLocation = dataLocation.location;
+        shapeLocation.dimsOffset = dataLocation.offset;
+    } else {
+        // Static allocation
+        shapeLocation.dimsLocation = Location::Blob;
+        shapeLocation.dimsOffset = _blobMemOffset;
+        _blobMemOffset += dimsByteSize;
+    }
+
+    // Strides are always allocated statically, as only dims can be obtained dynamically
+    shapeLocation.stridesLocation = Location::Blob;
     shapeLocation.stridesOffset = _blobMemOffset;
     _blobMemOffset += dimsByteSize;
 
@@ -290,6 +312,37 @@ void Allocator::freeData(const Data& data, DeallocationMode mode) {
     // Release the chunk
     //
 
+    if (const auto& parentDataToShapeEdge = data->parentDataToShapeEdge()) {
+        auto const& parent = parentDataToShapeEdge->parent();
+
+        auto it = _memChunksPerData.find(parent);
+
+        VPU_INTERNAL_CHECK(it != _memChunksPerData.end(),
+            "Allocator failed on freeData for {} with usage {}: parent data {} with usage {} "
+            "containing shape for current data wasn't yet allocated",
+            data->name(), data->usage(), parent->name(), parent->usage());
+
+        auto chunk = it->second;
+
+        VPU_INTERNAL_CHECK(chunk != nullptr,
+            "Allocator failed on freeData for {} with usage {}: parent data {} with usage {} "
+            "containing shape for current data has no memory chunk",
+            data->name(), data->usage(), parent->name(), parent->usage());
+
+        VPU_INTERNAL_CHECK(chunk->inUse > 0,
+            "Allocator failed on freeData for {} with usage {}: parent data {} with usage {} "
+            "containing shape for this data has zero usages, but it is using at least by current data",
+            data->name(), data->usage(), parent->name(), parent->usage());
+
+        --chunk->inUse;
+
+        if (chunk->inUse == 0) {
+            freeMem(chunk);
+
+            _memChunksPerData.erase(parent);
+            _allocatedIntermData.erase(parent);
+        }
+    }
+
     auto topParent = data->getTopParentData();
 
     if (topParent->usage() == DataUsage::Intermediate ||
@@ -629,7 +682,7 @@ bool Allocator::removeCMXCandidates(const vpu::Data& data) {
     auto it = _candidatesForCMX.find(data);
 
     if (it != _candidatesForCMX.end()) {
-        IE_ASSERT(data->parentDataEdge() == nullptr);
+        IE_ASSERT(data->parentDataToDataEdge() == nullptr);
 
         if (_allocatedIntermData.count(data) != 0) {
             if (auto producerEdge = data->producerEdge()) {
@@ -654,7 +707,7 @@ bool Allocator::removeCMXCandidates(const vpu::Data& data) {
         auto cmxDatas = getAllocatedDatas(MemoryType::CMX);
 
         for (const auto& cmxData : cmxDatas) {
-            IE_ASSERT(cmxData->parentDataEdge() == nullptr);
+            IE_ASSERT(cmxData->parentDataToDataEdge() == nullptr);
 
             it = _candidatesForCMX.find(cmxData);
 
index 7381360..8538f34 100644 (file)
@@ -252,7 +252,7 @@ void PassImpl::adjustModelForMemReqs(const Model& model) {
 
         for (const auto& cmxData : allCmxDatas) {
             IE_ASSERT(cmxData->usage() == DataUsage::Intermediate);
-            IE_ASSERT(cmxData->parentDataEdge() == nullptr);
+            IE_ASSERT(cmxData->parentDataToDataEdge() == nullptr);
 
             auto cmxDataProducer = cmxData->producer();
             IE_ASSERT(cmxDataProducer != nullptr);
@@ -313,7 +313,7 @@ void PassImpl::adjustModelForMemReqs(const Model& model) {
             model->replaceStageInput(cmxConsumerEdge, ddrCopy);
 
             env.log->trace("Update child datas");
-            for (const auto& childDataEdge : cmxData->childDataEdges()) {
+            for (const auto& childDataEdge : cmxData->childDataToDataEdges()) {
                 VPU_LOGGER_SECTION(env.log);
 
                 auto order = childDataEdge->order();
@@ -447,7 +447,7 @@ void PassImpl::packDataInCmx(const Model& model) {
         env.log->trace("Try use CMX for Data [%s]", curCandidate->name());
         VPU_LOGGER_SECTION(env.log);
 
-        IE_ASSERT(curCandidate->parentDataEdge() == nullptr);
+        IE_ASSERT(curCandidate->parentDataToDataEdge() == nullptr);
         IE_ASSERT(curCandidate->usage() == DataUsage::Intermediate);
 
         auto curMemoryType = curCandidate->memReqs();
index b664eb6..ad2725a 100644 (file)
@@ -189,7 +189,7 @@ AllocationResult runAllocator(const Model& model, bool onlyCheckCMX) {
     //
 
     for (auto data : model->datas()) {
-        const auto shapeLocation = allocator.allocateConstShape(data);
+        const auto shapeLocation = allocator.allocateShape(data);
         data->setShapeAllocationInfo(shapeLocation);
     }
 
index 32cffd6..c888b02 100644 (file)
@@ -43,7 +43,7 @@ bool PassImpl::isApplicable(const Stage& copyStage) {
     IE_ASSERT(copyInput->producerEdge() != nullptr);
     IE_ASSERT(copyInput->desc().dimsOrder() == copyOutput->desc().dimsOrder());
 
-    if (copyInput->parentDataEdge() != nullptr) {
+    if (copyInput->parentDataToDataEdge() != nullptr) {
         return false;
     }
     if (copyInput->numChildDatas() > 0) {
index afbf7ed..b76f071 100644 (file)
@@ -48,7 +48,7 @@ void PassImpl::run(const Model& model) {
         // Data <-> Data Edges.
         //
 
-        if (auto dataEdge = data->parentDataEdge()) {
+        if (auto dataEdge = data->parentDataToDataEdge()) {
             auto parent = dataEdge->parent();
             auto child = dataEdge->child();
 
index f812aa8..bc4ecbe 100644 (file)
@@ -113,8 +113,8 @@ void PassImpl::run(const Model& model) {
 
         model->disconnectStage(stage);
 
-        const bool isOverlapByX = (input->desc().dim(Dim::W) + padLeft + padRight) == kernelSizeX;
-        const bool isOverlapByY = (input->desc().dim(Dim::H) + padTop + padBottom) == kernelSizeY;
+        const bool isOverlapByX = kernelSizeX - padLeft >= input->desc().dim(Dim::W);
+        const bool isOverlapByY = kernelSizeY - padTop >= input->desc().dim(Dim::H);
         const bool isOverlapByKernel = isOverlapByX && isOverlapByY;
         const bool paddingsNotExist = padLeft == 0 && padRight == 0 && padTop == 0 && padBottom == 0;
         const bool isGlobalPoolingOutputFormat =
index 4e5f210..ae3d0f3 100644 (file)
@@ -41,7 +41,7 @@ void SpecialStageProcessor::processSplit(
         bool needCopy = false;
         if (output->usage() != DataUsage::Intermediate) {
             needCopy = true;
-        } else if (output->parentDataEdge() != nullptr) {
+        } else if (output->parentDataToDataEdge() != nullptr) {
             needCopy = true;
         } else {
             //
@@ -104,7 +104,7 @@ void SpecialStageProcessor::processSplit(
         // Add Data<->Data edge
         //
 
-        model->connectDatas()
+        model->connectDataWithData()
             .parent(input)
             .child(output)
             .mode(SharedDataMode::ROI)
@@ -145,7 +145,7 @@ void SpecialStageProcessor::processConcat(
         if (input->usage() != DataUsage::Intermediate) {
             needCopy = true;
             optionalCopy = false;
-        } else if (input->parentDataEdge() != nullptr) {
+        } else if (input->parentDataToDataEdge() != nullptr) {
             needCopy = true;
             optionalCopy = false;
         } else {
@@ -249,7 +249,7 @@ void SpecialStageProcessor::processConcat(
         // Add Data<->Data edge
         //
 
-        model->connectDatas()
+        model->connectDataWithData()
             .parent(output)
             .child(input)
             .mode(SharedDataMode::ROI)
@@ -280,8 +280,8 @@ void SpecialStageProcessor::processReshape(
     if (input->usage() != DataUsage::Intermediate &&
         output->usage() != DataUsage::Intermediate) {
         needCopy = true;
-    } else if (input->parentDataEdge() != nullptr &&
-               output->parentDataEdge() != nullptr) {
+    } else if (input->parentDataToDataEdge() != nullptr &&
+               output->parentDataToDataEdge() != nullptr) {
         needCopy = true;
     }
 
@@ -323,8 +323,8 @@ void SpecialStageProcessor::processReshape(
     //
 
     if (input->usage() == DataUsage::Intermediate &&
-        input->parentDataEdge() == nullptr) {
-        model->connectDatas()
+        input->parentDataToDataEdge() == nullptr) {
+        model->connectDataWithData()
             .parent(output)
             .child(input)
             .mode(SharedDataMode::Reshape)
@@ -332,9 +332,9 @@ void SpecialStageProcessor::processReshape(
             .done();
     } else {
         IE_ASSERT(output->usage() == DataUsage::Intermediate);
-        IE_ASSERT(output->parentDataEdge() == nullptr);
+        IE_ASSERT(output->parentDataToDataEdge() == nullptr);
 
-        model->connectDatas()
+        model->connectDataWithData()
             .parent(input)
             .child(output)
             .mode(SharedDataMode::Reshape)
@@ -368,7 +368,7 @@ void SpecialStageProcessor::processExpand(
     if (input->usage() != DataUsage::Intermediate) {
         needCopy = true;
         optionalCopy = false;
-    } else if (input->parentDataEdge() != nullptr) {
+    } else if (input->parentDataToDataEdge() != nullptr) {
         needCopy = true;
         optionalCopy = false;
     } else {
@@ -472,7 +472,7 @@ void SpecialStageProcessor::processExpand(
     // Add Data<->Data edge
     //
 
-    model->connectDatas()
+    model->connectDataWithData()
         .parent(output)
         .child(input)
         .mode(SharedDataMode::ROI)
@@ -504,7 +504,7 @@ void SpecialStageProcessor::processCrop(
     bool needCopy = false;
     if (output->usage() != DataUsage::Intermediate) {
         needCopy = true;
-    } else if (output->parentDataEdge() != nullptr) {
+    } else if (output->parentDataToDataEdge() != nullptr) {
         needCopy = true;
     } else {
         //
@@ -567,7 +567,7 @@ void SpecialStageProcessor::processCrop(
     // Add Data<->Data edge
     //
 
-    model->connectDatas()
+    model->connectDataWithData()
         .parent(input)
         .child(output)
         .mode(SharedDataMode::ROI)
@@ -592,7 +592,7 @@ void SpecialStageProcessor::processLoopStart(const Model& model, const Stage& st
                 order = SharedDataOrder::ParentWritesToChild;
             }
 
-            model->connectDatas()
+            model->connectDataWithData()
                 .parent(parent)
                 .child(child)
                 .mode(SharedDataMode::ROI)
@@ -607,7 +607,7 @@ void SpecialStageProcessor::processLoopStart(const Model& model, const Stage& st
         const auto& dst = backedge.second;
 
         // Tensor Iterator's body output data object must be a parent since it's not processed yet and has neither a parent nor a child
-        model->connectDatas()
+        model->connectDataWithData()
             .parent(dst)
             .child(src)
             .mode(SharedDataMode::ROI)
@@ -634,7 +634,7 @@ void SpecialStageProcessor::processLoopEnd(const Model& model, const Stage& stag
                 order = SharedDataOrder::ParentWritesToChild;
             }
 
-            model->connectDatas()
+            model->connectDataWithData()
                 .parent(parent)
                 .child(child)
                 .mode(SharedDataMode::ROI)
index cbb6247..ddf9598 100644 (file)
@@ -40,9 +40,9 @@ Data DataNode::getTopParentData() const {
 }
 
 DimValues DataNode::strides() const {
-    if (_parentDataEdge != nullptr) {
-        if (_parentDataEdge->mode() == SharedDataMode::ROI) {
-            return _parentDataEdge->parent()->strides();
+    if (_parentDataToDataEdge != nullptr) {
+        if (_parentDataToDataEdge->mode() == SharedDataMode::ROI) {
+            return _parentDataToDataEdge->parent()->strides();
         }
     }
 
@@ -51,7 +51,7 @@ DimValues DataNode::strides() const {
 
 int DataNode::totalByteSize() const {
     // It doesn't make sense for child Data.
-    IE_ASSERT(_parentDataEdge == nullptr);
+    IE_ASSERT(_parentDataToDataEdge == nullptr);
 
     return calcTotalByteSize(_desc, strides());
 }
@@ -87,8 +87,8 @@ bool DataNode::checkStrides(const StridesRequirement& reqs) const {
 
 void DataNode::updateRequiredStrides(const StridesRequirement& newReqs) {
     // There shouldn't be any Data<->Data edges.
-    IE_ASSERT(_parentDataEdge == nullptr);
-    IE_ASSERT(_childDataEdges.empty());
+    IE_ASSERT(_parentDataToDataEdge == nullptr);
+    IE_ASSERT(_childDataToDataEdges.empty());
 
     auto prevReqs = _requiredStrides;
 
index 3645c59..b0667f0 100644 (file)
@@ -97,7 +97,7 @@ Data ModelObj::addConstData(
     IE_ASSERT(content != nullptr);
 
     VPU_THROW_UNLESS(desc.totalDimSize() * desc.elemSize() == content->byteSize(),
-        "duplicateData error: while duplicating {} Const data got different "
+        "addConstData error: while duplicating {} Const data got different "
         "newDesc and content byte sizes ({} and {} respectively)",
         name, desc.totalDimSize() * desc.elemSize(), content->byteSize());
 
@@ -345,11 +345,11 @@ StageOutput ModelObj::addStageOutput(
 
     IE_ASSERT(data->_producerEdge == nullptr);
 
-    if (data->_parentDataEdge != nullptr) {
-        IE_ASSERT(data->_parentDataEdge->_order != SharedDataOrder::ParentWritesToChild);
+    if (data->_parentDataToDataEdge != nullptr) {
+        IE_ASSERT(data->_parentDataToDataEdge->_order != SharedDataOrder::ParentWritesToChild);
     }
 
-    for (const auto& childDataEdge : data->_childDataEdges) {
+    for (const auto& childDataEdge : data->_childDataToDataEdges) {
         IE_ASSERT(childDataEdge->_order != SharedDataOrder::ChildWritesToParent);
     }
 
@@ -390,6 +390,34 @@ StageOutput ModelObj::addStageOutput(
     return edge;
 }
 
+StageDependency ModelObj::addStageDependency(const Stage& stage, const Data& data) {
+    _resetStageOrder = true;
+
+    std::shared_ptr<StageDependencyEdge> edge(new StageDependencyEdge);
+    edge->_ptrPosInModel = _stageDependencyEdgePtrList.emplace(_stageDependencyEdgePtrList.end(), edge);
+
+    edge->_data = data;
+    edge->_dependentStage = stage;
+
+    data->_dependentStagesEdges.push_back(edge);
+
+    VPU_THROW_UNLESS(data->usage() == DataUsage::Intermediate,
+        "Adding stage dependency for {} with type {} failed: only {} datas can be added as a dependency "
+        "while adding {} with usage {} was attempted",
+        stage->name(), stage->type(), DataUsage::Intermediate, data->name(), data->usage());
+
+    VPU_THROW_UNLESS(data->_producerEdge != nullptr,
+        "Adding stage dependency for {} with type {} failed: data {} with usage {} should have producer, "
+        "but actually it doesn't", stage->name(), stage->type(), data->name(), data->usage());
+
+    if (data->_producerEdge != nullptr) {
+        ++data->_producerEdge->_producer->_nextStages[stage];
+        ++stage->_prevStages[data->_producerEdge->_producer];
+    }
+
+    return edge;
+}
+
 StageTempBuffer ModelObj::addTempBuffer(
         const Stage& stage,
         const DataDesc& desc) {
@@ -547,11 +575,11 @@ void ModelObj::replaceStageOutput(
 
     IE_ASSERT(newOutput->_producerEdge == nullptr);
 
-    if (newOutput->_parentDataEdge != nullptr) {
-        IE_ASSERT(newOutput->_parentDataEdge->_order != SharedDataOrder::ParentWritesToChild);
+    if (newOutput->_parentDataToDataEdge != nullptr) {
+        IE_ASSERT(newOutput->_parentDataToDataEdge->_order != SharedDataOrder::ParentWritesToChild);
     }
 
-    for (const auto& childDataEdge : newOutput->_childDataEdges) {
+    for (const auto& childDataEdge : newOutput->_childDataToDataEdges) {
         IE_ASSERT(childDataEdge->_order != SharedDataOrder::ChildWritesToParent);
     }
 
@@ -1104,7 +1132,7 @@ void ModelObj::revertInjection(const Injection& edge) {
     _stageEdgePtrList.erase(edge->_ptrPosInModel);
 }
 
-ModelObj::DataEdgeHelper::~DataEdgeHelper() {
+ModelObj::DataToDataEdgeHelper::~DataToDataEdgeHelper() {
     //
     // Check that `done` was called.
     //
@@ -1114,7 +1142,7 @@ ModelObj::DataEdgeHelper::~DataEdgeHelper() {
     }
 }
 
-ModelObj::DataEdgeHelper& ModelObj::DataEdgeHelper::parent(const Data& parent) {
+ModelObj::DataToDataEdgeHelper& ModelObj::DataToDataEdgeHelper::parent(const Data& parent) {
     //
     // Check that `done` was not called.
     //
@@ -1138,7 +1166,7 @@ ModelObj::DataEdgeHelper& ModelObj::DataEdgeHelper::parent(const Data& parent) {
     return *this;
 }
 
-ModelObj::DataEdgeHelper& ModelObj::DataEdgeHelper::child(const Data& child) {
+ModelObj::DataToDataEdgeHelper& ModelObj::DataToDataEdgeHelper::child(const Data& child) {
     //
     // Check that `done` was not called.
     //
@@ -1162,7 +1190,7 @@ ModelObj::DataEdgeHelper& ModelObj::DataEdgeHelper::child(const Data& child) {
     return *this;
 }
 
-ModelObj::DataEdgeHelper& ModelObj::DataEdgeHelper::mode(SharedDataMode mode) {
+ModelObj::DataToDataEdgeHelper& ModelObj::DataToDataEdgeHelper::mode(SharedDataMode mode) {
     //
     // Check that `done` was not called.
     //
@@ -1181,7 +1209,7 @@ ModelObj::DataEdgeHelper& ModelObj::DataEdgeHelper::mode(SharedDataMode mode) {
     return *this;
 }
 
-ModelObj::DataEdgeHelper& ModelObj::DataEdgeHelper::order(SharedDataOrder order) {
+ModelObj::DataToDataEdgeHelper& ModelObj::DataToDataEdgeHelper::order(SharedDataOrder order) {
     //
     // Check that `done` was not called.
     //
@@ -1200,7 +1228,7 @@ ModelObj::DataEdgeHelper& ModelObj::DataEdgeHelper::order(SharedDataOrder order)
     return *this;
 }
 
-ModelObj::DataEdgeHelper& ModelObj::DataEdgeHelper::offset(const DimValues& offset) {
+ModelObj::DataToDataEdgeHelper& ModelObj::DataToDataEdgeHelper::offset(const DimValues& offset) {
     //
     // Check that `done` was not called.
     //
@@ -1219,7 +1247,7 @@ ModelObj::DataEdgeHelper& ModelObj::DataEdgeHelper::offset(const DimValues& offs
     return *this;
 }
 
-ModelObj::DataEdgeHelper& ModelObj::DataEdgeHelper::connectionMode(SharedConnectionMode connectionMode) {
+ModelObj::DataToDataEdgeHelper& ModelObj::DataToDataEdgeHelper::connectionMode(SharedConnectionMode connectionMode) {
     //
     // Check that `done` was not called.
     //
@@ -1237,7 +1265,7 @@ ModelObj::DataEdgeHelper& ModelObj::DataEdgeHelper::connectionMode(SharedConnect
     return *this;
 }
 
-SharedAllocation ModelObj::DataEdgeHelper::done() {
+DataToDataAllocation ModelObj::DataToDataEdgeHelper::done() {
     //
     // Check that `done` was not called.
     //
@@ -1260,7 +1288,7 @@ SharedAllocation ModelObj::DataEdgeHelper::done() {
     // Call the actual implementation.
     //
 
-    auto edge = _model->connectDatasImpl(
+    auto edge = _model->connectDataWithDataImpl(
         _parent, _child,
         _mode, _order,
         _offset, _connectionMode);
@@ -1440,7 +1468,7 @@ Stage getDataConnectionStage(
 
 }  // namespace
 
-SharedAllocation ModelObj::connectDatasImpl(
+DataToDataAllocation ModelObj::connectDataWithDataImpl(
         const Data& parent,
         const Data& child,
         SharedDataMode mode,
@@ -1451,13 +1479,13 @@ SharedAllocation ModelObj::connectDatasImpl(
     // Child must not have other parents
     //
 
-    IE_ASSERT(child->parentDataEdge() == nullptr);
+    IE_ASSERT(child->parentDataToDataEdge() == nullptr);
 
     //
     // Create new Edge.
     //
 
-    std::shared_ptr<SharedAllocationEdge> edge(new SharedAllocationEdge);
+    std::shared_ptr<DataToDataAllocationEdge> edge(new DataToDataAllocationEdge);
     edge->_ptrPosInModel = _dataEdgePtrList.emplace(_dataEdgePtrList.end(), edge);
 
     edge->_parent = parent;
@@ -1476,8 +1504,8 @@ SharedAllocation ModelObj::connectDatasImpl(
         edge->attrs().set("offset", offset);
     }
 
-    parent->_childDataEdges.push_back(edge);
-    child->_parentDataEdge = edge;
+    parent->_childDataToDataEdges.push_back(edge);
+    child->_parentDataToDataEdge = edge;
 
     //
     // Notify allocator.
@@ -1490,13 +1518,41 @@ SharedAllocation ModelObj::connectDatasImpl(
     return edge;
 }
 
+DataToShapeAllocation ModelObj::connectDataWithShape(
+        const Data& parent,
+        const Data& child) {
+    VPU_THROW_UNLESS(child->parentDataToShapeEdge() == nullptr,
+        "connectDataWithShape failed: child data {} with usage {} must not have any parents "
+        "but it actually have (data {} with usage {})",
+        child->name(), child->usage(), child->parentDataToShapeEdge()->parent()->name(), child->parentDataToShapeEdge()->parent()->usage());
+
+    std::shared_ptr<DataToShapeAllocationEdge> edge(new DataToShapeAllocationEdge);
+    edge->_ptrPosInModel = _shapeEdgePtrList.emplace(_shapeEdgePtrList.end(), edge);
+
+    edge->_parent = parent;
+    edge->_child = child;
+
+    parent->_childDataToShapeEdges.push_back(edge);
+    child->_parentDataToShapeEdge = edge;
+
+    const auto& parentStage = parent->producer();
+    const auto& childStage = child->producer();
+
+    if (parentStage && childStage && parentStage != childStage && parent->usage() == DataUsage::Intermediate) {
+        // Shape and data are produced from different stages, make sure that shape is calculated before data
+        addStageDependency(childStage, parent);
+    }
+
+    return edge;
+}
+
 void ModelObj::replaceParentData(
-        const SharedAllocation& edge,
+        const DataToDataAllocation& edge,
         const Data& newParent) {
     auto oldParent = edge->parent();
     auto child = edge->child();
 
-    oldParent->_childDataEdges.erase(edge);
+    oldParent->_childDataToDataEdges.erase(edge);
 
     edge->_parent = newParent;
     if (edge->connectionMode() == SharedConnectionMode::SINGLE_STAGE) {
@@ -1507,7 +1563,7 @@ void ModelObj::replaceParentData(
             this);
     }
 
-    newParent->_childDataEdges.push_back(edge);
+    newParent->_childDataToDataEdges.push_back(edge);
 
     if (oldParent->usage() != DataUsage::Intermediate ||
         newParent->usage() != DataUsage::Intermediate) {
@@ -1516,12 +1572,12 @@ void ModelObj::replaceParentData(
 }
 
 void ModelObj::replaceChildData(
-        const SharedAllocation& edge,
+        const DataToDataAllocation& edge,
         const Data& newChild) {
     auto parent = edge->parent();
     auto oldChild = edge->child();
 
-    oldChild->_parentDataEdge = nullptr;
+    oldChild->_parentDataToDataEdge = nullptr;
 
     edge->_child = newChild;
     if (edge->connectionMode() == SharedConnectionMode::SINGLE_STAGE) {
@@ -1532,19 +1588,19 @@ void ModelObj::replaceChildData(
             this);
     }
 
-    newChild->_parentDataEdge = edge;
+    newChild->_parentDataToDataEdge = edge;
 
     if (parent->usage() != DataUsage::Intermediate) {
         getAllocator().setNeedToAllocNonIntermData();
     }
 }
 
-void ModelObj::disconnectDatas(const SharedAllocation& edge) {
+void ModelObj::disconnectDatas(const DataToDataAllocation& edge) {
     auto parent = edge->parent();
     auto child = edge->child();
 
-    child->_parentDataEdge = nullptr;
-    parent->_childDataEdges.erase(edge);
+    child->_parentDataToDataEdge = nullptr;
+    parent->_childDataToDataEdges.erase(edge);
 
     IE_ASSERT(edge->_ptrPosInModel != _dataEdgePtrList.end());
     _dataEdgePtrList.erase(edge->_ptrPosInModel);
@@ -1659,11 +1715,12 @@ void ModelObj::cleanUp() {
 
     for (const auto& data : datas()) {
         if (data->_usage == DataUsage::Input) {
-            IE_ASSERT(!data->_consumerEdges.empty());
-            IE_ASSERT(data->_parentDataEdge == nullptr);
+            VPU_THROW_UNLESS(!data->_consumerEdges.empty(),
+                "Input data {} must have at least one consumers, but got zero.", data->name());
+            IE_ASSERT(data->_parentDataToDataEdge == nullptr);
         } else if (data->_usage == DataUsage::Output) {
             IE_ASSERT(data->_producerEdge != nullptr);
-            IE_ASSERT(data->_parentDataEdge == nullptr);
+            IE_ASSERT(data->_parentDataToDataEdge == nullptr);
         } else if (data->_usage == DataUsage::Temp) {
             if (data->_tempBufferEdge == nullptr) {
                 _dataList.erase(data);
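
Among the renames above, two distinct edge kinds are easy to conflate: connectDataWithData (formerly connectDatas) shares one memory allocation between two data objects, while the new connectDataWithShape binds a shape-carrying tensor to the tensor whose dimensions it describes and, via addStageDependency, forces the shape to be computed first. A minimal usage sketch built only from the member functions shown in this hunk (the helper and its arguments are illustrative, not part of the patch):

    #include <vpu/frontend/frontend.hpp>  // same include the new DSR parser uses below

    namespace vpu {

    // Bind a dynamic tensor to the 1-D tensor holding its actual shape.
    void bindShape(const Model& model, const Data& shape, const Data& data) {
        // Creates a DataToShapeAllocationEdge with `shape` as parent and `data`
        // as child; when the two are produced by different stages and `shape` is
        // Intermediate, a StageDependency is recorded as well, so the scheduler
        // runs the shape-producing stage first (see connectDataWithShape above).
        model->connectDataWithShape(shape, data);
    }

    }  // namespace vpu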
diff --git a/inference-engine/src/vpu/graph_transformer/src/stages/dynamic_shape_resolver.cpp b/inference-engine/src/vpu/graph_transformer/src/stages/dynamic_shape_resolver.cpp
new file mode 100644 (file)
index 0000000..82b23c6
--- /dev/null
@@ -0,0 +1,54 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vpu/frontend/frontend.hpp>
+
+namespace vpu {
+
+void FrontEnd::parseDSR(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const {
+    VPU_THROW_UNLESS(inputs.size() == 2, "Error while parsing {} with type {}, got {} inputs, while {} were expected",
+        layer->name, layer->type, inputs.size(), 2);
+
+    VPU_THROW_UNLESS(outputs.size() == 1, "Error while parsing {} with type {}, got {} outputs, while {} were expected",
+                     layer->name, layer->type, outputs.size(), 1);
+
+    const auto& data = inputs[0];
+    const auto& shape = inputs[1];
+
+    const auto& dataOutput = outputs[0];
+
+    VPU_THROW_UNLESS(shape->desc().numDims() == 1,
+        "Error while parsing {} with type {}, the number of dimensions for the second input {} should be equal to 1 "
+        "but got {} instead",
+        layer->name, layer->type, shape->name(), shape->desc().numDims());
+
+    VPU_THROW_UNLESS(shape->desc().totalDimSize() == data->desc().numDims(),
+        "Error while parsing {} with type {}, the total number of elements for the second input {} should be equal to "
+        "the number of dimensions for the first input {}, but got {} and {} respectively",
+        layer->name, layer->type, shape->name(), data->name(), shape->desc().totalDimSize(), data->desc().numDims());
+
+    // Dynamic input shape is unsupported
+    VPU_THROW_UNLESS(data->producer() != nullptr,
+        "Parsing layer {} with type {} failed: DSR stages must have a producer, but actually it doesn't",
+        layer->name, layer->type);
+
+    const auto dataOutputEdge = data->producerEdge();
+    const auto shapeOutputEdge = shape->producerEdge();
+
+    if (dataOutput->usage() == DataUsage::Output) {
+        // Create the second output with shape in case of dynamic output
+        const auto& shapeOutput = model->addOutputData(dataOutput->name() + "@shape", shape->desc());
+
+        model->replaceStageOutput(shapeOutputEdge, shapeOutput);
+
+        model->removeUnusedData(shape);
+    } else {
+        model->connectDataWithShape(shape, dataOutput);
+    }
+
+    model->replaceStageOutput(dataOutputEdge, dataOutput);
+    model->removeUnusedData(data);
+}
+
+}  // namespace vpu
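
To make parseDSR's contract concrete: a DynamicShapeResolver pairs a data tensor with a 1-D S32 tensor whose length equals the data tensor's rank; each shape element carries the actual size of the matching data dimension. When the DSR result is a network output, the shape becomes a second output named "<output>@shape" (consumed later by MyriadInferRequest::GetResult, below); otherwise it is attached through connectDataWithShape. A standalone restatement of the input check (function name invented for illustration):

    #include <cstddef>
    #include <vector>

    // DSR input contract: the shape input is 1-D and holds exactly one
    // element per data dimension, e.g. dataRank == 4 pairs with shapeDims == {4}.
    bool isValidDSRShapeInput(std::size_t dataRank, const std::vector<std::size_t>& shapeDims) {
        return shapeDims.size() == 1 && shapeDims[0] == dataRank;
    }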
index 8d8f6d3..da02787 100644 (file)
@@ -140,7 +140,8 @@ private:
 
         {
             auto supportedDataTypesInput0 = EnumSet<DataType>{DataType::FP16};
-            if (operation == StageType::Sum || operation == StageType::Greater_equal || operation == StageType::Select || operation == StageType::Prod) {
+            if (operation == StageType::Sum || operation == StageType::Greater_equal || operation == StageType::Select ||
+                operation == StageType::Prod || operation == StageType::Max) {
                 supportedDataTypesInput0.insert(DataType::S32);
             }
 
index 2241dfe..0f2c100 100644 (file)
@@ -17,11 +17,11 @@ VPU_DECLARE_ENUM(ROIAlignMode,
     Max = 1
 )
 
-static const std::string s_mode = "mode";
-static const std::string s_pooled_w = "pooled_w";
-static const std::string s_pooled_h = "pooled_h";
-static const std::string s_sampling_ratio = "sampling_ratio";
-static const std::string s_spatial_scale = "spatial_scale";
+static const char s_mode[] = "mode";
+static const char s_pooled_w[] = "pooled_w";
+static const char s_pooled_h[] = "pooled_h";
+static const char s_sampling_ratio[] = "sampling_ratio";
+static const char s_spatial_scale[] = "spatial_scale";
 
 namespace {
 
diff --git a/inference-engine/src/vpu/graph_transformer/src/stages/scatter_elements_update.cpp b/inference-engine/src/vpu/graph_transformer/src/stages/scatter_elements_update.cpp
new file mode 100644 (file)
index 0000000..a4f2cdf
--- /dev/null
@@ -0,0 +1,253 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vpu/frontend/frontend.hpp>
+
+#include <memory>
+#include <string>
+
+namespace vpu {
+
+using InferenceEngine::CNNLayerPtr;
+
+//----------------------------------------------------------------------
+
+namespace {
+
+class ScatterElementsUpdateStage final : public StageNode {
+public:
+    using StageNode::StageNode;
+
+private:
+    StagePtr cloneImpl() const override {
+        return std::make_shared<ScatterElementsUpdateStage>(*this);
+    }
+
+    void propagateDataOrderImpl(StageDataInfo<DimsOrder>& orderInfo) override {
+        const auto data = inputEdge(0)->input();
+        const auto indices = inputEdge(1)->input();
+        const auto updates = inputEdge(2)->input();
+        const auto axis = inputEdge(3)->input();
+        const auto output = outputEdge(0)->output();
+        orderInfo.setInput(inputEdge(0), DimsOrder::fromNumDims(data->desc().numDims()));
+        orderInfo.setInput(inputEdge(1), DimsOrder::fromNumDims(indices->desc().numDims()));
+        orderInfo.setInput(inputEdge(2), DimsOrder::fromNumDims(updates->desc().numDims()));
+        orderInfo.setInput(inputEdge(3), DimsOrder::fromNumDims(axis->desc().numDims()));
+        orderInfo.setOutput(outputEdge(0), DimsOrder::fromNumDims(output->desc().numDims()));
+    }
+
+    void getDataStridesRequirementsImpl(StageDataInfo<StridesRequirement>& stridesInfo) override {
+        stridesInfo.setInput(inputEdge(0), StridesRequirement::compact());    // `data`    tensor
+        stridesInfo.setInput(inputEdge(1), StridesRequirement::compact());    // `indices` tensor
+        stridesInfo.setInput(inputEdge(2), StridesRequirement::compact());    // `updates` tensor
+        stridesInfo.setOutput(outputEdge(0), StridesRequirement::compact());  // `output`  tensor
+    }
+
+    void finalizeDataLayoutImpl() override {
+    }
+
+    void getBatchSupportInfoImpl(StageDataInfo<BatchSupport>& /*batchInfo*/) override {
+    }
+
+    StageSHAVEsRequirements getSHAVEsRequirementsImpl() const override {
+        return StageSHAVEsRequirements::NotNeeded;
+    }
+
+    void initialCheckImpl() const override {
+        const auto& srcType = input(0)->desc().type();
+        assertInputsOutputsTypes(this, {{srcType}, {DataType::S32}, {srcType}, {DataType::S32}}, {{srcType}});
+        //                               `data`  ,  `indices`     , `updates`,  `axis`         ,   `output`
+    }
+
+    void serializeDataImpl(BlobSerializer& serializer) const override {
+        auto data    = input(0);
+        auto indices = input(1);
+        auto updates = input(2);
+        auto axis    = input(3);
+        auto out = output(0);
+
+        data->serializeBuffer(serializer);
+        out->serializeBuffer(serializer);
+        indices->serializeBuffer(serializer);
+        updates->serializeBuffer(serializer);
+        axis->serializeBuffer(serializer);
+    }
+
+    void serializeParamsImpl(BlobSerializer& serializer) const override {
+    }
+};
+
+}  // namespace
+
+//----------------------------------------------------------------------
+
+static
+void checkTensorShapes(const vpu::Data& input,
+                       const vpu::Data& output,
+                       const vpu::Data& indices,
+                       const vpu::Data& updates,
+                       const vpu::Data& axis) {
+    const DataDesc& inputDesc = input->desc();
+    const DataDesc& outputDesc = output->desc();
+    const DataDesc& indicesDesc = indices->desc();
+    const DataDesc& updatesDesc = updates->desc();
+    const DataDesc& axisDesc = axis->desc();
+
+    const auto inputType = inputDesc.type();
+    const auto outputType = outputDesc.type();
+    const auto indicesType = indicesDesc.type();
+    const auto updatesType = updatesDesc.type();
+    const auto axisType = axisDesc.type();
+
+    VPU_THROW_UNLESS(inputType == DataType::S32 ||
+                     inputType == DataType::FP16, "input type is invalid");
+    VPU_THROW_UNLESS(outputType == inputType, "output type is invalid");
+    VPU_THROW_UNLESS(updatesType == inputType, "updates type is invalid");
+    VPU_THROW_UNLESS(indicesType == DataType::S32, "indices type is invalid");
+    VPU_THROW_UNLESS(axisType == DataType::S32, "axis type is invalid");
+
+    const int inputNDims = inputDesc.numDims();
+    const int outputNDims = outputDesc.numDims();
+    const int indicesNDims = indicesDesc.numDims();
+    const int updatesNDims = updatesDesc.numDims();
+    const int axisNDims = axisDesc.numDims();
+
+    VPU_THROW_UNLESS(inputNDims > 0, "input tensor must not be 0-dimensional");
+    VPU_THROW_UNLESS(outputNDims > 0, "output tensor must not be 0-dimensional");
+    VPU_THROW_UNLESS(indicesNDims > 0, "indices tensor must not be 0-dimensional");
+    VPU_THROW_UNLESS(updatesNDims > 0, "updates tensor must not be 0-dimensional");
+    VPU_THROW_UNLESS(axisNDims > 0, "axis tensor must not be 0-dimensional");
+
+    VPU_THROW_UNLESS(inputNDims == outputNDims,
+                     "input and output have different shapes: inputNDims={}, outputNDims={}",
+                     inputNDims, outputNDims);
+
+    VPU_THROW_UNLESS(inputNDims == indicesNDims,
+                     "input and indices have different ranks: inputNDims={}, indicesNDims={}",
+                     inputNDims, indicesNDims);
+
+    VPU_THROW_UNLESS(inputNDims == updatesNDims,
+                     "input and updates have different shapes: inputNDims={}, updatesNDims={}",
+                     inputNDims, updatesNDims);
+
+    VPU_THROW_UNLESS(axisNDims == 1,
+                     "axis tensor must be 1-dimensional, but axisNDims={}",
+                     axisNDims);
+
+    const DimsOrder inputDimsOrder = inputDesc.dimsOrder();
+    const DimsOrder outputDimsOrder = outputDesc.dimsOrder();
+    const DimsOrder indicesDimsOrder = indicesDesc.dimsOrder();
+    const DimsOrder updatesDimsOrder = updatesDesc.dimsOrder();
+    const DimsOrder axisDimsOrder = axisDesc.dimsOrder();
+
+    VPU_THROW_UNLESS(inputDimsOrder == outputDimsOrder, "output must have same layout as input"
+                     ", but inputDimsOrder = \"{}\", and outputDimsOrder = \"{}\"",
+                     inputDimsOrder, outputDimsOrder);
+
+    VPU_THROW_UNLESS(inputDimsOrder == indicesDimsOrder, "indices must have same layout as input"
+                     ", but inputDimsOrder = \"{}\", and indicesDimsOrder = \"{}\"",
+                     inputDimsOrder, indicesDimsOrder);
+
+    VPU_THROW_UNLESS(inputDimsOrder == updatesDimsOrder, "updates must have same layout as input"
+                     ", but inputDimsOrder = \"{}\", and updatesDimsOrder = \"{}\"",
+                     inputDimsOrder, updatesDimsOrder);
+
+    const DimValues& inputDims = inputDesc.dims();
+    const DimValues& outputDims = outputDesc.dims();
+    const DimValues& indicesDims = indicesDesc.dims();
+    const DimValues& updatesDims = updatesDesc.dims();
+    const DimValues& axisDims = axisDesc.dims();
+
+    VPU_THROW_UNLESS(inputDims == outputDims, "input and output tensors must have same lengths"
+                     ", but inputDims = \"{}\", and outputDims = \"{}\"", inputDims, outputDims);
+
+    VPU_THROW_UNLESS(indicesDims == updatesDims, "indices and updates tensors must have same lengths"
+                     ", but indicesDims = \"{}\", and updatesDims = \"{}\"", indicesDims, updatesDims);
+
+    // Permutation is array of dims, from minor to major
+    const DimVector outputPerm = outputDimsOrder.toPermutation();
+    const DimVector updatesPerm = updatesDimsOrder.toPermutation();
+
+    // Check if the updates fits the data shape
+    for (int i = 0; i < inputNDims - 1; i++) {
+        const Dim outputDim = outputPerm[i];
+        const Dim updatesDim = updatesPerm[i];
+        const int outputSize = outputDims[outputDim];
+        const int updatesSize = updatesDims[updatesDim];
+        VPU_THROW_UNLESS(updatesSize <= outputSize,
+                         "updates size must fit output for corresponding axes, "
+                         "but for axis={}: output size={}, updates size={}",
+                         i, outputSize, updatesSize);
+    }
+
+    // Note that for a 1D tensor the layout is "C"
+    VPU_THROW_UNLESS(axisDimsOrder == DimsOrder::C,
+                     "axis must be 1D tensor, but its dims order is {}",
+                     axisDimsOrder);
+    VPU_THROW_UNLESS(axisDims[Dim::C] == 1,
+                     "axis tensor must be 1D array of 1 element, but axis length = %d",
+                     axisDims[Dim::C]);
+}
+
+void FrontEnd::parseScatterElementsUpdate(const Model      & model,
+                                          const CNNLayerPtr& layer,
+                                          const DataVector & inputs,
+                                          const DataVector & outputs) const {
+    VPU_THROW_UNLESS(inputs.size() == 4, "invalid number of inputs: {}", inputs.size());
+    VPU_THROW_UNLESS(outputs.size() == 1, "invalid number of outputs: {}", outputs.size());
+
+    const auto& input   = inputs[0];
+    const auto& indices = inputs[1];
+    const auto& updates = inputs[2];
+    const auto& axis    = inputs[3];
+    const auto& output = outputs[0];
+
+    checkTensorShapes(input, output, indices, updates, axis);
+
+    auto scatterElementsUpdateLayer = std::dynamic_pointer_cast<ie::ScatterElementsUpdateLayer>(layer);
+
+    VPU_THROW_UNLESS(scatterElementsUpdateLayer != nullptr,
+                     "this layer is not an instance of ScatterElementsUpdateLayer: "
+                     "layer name = \"%s\", layer type = \"%s\"",
+                     layer->name.c_str(), layer->type.c_str());
+
+    auto stage = model->addNewStage<ScatterElementsUpdateStage>(layer->name,
+                                                                StageType::ScatterElementsUpdate,
+                                                                layer,
+                                                                {input, indices, updates, axis},
+                                                                {output});
+
+    VPU_THROW_UNLESS(stage != nullptr,
+                     "failed to create ScatterElementsUpdateStage: "
+                     "layer name = \"%s\", layer type = \"%s\"",
+                     layer->name.c_str(), layer->type.c_str());
+}
+
+//----------------------------------------------------------------------
+
+Stage StageBuilder::addScatterElementsUpdateStage(const Model& model,
+                                                  const std::string& name,
+                                                  const ie::CNNLayerPtr& layer,
+                                                  const Data& input,
+                                                  const Data& output,
+                                                  const Data& indices,
+                                                  const Data& updates,
+                                                  const Data& axis) {
+    checkTensorShapes(input, output, indices, updates, axis);
+
+    auto stage = model->addNewStage<ScatterElementsUpdateStage>(name,
+                                                                StageType::ScatterElementsUpdate,
+                                                                layer,
+                                                                {input, indices, updates, axis},
+                                                                {output});
+
+    VPU_THROW_UNLESS(stage != nullptr,
+                     "failed to create ScatterElementsUpdateStage: "
+                     "layer name = \"%s\", layer type = \"%s\"",
+                     layer->name.c_str(), layer->type.c_str());
+
+    return stage;
+}
+
+}  // namespace vpu
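
For reference, the operation serialized above follows the standard ScatterElementsUpdate semantics: the output starts as a copy of data, and each element of updates overwrites the output position obtained by replacing the coordinate along axis with the matching element of indices. A plain C++ reference model of the 1-D case (this only pins down the semantics; it is not the SHAVE kernel):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    std::vector<float> scatterElementsUpdate1D(const std::vector<float>& data,
                                               const std::vector<int32_t>& indices,
                                               const std::vector<float>& updates) {
        std::vector<float> output = data;  // start from a copy of the input
        for (std::size_t i = 0; i < indices.size(); ++i) {
            // indices and updates have identical shapes (checked above), so
            // element i of updates lands at the position named by indices[i].
            output[indices[i]] = updates[i];
        }
        return output;
    }
    // Example: data = {0, 0, 0, 0}, indices = {3, 1}, updates = {7, 5}
    // yields {0, 5, 0, 7}.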
index c74a8b8..1edaa94 100644 (file)
@@ -152,7 +152,8 @@ void MyriadInferRequest::GetResult() {
         return foundBlob->second->getTensorDesc().getLayout();
     };
 
-    if (_outputs.size() == 1) {
+    // For networks with only one output
+    if (_outputInfo.offset.size() == 1) {
         const auto& it = _outputs.begin();
         const auto& name = (*it).first;
         const auto& blob = (*it).second;
@@ -166,10 +167,10 @@ void MyriadInferRequest::GetResult() {
     _executor->getResult(_graphDesc, resultBuffer.data(), resultBuffer.size());
 
     for (const auto& output : _outputs) {
-        const auto& name = output.first;
-        const auto& blob = output.second;
+        const auto& ieBlobName = output.first;
+        const auto& ieBlob = output.second; // Original IE output blob
 
-        const auto resultOffset = [&] {
+        const auto resultOffset = [&](const std::string& name) {
             const auto offset_it = _outputInfo.offset.find(name);
             IE_ASSERT(offset_it != _outputInfo.offset.end()) << "MyriadInferRequest::GetResult()\n"
                                                              << "Output offset [" << name << "] error.";
@@ -179,14 +180,27 @@ void MyriadInferRequest::GetResult() {
                                                       << "Required offset: " << offset
                                                       << "Result buffer size: " << resultBuffer.size();
             return offset;
-        }();
-
-        const auto outDesc = blob->getTensorDesc();
+        };
+
+        const auto& ieOutDesc = ieBlob->getTensorDesc();
+        const auto& ieOutPrc = ieOutDesc.getPrecision();
+        auto ieOutDims = ieOutDesc.getDims();
+        // Extract the dynamic output shape (suffix "@shape") and copy it into the dims vector in reverse order
+        const auto& shapeInfo = _outputInfo.offset.find(ieBlobName + "@shape");
+        if (shapeInfo != _outputInfo.offset.end()) {
+            const auto shapeOffset = resultOffset(shapeInfo->first);
+            const auto shapePtr = reinterpret_cast<const int32_t*>(resultBuffer.data() + shapeOffset);
+
+            const auto shapeRank = ieOutDims.size();
+            for (size_t idx = 0; idx < shapeRank; ++idx) {
+                ieOutDims[idx] = shapePtr[shapeRank - idx - 1];
+            }
+        }
         // TODO: TensorDesc doesn't update internal BlockingDesc and strides when setLayout is called
-        const auto tempTensorDesc = ie::TensorDesc{outDesc.getPrecision(), outDesc.getDims(), getVpuLayout(name)};
-        const auto tmpBlob = make_blob_with_precision(tempTensorDesc, resultBuffer.data() + resultOffset);
+        const auto tempTensorDesc = ie::TensorDesc{ieOutPrc, ieOutDims, getVpuLayout(ieBlobName)};
+        const auto tmpBlob = make_blob_with_precision(tempTensorDesc, resultBuffer.data() + resultOffset(ieBlobName));
 
-        copyBlob(tmpBlob, blob);
+        copyBlob(tmpBlob, ieBlob);
     }
 }
 
index 65044cf..7e44237 100644 (file)
@@ -16,8 +16,9 @@
 #include <vpu/parsed_config.hpp>
 #include <vpu/utils/profiling.hpp>
 #include <vpu/utils/error.hpp>
-#include <vpu/ngraph/transformations/dynamic_to_static_shape.hpp>
-#include <generic_ie.hpp>
+
+#include "vpu/ngraph/transformations/dynamic_to_static_shape.hpp"
+#include "generic_ie.hpp"
 
 #include "myriad_plugin.h"
 
@@ -35,10 +36,10 @@ ExecutableNetworkInternal::Ptr Engine::LoadExeNetworkImpl(
     auto parsedConfigCopy = _parsedConfig;
     parsedConfigCopy.update(config);
 
-    std::shared_ptr<ICNNNetwork> clonedNetwork = cloneNetwork(network);
-    if (auto func = clonedNetwork->getFunction()) {
-        ngraph::op::GenericIE::DisableReshape noReshape(func);
-        ngraph::pass::DynamicToStaticShape().run_on_function(func);
+    auto clonedNetwork = cloneNetwork(network);
+    if (auto function = clonedNetwork->getFunction()) {
+        ngraph::op::GenericIE::DisableReshape noReshape(function);
+        vpu::DynamicToStaticShape().transform(*function);
     }
 
     return std::make_shared<ExecutableNetwork>(*clonedNetwork, _devicePool, parsedConfigCopy);
index a302bc2..711ac3b 100644 (file)
@@ -14,7 +14,9 @@ enable_testing()
 add_subdirectory(ngraph_functions)
 add_subdirectory(unit)
 
-add_subdirectory(ie_test_utils)
+if(ENABLE_FUNCTIONAL_TESTS)
+    add_subdirectory(ie_test_utils)
+endif()
 
 if (ENABLE_FUNCTIONAL_TESTS)
     add_subdirectory(functional)
index 145b4d7..da353a0 100644 (file)
@@ -15,6 +15,7 @@ addIeTargetTest(
         LINK_LIBRARIES
             funcTestUtils
             ngraphFunctions
+            inference_engine_transformations
         ADD_CPPLINT
         LABELS
             INFERENCE_ENGINE
@@ -27,6 +28,7 @@ include(CMakeParseArguments)
 #
 # ie_headers_compilation_with_custom_flags(TEST_SUFFIX <prefix>
 #                                          FLAGS <flags>
+#                                          DEFINITIONS <definitions>
 #                                          HEADERS_TO_SKIP <skip headers>
 #                                          CXX_STANDARD <number>)
 #
@@ -35,7 +37,7 @@ include(CMakeParseArguments)
 function(ie_headers_compilation_with_custom_flags)
     set(options)
     set(oneValueArgs TEST_SUFFIX CXX_STANDARD)
-    set(multiValueArgs FLAGS HEADERS_TO_SKIP)
+    set(multiValueArgs FLAGS DEFINITIONS HEADERS_TO_SKIP)
     cmake_parse_arguments(IE_TEST "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
 
     file(GLOB_RECURSE header_files RELATIVE "${IE_MAIN_SOURCE_DIR}/include"
@@ -98,7 +100,10 @@ function(ie_headers_compilation_with_custom_flags)
     if(IE_TEST_FLAGS)
         set_target_properties(${target_name} PROPERTIES
                               COMPILE_FLAGS ${IE_TEST_FLAGS})
+    endif()
 
+    if(IE_TEST_DEFINITIONS)
+        target_compile_definitions(${target_name} PRIVATE ${IE_TEST_DEFINITIONS})
     endif()
 
     add_dependencies(${TARGET_NAME} ${target_name})
@@ -109,6 +114,11 @@ ie_headers_compilation_with_custom_flags(TEST_SUFFIX Cxx17 CXX_STANDARD 17)
 
 if(UNIX)    
     ie_headers_compilation_with_custom_flags(TEST_SUFFIX Pedantic FLAGS -Wpedantic)
+else()
+    ie_headers_compilation_with_custom_flags(TEST_SUFFIX WindowsAreErrors
+                                             FLAGS "/we4996 /W4 /WX")
+    ie_headers_compilation_with_custom_flags(TEST_SUFFIX Unicode
+                                             DEFINITIONS UNICODE _UNICODE)
 endif()
 
 # compilation with -Wweak-vtables
@@ -2,12 +2,12 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include <ie_blob.h>
 #include <gtest/gtest.h>
+
 #include <random>
 #include <chrono>
 
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_blob.h>
 #include <blob_transform.hpp>
 
 using namespace ::testing;
@@ -19,12 +19,12 @@ using PrecisionType = InferenceEngine::Precision::ePrecision;
 using IsInterleaved =  bool;            // true = interleaved, false = deinterleaved.
 using Dims = std::vector<size_t>;       // dimensions are in the form of (N x C x D1 x D2 ... Dn), so Dims is vector (D1 x D2 ... Dn)
 
+namespace {
 
 InferenceEngine::Layout setLayout(IsInterleaved isInterleaved, int dimsSize) {
     if (dimsSize == 3) {
         return (isInterleaved) ?  InferenceEngine::Layout::NDHWC : InferenceEngine::Layout::NCDHW;
-    }
-    else if (dimsSize == 2) {
+    } else if (dimsSize == 2) {
         return (isInterleaved) ?  InferenceEngine::Layout::NHWC : InferenceEngine::Layout::NCHW;
     }
     THROW_IE_EXCEPTION << "Can't set layout";
@@ -33,10 +33,9 @@ InferenceEngine::Layout setLayout(IsInterleaved isInterleaved, int dimsSize) {
 //  Support only for 4d and 5d blobs
 SizeVector  SetDimVector(BatchNum batchNum, ChannelNum channelNum, Dims dims) {
     if (dims.size() == 2) {
-            return SizeVector {batchNum,channelNum, dims[0], dims[1] };
-    }
-    else if (dims.size() == 3) {
-            return SizeVector {batchNum, channelNum, dims[0], dims[1], dims[2] };
+        return SizeVector{ batchNum, channelNum, dims[0], dims[1] };
+    } else if (dims.size() == 3) {
+        return SizeVector{ batchNum, channelNum, dims[0], dims[1], dims[2] };
     }
     THROW_IE_EXCEPTION << "Can't set dimVector";
 }
@@ -44,8 +43,7 @@ SizeVector  SetDimVector(BatchNum batchNum, ChannelNum channelNum, Dims dims) {
 //  For FP16 and Q78 precision we use int16_t type
 InferenceEngine::Blob::Ptr createBlob(InferenceEngine::Precision precision, SizeVector dimsVector, InferenceEngine::Layout layout) {
     InferenceEngine::TensorDesc tensorDesc(precision, dimsVector, layout);
-    switch (precision)
-    {
+    switch (precision) {
         case  InferenceEngine::Precision::FP32 :
              return make_shared_blob<float>(tensorDesc);
         case InferenceEngine::Precision::FP16:
@@ -66,7 +64,7 @@ InferenceEngine::Blob::Ptr createBlob(InferenceEngine::Precision precision, Size
         case InferenceEngine::Precision::U8:
             return make_shared_blob<uint8_t>(tensorDesc);
         default:
-            THROW_IE_EXCEPTION<<"Unsupported precision";
+            THROW_IE_EXCEPTION << "Unsupported precision";
     }
 }
 
@@ -74,9 +72,8 @@ InferenceEngine::Blob::Ptr createBlob(InferenceEngine::Precision precision, Size
 size_t GenerateRandom(size_t elem) {
     size_t result;
     do {
-        result = std::floor(std::rand() / (float)RAND_MAX * elem);
-    }
-    while (result >= elem);
+        result = std::floor(std::rand() / static_cast<float>(RAND_MAX) * elem);
+    } while (result >= elem);
     return result;
 }
 
@@ -86,20 +83,18 @@ size_t GenerateRandom(size_t elem) {
 SizeVector GenerateRandomVector(SizeVector dims) {
    SizeVector idx(dims.size());
 
-   for (auto i = 0; i < dims.size(); ++i){
-        idx[i] = GenerateRandom(dims[i]);
+   for (auto i = 0; i < dims.size(); ++i) {
+       idx[i] = GenerateRandom(dims[i]);
    }
    return idx;
 }
 
 
-void PrintParams (InferenceEngine::Layout layout, SizeVector dims, std::string blobType, InferenceEngine::Precision precision) {
+void PrintParams(InferenceEngine::Layout layout, SizeVector dims, std::string blobType, InferenceEngine::Precision precision) {
     std::cout << blobType << "Blob params: " << layout << ", precision: " << precision << ", dims: {";
-
-    for(int i = 0; i <  dims.size(); i++){
+    for (int i = 0; i < dims.size(); i++) {
         std::cout << (i > 0 ? ", ": "") << dims[i];
     }
-
     std::cout << "}" << std::endl;
 }
 
@@ -108,16 +103,16 @@ template<typename T>
 void FillBlobRandom(Blob::Ptr& inputBlob) {
     srand(1);
     auto inputBlobData = inputBlob->buffer().as<T*>();
     for (size_t i = 0; i < inputBlob->size(); i++) {
-        inputBlobData[i] = (T) (rand() / (float)RAND_MAX * 100);
+        inputBlobData[i] = (T) (GenerateRandom(RAND_MAX) / static_cast<float>(RAND_MAX) * 100);
     }
 }
 
 //  For FP16 and Q78 precision we use int16_t type
 void FillBlob(Blob::Ptr& inputBlob) {
     auto precision = inputBlob->getTensorDesc().getPrecision();
-    switch (precision)
-    {
+    switch (precision) {
         case  InferenceEngine::Precision::FP32 :
             return FillBlobRandom<float>(inputBlob);
         case InferenceEngine::Precision::FP16:
@@ -138,7 +133,7 @@ void FillBlob(Blob::Ptr& inputBlob) {
         case InferenceEngine::Precision::U8:
             return FillBlobRandom<uint8_t>(inputBlob);
         default:
-            THROW_IE_EXCEPTION << "Cant fill blob with \""<<precision<<"\" precision\n";
+            THROW_IE_EXCEPTION << "Cant fill blob with \"" << precision << "\" precision\n";
     }
 }
 
@@ -151,24 +146,30 @@ T GetElem(Blob::Ptr& blob, SizeVector idx) {
 
     SizeVector strides = blob->getTensorDesc().getBlockingDesc().getStrides();
     if (blobLayout == NHWC || blobLayout == NDHWC) {
-        for(int i = 2; i < strides.size(); i++)
+        for (int i = 2; i < strides.size(); i++) {
             std::swap(strides[1], strides[i]);
+        }
     }
 
     int offset = 0;
 
-    for(int i = 0; i < idx.size(); i++) {
+    for (int i = 0; i < idx.size(); i++) {
         offset += idx[i] * strides[i];
     }
 
     return src[offset];
 }
 
-int SetExperimentsNum (int blobSize) {
-    if(blobSize < 1000) return blobSize;
-    else if(blobSize < 10000) return 1000;
-    else if(blobSize < 100000) return blobSize / 10;
-    else return blobSize / 100;
+int SetExperimentsNum(int blobSize) {
+    if (blobSize < 1000) {
+        return blobSize;
+    } else if (blobSize < 10000) {
+        return 1000;
+    } else if (blobSize < 100000) {
+        return blobSize / 10;
+    } else {
+        return blobSize / 100;
+    }
 }
 
 template <typename T>
@@ -176,30 +177,28 @@ bool IsCorrectBlobCopy_Impl(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) {
     EXPECT_TRUE(srcBlob->size() == dstBlob->size());
     int experimentsNum = SetExperimentsNum(srcBlob->size());
     int errorsCount = 0;
-    for( ; experimentsNum > 0; --experimentsNum)
-    {
+    for ( ; experimentsNum > 0; --experimentsNum) {
         SizeVector randomElemIdx = GenerateRandomVector(srcBlob->getTensorDesc().getDims());
         auto srcElem = GetElem<T>(srcBlob, randomElemIdx);
         auto dstElem = GetElem<T>(dstBlob, randomElemIdx);
-        if(srcElem != dstElem) {
-           if(errorsCount < 10) {
+        if (srcElem != dstElem) {
+           if (errorsCount < 10) {
                errorsCount++;
                std::cout << "ERROR: srcElem = " << srcElem << ", dstElem = " << dstElem << std::endl;
-           }
-           else
+           } else {
                errorsCount++;
+           }
         }
     }
     if (errorsCount > 0) {
         std::cout << "errorsCount = " << errorsCount << std::endl;
     }
-    return errorsCount == 0 ;
+    return errorsCount == 0;
 }
 
 
 bool IsCorrectBlobCopy(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) {
-    switch (srcBlob->getTensorDesc().getPrecision())
-    {
+    switch (srcBlob->getTensorDesc().getPrecision()) {
         case  InferenceEngine::Precision::FP32 :
             return IsCorrectBlobCopy_Impl<float>(srcBlob, dstBlob);
         case InferenceEngine::Precision::FP16:
@@ -224,12 +223,11 @@ bool IsCorrectBlobCopy(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) {
     }
 }
 
+}  // namespace
 
-class BlobCopyTest : public ::testing::TestWithParam <std::tuple<IsInterleaved, IsInterleaved, BatchNum, ChannelNum, Dims, PrecisionType >> {
-};
-
+using BlobCopyTest = ::testing::TestWithParam<std::tuple<IsInterleaved, IsInterleaved, BatchNum, ChannelNum, Dims, PrecisionType>>;
 
-TEST_P (BlobCopyTest, BlobCopy) {
+TEST_P(BlobCopyTest, BlobCopy) {
     IsInterleaved srcIsInterleaved = get<0>(GetParam());
     IsInterleaved dstIsInterleaved = get<1>(GetParam());
     BatchNum batchNum = get<2>(GetParam());
@@ -243,8 +241,8 @@ TEST_P (BlobCopyTest, BlobCopy) {
     InferenceEngine::Layout srcLayout = setLayout(srcIsInterleaved, dims.size());
     InferenceEngine::Layout dstLayout = setLayout(dstIsInterleaved, dims.size());
 
-    PrintParams (srcLayout, srcDims, "src", precisionType);
-    PrintParams (dstLayout, dstDims, "dst", precisionType);
+    PrintParams(srcLayout, srcDims, "src", precisionType);
+    PrintParams(dstLayout, dstDims, "dst", precisionType);
 
     Blob::Ptr srcBlob = createBlob(precisionType, srcDims, srcLayout);
     Blob::Ptr dstBlob = createBlob(precisionType, dstDims, dstLayout);
@@ -258,28 +256,31 @@ TEST_P (BlobCopyTest, BlobCopy) {
     blob_copy(srcBlob, dstBlob);
     auto finish =  std::chrono::high_resolution_clock::now();
 
-    std::cout << "Blob_copy execution time : " <<std::chrono::duration_cast<std::chrono::microseconds>(finish - start).count()<<" micros" << std::endl << std::endl;
+    std::cout << "Blob_copy execution time : " << std::chrono::duration_cast<std::chrono::microseconds>(finish - start).count() << " micros" << std::endl;
 
     ASSERT_TRUE(IsCorrectBlobCopy(srcBlob, dstBlob)) << "'blob_copy' function isn't correct";
 }
+
+namespace {
+
 // is interleaved srcBlob?
-static std::vector<IsInterleaved> BlobCopy_srcLayoutParam = {
+std::vector<IsInterleaved> BlobCopy_srcLayoutParam = {
         true, false,
 };
 // is interleaved dstBlob?
-static std::vector<IsInterleaved> BlobCopy_dstLayoutParam = {
+std::vector<IsInterleaved> BlobCopy_dstLayoutParam = {
         false, true,
 };
 
-static std::vector<BatchNum> BlobCopy_BatchNum = {
+std::vector<BatchNum> BlobCopy_BatchNum = {
         1, 3,
 };
 
-static std::vector<ChannelNum > BlobCopy_ChannelNum = {
+std::vector<ChannelNum> BlobCopy_ChannelNum = {
         3, 7,
 };
 
-static std::vector<Dims> BlobCopy_Dims = {
+std::vector<Dims> BlobCopy_Dims = {
         {{10, 20, 30}},
         {{60, 80}},
 };
@@ -289,12 +290,13 @@ static std::vector<Dims> BlobCopy_Dims = {
 //  FP16 is used for cases with the following accuracy:  FP16, U16, I16
 //  U8 is used for cases with the following accuracy:  U8, I8
 //  Cases with other precision are not supported
-static std::vector<PrecisionType> BlobCopy_PrecisionParams = {
+std::vector<PrecisionType> BlobCopy_PrecisionParams = {
         InferenceEngine::Precision::FP32,
         InferenceEngine::Precision::FP16,
         InferenceEngine::Precision::U8,
 };
 
+}  // namespace
 
 INSTANTIATE_TEST_CASE_P(accuracy, BlobCopyTest,
                         ::testing::Combine(::testing::ValuesIn(BlobCopy_srcLayoutParam),
@@ -304,7 +306,7 @@ INSTANTIATE_TEST_CASE_P(accuracy, BlobCopyTest,
                            ::testing::ValuesIn(BlobCopy_Dims),
                            ::testing::ValuesIn(BlobCopy_PrecisionParams)));
 
-
+namespace {
 
 template <typename T>
 bool IsEqualBlobCopy_Impl(Blob::Ptr& ref, Blob::Ptr& dst) {
@@ -384,21 +386,22 @@ void copy3DBlobsAllBytesWithReLayoutWrapper(const Blob::Ptr& srcLayoutBlob, Blob
     }
 }
 
-class BlobCopySetLayoutTest : public ::testing::TestWithParam<
-    std::tuple<Dims, PrecisionType>> {};
 
-static std::vector<Dims> BlobCopySetLayout_Dims = {
+std::vector<Dims> BlobCopySetLayout_Dims = {
     {{1, 10, 10}},
     {{2, 100, 100}},
     {{3, 224, 224}},
 };
 
-static std::vector<PrecisionType> BlobCopySetLayout_Precisions = {
+std::vector<PrecisionType> BlobCopySetLayout_Precisions = {
     Precision::U8,
     Precision::U16,
     InferenceEngine::Precision::FP32,
 };
 
+}  // namespace
+
+using BlobCopySetLayoutTest = ::testing::TestWithParam<std::tuple<Dims, PrecisionType>>;
 
 // test after [IE] Fix TensorDesc::setLayout method, 735d275b47c4fd0c7b0db5c8f9fe8705967270f0
 TEST_P(BlobCopySetLayoutTest, BlobCopyWithNCHW_To_NHWC_After_setLayout) {
@@ -4,24 +4,22 @@
 
 #include <gtest/gtest.h>
 
-#include "tests_utils.hpp"
 #include <cpp_interfaces/exception2status.hpp>
 #include <details/ie_exception_conversion.hpp>
 
 using namespace InferenceEngine;
 
-class ExceptionTests : public ::testing::Test {
-};
+using ExceptionTests = ::testing::Test;
 
 template<StatusCode T>
 class WrapperClass {
 public:
     static InferenceEngine::StatusCode toStatusWrapper(InferenceEngine::ResponseDesc *resp) {
-        TO_STATUS(THROW_IE_EXCEPTION << details::as_status << T);
+        TO_STATUS(THROW_IE_EXCEPTION << details::as_status << T)
     }
 
     static InferenceEngine::StatusCode toStatusWrapperMsg(std::string &msg, InferenceEngine::ResponseDesc *resp) {
-        TO_STATUS(THROW_IE_EXCEPTION << details::as_status << T << msg);
+        TO_STATUS(THROW_IE_EXCEPTION << details::as_status << T << msg)
     }
 };
 
@@ -50,8 +48,8 @@ TEST_F(ExceptionTests, canConvertStatusToException) {
 TEST_F(ExceptionTests, canHandleNullPtr) {
     class Mock {
     public:
-        StatusCode func0(ResponseDesc* resp) {return StatusCode ::OK;};
-        StatusCode func1(int x, ResponseDesc* resp) {return StatusCode ::OK;};
+        StatusCode func0(ResponseDesc*) {return StatusCode ::OK;}
+        StatusCode func1(int, ResponseDesc*) {return StatusCode ::OK;}
     };
     //  shared_ptr holding the nullptr
     std::shared_ptr<Mock> actual;
@@ -61,12 +59,12 @@ TEST_F(ExceptionTests, canHandleNullPtr) {
 }
 
 TEST_F(ExceptionTests, throwAfterConvertStatusToClassContainMessage) {
-    std::string message = "Exception message!";
+    std::string refMessage = "Exception message!";
     auto actual = std::make_shared<WrapperClass<StatusCode::NOT_ALLOCATED>>();
     try {
-        CALL_STATUS_FNC(toStatusWrapperMsg, message);
+        CALL_STATUS_FNC(toStatusWrapperMsg, refMessage)
     } catch (const NotAllocated &iex) {
-        ASSERT_STR_CONTAINS(iex.what(), message);
+        std::string actualMessage = iex.what();
+        ASSERT_EQ(actualMessage.find(refMessage), 0);
     }
 }
-
index ea1a9c8..2d008bb 100644 (file)
@@ -62,7 +62,7 @@ protected:
                         const std::vector<InferenceEngine::CNNLayerPtr> &refLayersVec) {
         ASSERT_NO_THROW(FuncTestUtils::compareLayerByLayer<std::vector<InferenceEngine::CNNLayerPtr>>(
                 InferenceEngine::details::CNNNetSortTopologically(network),
-                refLayersVec));
+                refLayersVec, false));
     }
 
     const std::string _modelPath = "NetReader_test.xml";
index 517e7ce..f8d6143 100644 (file)
@@ -7,7 +7,7 @@
 
 TEST_F(NGraphReaderTests, ReadAbsNetwork) {
     std::string model = R"V0G0N(
-<net name="Abs_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -56,9 +56,10 @@ TEST_F(NGraphReaderTests, ReadAbsNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Abs_net" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
+            <data originalLayersNames="in1"/>
             <output>
                 <port id="0">
                     <dim>1</dim>
@@ -69,6 +70,7 @@ TEST_F(NGraphReaderTests, ReadAbsNetwork) {
             </output>
         </layer>
         <layer name="Abs" id="1" type="Abs" precision="FP32">
+            <data originalLayersNames="Abs"/>
             <input>
                 <port id="1">
                     <dim>1</dim>
index 4367303..ff95c1a 100644 (file)
@@ -8,7 +8,7 @@
 
 TEST_F(NGraphReaderTests, ReadAcosNetwork) {
     std::string model = R"V0G0N(
-<net name="Acos_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -57,9 +57,10 @@ TEST_F(NGraphReaderTests, ReadAcosNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Acos_net" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
+            <data originalLayersNames="in1"/>
             <output>
                 <port id="0">
                     <dim>1</dim>
@@ -70,6 +71,7 @@ TEST_F(NGraphReaderTests, ReadAcosNetwork) {
             </output>
         </layer>
         <layer name="Acos" id="1" type="Acos" precision="FP32">
+            <data originalLayersNames="Acos"/>
             <input>
                 <port id="1">
                     <dim>1</dim>
index a358853..2379a1e 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadAsinNetwork) {
     std::string model = R"V0G0N(
-<net name="Asin_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -55,9 +55,10 @@ TEST_F(NGraphReaderTests, ReadAsinNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Asin_net" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
+            <data originalLayersNames="in1"/>
             <output>
                 <port id="0">
                     <dim>1</dim>
@@ -68,6 +69,7 @@ TEST_F(NGraphReaderTests, ReadAsinNetwork) {
             </output>
         </layer>
         <layer name="Asin" id="1" type="Asin" precision="FP32">
+            <data originalLayersNames="Asin"/>
             <input>
                 <port id="1">
                     <dim>1</dim>
index 4f48941..688ad06 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadAtanNetwork) {
     std::string model = R"V0G0N(
-<net name="Atan_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -55,9 +55,10 @@ TEST_F(NGraphReaderTests, ReadAtanNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Atan_net" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
+            <data originalLayersNames="in1"/>
             <output>
                 <port id="0">
                     <dim>1</dim>
@@ -68,6 +69,7 @@ TEST_F(NGraphReaderTests, ReadAtanNetwork) {
             </output>
         </layer>
         <layer name="Atan" id="1" type="Atan" precision="FP32">
+            <data originalLayersNames="Atan"/>
             <input>
                 <port id="1">
                     <dim>1</dim>
index bfbafb0..45028d1 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadBatchNormInferenceNetwork) {
     std::string model = R"V0G0N(
-<net name="BNFusion" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -142,7 +142,7 @@ TEST_F(NGraphReaderTests, ReadBatchNormInferenceNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="BNFusion" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="in1" precision="FP32" type="Input">
             <output>
@@ -155,7 +155,7 @@ TEST_F(NGraphReaderTests, ReadBatchNormInferenceNetwork) {
             </output>
         </layer>
         <layer id="3" name="bn" precision="FP32" type="Convolution">
-            <data dilations="1,1" group="1" kernel="1,1" output="3" pads_begin="0,0" pads_end="0,0" strides="1,1"/>
+            <data dilations="1,1" group="1" kernel="1,1" output="3" pads_begin="0,0" pads_end="0,0" strides="1,1" originalLayersNames="bn,conv"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
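
In the hunk above the BatchNormInference node has been folded into the preceding Convolution: the v5 layer is named "bn" but typed Convolution and tagged originalLayersNames="bn,conv". For reference, the standard folding rescales the convolution weights and shifts its bias using the BN statistics; a hedged sketch of that arithmetic (the layout and parameter passing are assumptions of this sketch):

#include <cmath>
#include <cstddef>
#include <vector>

// y = gamma * (conv(x) - mean) / sqrt(var + eps) + beta  collapses into a
// plain convolution with per-channel rescaled weights and a shifted bias.
void foldBatchNorm(std::vector<float> &weights,        // C blocks of perChannel
                   std::vector<float> &bias,           // size C
                   const std::vector<float> &gamma,
                   const std::vector<float> &beta,
                   const std::vector<float> &mean,
                   const std::vector<float> &var,
                   float eps, size_t perChannel) {
    for (size_t c = 0; c < bias.size(); ++c) {
        const float scale = gamma[c] / std::sqrt(var[c] + eps);
        for (size_t i = 0; i < perChannel; ++i)
            weights[c * perChannel + i] *= scale;
        bias[c] = (bias[c] - mean[c]) * scale + beta[c];
    }
}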
index 3a24e91..4140d87 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ConvertBroadcastToTiles1) {
     std::string model = R"V0G0N(
-<net name="Multiply" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="14" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="112,1"/>
@@ -63,7 +63,7 @@ TEST_F(NGraphReaderTests, ConvertBroadcastToTiles1) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -84,6 +84,7 @@ TEST_F(NGraphReaderTests, ConvertBroadcastToTiles1) {
             </blobs>
         </layer>
         <layer id="2" name="DynReshape_108" precision="FP32" type="Reshape">
+            <data originalLayersNames="broadcast_1"/>
             <input>
                 <port id="0">
                     <dim>112</dim>
@@ -103,7 +104,7 @@ TEST_F(NGraphReaderTests, ConvertBroadcastToTiles1) {
             </output>
         </layer>
         <layer id="3" name="broadcast_1:" precision="FP32" type="Tile">
-            <data axis="3" tiles="112"/>
+            <data axis="3" tiles="112" originalLayersNames="broadcast_1"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -121,7 +122,7 @@ TEST_F(NGraphReaderTests, ConvertBroadcastToTiles1) {
                 </port>
             </output>
         </layer>
-        <layer id="4" name="broadcast_1:_3" precision="FP32" type="Tile">
+        <layer id="4" name="broadcast_1" precision="FP32" type="Tile">
             <data axis="1" tiles="64"/>
             <input>
                 <port id="0">
@@ -161,7 +162,7 @@ TEST_F(NGraphReaderTests, ConvertBroadcastToTiles1) {
 
 TEST_F(NGraphReaderTests, ConvertBroadcastToTiles2) {
     std::string model = R"V0G0N(
-<net name="Broadcast" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="14" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1"/>
@@ -219,7 +220,7 @@ TEST_F(NGraphReaderTests, ConvertBroadcastToTiles2) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -239,6 +240,7 @@ TEST_F(NGraphReaderTests, ConvertBroadcastToTiles2) {
             </blobs>
         </layer>
         <layer id="2" name="DynReshape_108" precision="FP32" type="Reshape">
+            <data originalLayersNames="broadcast_1"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -257,7 +259,7 @@ TEST_F(NGraphReaderTests, ConvertBroadcastToTiles2) {
             </output>
         </layer>
         <layer id="3" name="broadcast_1:" precision="FP32" type="Tile">
-            <data axis="3" tiles="112"/>
+            <data axis="3" tiles="112" originalLayersNames="broadcast_1"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -276,7 +278,7 @@ TEST_F(NGraphReaderTests, ConvertBroadcastToTiles2) {
             </output>
         </layer>
         <layer id="4" name="broadcast_1:_3" precision="FP32" type="Tile">
-            <data axis="2" tiles="112"/>
+            <data axis="2" tiles="112" originalLayersNames="broadcast_1"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -294,7 +296,7 @@ TEST_F(NGraphReaderTests, ConvertBroadcastToTiles2) {
                 </port>
             </output>
         </layer>
-        <layer id="5" name="broadcast_1:_3_2" precision="FP32" type="Tile">
+        <layer id="5" name="broadcast_1" precision="FP32" type="Tile">
             <data axis="1" tiles="64"/>
             <input>
                 <port id="0">
@@ -335,7 +337,7 @@ TEST_F(NGraphReaderTests, ConvertBroadcastToTiles2) {
 
 TEST_F(NGraphReaderTests, ConvertBroadcastToTiles3) {
     std::string model = R"V0G0N(
-<net name="Broadcast" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="14" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,1,112"/>
@@ -399,7 +401,7 @@ TEST_F(NGraphReaderTests, ConvertBroadcastToTiles3) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -448,7 +450,7 @@ TEST_F(NGraphReaderTests, ConvertBroadcastToTiles3) {
 
 TEST_F(NGraphReaderTests, ConvertBroadcastToTiles4) {
     std::string model = R"V0G0N(
-<net name="Broadcast" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="14" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="3,64"/>
@@ -517,7 +519,7 @@ TEST_F(NGraphReaderTests, ConvertBroadcastToTiles4) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Broadcast" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -538,6 +540,7 @@ TEST_F(NGraphReaderTests, ConvertBroadcastToTiles4) {
             </blobs>
         </layer>
         <layer id="2" name="DynReshape_108" precision="FP32" type="Reshape">
+            <data originalLayersNames="broadcast_1"/>
             <input>
                 <port id="0">
                     <dim>3</dim>
@@ -598,7 +601,7 @@ TEST_F(NGraphReaderTests, ConvertBroadcastToTiles4) {
 
 TEST_F(NGraphReaderTests, DISABLED_ConvertBroadcastToTiles5) {
     std::string model = R"V0G0N(
-<net name="Broadcast" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="14" name="data" type="Parameter" version="opset1">
             <output>
@@ -666,7 +669,7 @@ TEST_F(NGraphReaderTests, DISABLED_ConvertBroadcastToTiles5) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Broadcast" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
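
The ConvertBroadcastToTiles tests above all follow one pattern: a v10 Broadcast becomes a Reshape that pads the input to the output rank, followed by one Tile layer per axis that has to grow, with the last Tile inheriting the original layer name and the intermediate ones tagged via originalLayersNames. A short sketch of the per-axis tile computation under that reading:

#include <cstddef>
#include <utility>
#include <vector>

// Sketch: given an input shape already reshaped to the output rank, emit one
// (axis, tiles) pair per axis that must grow -- one Tile layer each, as in
// the broadcast_1 -> Reshape + Tile chains above.
std::vector<std::pair<size_t, size_t>>
tilesForBroadcast(const std::vector<size_t> &in, const std::vector<size_t> &out) {
    std::vector<std::pair<size_t, size_t>> tiles;
    for (size_t axis = 0; axis < out.size(); ++axis)
        if (in[axis] != out[axis])      // in[axis] == 1 here by broadcast rules
            tiles.emplace_back(axis, out[axis] / in[axis]);
    return tiles;
}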
index 373de62..cb14af3 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadCeilingNetwork) {
     std::string model = R"V0G0N(
-<net name="Ceiling_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -55,7 +55,7 @@ TEST_F(NGraphReaderTests, ReadCeilingNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Ceiling_net" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index 08861e1..139864b 100644 (file)
@@ -7,7 +7,7 @@
 
 TEST_F(NGraphReaderTests, ReadClampNetwork) {
     std::string model = R"V0G0N(
-<net name="Activation" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -57,7 +57,7 @@ TEST_F(NGraphReaderTests, ReadClampNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index f5dfdb7..70932fc 100644 (file)
@@ -7,7 +7,7 @@
 
 TEST_F(NGraphReaderTests, ReadConcatNetwork) {
     std::string model = R"V0G0N(
-<net name="Activation" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -75,7 +75,7 @@ TEST_F(NGraphReaderTests, ReadConcatNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index 6538a65..bfb3859 100644 (file)
@@ -7,7 +7,7 @@
 
 TEST_F(NGraphReaderTests, ReadConvolutionNetwork) {
     std::string model = R"V0G0N(
-<net name="Convolution" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,3,227,227"/>
@@ -75,7 +75,7 @@ TEST_F(NGraphReaderTests, ReadConvolutionNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
index 9967fb0..c434878 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadCosNetwork) {
     std::string model = R"V0G0N(
-<net name="Cos_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -55,7 +55,7 @@ TEST_F(NGraphReaderTests, ReadCosNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Cos_net" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index 744383d..d18729b 100644 (file)
@@ -7,7 +7,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadCoshNetwork) {
     std::string model = R"V0G0N(
-<net name="Cosh_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -56,7 +56,7 @@ TEST_F(NGraphReaderTests, ReadCoshNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Cosh_net" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index 5995803..a773030 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, DISABLED_ReadDeconvolution3DNetwork) {
     std::string model = R"V0G0N(
-<net name="Convolution" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,512,4,4,4"/>
@@ -80,7 +80,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadDeconvolution3DNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -126,7 +126,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadDeconvolution3DNetwork) {
 
 TEST_F(NGraphReaderTests, DISABLED_ReadDeconvolution2DNetwork) {
     std::string model = R"V0G0N(
-<net name="Convolution" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,512,4,4"/>
@@ -194,7 +194,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadDeconvolution2DNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
index cf63e1b..182838f 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, DISABLED_ReadDetectionOutputNetwork) {
     std::string model = R"V0G0N(
-<net name="Activation" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,38360"/>
@@ -88,7 +88,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadDetectionOutputNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index dad4947..3fec256 100644 (file)
@@ -8,7 +8,7 @@
 
 TEST_F(NGraphReaderTests, ReadDivideNetwork) {
     std::string model = R"V0G0N(
-<net name="Multiply" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
index a8779b4..d4bf298 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadELUNetwork) {
     std::string model = R"V0G0N(
-<net name="Activation" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -56,7 +56,7 @@ TEST_F(NGraphReaderTests, ReadELUNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index 4163273..6c10de9 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadErfNetwork) {
     std::string model = R"V0G0N(
-<net name="Erf_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -55,7 +55,7 @@ TEST_F(NGraphReaderTests, ReadErfNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Erf_net" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index f47794c..a3045a0 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadExpNetwork) {
     std::string model = R"V0G0N(
-<net name="Activation" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -55,7 +55,7 @@ TEST_F(NGraphReaderTests, ReadExpNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index ae46f27..e4d3418 100644 (file)
@@ -8,7 +8,7 @@
 
 TEST_F(NGraphReaderTests, ReadFQNetwork) {
     std::string model = R"V0G0N(
-<net name="FakeQuantize" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="in1" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,56,96,168"/>
@@ -130,7 +130,7 @@ TEST_F(NGraphReaderTests, ReadFQNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="FakeQuantize" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="in1" type="Input" precision="FP32">
             <output>
index 0b9e265..f6ce50f 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadFloorNetwork) {
     std::string model = R"V0G0N(
-<net name="Floor_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -55,7 +55,7 @@ TEST_F(NGraphReaderTests, ReadFloorNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Floor_net" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index b13cde0..d23f464 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ConvBiasFusion) {
     std::string model = R"V0G0N(
-<net name="ConvBias" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,3,227,227"/>
@@ -109,7 +109,7 @@ TEST_F(NGraphReaderTests, ConvBiasFusion) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -122,7 +122,7 @@ TEST_F(NGraphReaderTests, ConvBiasFusion) {
             </output>
         </layer>
         <layer id="3" name="add" precision="FP32" type="Convolution">
-            <data dilations="1,1" group="1" kernel="11,11" output="96" pads_begin="0,0" pads_end="0,0" strides="4,4"/>
+            <data dilations="1,1" group="1" kernel="11,11" output="96" pads_begin="0,0" pads_end="0,0" strides="4,4" originalLayersNames="add,conv"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -160,7 +160,7 @@ TEST_F(NGraphReaderTests, ConvBiasFusion) {
 
 TEST_F(NGraphReaderTests, ConvBiasFusionFP16) {
     std::string model = R"V0G0N(
-<net name="ConvBias" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f16" shape="1,3,227,227"/>
@@ -263,7 +263,7 @@ TEST_F(NGraphReaderTests, ConvBiasFusionFP16) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP16" batch="1">
+<net name="Network" version="5" precision="FP16" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP16" type="Input">
             <output>
@@ -276,7 +276,7 @@ TEST_F(NGraphReaderTests, ConvBiasFusionFP16) {
             </output>
         </layer>
         <layer id="3" name="add" precision="FP16" type="Convolution">
-            <data dilations="1,1" group="1" kernel="11,11" output="96" pads_begin="0,0" pads_end="0,0" strides="4,4"/>
+            <data dilations="1,1" group="1" kernel="11,11" output="96" pads_begin="0,0" pads_end="0,0" strides="4,4" originalLayersNames="add,conv"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -314,7 +314,7 @@ TEST_F(NGraphReaderTests, ConvBiasFusionFP16) {
 
 TEST_F(NGraphReaderTests, MatMulBiasFusionNoBroadcast) {
     std::string model = R"V0G0N(
-<net name="MatMulBias" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,2048"/>
@@ -398,7 +398,7 @@ TEST_F(NGraphReaderTests, MatMulBiasFusionNoBroadcast) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="MatMulBias" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -409,7 +409,7 @@ TEST_F(NGraphReaderTests, MatMulBiasFusionNoBroadcast) {
             </output>
         </layer>
         <layer id="3" name="add" precision="FP32" type="FullyConnected">
-            <data alpha="0" beta="0" out-size="1000"/>
+            <data alpha="0" beta="0" out-size="1000" originalLayersNames="add,fc"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -437,7 +437,7 @@ TEST_F(NGraphReaderTests, MatMulBiasFusionNoBroadcast) {
 
 TEST_F(NGraphReaderTests, DISABLED_MatMulBiasFusion) {
     std::string model = R"V0G0N(
-<net name="MatMulBias" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <output>
@@ -557,7 +557,7 @@ TEST_F(NGraphReaderTests, DISABLED_MatMulBiasFusion) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="MatMulBias" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
index 2af437e..4ff515d 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadGatherTreeNetwork) {
     std::string model = R"V0G0N(
-<net name="GatherTree" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="in1" type="Parameter"  version="opset1">
             <data element_type="f32" shape="100,1,10"/>
@@ -89,7 +89,7 @@ TEST_F(NGraphReaderTests, ReadGatherTreeNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="in1" type="Input" >
             <output>
index 6be6016..4e3b508 100644 (file)
@@ -7,7 +7,7 @@
 #include "common_test_utils/xml_net_builder/ir_net.hpp"
 
 TEST_F(NGraphReaderTests, ReadGeluNetwork) {
-    CommonTestUtils::IRBuilder_v10 ir_builder_v10("Gelu");
+    CommonTestUtils::IRBuilder_v10 ir_builder_v10("Network");
 
     auto input_layer = ir_builder_v10
             .AddLayer("in1", "Parameter", {{"shape", "1,128"},
@@ -31,7 +31,7 @@ TEST_F(NGraphReaderTests, ReadGeluNetwork) {
     // f(x) = 0.5 * x * (1.0 + erf( x / sqrt(2.0) ))
     std::string model_v7 = R"V0G0N(
 <?xml version="1.0"?>
-<net name="Function_7" version="6" batch="1">
+<net name="Network" version="6" batch="1">
        <layers>
                <layer name="in1" type="Input" precision="FP32" id="0">
                        <output>
@@ -135,4 +135,4 @@ TEST_F(NGraphReaderTests, ReadGeluNetwork) {
     std::string model_v10 = ir_builder_v10.serialize();
 
     compareIRs(model_v10, model_v7, 0);
-}
\ No newline at end of file
+}
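
The Gelu test above builds its v10 IR with IRBuilder_v10 and compares against a v6 reference implementing the exact-erf formula from the comment, f(x) = 0.5 * x * (1 + erf(x / sqrt(2))). A self-contained sketch of that reference computation:

#include <cmath>

// Exact GELU: f(x) = 0.5 * x * (1.0 + erf(x / sqrt(2.0)))
double gelu(double x) {
    return 0.5 * x * (1.0 + std::erf(x / std::sqrt(2.0)));
}
// e.g. gelu(0.0) == 0.0 and gelu(1.0) is roughly 0.8413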
index 06f6a04..adfb62c 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadGreaterNetwork) {
     std::string model = R"V0G0N(
-<net name="Greater" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -73,7 +73,7 @@ TEST_F(NGraphReaderTests, ReadGreaterNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Greater" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -135,7 +135,7 @@ TEST_F(NGraphReaderTests, ReadGreaterNetwork) {
 
 TEST_F(NGraphReaderTests, ReadGreaterEqualNetwork) {
     std::string model = R"V0G0N(
-<net name="Greater" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -202,7 +202,7 @@ TEST_F(NGraphReaderTests, ReadGreaterEqualNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Greater" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
index e4f4106..c51ed04 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadGRNNetwork) {
     std::string model = R"V0G0N(
-<net name="GRN_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -56,7 +56,7 @@ TEST_F(NGraphReaderTests, ReadGRNNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="GRN_net" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index 38c85e7..962e706 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadHardSigmoidNetwork) {
     std::string model = R"V0G0N(
-<net name="Activation" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -71,7 +71,7 @@ TEST_F(NGraphReaderTests, ReadHardSigmoidNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index 155f5f2..f0bf8b5 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadInterpolateNetwork) {
     std::string model = R"V0G0N(
-<net name="Reshape" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" precision="FP32" version="opset1">
             <data element_type="f32" shape="1,2,48,80"/>
@@ -68,7 +68,7 @@ TEST_F(NGraphReaderTests, ReadInterpolateNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
index b0efbc6..0f1e2cf 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadLessNetwork) {
     std::string model = R"V0G0N(
-<net name="Less" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -73,7 +73,7 @@ TEST_F(NGraphReaderTests, ReadLessNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Greater" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -135,7 +135,7 @@ TEST_F(NGraphReaderTests, ReadLessNetwork) {
 
 TEST_F(NGraphReaderTests, ReadLessEqualNetwork) {
     std::string model = R"V0G0N(
-<net name="LessEqual" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -202,7 +202,7 @@ TEST_F(NGraphReaderTests, ReadLessEqualNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Greater" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
index 9e77165..253501e 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ConvertMulAddToScaleShift) {
     std::string model = R"V0G0N(
-<net name="Multiply" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -106,7 +106,7 @@ TEST_F(NGraphReaderTests, ConvertMulAddToScaleShift) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -119,6 +119,7 @@ TEST_F(NGraphReaderTests, ConvertMulAddToScaleShift) {
             </output>
         </layer>
         <layer id="3" name="add" precision="FP32" type="ScaleShift">
+            <data originalLayersNames="add,mul"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -149,7 +150,7 @@ TEST_F(NGraphReaderTests, ConvertMulAddToScaleShift) {
 
 TEST_F(NGraphReaderTests, ConvertMulAddToPower) {
     std::string model = R"V0G0N(
-<net name="Multiply" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -253,7 +254,7 @@ TEST_F(NGraphReaderTests, ConvertMulAddToPower) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -266,7 +267,7 @@ TEST_F(NGraphReaderTests, ConvertMulAddToPower) {
             </output>
         </layer>
         <layer id="3" name="add" precision="FP32" type="Power">
-            <data power="1.000000" scale="127.500000" shift="0.820000"/>
+            <data power="1.000000" scale="127.500000" shift="0.820000" originalLayersNames="add,mul"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -303,7 +304,7 @@ TEST_F(NGraphReaderTests, ConvertMulAddToPower) {
 
 TEST_F(NGraphReaderTests, ConvertMulToPower) {
     std::string model = R"V0G0N(
-<net name="Multiply" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -368,7 +369,7 @@ TEST_F(NGraphReaderTests, ConvertMulToPower) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -417,7 +418,7 @@ TEST_F(NGraphReaderTests, ConvertMulToPower) {
 
 TEST_F(NGraphReaderTests, ConvertMulToPower2) {
     std::string model = R"V0G0N(
-<net name="Multiply" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -478,7 +479,7 @@ TEST_F(NGraphReaderTests, ConvertMulToPower2) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -528,7 +529,7 @@ TEST_F(NGraphReaderTests, ConvertMulToPower2) {
 
 TEST_F(NGraphReaderTests, ConvertAddToPower) {
     std::string model = R"V0G0N(
-<net name="Multiply" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -593,7 +594,7 @@ TEST_F(NGraphReaderTests, ConvertAddToPower) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -642,7 +643,7 @@ TEST_F(NGraphReaderTests, ConvertAddToPower) {
 
 TEST_F(NGraphReaderTests, ConvertMulToScaleShift) {
     std::string model = R"V0G0N(
-<net name="Multiply" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -707,7 +708,7 @@ TEST_F(NGraphReaderTests, ConvertMulToScaleShift) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -750,7 +751,7 @@ TEST_F(NGraphReaderTests, ConvertMulToScaleShift) {
 
 TEST_F(NGraphReaderTests, ConvertAddToScaleShift) {
     std::string model = R"V0G0N(
-<net name="Multiply" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -815,7 +816,7 @@ TEST_F(NGraphReaderTests, ConvertAddToScaleShift) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -858,7 +859,7 @@ TEST_F(NGraphReaderTests, ConvertAddToScaleShift) {
 
 TEST_F(NGraphReaderTests, ConvertMulToEltwise) {
     std::string model = R"V0G0N(
-<net name="Multiply" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -921,7 +922,7 @@ TEST_F(NGraphReaderTests, ConvertMulToEltwise) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -979,7 +980,7 @@ TEST_F(NGraphReaderTests, ConvertMulToEltwise) {
 
 TEST_F(NGraphReaderTests, ConvertAddToEltwise) {
     std::string model = R"V0G0N(
-<net name="Multiply" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -1042,7 +1043,7 @@ TEST_F(NGraphReaderTests, ConvertAddToEltwise) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -1100,7 +1101,7 @@ TEST_F(NGraphReaderTests, ConvertAddToEltwise) {
 
 TEST_F(NGraphReaderTests, ReadAddNoBroadcastNetwork) {
     std::string model = R"V0G0N(
-<net name="Add" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -1167,7 +1168,7 @@ TEST_F(NGraphReaderTests, ReadAddNoBroadcastNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -1229,7 +1230,7 @@ TEST_F(NGraphReaderTests, ReadAddNoBroadcastNetwork) {
 
 TEST_F(NGraphReaderTests, ReadMultiplyNoBroadcastNetwork) {
     std::string model = R"V0G0N(
-<net name="Multiply" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -1296,7 +1297,7 @@ TEST_F(NGraphReaderTests, ReadMultiplyNoBroadcastNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -1358,7 +1359,7 @@ TEST_F(NGraphReaderTests, ReadMultiplyNoBroadcastNetwork) {
 
 TEST_F(NGraphReaderTests, RemoveAdd) {
     std::string model = R"V0G0N(
-<net name="Multiply" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -1442,7 +1443,7 @@ TEST_F(NGraphReaderTests, RemoveAdd) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -1487,7 +1488,7 @@ TEST_F(NGraphReaderTests, RemoveAdd) {
 
 TEST_F(NGraphReaderTests, RemoveMulAdd) {
     std::string model = R"V0G0N(
-<net name="Multiply" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -1610,7 +1611,7 @@ TEST_F(NGraphReaderTests, RemoveMulAdd) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -1659,7 +1660,7 @@ TEST_F(NGraphReaderTests, RemoveMulAdd) {
 
 TEST_F(NGraphReaderTests, RemoveAdd2) {
     std::string model = R"V0G0N(
-<net name="Multiply" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -1743,7 +1744,7 @@ TEST_F(NGraphReaderTests, RemoveAdd2) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -1756,6 +1757,7 @@ TEST_F(NGraphReaderTests, RemoveAdd2) {
             </output>
         </layer>
         <layer id="3" name="add" precision="FP32" type="ReLU">
+            <data originalLayersNames="relu" />
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -1788,7 +1790,7 @@ TEST_F(NGraphReaderTests, RemoveAdd2) {
 
 TEST_F(NGraphReaderTests, RemoveAdd3) {
     std::string model = R"V0G0N(
-<net name="Multiply" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -1918,7 +1920,7 @@ TEST_F(NGraphReaderTests, RemoveAdd3) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -2004,7 +2006,7 @@ TEST_F(NGraphReaderTests, RemoveAdd3) {
 
 TEST_F(NGraphReaderTests, ConvertAddToEltwise2) {
     std::string model = R"V0G0N(
-<net name="Multiply" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="i32" shape="1,64,112,112"/>
@@ -2069,7 +2071,7 @@ TEST_F(NGraphReaderTests, ConvertAddToEltwise2) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="I32" batch="1">
+<net name="Network" version="5" precision="I32" batch="1">
     <layers>
         <layer id="0" name="data" precision="I32" type="Input">
             <output>
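
Several tests in the file above fold a constant Multiply and/or Add into a single v5 Power layer, e.g. power="1.000000" scale="127.500000" shift="0.820000". Assuming the documented Power semantics y = (shift + scale * x)^power, a power of 1 makes the layer exactly the fused mul-add; a one-line sketch:

#include <cmath>

// Assumed v5 Power layer semantics: y = (shift + scale * x) ^ power.
// With power == 1 this reproduces Multiply(scale) followed by Add(shift),
// matching the ConvertMulAddToPower reference IR above.
float powerLayer(float x, float power, float scale, float shift) {
    return std::pow(shift + scale * x, power);
}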
index 86b59af..010c706 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadLogNetwork) {
     std::string model = R"V0G0N(
-<net name="Log_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -55,7 +55,7 @@ TEST_F(NGraphReaderTests, ReadLogNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Log_net" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index c4283ac..29c3656 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadLogicalAndNetwork) {
     std::string model = R"V0G0N(
-<net name="LogicalAnd_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="boolean" shape="1,3,22,22"/>
@@ -73,7 +73,7 @@ TEST_F(NGraphReaderTests, ReadLogicalAndNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="LogicalAnd_net" version="5" precision="BOOL" batch="1">
+<net name="Network" version="5" precision="BOOL" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="BOOL" id="0">
             <output>
index 1bbe724..7cc7f6b 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, DISABLED_ReadLogicalNotNetwork) {
     std::string model = R"V0G0N(
-<net name="LogicalNot_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="boolean" shape="1,3,22,22"/>
@@ -55,7 +55,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadLogicalNotNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="LogicalNot_net" version="5" precision="BOOL" batch="1">
+<net name="Network" version="5" precision="BOOL" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="BOOL" id="0">
             <output>
index 8f0cbc2..ca0bd6f 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadLogicalOrNetwork) {
     std::string model = R"V0G0N(
-<net name="LogicalOr_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="boolean" shape="1,3,22,22"/>
@@ -73,7 +73,7 @@ TEST_F(NGraphReaderTests, ReadLogicalOrNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="LogicalOr_net" version="5" precision="BOOL" batch="1">
+<net name="Network" version="5" precision="BOOL" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="BOOL" id="0">
             <output>
index 3d320fd..0376cf3 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadLogicalXorNetwork) {
     std::string model = R"V0G0N(
-<net name="LogicalXor_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="boolean" shape="1,3,22,22"/>
@@ -73,7 +73,7 @@ TEST_F(NGraphReaderTests, ReadLogicalXorNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="LogicalXor_net" version="5" precision="BOOL" batch="1">
+<net name="Network" version="5" precision="BOOL" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="BOOL" id="0">
             <output>
index dfd077b..cd1de35 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadMatMulNetwork1) {
     std::string model = R"V0G0N(
-<net name="Convolution" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,2048"/>
@@ -62,7 +62,7 @@ TEST_F(NGraphReaderTests, ReadMatMulNetwork1) {
 )V0G0N";
     // 'fc' layer biases are fake and added due to an IE limitation for the Fully Connected layer
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -100,7 +100,7 @@ TEST_F(NGraphReaderTests, ReadMatMulNetwork1) {
 
 TEST_F(NGraphReaderTests, ReadMatMulNetwork2) {
     std::string model = R"V0G0N(
-<net name="Convolution" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,2048"/>
@@ -157,7 +157,7 @@ TEST_F(NGraphReaderTests, ReadMatMulNetwork2) {
 )V0G0N";
     // 'fc' layer biases are fake and added due to an IE limitation for the Fully Connected layer
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -195,7 +195,7 @@ TEST_F(NGraphReaderTests, ReadMatMulNetwork2) {
 
 TEST_F(NGraphReaderTests, ReadMatMulNetwork3) {
     std::string model = R"V0G0N(
-<net name="Convolution" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="2048,1"/>
@@ -252,7 +252,7 @@ TEST_F(NGraphReaderTests, ReadMatMulNetwork3) {
 )V0G0N";
     // 'fc' layer biases are fake and added due to an IE limitation for the Fully Connected layer
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -263,7 +263,7 @@ TEST_F(NGraphReaderTests, ReadMatMulNetwork3) {
             </output>
         </layer>
         <layer id="1" name="fc/transpose_a" precision="FP32" type="Permute">
-            <data order="1,0"/>
+            <data order="1,0" originalLayersNames="fc"/>
             <input>
                 <port id="0">
                     <dim>2048</dim>
@@ -306,7 +306,7 @@ TEST_F(NGraphReaderTests, ReadMatMulNetwork3) {
 
 TEST_F(NGraphReaderTests, ReadMatMulNetwork4) {
     std::string model = R"V0G0N(
-<net name="Convolution" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data1" type="Parameter" version="opset1">
             <data element_type="f32" shape="2048,1"/>
@@ -362,7 +362,7 @@ TEST_F(NGraphReaderTests, ReadMatMulNetwork4) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data1" precision="FP32" type="Input">
             <output>
@@ -411,7 +411,7 @@ TEST_F(NGraphReaderTests, ReadMatMulNetwork4) {
 
 TEST_F(NGraphReaderTests, ReadMatMulNetwork5) {
     std::string model = R"V0G0N(
-<net name="Convolution" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data1" type="Parameter" version="opset1">
             <data element_type="f32" shape="2,3,2"/>
@@ -476,7 +476,7 @@ TEST_F(NGraphReaderTests, ReadMatMulNetwork5) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data1" precision="FP32" type="Input">
             <output>
@@ -508,6 +508,7 @@ TEST_F(NGraphReaderTests, ReadMatMulNetwork5) {
             </blobs>
         </layer>
         <layer id="2" name="fc/reshape" precision="FP32" type="Reshape">
+            <data originalLayersNames="fc"/>
             <input>
                 <port id="0">
                     <dim>2</dim>
index a058efb..bdda420 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadMaximumNetwork) {
     std::string model = R"V0G0N(
-<net name="Multiply" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -73,7 +73,7 @@ TEST_F(NGraphReaderTests, ReadMaximumNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
index 8567b3f..0453d71 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadMVNNetwork) {
     std::string model = R"V0G0N(
-<net name="Activation" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -56,7 +56,7 @@ TEST_F(NGraphReaderTests, ReadMVNNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index ff35492..d961eb0 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadNegativeNetwork) {
     std::string model = R"V0G0N(
-<net name="Negative" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -55,7 +55,7 @@ TEST_F(NGraphReaderTests, ReadNegativeNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Negative" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
index 1502df0..cc9f3c3 100644 (file)
@@ -8,7 +8,7 @@
 
 TEST_F(NGraphReaderTests, DISABLED_ReadIncorrectNetwork) {
     std::string model = R"V0G0N(
-<net name="Activation" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -80,7 +80,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadIncorrectNetwork) {
 
 TEST_F(NGraphReaderTests, ConvertNGraphFromIterator) {
     std::string model = R"V0G0N(
-<net name="Convolution" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,3,227,227"/>
index d107c92..bc2f3aa 100644 (file)
@@ -7,7 +7,7 @@
 /*
 TEST_F(NGraphReaderTests, ReadNonMaxSuppression) {
     std::string model = R"V0G0N(
-<net name="NonMaxSuppression" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="in1" type="Parameter" >
             <data element_type="f32" shape="1,15130,4"/>
@@ -111,7 +111,7 @@ TEST_F(NGraphReaderTests, ReadNonMaxSuppression) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="NonMaxSuppression" version="5">
+<net name="Network" version="5">
     <layers>
         <layer id="0" name="in1" type="Input" >
             <data precision="I32"/>
@@ -216,4 +216,4 @@ TEST_F(NGraphReaderTests, ReadNonMaxSuppression) {
     });
 }
 
- */
\ No newline at end of file
+ */
index 470b23c..7c8ee7e 100644 (file)
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadOneHotFP32) {
     std::string model = R"V0G0N(
-<net name="OneHot" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="in1" type="Parameter"  version="opset1">
             <data element_type="i64" shape="1,10,22"/>
@@ -78,7 +78,7 @@ TEST_F(NGraphReaderTests, ReadOneHotFP32) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="OneHot" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="I64" id="0">
             <output>
@@ -126,7 +126,7 @@ TEST_F(NGraphReaderTests, ReadOneHotFP32) {
 
 TEST_F(NGraphReaderTests, ReadOneHotINT16) {
     std::string model = R"V0G0N(
-<net name="OneHot" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="in1" type="Parameter"  version="opset1">
             <data element_type="i64" shape="1,10,22"/>
@@ -224,7 +224,7 @@ TEST_F(NGraphReaderTests, ReadOneHotINT16) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="OneHot" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="I64" id="0">
             <output>
@@ -254,7 +254,7 @@ TEST_F(NGraphReaderTests, ReadOneHotINT16) {
             </output>
         </layer>
         <layer id="2" name="onehot/Convert" type="Convert" precision="I16">
-            <data precision="I16"/>
+            <data precision="I16" originalLayersNames="one_hot_v10"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
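Several reference v5 IRs in this commit gain an originalLayersNames attribute on layers produced by conversion (the Convert above; PriorBox, Pooling, Reshape and Tile below). It records which layers of the source graph a converted or fused layer came from. A hedged sketch of attaching such provenance through ngraph's generic rt_info map; the key string and the VariantWrapper<std::string> usage are assumptions here, not taken from this diff:

    #include <memory>
    #include <string>
    #include <ngraph/node.hpp>
    #include <ngraph/variant.hpp>

    // Sketch only: the transformations in this snapshot keep fused-layer names
    // in rt_info, and the IR writer serializes them as originalLayersNames.
    void markOrigin(const std::shared_ptr<ngraph::Node>& node, const std::string& names) {
        auto& rt = node->get_rt_info();  // map of std::string to std::shared_ptr<ngraph::Variant>
        rt["originalLayersNames"] = std::make_shared<ngraph::VariantWrapper<std::string>>(names);
    }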
index 11ea35f..35da676 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadPadNoPadValue) {
     std::string model = R"V0G0N(
-<net name="Pad" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="in1" type="Parameter"  version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -80,7 +80,7 @@ TEST_F(NGraphReaderTests, ReadPadNoPadValue) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
@@ -134,7 +134,7 @@ TEST_F(NGraphReaderTests, ReadPadNoPadValue) {
 
 TEST_F(NGraphReaderTests, ReadPadWithPadValue) {
     std::string model = R"V0G0N(
-<net name="Pad" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="in1" type="Parameter"  version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -218,7 +218,7 @@ TEST_F(NGraphReaderTests, ReadPadWithPadValue) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index d3fcc33..d319e25 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadMaxPoolNetwork) {
     std::string model = R"V0G0N(
-<net name="Activation" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -56,7 +56,7 @@ TEST_F(NGraphReaderTests, ReadMaxPoolNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
@@ -99,7 +99,7 @@ TEST_F(NGraphReaderTests, ReadMaxPoolNetwork) {
 
 TEST_F(NGraphReaderTests, ReadAvgPoolNetwork) {
     std::string model = R"V0G0N(
-<net name="Activation" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -149,7 +149,7 @@ TEST_F(NGraphReaderTests, ReadAvgPoolNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index 8548ef8..e1da5b5 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadPowNetwork) {
     std::string model = R"V0G0N(
-<net name="Multiply" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -73,7 +73,7 @@ TEST_F(NGraphReaderTests, ReadPowNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
index 2bb3203..6f127ab 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadPReLUNetwork) {
     std::string model = R"V0G0N(
-<net name="Activation" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,64,22,22"/>
@@ -71,7 +71,7 @@ TEST_F(NGraphReaderTests, ReadPReLUNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index 77c3eac..4d4a551 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadPriorBoxClusteredNetwork) {
     std::string model = R"V0G0N(
-<net name="PriorBoxClusteredNet" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="in1" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,768,30,30"/>
@@ -30,7 +30,7 @@ TEST_F(NGraphReaderTests, ReadPriorBoxClusteredNetwork) {
                 </port>
             </output>
         </layer>
-        <layer id="2" name="ch_concat_mixed_7_chconcat_anchors/0_port" type="ShapeOf" version="opset1">
+        <layer id="2" name="shape_of1" type="ShapeOf" version="opset1">
             <input>
                 <port id="0" precision="FP32">
                     <dim>1</dim>
@@ -69,7 +69,7 @@ TEST_F(NGraphReaderTests, ReadPriorBoxClusteredNetwork) {
                 </port>
             </output>
         </layer>
-        <layer id="5" name="ch_concat_mixed_7_chconcat_anchors/ss_0_port" type="StridedSlice" version="opset1">
+        <layer id="5" name="ss1" type="StridedSlice" version="opset1">
             <data begin_mask="0" ellipsis_mask="0" end_mask="0" new_axis_mask="0" shrink_axis_mask="0"/>
             <input>
                 <port id="0" precision="I64">
@@ -91,7 +91,7 @@ TEST_F(NGraphReaderTests, ReadPriorBoxClusteredNetwork) {
                 </port>
             </output>
         </layer>
-        <layer id="6" name="ch_concat_mixed_7_chconcat_anchors/1_port" type="ShapeOf" version="opset1">
+        <layer id="6" name="shape_of2" type="ShapeOf" version="opset1">
             <input>
                 <port id="0" precision="FP32">
                     <dim>1</dim>
@@ -106,7 +106,7 @@ TEST_F(NGraphReaderTests, ReadPriorBoxClusteredNetwork) {
                 </port>
             </output>
         </layer>
-        <layer id="7" name="ch_concat_mixed_7_chconcat_anchors/ss_1_port" type="StridedSlice" version="opset1">
+        <layer id="7" name="ss2" type="StridedSlice" version="opset1">
             <data begin_mask="0" ellipsis_mask="0" end_mask="0" new_axis_mask="0" shrink_axis_mask="0"/>
             <input>
                 <port id="0" precision="I64">
@@ -202,7 +202,7 @@ TEST_F(NGraphReaderTests, ReadPriorBoxClusteredNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="in1" type="Input" precision="FP32">
             <output>
@@ -225,7 +225,7 @@ TEST_F(NGraphReaderTests, ReadPriorBoxClusteredNetwork) {
             </output>
         </layer>
         <layer name="ExpandDims" id="2" type="PriorBoxClustered" precision="FP32">
-            <data clip="0" step_h="16.000000" step_w="16.000000" flip="1" height="44,10,30,19,94,32,61,53,17" offset="0.500000" step="16.000000" variance="0.1,0.1,0.2,0.2" width="86,13,57,39,68,34,142,50,23"/>
+            <data clip="0" step_h="16.000000" step_w="16.000000" flip="1" height="44,10,30,19,94,32,61,53,17" offset="0.500000" step="16.000000" variance="0.1,0.1,0.2,0.2" width="86,13,57,39,68,34,142,50,23" originalLayersNames="ExpandDims,prior,shape_of1,shape_of2,ss1,ss2"/>
             <input>
                 <port id="1">
                     <dim>1</dim>
@@ -267,7 +267,7 @@ TEST_F(NGraphReaderTests, ReadPriorBoxClusteredNetwork) {
 
 TEST_F(NGraphReaderTests, ReadPriorBoxNetwork) {
     std::string model = R"V0G0N(
-<net name="PriorBoxNet" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="in1" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,768,30,30"/>
@@ -291,7 +291,7 @@ TEST_F(NGraphReaderTests, ReadPriorBoxNetwork) {
                 </port>
             </output>
         </layer>
-        <layer id="2" name="ch_concat_mixed_7_chconcat_anchors/0_port" type="ShapeOf" version="opset1">
+        <layer id="2" name="shape_of1" type="ShapeOf" version="opset1">
             <input>
                 <port id="0" precision="FP32">
                     <dim>1</dim>
@@ -330,7 +330,7 @@ TEST_F(NGraphReaderTests, ReadPriorBoxNetwork) {
                 </port>
             </output>
         </layer>
-        <layer id="5" name="ch_concat_mixed_7_chconcat_anchors/ss_0_port" type="StridedSlice" version="opset1">
+        <layer id="5" name="ss1" type="StridedSlice" version="opset1">
             <data begin_mask="0" ellipsis_mask="0" end_mask="0" new_axis_mask="0" shrink_axis_mask="0"/>
             <input>
                 <port id="0" precision="I64">
@@ -352,7 +352,7 @@ TEST_F(NGraphReaderTests, ReadPriorBoxNetwork) {
                 </port>
             </output>
         </layer>
-        <layer id="6" name="ch_concat_mixed_7_chconcat_anchors/1_port" type="ShapeOf" version="opset1">
+        <layer id="6" name="shape_of2" type="ShapeOf" version="opset1">
             <input>
                 <port id="0" precision="FP32">
                     <dim>1</dim>
@@ -367,7 +367,7 @@ TEST_F(NGraphReaderTests, ReadPriorBoxNetwork) {
                 </port>
             </output>
         </layer>
-        <layer id="7" name="ch_concat_mixed_7_chconcat_anchors/ss_1_port" type="StridedSlice" version="opset1">
+        <layer id="7" name="ss2" type="StridedSlice" version="opset1">
             <data begin_mask="0" ellipsis_mask="0" end_mask="0" new_axis_mask="0" shrink_axis_mask="0"/>
             <input>
                 <port id="0" precision="I64">
@@ -462,7 +462,7 @@ TEST_F(NGraphReaderTests, ReadPriorBoxNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="in1" type="Input" precision="FP32">
             <output>
@@ -485,7 +485,7 @@ TEST_F(NGraphReaderTests, ReadPriorBoxNetwork) {
             </output>
         </layer>
         <layer name="ExpandDims" id="2" type="PriorBox" precision="FP32">
-            <data density="" fixed_ratio="" fixed_size="" aspect_ratio="2,0.5" clip="0" flip="0" img_h="0" img_size="0" img_w="0" max_size="" min_size="51.200001,72.407555" offset="0.500000" scale_all_sizes="0" step="17.066666666666666" step_h="0" step_w="0" variance="0.1,0.1,0.2,0.2"/>
+            <data density="" fixed_ratio="" fixed_size="" aspect_ratio="2,0.5" clip="0" flip="0" img_h="0" img_size="0" img_w="0" max_size="" min_size="51.200001,72.407555" offset="0.500000" scale_all_sizes="0" step="17.066666666666666" step_h="0" step_w="0" variance="0.1,0.1,0.2,0.2" originalLayersNames="ExpandDims,prior,shape_of1,shape_of2,ss1,ss2"/>
             <input>
                 <port id="1">
                     <dim>1</dim>
index a7aa565..7816f31 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadProposalNetwork) {
     std::string model_v10 = R"V0G0N(
-<net name="ProposalNet" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="in1" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,12,34,62"/>
@@ -82,7 +82,7 @@ TEST_F(NGraphReaderTests, ReadProposalNetwork) {
     </net>
     )V0G0N";
     std::string model_v6  = R"V0G0N(
-<net name="ProposalNet" version="6" batch="1">
+<net name="Network" version="6" batch="1">
     <layers>
         <layer name="in3" type="Const" precision="I64" id="4">
             <output>
@@ -157,7 +157,7 @@ TEST_F(NGraphReaderTests, ReadProposalNetwork) {
 
 TEST_F(NGraphReaderTests, ReadProposalNetwork_2) {
     std::string model_v10 = R"V0G0N(
-<net name="ProposalNet" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="in1" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,12,34,62"/>
@@ -233,7 +233,7 @@ TEST_F(NGraphReaderTests, ReadProposalNetwork_2) {
     </net>
     )V0G0N";
     std::string model_v6  = R"V0G0N(
-<net name="ProposalNet" version="6" batch="1">
+<net name="Network" version="6" batch="1">
     <layers>
         <layer name="in3" type="Const" precision="I64" id="4">
             <output>
index 6a8982c..b07876f 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadRangeNetwork) {
     std::string model = R"V0G0N(
-<net name="Range" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="in1" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,2,12"/>
@@ -99,7 +99,7 @@ TEST_F(NGraphReaderTests, ReadRangeNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Range" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="in1" precision="FP32" type="Input">
             <output>
index 49eaab2..b5d9750 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadReduceLogicalAndNetwork) {
     std::string model = R"V0G0N(
-<net name="ReduceLogicalAnd_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="boolean" shape="1,3,22,22"/>
@@ -68,7 +68,7 @@ TEST_F(NGraphReaderTests, ReadReduceLogicalAndNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="ReduceLogicalAnd_net" version="5" precision="BOOL" batch="1">
+<net name="Network" version="5" precision="BOOL" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="BOOL" id="0">
             <output>
index 7a0d88b..ce85ccb 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadReduceLogicalOrNetwork) {
     std::string model = R"V0G0N(
-<net name="ReduceLogicalOr_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="boolean" shape="1,3,22,22"/>
@@ -68,7 +68,7 @@ TEST_F(NGraphReaderTests, ReadReduceLogicalOrNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="ReduceLogicalOr_net" version="5" precision="BOOL" batch="1">
+<net name="Network" version="5" precision="BOOL" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="BOOL" id="0">
             <output>
index cdacaec..e382ef3 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReduceMeanToAvgPool) {
     std::string model = R"V0G0N(
-<net name="ReduceMean" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,3,227,227"/>
@@ -68,7 +68,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToAvgPool) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="ReduceMean" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -115,7 +115,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToAvgPool) {
 
 TEST_F(NGraphReaderTests, ReduceMeanToAvgPoolKeepDimsFalse) {
     std::string model = R"V0G0N(
-<net name="ReduceMean" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,3,227,64"/>
@@ -176,7 +176,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToAvgPoolKeepDimsFalse) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -189,7 +189,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToAvgPoolKeepDimsFalse) {
             </output>
         </layer>
         <layer id="1" name="reduce/pool" precision="FP32" type="Pooling">
-            <data dilations="1,1" group="1" kernel="227,1" output="3" pads_begin="0,0" pads_end="0,0" strides="1,1" pool-method="avg" exclude-pad="true" rounding_type="floor"/>
+            <data dilations="1,1" group="1" kernel="227,1" output="3" pads_begin="0,0" pads_end="0,0" strides="1,1" pool-method="avg" exclude-pad="true" rounding_type="floor" originalLayersNames="reduce"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -254,7 +254,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToAvgPoolKeepDimsFalse) {
 
 TEST_F(NGraphReaderTests, ReduceMeanToAvgPoolNonSpatial) {
     std::string model = R"V0G0N(
-<net name="ReduceMean" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,3,24,12"/>
@@ -317,7 +317,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToAvgPoolNonSpatial) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -340,6 +340,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToAvgPoolNonSpatial) {
             </blobs>
         </layer>
         <layer id="2" name="reduce/reshape_begin" precision="FP32" type="Reshape">
+            <data originalLayersNames="reduce"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -361,7 +362,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToAvgPoolNonSpatial) {
             </output>
         </layer>
         <layer id="3" name="reduce/pool" precision="FP32" type="Pooling">
-            <data dilations="1,1" group="1" kernel="3,1" output="1" pads_begin="0,0" pads_end="0,0" strides="1,1" pool-method="avg" exclude-pad="true" rounding_type="floor"/>
+            <data dilations="1,1" group="1" kernel="3,1" output="1" pads_begin="0,0" pads_end="0,0" strides="1,1" pool-method="avg" exclude-pad="true" rounding_type="floor" originalLayersNames="reduce"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -429,7 +430,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToAvgPoolNonSpatial) {
 
 TEST_F(NGraphReaderTests, ReduceMeanToAvgPoolNonSpatialHard) {
     std::string model = R"V0G0N(
-<net name="ReduceMean" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,3,24,12"/>
@@ -492,7 +493,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToAvgPoolNonSpatialHard) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -515,6 +516,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToAvgPoolNonSpatialHard) {
             </blobs>
         </layer>
         <layer id="2" name="reduce/reshape_begin" precision="FP32" type="Reshape">
+            <data originalLayersNames="reduce"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -572,7 +574,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToAvgPoolNonSpatialHard) {
 
 TEST_F(NGraphReaderTests, ReduceMeanToMaxPool) {
     std::string model = R"V0G0N(
-<net name="ReduceMean" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,3,227,227"/>
@@ -635,7 +637,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToMaxPool) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="ReduceMean" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -682,7 +684,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToMaxPool) {
 
 TEST_F(NGraphReaderTests, ReduceMeanToMaxPoolKeepDimsFalse) {
     std::string model = R"V0G0N(
-<net name="ReduceMean" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,3,227,64"/>
@@ -743,7 +745,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToMaxPoolKeepDimsFalse) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -756,7 +758,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToMaxPoolKeepDimsFalse) {
             </output>
         </layer>
         <layer id="1" name="reduce/pool" precision="FP32" type="Pooling">
-            <data dilations="1,1" group="1" kernel="227,1" output="3" pads_begin="0,0" pads_end="0,0" strides="1,1" pool-method="max" rounding_type="floor"/>
+            <data dilations="1,1" group="1" kernel="227,1" output="3" pads_begin="0,0" pads_end="0,0" strides="1,1" pool-method="max" rounding_type="floor" originalLayersNames="reduce"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -821,7 +823,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToMaxPoolKeepDimsFalse) {
 
 TEST_F(NGraphReaderTests, ReduceMeanToMaxPoolNonSpatial) {
     std::string model = R"V0G0N(
-<net name="ReduceMean" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,3,24,12"/>
@@ -884,7 +886,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToMaxPoolNonSpatial) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -907,6 +909,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToMaxPoolNonSpatial) {
             </blobs>
         </layer>
         <layer id="2" name="reduce/reshape_begin" precision="FP32" type="Reshape">
+            <data originalLayersNames="reduce"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -928,7 +931,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToMaxPoolNonSpatial) {
             </output>
         </layer>
         <layer id="3" name="reduce/pool" precision="FP32" type="Pooling">
-            <data dilations="1,1" group="1" kernel="3,1" output="1" pads_begin="0,0" pads_end="0,0" strides="1,1" pool-method="max" rounding_type="floor"/>
+            <data dilations="1,1" group="1" kernel="3,1" output="1" pads_begin="0,0" pads_end="0,0" strides="1,1" pool-method="max" rounding_type="floor" originalLayersNames="reduce"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -996,7 +999,7 @@ TEST_F(NGraphReaderTests, ReduceMeanToMaxPoolNonSpatial) {
 
 TEST_F(NGraphReaderTests, ReduceSumToAvgPool) {
     std::string model = R"V0G0N(
-<net name="ReduceMean" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,3,227,227"/>
@@ -1058,7 +1061,7 @@ TEST_F(NGraphReaderTests, ReduceSumToAvgPool) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="ReduceMean" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -1071,7 +1074,7 @@ TEST_F(NGraphReaderTests, ReduceSumToAvgPool) {
             </output>
         </layer>
         <layer id="1" name="reduce/pool" precision="FP32" type="Pooling">
-            <data dilations="1,1" group="1" kernel="227,227" output="3" pads_begin="0,0" pads_end="0,0" strides="1,1" pool-method="avg" exclude-pad="true" rounding_type="floor"/>
+            <data dilations="1,1" group="1" kernel="227,227" output="3" pads_begin="0,0" pads_end="0,0" strides="1,1" pool-method="avg" exclude-pad="true" rounding_type="floor" originalLayersNames="reduce"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
index 601de70..b397c80 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadReLUNetworkWithoutTopologicalOrder) {
     std::string model = R"V0G0N(
-<net name="Activation" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="output" type="Result" id="2" version="opset1">
             <input>
@@ -55,7 +55,7 @@ TEST_F(NGraphReaderTests, ReadReLUNetworkWithoutTopologicalOrder) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="activation" id="1" type="ReLU" precision="FP32">
             <input>
@@ -97,7 +97,7 @@ TEST_F(NGraphReaderTests, ReadReLUNetworkWithoutTopologicalOrder) {
 
 TEST_F(NGraphReaderTests, ReadReLUNetwork) {
     std::string model = R"V0G0N(
-<net name="Activation" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -146,7 +146,7 @@ TEST_F(NGraphReaderTests, ReadReLUNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
@@ -188,7 +188,7 @@ TEST_F(NGraphReaderTests, ReadReLUNetwork) {
 
 TEST_F(NGraphReaderTests, ReadReLUScalarNetwork) {
     std::string model = R"V0G0N(
-<net name="Activation" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape=""/>
index ad130fd..c9a23db 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadReshapeNetwork) {
     std::string model = R"V0G0N(
-<net name="Reshape" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,2048,1,1"/>
@@ -64,7 +64,7 @@ TEST_F(NGraphReaderTests, ReadReshapeNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
index 064c7bc..42fd370 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadReverseSequenceNetwork) {
     std::string model = R"V0G0N(
-<net name="ReverseSequence" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="3,10,100,200"/>
@@ -67,7 +67,7 @@ TEST_F(NGraphReaderTests, ReadReverseSequenceNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
index 078a6be..058f5e3 100644
@@ -7,7 +7,7 @@
 
 TEST_F(NGraphReaderTests, ReadSelectFP32Network) {
     std::string model = R"V0G0N(
-<net name="Select_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="cond" type="Parameter" id="0" version="opset1">
             <data element_type="boolean" shape="1,3,22,22"/>
@@ -92,7 +92,7 @@ TEST_F(NGraphReaderTests, ReadSelectFP32Network) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Select_net" version="5" precision="BOOL" batch="1">
+<net name="Network" version="5" precision="BOOL" batch="1">
     <layers>
         <layer name="cond" type="Input" precision="BOOL" id="0">
             <output>
@@ -168,7 +168,7 @@ TEST_F(NGraphReaderTests, ReadSelectFP32Network) {
 
 TEST_F(NGraphReaderTests, ReadSelectI32Network) {
     std::string model = R"V0G0N(
-<net name="Select_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="cond" type="Parameter" id="0" version="opset1">
             <data element_type="boolean" shape="1,3,22,22"/>
@@ -253,7 +253,7 @@ TEST_F(NGraphReaderTests, ReadSelectI32Network) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Select_net" version="5" precision="BOOL" batch="1">
+<net name="Network" version="5" precision="BOOL" batch="1">
     <layers>
         <layer name="cond" type="Input" precision="BOOL" id="0">
             <output>
index 2230376..881fc9c 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadSeluNetwork) {
     std::string model = R"V0G0N(
-<net name="Activation" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="in1" type="Parameter"  version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -79,7 +79,7 @@ TEST_F(NGraphReaderTests, ReadSeluNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index fc37278..875a656 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, DISABLED_ReadShapeOfNetwork) {
     std::string model = R"V0G0N(
-<net name="Activation" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -72,7 +72,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadShapeOfNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" id="1">
             <output>
index 7fad210..e2502eb 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadSigmoidNetwork) {
     std::string model = R"V0G0N(
-<net name="Activation" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -55,7 +55,7 @@ TEST_F(NGraphReaderTests, ReadSigmoidNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index 48f3c58..a130004 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadSignNetwork) {
     std::string model = R"V0G0N(
-<net name="Sign_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -55,7 +55,7 @@ TEST_F(NGraphReaderTests, ReadSignNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Sign_net" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index 799efca..64e28f9 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadSinNetwork) {
     std::string model = R"V0G0N(
-<net name="Sin_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -55,7 +55,7 @@ TEST_F(NGraphReaderTests, ReadSinNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Sin_net" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index 6e8ca71..5c7a388 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadSinhNetwork) {
     std::string model = R"V0G0N(
-<net name="Sinh_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -55,7 +55,7 @@ TEST_F(NGraphReaderTests, ReadSinhNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Sinh_net" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index 91bb47a..e7be91b 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadSoftMaxNetwork) {
     std::string model = R"V0G0N(
-<net name="Activation" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,1000"/>
@@ -48,7 +48,7 @@ TEST_F(NGraphReaderTests, ReadSoftMaxNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index 844140a..21eb1d2 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadSplitNetwork) {
     std::string model = R"V0G0N(
-<net name="Activation" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="in1" type="Parameter"  version="opset1">
             <data element_type="f32" shape="1,6,22,22"/>
@@ -81,7 +81,7 @@ TEST_F(NGraphReaderTests, ReadSplitNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
@@ -133,7 +133,7 @@ TEST_F(NGraphReaderTests, ReadSplitNetwork) {
 
 TEST_F(NGraphReaderTests, ReadSplitNetwork2) {
     std::string model = R"V0G0N(
-<net name="Activation" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,63,46,46"/>
@@ -255,7 +255,7 @@ TEST_F(NGraphReaderTests, ReadSplitNetwork2) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
@@ -367,7 +367,7 @@ TEST_F(NGraphReaderTests, ReadSplitNetwork2) {
 
 TEST_F(NGraphReaderTests, ReadVariadicSplitNetwork) {
     std::string model = R"V0G0N(
-<net name="Activation" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="in1" type="Parameter"  version="opset1">
             <data element_type="f32" shape="1,6,22,22"/>
@@ -453,7 +453,7 @@ TEST_F(NGraphReaderTests, ReadVariadicSplitNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index 5d26b1e..03c6c5a 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadSqrtNetwork) {
     std::string model = R"V0G0N(
-<net name="Sqrt_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -55,7 +55,7 @@ TEST_F(NGraphReaderTests, ReadSqrtNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Sqrt_net" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index 29bfa8a..55021ce 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadSquaredDifferenceNetwork) {
     std::string model = R"V0G0N(
-<net name="Less" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,112,112"/>
@@ -73,7 +73,7 @@ TEST_F(NGraphReaderTests, ReadSquaredDifferenceNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Greater" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
index f60f0c1..6443201 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadSqueeze) {
     std::string model = R"V0G0N(
-<net name="Reshape" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,2048,1,1"/>
@@ -65,7 +65,7 @@ TEST_F(NGraphReaderTests, ReadSqueeze) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Convolution" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
index ed736d7..38691af 100644
@@ -10,7 +10,7 @@
 // strided_slice_to_crop transformation
 TEST_F(NGraphReaderTests, ConvertStridedSliceToCrop) {
     std::string model_version10 = R"V0G0N(
-<net name="Reshape" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="300,90,1,4"/>
@@ -96,7 +96,7 @@ TEST_F(NGraphReaderTests, ConvertStridedSliceToCrop) {
     </net>
     )V0G0N";
     std::string model_version6 = R"V0G0N(
-<net name="Reshape" version="6" batch="300">
+<net name="Network" version="6" batch="300">
     <layers>
         <layer name="data" type="Input" precision="FP32" id="0">
             <output>
@@ -174,7 +174,7 @@ TEST_F(NGraphReaderTests, DISABLED_ConvertStridedSliceToCropMultipleMasks) {
     // # (1, 9, 9, 9, 9, 2, 1, 2, 2) without shrink
     // # (1, 9, 9, 9, 9, 1, 2, 2) with shrink
     std::string model_version10 = R"V0G0N(
-<net name="Reshape" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="9,9,9,9,9,9,9"/>
@@ -274,7 +274,7 @@ TEST_F(NGraphReaderTests, DISABLED_ConvertStridedSliceToCropMultipleMasks) {
 </net>
 )V0G0N";
     std::string model_version6 = R"V0G0N(
-<net name="Reshape" version="6" batch="9">
+<net name="Network" version="6" batch="9">
        <layers>
                <layer name="data" type="Input" precision="FP32" id="0">
                        <output>
@@ -430,7 +430,7 @@ TEST_F(NGraphReaderTests, DISABLED_ConvertStridedSliceToCropMultipleMasks) {
 // TODO delete this check in ngraph "Check 'static_cast<size_t>(data_rank) == mask_size'
 TEST_F(NGraphReaderTests, DISABLED_ConvertStridedSliceToCropMultipleMasks_2) {
     std::string model_version10 = R"V0G0N(
-<net name="Reshape" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="9,9,9,9,9,9,9"/>
@@ -536,7 +536,7 @@ TEST_F(NGraphReaderTests, DISABLED_ConvertStridedSliceToCropMultipleMasks_2) {
 </net>
 )V0G0N";
     std::string model_version6 = R"V0G0N(
-<net name="Reshape" version="6" batch="9">
+<net name="Network" version="6" batch="9">
        <layers>
                <layer name="data" type="Input" precision="FP32" id="0">
                        <output>
index 590803e..5bbbc88 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadTanNetwork) {
     std::string model = R"V0G0N(
-<net name="Tan_net" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -55,7 +55,7 @@ TEST_F(NGraphReaderTests, ReadTanNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Tan_net" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index cbc80f1..82cfa2b 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadTanhNetwork) {
     std::string model = R"V0G0N(
-<net name="Activation" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -55,7 +55,7 @@ TEST_F(NGraphReaderTests, ReadTanhNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Activation" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index 1a882c6..16874d5 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadTileNetwork) {
     std::string model = R"V0G0N(
-<net name="Transpose" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,2,3,4"/>
@@ -67,7 +67,7 @@ TEST_F(NGraphReaderTests, ReadTileNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Transpose" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -116,7 +116,7 @@ TEST_F(NGraphReaderTests, ReadTileNetwork) {
 
 TEST_F(NGraphReaderTests, ReadTileNetwork2) {
     std::string model = R"V0G0N(
-<net name="Transpose" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,64,10,10"/>
@@ -177,7 +177,7 @@ TEST_F(NGraphReaderTests, ReadTileNetwork2) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Transpose" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
@@ -190,7 +190,7 @@ TEST_F(NGraphReaderTests, ReadTileNetwork2) {
             </output>
         </layer>
         <layer id="1" name="tile:" precision="FP32" type="Tile">
-        <data axis="3" tiles="4"/>
+        <data axis="3" tiles="4" originalLayersNames="tile"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -209,7 +209,7 @@ TEST_F(NGraphReaderTests, ReadTileNetwork2) {
             </output>
         </layer>
         <layer id="2" name="tile:_3" precision="FP32" type="Tile">
-        <data axis="2" tiles="3"/>
+        <data axis="2" tiles="3" originalLayersNames="tile"/>
             <input>
                 <port id="0">
                     <dim>1</dim>
@@ -227,7 +227,7 @@ TEST_F(NGraphReaderTests, ReadTileNetwork2) {
                 </port>
             </output>
         </layer>
-        <layer id="3" name="tile:_3_2" precision="FP32" type="Tile">
+        <layer id="3" name="tile" precision="FP32" type="Tile">
         <data axis="0" tiles="2"/>
             <input>
                 <port id="0">
index 6c3bd0f..a2d3023 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, DISABLED_ReadTopKNetwork) {
     std::string model = R"V0G0N(
-<net name="TopK" version="10">
+<net name="Network" version="10">
     <layers>
         <layer name="in1" type="Parameter" id="0" version="opset1">
             <data element_type="f32" shape="1,3,22,22"/>
@@ -133,7 +133,7 @@ TEST_F(NGraphReaderTests, DISABLED_ReadTopKNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="TopK" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer name="in1" type="Input" precision="FP32" id="0">
             <output>
index b7a0a5c..6495bea 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadTransposeNetwork) {
     std::string model = R"V0G0N(
-<net name="Transpose" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="1,2,3,4"/>
@@ -67,7 +67,7 @@ TEST_F(NGraphReaderTests, ReadTransposeNetwork) {
 </net>
 )V0G0N";
     std::string modelV5 = R"V0G0N(
-<net name="Transpose" version="5" precision="FP32" batch="1">
+<net name="Network" version="5" precision="FP32" batch="1">
     <layers>
         <layer id="0" name="data" precision="FP32" type="Input">
             <output>
index 0ce57d3..b47ee89 100644
@@ -6,7 +6,7 @@
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadUnsqueeze) {
     std::string model_version10 = R"V0G0N(
-<net name="Reshape" version="10">
+<net name="Network" version="10">
     <layers>
         <layer id="0" name="data" type="Parameter" version="opset1">
             <data element_type="f32" shape="32,64,60"/>
@@ -65,7 +65,7 @@ TEST_F(NGraphReaderTests, ReadUnsqueeze) {
 </net>
 )V0G0N";
     std::string model_version6 = R"V0G0N(
-<net name="Reshape" version="6" batch="1">
+<net name="Network" version="6" batch="1">
     <layers>
         <layer name="data" type="Input" precision="FP32" id="0">
             <output>
@@ -3,7 +3,7 @@
 //
 
 #include <gtest/gtest.h>
-#include <tests_common.hpp>
+
 #include <ie_parameter.hpp>
 #include <ie_layouts.h>
 
@@ -33,10 +33,9 @@ public:
 size_t DestructorTest::destructorCount = 0;
 size_t DestructorTest::constructorCount = 0;
 
-class ParameterTests : public TestsCommon {
+class ParameterTests : public ::testing::Test {
 public:
     void SetUp() override {
-        TestsCommon::SetUp();
         DestructorTest::destructorCount = 0;
         DestructorTest::constructorCount = 0;
     }
@@ -223,6 +222,26 @@ TEST_F(ParameterTests, ParametersEqual) {
     ASSERT_FALSE(p1 != p2);
 }
 
+TEST_F(ParameterTests, ParametersStringEqual) {
+    std::string s1 = "abc";
+    std::string s2 = std::string("a") + "bc";
+    Parameter p1 = s1;
+    Parameter p2 = s2;
+    ASSERT_TRUE(s1 == s2);
+    ASSERT_TRUE(p1 == p2);
+    ASSERT_FALSE(p1 != p2);
+}
+
+TEST_F(ParameterTests, ParametersCStringEqual) {
+    const char s1[] = "abc";
+    const char s2[] = "abc";
+    Parameter p1 = s1;
+    Parameter p2 = s2;
+    ASSERT_TRUE(s1 != s2);
+    ASSERT_TRUE(p1 == p2);
+    ASSERT_FALSE(p1 != p2);
+}
+
 TEST_F(ParameterTests, CompareParametersWithoutEqualOperator) {
     class TestClass {
     public:
@@ -233,9 +252,9 @@ TEST_F(ParameterTests, CompareParametersWithoutEqualOperator) {
         int* testPtr;
     };
 
-    TestClass a(2, (int *)0x234);
-    TestClass b(2, (int *)0x234);
-    TestClass c(3, (int *)0x234);
+    TestClass a(2, reinterpret_cast<int*>(0x234));
+    TestClass b(2, reinterpret_cast<int*>(0x234));
+    TestClass c(3, reinterpret_cast<int*>(0x234));
     Parameter parA = a;
     Parameter parB = b;
     Parameter parC = c;
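The two new string tests above pin down the value semantics of Parameter: two distinct char arrays with equal contents compare unequal as pointers (hence ASSERT_TRUE(s1 != s2)), yet the Parameters built from them compare equal, because construction stores the contents as a std::string. The same distinction in plain C++, with no Inference Engine types involved:

    #include <cassert>
    #include <string>

    int main() {
        const char a[] = "abc";
        const char b[] = "abc";
        // Array-to-pointer decay: this compares addresses, not contents.
        assert(a != b);
        // std::string compares contents, which is what Parameter relies on.
        assert(std::string(a) == std::string(b));
        return 0;
    }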
@@ -2,39 +2,19 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include <ie_layouts.h>
-#include <ie_blob.h>
 #include <gtest/gtest.h>
+
 #include <random>
 #include <chrono>
 
-#include <cpp/ie_cnn_net_reader.h>
-
-#include <gmock/gmock-spec-builders.h>
-
-#include "unit_test_utils/mocks/mock_allocator.hpp"
-
-#ifdef WIN32
-#define UNUSED
-#else
-#define UNUSED  __attribute__((unused))
-#endif
+#include <ie_layouts.h>
+#include <ie_blob.h>
 
 using namespace ::testing;
 using namespace std;
 using namespace InferenceEngine;
 
-class TensorDescTests: public ::testing::Test {
-protected:
-    virtual void TearDown() {
-    }
-
-    virtual void SetUp() {
-    }
-
-public:
-
-};
+using TensorDescTests = ::testing::Test;
 
 TEST_F(TensorDescTests, CreateBlobWithIncorrectLayout) {
     ASSERT_THROW(make_shared_blob<float>({ Precision::FP32, {1, 3, 32}, Layout::NC }), details::InferenceEngineException);
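The empty TensorDescTests fixture with no-op SetUp/TearDown collapses into a type alias; the existing TEST_F bodies compile unchanged because TEST_F only requires a type derived from ::testing::Test. The same idiom in isolation (names are illustrative):

    #include <gtest/gtest.h>

    // An alias is enough when the fixture adds no state and no setup.
    using MyTests = ::testing::Test;

    TEST_F(MyTests, Trivial) {
        EXPECT_EQ(2 + 2, 4);
    }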
@@ -4,7 +4,8 @@
 
 #include <gtest/gtest.h>
 
-#include "tests_common.hpp"
+#include "common_test_utils/test_common.hpp"
+
 #include <string>
 #include <sstream>
 #include <fstream>
 #include <ngraph_ops/fully_connected.hpp>
 #include <transformations/convert_opset1_to_legacy/convert_matmul_to_fc_or_gemm.hpp>
 #include <transformations/convert_opset1_to_legacy/reshape_fully_connected.hpp>
+#include <transformations/init_node_info.hpp>
 #include <transformations/utils/utils.hpp>
 
 #include "ngraph_test_utils.hpp"
 
 using namespace testing;
 
-class ConvertMatMulTests : public TestsCommon {};
-
-TEST_F(ConvertMatMulTests, ConvertMatMulTest1) {
+TEST(TransformationTests, ConvertMatMulTest1) {
     std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
     {
         auto input1 = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{3, 1, 2});
@@ -34,7 +34,10 @@ TEST_F(ConvertMatMulTests, ConvertMatMulTest1) {
         auto matmul = std::make_shared<ngraph::opset1::MatMul>(input1, input2, false, false);
 
         f = std::make_shared<ngraph::Function>(ngraph::NodeVector{matmul}, ngraph::ParameterVector{input1, input2});
+
+        ngraph::pass::InitNodeInfo().run_on_function(f);
         ngraph::pass::ConvertMatMulToFCorGemm().run_on_function(f);
+        ASSERT_NO_THROW(check_rt_info(f));
     }
 
     {
@@ -52,7 +55,7 @@ TEST_F(ConvertMatMulTests, ConvertMatMulTest1) {
     ASSERT_TRUE(res.first) << res.second;
 }
 
-TEST_F(ConvertMatMulTests, ConvertMatMulTest2) {
+TEST(TransformationTests, ConvertMatMulTest2) {
     std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
     {
         auto input1 = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{3, 1, 2});
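Every converted test in this file now follows the same three steps, visible in the hunks above and below: seed runtime info, run the legacy conversion, assert the info survived. Condensed into one helper, using only headers this diff itself touches (check_rt_info comes from ngraph_test_utils.hpp; the helper name is illustrative):

    #include <memory>
    #include <gtest/gtest.h>
    #include <ngraph/function.hpp>
    #include <transformations/init_node_info.hpp>
    #include <transformations/convert_opset1_to_legacy/convert_matmul_to_fc_or_gemm.hpp>
    #include "ngraph_test_utils.hpp"

    void runAndCheck(const std::shared_ptr<ngraph::Function>& f) {
        ngraph::pass::InitNodeInfo().run_on_function(f);             // attach initial rt_info to every node
        ngraph::pass::ConvertMatMulToFCorGemm().run_on_function(f);  // transformation under test
        ASSERT_NO_THROW(check_rt_info(f));                           // fails if a node lost its rt_info
    }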
@@ -60,7 +63,10 @@ TEST_F(ConvertMatMulTests, ConvertMatMulTest2) {
         auto matmul = std::make_shared<ngraph::opset1::MatMul>(input1, input2, false, false);
 
         f = std::make_shared<ngraph::Function>(ngraph::NodeVector{matmul}, ngraph::ParameterVector{input1, input2});
+
+        ngraph::pass::InitNodeInfo().run_on_function(f);
         ngraph::pass::ConvertMatMulToFCorGemm().run_on_function(f);
+        ASSERT_NO_THROW(check_rt_info(f));
     }
 
     {
@@ -78,7 +84,7 @@ TEST_F(ConvertMatMulTests, ConvertMatMulTest2) {
     ASSERT_TRUE(res.first) << res.second;
 }
 
-TEST_F(ConvertMatMulTests, ConvertMatMulTest3) {
+TEST(TransformationTests, ConvertMatMulTest3) {
     std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
     {
         auto input1 = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{2});
@@ -86,7 +92,9 @@ TEST_F(ConvertMatMulTests, ConvertMatMulTest3) {
         auto matmul = std::make_shared<ngraph::opset1::MatMul>(input1, input2, false, false);
 
         f = std::make_shared<ngraph::Function>(ngraph::NodeVector{matmul}, ngraph::ParameterVector{input1, input2});
+        ngraph::pass::InitNodeInfo().run_on_function(f);
         ngraph::pass::ConvertMatMulToFCorGemm().run_on_function(f);
+        ASSERT_NO_THROW(check_rt_info(f));
     }
 
     {
@@ -104,7 +112,7 @@ TEST_F(ConvertMatMulTests, ConvertMatMulTest3) {
     ASSERT_TRUE(res.first) << res.second;
 }
 
-TEST_F(ConvertMatMulTests, ConvertMatMulTest4) {
+TEST(TransformationTests, ConvertMatMulTest4) {
     std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
     {
         auto input1 = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{3, 1, 2});
@@ -112,7 +120,9 @@ TEST_F(ConvertMatMulTests, ConvertMatMulTest4) {
         auto matmul = std::make_shared<ngraph::opset1::MatMul>(input1, input2, false, false);
 
         f = std::make_shared<ngraph::Function>(ngraph::NodeVector{matmul}, ngraph::ParameterVector{input1, input2});
+        ngraph::pass::InitNodeInfo().run_on_function(f);
         ngraph::pass::ConvertMatMulToFCorGemm().run_on_function(f);
+        ASSERT_NO_THROW(check_rt_info(f));
     }
 
     {
@@ -127,7 +137,7 @@ TEST_F(ConvertMatMulTests, ConvertMatMulTest4) {
     ASSERT_TRUE(res.first) << res.second;
 }
 
-TEST_F(ConvertMatMulTests, ConvertMatMulTest5) {
+TEST(TransformationTests, ConvertMatMulTest5) {
     std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
     {
         auto input1 = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{3, 2, 2});
@@ -135,7 +145,9 @@ TEST_F(ConvertMatMulTests, ConvertMatMulTest5) {
         auto matmul = std::make_shared<ngraph::opset1::MatMul>(input1, input2, false, true);
 
         f = std::make_shared<ngraph::Function>(ngraph::NodeVector{matmul}, ngraph::ParameterVector{input1});
+        ngraph::pass::InitNodeInfo().run_on_function(f);
         ngraph::pass::ConvertMatMulToFCorGemm().run_on_function(f);
+        ASSERT_NO_THROW(check_rt_info(f));
     }
 
     {
@@ -151,7 +163,7 @@ TEST_F(ConvertMatMulTests, ConvertMatMulTest5) {
     ASSERT_TRUE(res.first) << res.second;
 }
 
-TEST_F(ConvertMatMulTests, ConvertMatMulTest6) {
+TEST(TransformationTests, ConvertMatMulTest6) {
     std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
     {
         auto input1 = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{3, 2, 2});
@@ -168,7 +180,7 @@ TEST_F(ConvertMatMulTests, ConvertMatMulTest6) {
         auto input2 = ngraph::opset1::Constant::create(ngraph::element::f32, ngraph::Shape{2, 2}, {1});
         auto input3 = ngraph::opset1::Constant::create(ngraph::element::f32, ngraph::Shape{2}, {1});
         auto reshape_begin = ngraph::op::util::reshapeTo(input1, ngraph::Shape{6, 2});
-        auto fc= std::make_shared<ngraph::op::FullyConnected>(reshape_begin, input2, input3, ngraph::Shape{6, 2});
+        auto fc = std::make_shared<ngraph::op::FullyConnected>(reshape_begin, input2, input3, ngraph::Shape{6, 2});
         auto reshape_end = ngraph::op::util::reshapeTo(fc, ngraph::Shape{3, 2, 2});
 
         f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{reshape_end}, ngraph::ParameterVector{input1});
@@ -178,7 +190,7 @@ TEST_F(ConvertMatMulTests, ConvertMatMulTest6) {
     ASSERT_TRUE(res.first) << res.second;
 }
 
-TEST_F(ConvertMatMulTests, ConvertMatMulTest7) {
+TEST(TransformationTests, ConvertMatMulTest7) {
     std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
     {
         auto input1 = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{3, 2, 2});
@@ -186,6 +198,8 @@ TEST_F(ConvertMatMulTests, ConvertMatMulTest7) {
         auto matmul = std::make_shared<ngraph::opset1::MatMul>(input1, input2, false, true);
 
         f = std::make_shared<ngraph::Function>(ngraph::NodeVector{matmul}, ngraph::ParameterVector{input1});
+
+        ngraph::pass::InitNodeInfo().run_on_function(f);
         ngraph::pass::ConvertMatMulToFCorGemm().run_on_function(f);
 
         auto callback = [](const std::shared_ptr<const ngraph::Node> & node) -> bool {
@@ -199,7 +213,7 @@ TEST_F(ConvertMatMulTests, ConvertMatMulTest7) {
         auto p = ngraph::pass::ReshapeFullyConnected();
         p.setCallback(callback);
         p.run_on_function(f);
-
+        ASSERT_NO_THROW(check_rt_info(f));
     }
 
     {
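
The ConvertMatMulTest7 hunk above is the only place in this change that drives a pass through its setCallback() hook: ReshapeFullyConnected consults the supplied predicate before rewriting a node. Below is a minimal standalone sketch of that control flow; the class is an illustrative stand-in, not the real pass, and the polarity of the callback (true meaning "skip this node") is an assumption based on how the test uses it.

    #include <functional>
    #include <iostream>
    #include <string>
    #include <utility>

    // Illustrative stand-in for a transformation pass with a setCallback() hook,
    // mirroring the ReshapeFullyConnected usage in ConvertMatMulTest7 above.
    class CallbackPass {
    public:
        using Predicate = std::function<bool(const std::string&)>;
        void setCallback(Predicate cb) { m_cb = std::move(cb); }
        void run_on(const std::string& node_type) {
            if (m_cb && m_cb(node_type)) {  // assumption: true means "skip"
                std::cout << node_type << ": left untouched by callback\n";
                return;
            }
            std::cout << node_type << ": transformed\n";
        }
    private:
        Predicate m_cb;
    };

    int main() {
        CallbackPass p;
        p.setCallback([](const std::string& t) { return t == "FullyConnected"; });
        p.run_on("FullyConnected");  // skipped, as the test's callback intends
        p.run_on("MatMul");          // transformed
        return 0;
    }
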
@@ -4,7 +4,8 @@
 
 #include <gtest/gtest.h>
 
-#include "tests_common.hpp"
+#include "common_test_utils/test_common.hpp"
+
 #include <string>
 #include <sstream>
 #include <fstream>
 #include <transformations/convert_opset1_to_legacy/convert_strided_slice_to_crop.hpp>
 #include <ngraph/op/reshape.hpp>
 #include <transformations/utils/utils.hpp>
+#include <transformations/init_node_info.hpp>
 
 #include "ngraph_test_utils.hpp"
 
 using namespace testing;
 
-class ConvertStridedSliceToCropTests : public TestsCommon {};
-
-TEST_F(ConvertStridedSliceToCropTests, ConvertStridedSliceToCropTests1) {
+TEST(TransformationTests, ConvertStridedSliceToCropTests1) {
     std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
     {
         auto input        = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 2, 384, 640});
@@ -48,7 +48,9 @@ TEST_F(ConvertStridedSliceToCropTests, ConvertStridedSliceToCropTests1) {
         sslice->set_friendly_name("strided_slice");
 
         f = std::make_shared<ngraph::Function>(ngraph::NodeVector{sslice}, ngraph::ParameterVector{input});
+        ngraph::pass::InitNodeInfo().run_on_function(f);
         ngraph::pass::ConvertStridedSliceToCrop().run_on_function(f);
+        ASSERT_NO_THROW(check_rt_info(f));
     }
 
     {
@@ -78,7 +80,7 @@ TEST_F(ConvertStridedSliceToCropTests, ConvertStridedSliceToCropTests1) {
     ASSERT_TRUE(names_are_correct) << "Transformation ConvertStridedSliceToCrop should keep output names.\n";
 }
 
-TEST_F(ConvertStridedSliceToCropTests, ConvertStridedSliceToCropTests2) {
+TEST(TransformationTests, ConvertStridedSliceToCropTests2) {
     std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
     {
         auto input        = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 2, 384, 640});
@@ -98,7 +100,9 @@ TEST_F(ConvertStridedSliceToCropTests, ConvertStridedSliceToCropTests2) {
         sslice->set_friendly_name("strided_slice");
 
         f = std::make_shared<ngraph::Function>(ngraph::NodeVector{sslice}, ngraph::ParameterVector{input});
+        ngraph::pass::InitNodeInfo().run_on_function(f);
         ngraph::pass::ConvertStridedSliceToCrop().run_on_function(f);
+        ASSERT_NO_THROW(check_rt_info(f));
     }
 
     {
@@ -4,7 +4,7 @@
 
 #include <gtest/gtest.h>
 
-#include "tests_common.hpp"
+#include "common_test_utils/test_common.hpp"
 #include <string>
 #include <sstream>
 #include <fstream>
 #include <ngraph_ops/fully_connected.hpp>
 #include <transformations/convert_opset1_to_legacy/fc_bias_fusion.hpp>
 #include <transformations/utils/utils.hpp>
+#include <transformations/init_node_info.hpp>
 
 #include "ngraph_test_utils.hpp"
 
 using namespace testing;
 
-using FCBiasFusionTests = TestsCommon;
-
-TEST_F(FCBiasFusionTests, FullyConnectedBiasFusionTest3D) {
+TEST(TransformationTests, FullyConnectedBiasFusionTest3D) {
     std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
     {
         auto input1 = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 128, 3072});
@@ -37,7 +36,9 @@ TEST_F(FCBiasFusionTests, FullyConnectedBiasFusionTest3D) {
         auto add = std::make_shared<ngraph::opset1::Add>(fc, const_bias);
 
         f = std::make_shared<ngraph::Function>(ngraph::NodeVector{add}, ngraph::ParameterVector{input1});
+        ngraph::pass::InitNodeInfo().run_on_function(f);
         ngraph::pass::FullyConnectedBiasFusion().run_on_function(f);
+        ASSERT_NO_THROW(check_rt_info(f));
         ngraph::pass::ConstantFolding().run_on_function(f);
     }
 
@@ -54,7 +55,7 @@ TEST_F(FCBiasFusionTests, FullyConnectedBiasFusionTest3D) {
     ASSERT_TRUE(res.first) << res.second;
 }
 
-TEST_F(FCBiasFusionTests, FullyConnectedBiasFusionTest2D) {
+TEST(TransformationTests, FullyConnectedBiasFusionTest2D) {
     std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
     {
         auto input1 = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 128});
@@ -66,7 +67,9 @@ TEST_F(FCBiasFusionTests, FullyConnectedBiasFusionTest2D) {
         auto add = std::make_shared<ngraph::opset1::Add>(fc, const_bias);
 
         f = std::make_shared<ngraph::Function>(ngraph::NodeVector{add}, ngraph::ParameterVector{input1});
+        ngraph::pass::InitNodeInfo().run_on_function(f);
         ngraph::pass::FullyConnectedBiasFusion().run_on_function(f);
+        ASSERT_NO_THROW(check_rt_info(f));
         ngraph::pass::ConstantFolding().run_on_function(f);
     }
 
@@ -4,7 +4,7 @@
 
 #include <gtest/gtest.h>
 
-#include "tests_common.hpp"
+#include "common_test_utils/test_common.hpp"
 #include <string>
 #include <sstream>
 #include <fstream>
 #include <ngraph_ops/convolution_ie.hpp>
 #include <ngraph/pass/constant_folding.hpp>
 #include <transformations/convert_opset1_to_legacy/reshape_1d_convolutions.hpp>
+#include <transformations/init_node_info.hpp>
+#include "ngraph_test_utils.hpp"
 
 using namespace testing;
 
-class ConvReshapeTests : public TestsCommon {};
-
-TEST_F(ConvReshapeTests , ConvReshapeTest1) {
+TEST(TransformationTests, ConvReshapeTest1) {
     auto input = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1, 3, 64}, {1});
     auto w = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{6, 3, 3/*OIW*/}, {1});
 
@@ -33,7 +33,9 @@ TEST_F(ConvReshapeTests , ConvReshapeTest1) {
         auto conv = std::make_shared<ngraph::op::ConvolutionIE>(input, w, strides, pads_begin, pads_end, dilations, output_shape, 1);
 
         f = std::make_shared<ngraph::Function>(ngraph::NodeVector{conv}, ngraph::ParameterVector{});
+        ngraph::pass::InitNodeInfo().run_on_function(f);
         ngraph::pass::Reshape1DConvolutions().run_on_function(f);
+        ASSERT_NO_THROW(check_rt_info(f));
         ngraph::pass::ConstantFolding().run_on_function(f);
     }
 
@@ -51,7 +53,7 @@ TEST_F(ConvReshapeTests , ConvReshapeTest1) {
     }
 }
 
-TEST_F(ConvReshapeTests , ConvBiasReshapeTest1) {
+TEST(TransformationTests, ConvBiasReshapeTest1) {
     auto input = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1, 3, 64}, {1});
     auto w = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{6, 3, 3/*OIW*/}, {1});
     auto b = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{6}, {1});
@@ -64,7 +66,9 @@ TEST_F(ConvReshapeTests , ConvBiasReshapeTest1) {
         auto conv = std::make_shared<ngraph::op::ConvolutionIE>(input, w, b, strides, pads_begin, pads_end, dilations, output_shape, 1);
 
         f = std::make_shared<ngraph::Function>(ngraph::NodeVector{conv}, ngraph::ParameterVector{});
+        ngraph::pass::InitNodeInfo().run_on_function(f);
         ngraph::pass::Reshape1DConvolutions().run_on_function(f);
+        ASSERT_NO_THROW(check_rt_info(f));
         ngraph::pass::ConstantFolding().run_on_function(f);
     }
 
@@ -4,7 +4,7 @@
 
 #include <gtest/gtest.h>
 
-#include "tests_common.hpp"
+#include "common_test_utils/test_common.hpp"
 #include <string>
 #include <sstream>
 #include <fstream>
 #include <ngraph/op/fused/space_to_depth.hpp>
 #include <transformations/convert_depth_to_space.hpp>
 #include <transformations/convert_space_to_depth.hpp>
+#include <transformations/init_node_info.hpp>
+#include "ngraph_test_utils.hpp"
 
 using namespace testing;
 
-class DepthAndSpaceTransformTests : public TestsCommon {};
-
-TEST_F(DepthAndSpaceTransformTests, TestDepthToSpaceTransformBlockFirst) {
+TEST(TransformationTests, TestDepthToSpaceTransformBlockFirst) {
     auto input = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{1, 12, 1080, 1616});
     std::shared_ptr<ngraph::Function> f(nullptr);
 
     {
         auto depth_to_space = std::make_shared<ngraph::op::DepthToSpace>(input, ngraph::op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 2);
         f = std::make_shared<ngraph::Function>(ngraph::NodeVector{depth_to_space}, ngraph::ParameterVector{input});
+        ngraph::pass::InitNodeInfo().run_on_function(f);
         ngraph::pass::ConvertDepthToSpace().run_on_function(f);
+        ASSERT_NO_THROW(check_rt_info(f));
     }
 
     auto consumers = input->output(0).get_target_inputs();
-    ASSERT_TRUE(consumers.size() == 1);
+    ASSERT_EQ(consumers.size(), 1);
 
     auto reshape_begin = consumers.begin()->get_node();
     auto shape_begin = std::dynamic_pointer_cast<ngraph::op::Constant>(reshape_begin->input(1).get_source_output().get_node_shared_ptr());
     std::vector<int64_t> shape_begin_value = shape_begin->get_vector<int64_t>();
     std::vector<int64_t> shape_begin_value_ref{1, 2, 2, 3, 1080, 1616};
-    compare(shape_begin_value, shape_begin_value_ref);
+    ASSERT_EQ(shape_begin_value, shape_begin_value_ref);
 
     consumers = reshape_begin->output(0).get_target_inputs();
-    ASSERT_TRUE(consumers.size() == 1);
+    ASSERT_EQ(consumers.size(), 1);
 
     auto transpose = consumers.begin()->get_node();
     auto order = std::dynamic_pointer_cast<ngraph::op::Constant>(transpose->input(1).get_source_output().get_node_shared_ptr());
     std::vector<int64_t> order_value = order->get_vector<int64_t>();
     std::vector<int64_t> order_value_ref{0, 3, 4, 1, 5, 2};
-    compare(order_value, order_value_ref);
+    ASSERT_EQ(order_value, order_value_ref);
 
     consumers = transpose->output(0).get_target_inputs();
     auto reshape_end = consumers.begin()->get_node();
     auto shape_end = std::dynamic_pointer_cast<ngraph::op::Constant>(reshape_end->input(1).get_source_output().get_node_shared_ptr());
     std::vector<int64_t> shape_end_value = shape_end->get_vector<int64_t>();
     std::vector<int64_t> shape_end_value_ref{1, 3, 2 * 1080, 2 * 1616};
-    compare(shape_end_value, shape_end_value_ref);
+    ASSERT_EQ(shape_end_value, shape_end_value_ref);
 }
 
-TEST_F(DepthAndSpaceTransformTests, TestDepthToSpaceTransformDepthFirst) {
+TEST(TransformationTests, TestDepthToSpaceTransformDepthFirst) {
     auto input = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{1, 12, 1080, 1616});
     std::shared_ptr<ngraph::Function> f(nullptr);
 
     {
         auto depth_to_space = std::make_shared<ngraph::op::DepthToSpace>(input, ngraph::op::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, 2);
         f = std::make_shared<ngraph::Function>(ngraph::NodeVector{depth_to_space}, ngraph::ParameterVector{input});
+        ngraph::pass::InitNodeInfo().run_on_function(f);
         ngraph::pass::ConvertDepthToSpace().run_on_function(f);
+        ASSERT_NO_THROW(check_rt_info(f));
     }
 
     auto consumers = input->output(0).get_target_inputs();
-    ASSERT_TRUE(consumers.size() == 1);
+    ASSERT_EQ(consumers.size(), 1);
 
     auto reshape_begin = consumers.begin()->get_node();
     auto shape_begin = std::dynamic_pointer_cast<ngraph::op::Constant>(reshape_begin->input(1).get_source_output().get_node_shared_ptr());
     std::vector<int64_t> shape_begin_value = shape_begin->get_vector<int64_t>();
     std::vector<int64_t> shape_begin_value_ref{1, 3, 2, 2, 1080, 1616};
-    compare(shape_begin_value, shape_begin_value_ref);
+    ASSERT_EQ(shape_begin_value, shape_begin_value_ref);
 
     consumers = reshape_begin->output(0).get_target_inputs();
-    ASSERT_TRUE(consumers.size() == 1);
+    ASSERT_EQ(consumers.size(), 1);
 
     auto transpose = consumers.begin()->get_node();
     auto order = std::dynamic_pointer_cast<ngraph::op::Constant>(transpose->input(1).get_source_output().get_node_shared_ptr());
     std::vector<int64_t> order_value = order->get_vector<int64_t>();
     std::vector<int64_t> order_value_ref{0, 1, 4, 2, 5, 3};
-    compare(order_value, order_value_ref);
+    ASSERT_EQ(order_value, order_value_ref);
 
     consumers = transpose->output(0).get_target_inputs();
     auto reshape_end = consumers.begin()->get_node();
     auto shape_end = std::dynamic_pointer_cast<ngraph::op::Constant>(reshape_end->input(1).get_source_output().get_node_shared_ptr());
     std::vector<int64_t> shape_end_value = shape_end->get_vector<int64_t>();
     std::vector<int64_t> shape_end_value_ref{1, 3, 2 * 1080, 2 * 1616};
-    compare(shape_end_value, shape_end_value_ref);
+    ASSERT_EQ(shape_end_value, shape_end_value_ref);
 }
 
-TEST_F(DepthAndSpaceTransformTests, TestSpaceToDepthTransformBlockFirst) {
+TEST(TransformationTests, TestSpaceToDepthTransformBlockFirst) {
     auto input = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{1, 12, 1080, 1616});
     std::shared_ptr<ngraph::Function> f(nullptr);
 
     {
         auto space_to_depth = std::make_shared<ngraph::op::SpaceToDepth>(input, ngraph::op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, 2);
         f = std::make_shared<ngraph::Function>(ngraph::NodeVector{space_to_depth}, ngraph::ParameterVector{input});
+        ngraph::pass::InitNodeInfo().run_on_function(f);
         ngraph::pass::ConvertSpaceToDepth().run_on_function(f);
+        ASSERT_NO_THROW(check_rt_info(f));
     }
 
     auto consumers = input->output(0).get_target_inputs();
-    ASSERT_TRUE(consumers.size() == 1);
+    ASSERT_EQ(consumers.size(), 1);
 
     auto reshape_begin = consumers.begin()->get_node();
     auto shape_begin = std::dynamic_pointer_cast<ngraph::op::Constant>(reshape_begin->input(1).get_source_output().get_node_shared_ptr());
     std::vector<int64_t> shape_begin_value = shape_begin->get_vector<int64_t>();
     std::vector<int64_t> shape_begin_value_ref{1, 12, 1080 / 2, 2, 1616 / 2, 2};
-    compare(shape_begin_value, shape_begin_value_ref);
+    ASSERT_EQ(shape_begin_value, shape_begin_value_ref);
 
     consumers = reshape_begin->output(0).get_target_inputs();
-    ASSERT_TRUE(consumers.size() == 1);
+    ASSERT_EQ(consumers.size(), 1);
 
     auto transpose = consumers.begin()->get_node();
     auto order = std::dynamic_pointer_cast<ngraph::op::Constant>(transpose->input(1).get_source_output().get_node_shared_ptr());
     std::vector<int64_t> order_value = order->get_vector<int64_t>();
     std::vector<int64_t> order_value_ref{0, 3, 5, 1, 2, 4};
-    compare(order_value, order_value_ref);
+    ASSERT_EQ(order_value, order_value_ref);
 
     consumers = transpose->output(0).get_target_inputs();
     auto reshape_end = consumers.begin()->get_node();
     auto shape_end = std::dynamic_pointer_cast<ngraph::op::Constant>(reshape_end->input(1).get_source_output().get_node_shared_ptr());
     std::vector<int64_t> shape_end_value = shape_end->get_vector<int64_t>();
     std::vector<int64_t> shape_end_value_ref{1, 12 * 4, 1080 / 2, 1616 / 2};
-    compare(shape_end_value, shape_end_value_ref);
+    ASSERT_EQ(shape_end_value, shape_end_value_ref);
 }
 
-TEST_F(DepthAndSpaceTransformTests, TestSpaceToDepthTransformDepthFirst) {
+TEST(TransformationTests, TestSpaceToDepthTransformDepthFirst) {
     auto input = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{1, 12, 1080, 1616});
     std::shared_ptr<ngraph::Function> f(nullptr);
 
     {
         auto space_to_depth = std::make_shared<ngraph::op::SpaceToDepth>(input, ngraph::op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST, 2);
         f = std::make_shared<ngraph::Function>(ngraph::NodeVector{space_to_depth}, ngraph::ParameterVector{input});
+        ngraph::pass::InitNodeInfo().run_on_function(f);
         ngraph::pass::ConvertSpaceToDepth().run_on_function(f);
+        ASSERT_NO_THROW(check_rt_info(f));
     }
 
     auto consumers = input->output(0).get_target_inputs();
-    ASSERT_TRUE(consumers.size() == 1);
+    ASSERT_EQ(consumers.size(), 1);
 
     auto reshape_begin = consumers.begin()->get_node();
     auto shape_begin = std::dynamic_pointer_cast<ngraph::op::Constant>(reshape_begin->input(1).get_source_output().get_node_shared_ptr());
     std::vector<int64_t> shape_begin_value = shape_begin->get_vector<int64_t>();
     std::vector<int64_t> shape_begin_value_ref{1, 12, 1080 / 2, 2, 1616 / 2, 2};
-    compare(shape_begin_value, shape_begin_value_ref);
+    ASSERT_EQ(shape_begin_value, shape_begin_value_ref);
 
     consumers = reshape_begin->output(0).get_target_inputs();
-    ASSERT_TRUE(consumers.size() == 1);
+    ASSERT_EQ(consumers.size(), 1);
 
     auto transpose = consumers.begin()->get_node();
     auto order = std::dynamic_pointer_cast<ngraph::op::Constant>(transpose->input(1).get_source_output().get_node_shared_ptr());
     std::vector<int64_t> order_value = order->get_vector<int64_t>();
     std::vector<int64_t> order_value_ref{0, 1, 3, 5, 2, 4};
-    compare(order_value, order_value_ref);
+    ASSERT_EQ(order_value, order_value_ref);
 
     consumers = transpose->output(0).get_target_inputs();
     auto reshape_end = consumers.begin()->get_node();
     auto shape_end = std::dynamic_pointer_cast<ngraph::op::Constant>(reshape_end->input(1).get_source_output().get_node_shared_ptr());
     std::vector<int64_t> shape_end_value = shape_end->get_vector<int64_t>();
     std::vector<int64_t> shape_end_value_ref{1, 12 * 4, 1080 / 2, 1616 / 2};
-    compare(shape_end_value, shape_end_value_ref);
+    ASSERT_EQ(shape_end_value, shape_end_value_ref);
 }
\ No newline at end of file
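
The shape constants asserted in these tests follow mechanically from the block size. Here is a self-contained sketch, with no nGraph dependency, that re-derives the BLOCKS_FIRST DepthToSpace shapes for the 1x12x1080x1616 input used above:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
        const int64_t n = 1, c = 12, h = 1080, w = 1616, b = 2;  // b: block size
        // Step 1: split C into b*b blocks -> {N, b, b, C/(b*b), H, W}.
        const std::vector<int64_t> shape_begin{n, b, b, c / (b * b), h, w};
        assert((shape_begin == std::vector<int64_t>{1, 2, 2, 3, 1080, 1616}));
        // Step 2: transpose so each block lands next to its spatial axis
        // (order {0, 3, 4, 1, 5, 2} for BLOCKS_FIRST, as checked above).
        // Step 3: collapse back to NCHW with upscaled spatial dimensions.
        const std::vector<int64_t> shape_end{n, c / (b * b), h * b, w * b};
        assert((shape_end == std::vector<int64_t>{1, 3, 2 * 1080, 2 * 1616}));
        return 0;
    }
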
@@ -4,7 +4,7 @@
 
 #include <gtest/gtest.h>
 
-#include "tests_common.hpp"
+#include "common_test_utils/test_common.hpp"
 #include <string>
 #include <sstream>
 #include <fstream>
 #include <ngraph/op/fused/fake_quantize.hpp>
 #include <transformations/pull_transpose_through_fq.hpp>
 #include <ngraph/pass/constant_folding.hpp>
+#include <transformations/init_node_info.hpp>
+#include "ngraph_test_utils.hpp"
 
 using namespace testing;
 
-class FQTransposeTests : public TestsCommon {};
-
-TEST_F(FQTransposeTests, FQTransposeTest1) {
+TEST(TransformationTests, FQTransposeTest1) {
     auto data1 = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1, 1, 3}, {1, 2, 3});
     auto data2 = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{3}, {1, 2, 3});
     auto data3 = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1, 3}, {1, 2, 3});
@@ -36,7 +36,9 @@ TEST_F(FQTransposeTests, FQTransposeTest1) {
         auto transpose = std::make_shared<ngraph::op::Transpose>(fq, transpose_order);
 
         f = std::make_shared<ngraph::Function>(ngraph::NodeVector{transpose}, ngraph::ParameterVector{});
+        ngraph::pass::InitNodeInfo().run_on_function(f);
         ngraph::pass::PullTransposeThroughFQUp().run_on_function(f);
+        ASSERT_NO_THROW(check_rt_info(f));
         ngraph::pass::ConstantFolding().run_on_function(f);
     }
     std::vector<size_t> ref_shape{1, 3, 1};
@@ -4,7 +4,7 @@
 
 #include <gtest/gtest.h>
 
-#include "tests_common.hpp"
+#include "common_test_utils/test_common.hpp"
 #include <string>
 #include <sstream>
 #include <fstream>
 #include <ngraph/op/fused/mod.hpp>
 #include <transformations/convert_mod.hpp>
 #include <ngraph/pass/constant_folding.hpp>
+#include <transformations/init_node_info.hpp>
+#include "ngraph_test_utils.hpp"
 
 using namespace testing;
 
-class ModDecompositionTests : public TestsCommon {};
-
-TEST_F(ModDecompositionTests, Test1) {
+TEST(TransformationTests, ModDecompositionTests) {
     auto data1 = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1, 1, 3}, {1, 2, 3});
     auto data2 = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{3}, {1, 2, 3});
 
@@ -30,7 +30,9 @@ TEST_F(ModDecompositionTests, Test1) {
         auto mod = std::make_shared<ngraph::op::v1::Mod>(data1, data2);
 
         f = std::make_shared<ngraph::Function>(ngraph::NodeVector{mod}, ngraph::ParameterVector{});
+        ngraph::pass::InitNodeInfo().run_on_function(f);
         ngraph::pass::ConvertMod().run_on_function(f);
+        ASSERT_NO_THROW(check_rt_info(f));
     }
     ASSERT_EQ(f->get_ops().size(), 12);
 }
@@ -53,4 +53,25 @@ std::pair<bool, std::string> compare_functions(const std::shared_ptr<ngraph::Fun
         }
     }
     return {true, ""};
+}
+
+void check_rt_info(const std::shared_ptr<ngraph::Function> & f) {
+    static const std::vector<std::string> attrs_to_check{"Variant::RuntimeAttribute::FusedNames"};
+
+    std::ostringstream err_log;
+    for (auto & op : f->get_ops()) {
+        if (op->is_constant()) continue;
+
+        const auto & rt_info = op->get_rt_info();
+        for (const auto & attr_name : attrs_to_check) {
+            if (!rt_info.count(attr_name)) {
+                err_log << "Node: " << op->get_friendly_name() << " has no attribute: " << attr_name << std::endl;
+            }
+        }
+    }
+
+    auto err_msg = err_log.str();
+    if (!err_msg.empty()) {
+        throw ngraph::ngraph_error(err_msg);
+    }
 }
\ No newline at end of file
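
check_rt_info() throws for any non-constant node missing the FusedNames attribute that InitNodeInfo seeds, so a transformation only keeps these tests green if it forwards runtime info onto the nodes it creates. A minimal sketch of that contract follows; it assumes the ngraph::copy_runtime_info() and ngraph::replace_node() helpers and their header locations, which may differ in this snapshot.

    #include <memory>
    #include <ngraph/graph_util.hpp>  // ngraph::replace_node (assumed location)
    #include <ngraph/node.hpp>
    #include <ngraph/rt_info.hpp>     // ngraph::copy_runtime_info (assumed location)

    // Inside a matcher callback: forward rt_info (including FusedNames) from the
    // node being folded away onto its replacement, then rewire the graph.
    void replace_keeping_rt_info(const std::shared_ptr<ngraph::Node>& old_node,
                                 const std::shared_ptr<ngraph::Node>& new_node) {
        ngraph::copy_runtime_info(old_node, new_node);  // keeps check_rt_info() passing
        ngraph::replace_node(old_node, new_node);
    }
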
@@ -8,4 +8,10 @@
 
 #include <ngraph/function.hpp>
 
-std::pair<bool, std::string> compare_functions(const std::shared_ptr<ngraph::Function> & f1, const std::shared_ptr<ngraph::Function> & f2);
\ No newline at end of file
+#include "common_test_utils/test_common.hpp"
+
+using TransformationTests = CommonTestUtils::TestsCommon;
+
+std::pair<bool, std::string> compare_functions(const std::shared_ptr<ngraph::Function> & f1, const std::shared_ptr<ngraph::Function> & f2);
+
+void check_rt_info(const std::shared_ptr<ngraph::Function> & f);
\ No newline at end of file
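
With the TransformationTests alias above, every migrated test in this change converges on one skeleton. A condensed template of that pattern follows; SomePatternName and SomeTransformation are placeholders, and the includes are the ones these test files already pull in.

    TEST(TransformationTests, SomePatternName) {  // placeholder test name
        std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
        {
            // ... build the graph under test into f ...
            ngraph::pass::InitNodeInfo().run_on_function(f);   // seed FusedNames
            // ngraph::pass::SomeTransformation().run_on_function(f);
            ASSERT_NO_THROW(check_rt_info(f));                 // rt_info preserved
        }
        {
            // ... build the expected graph into f_ref ...
        }
        auto res = compare_functions(f, f_ref);
        ASSERT_TRUE(res.first) << res.second;
    }
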
@@ -4,7 +4,7 @@
 
 #include <gtest/gtest.h>
 
-#include "tests_common.hpp"
+#include "common_test_utils/test_common.hpp"
 #include <string>
 #include <sstream>
 #include <fstream>
 #include <ngraph/pass/constant_folding.hpp>
 #include <ngraph_ops/fully_connected.hpp>
 #include <transformations/convert_opset1_to_legacy/reshape_fc_fusion.hpp>
+#include <transformations/init_node_info.hpp>
+#include "ngraph_test_utils.hpp"
 
 using namespace testing;
 
-class ReshapeFullyConnectedFusionTests : public TestsCommon {};
-
-TEST_F(ReshapeFullyConnectedFusionTests, ReshapeFCFusiuonTest1) {
+TEST(TransformationTests, ReshapeFCFusionTest1) {
     std::shared_ptr<ngraph::Function> f(nullptr);
     {
         auto input = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64}, {1});
@@ -34,12 +34,14 @@ TEST_F(ReshapeFullyConnectedFusionTests, ReshapeFCFusiuonTest1) {
         auto fc = std::make_shared<ngraph::op::FullyConnected>(reshape, fc_weights, fc_biases, ngraph::Shape{1, 6});
 
         f = std::make_shared<ngraph::Function>(ngraph::NodeVector{fc}, ngraph::ParameterVector{});
+        ngraph::pass::InitNodeInfo().run_on_function(f);
         ngraph::pass::ReshapeFullyConnectedFusion().run_on_function(f);
+        ASSERT_NO_THROW(check_rt_info(f));
     }
     ASSERT_EQ(f->get_ops().size(), 5);
 }
 
-TEST_F(ReshapeFullyConnectedFusionTests, ReshapeFCFusiuonTest2) {
+TEST(TransformationTests, ReshapeFCFusionTest2) {
     std::shared_ptr<ngraph::Function> f(nullptr);
     {
         auto input = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64}, {1});
@@ -51,12 +53,14 @@ TEST_F(ReshapeFullyConnectedFusionTests, ReshapeFCFusiuonTest2) {
         auto fc = std::make_shared<ngraph::op::FullyConnected>(reshape, fc_weights, fc_biases, ngraph::Shape{1, 6});
 
         f = std::make_shared<ngraph::Function>(ngraph::NodeVector{fc}, ngraph::ParameterVector{});
+        ngraph::pass::InitNodeInfo().run_on_function(f);
         ngraph::pass::ReshapeFullyConnectedFusion().run_on_function(f);
+        ASSERT_NO_THROW(check_rt_info(f));
     }
     ASSERT_EQ(f->get_ops().size(), 5);
 }
 
-TEST_F(ReshapeFullyConnectedFusionTests, ReshapeFCFusiuonTest3) {
+TEST(TransformationTests, ReshapeFCFusionTest3) {
     std::shared_ptr<ngraph::Function> f(nullptr);
     {
         auto input = ngraph::op::Constant::create(ngraph::element::f32, ngraph::Shape{2, 3, 64, 64}, {1});
@@ -68,7 +72,9 @@ TEST_F(ReshapeFullyConnectedFusionTests, ReshapeFCFusiuonTest3) {
         auto fc = std::make_shared<ngraph::op::FullyConnected>(reshape, fc_weights, fc_biases, ngraph::Shape{2, 6});
 
         f = std::make_shared<ngraph::Function>(ngraph::NodeVector{fc}, ngraph::ParameterVector{});
+        ngraph::pass::InitNodeInfo().run_on_function(f);
         ngraph::pass::ReshapeFullyConnectedFusion().run_on_function(f);
+        ASSERT_NO_THROW(check_rt_info(f));
     }
     ASSERT_EQ(f->get_ops().size(), 7);
 }
index 01e2519..53532c5 100644
@@ -163,7 +163,7 @@ typedef std::tuple<
  *
  * In the 3rd stage, do not forget the bfloat16 prefix!
  */
-class BasicBF16Test : public LayerTestsUtils::LayerTestsCommonClass<basicParams> {
+class BasicBF16Test : public LayerTestsUtils::LayerTestsCommonDeprecated<basicParams> {
 protected:
     virtual std::shared_ptr<ngraph::Function> createGraph(InferenceEngine::Precision netPrecision) = 0;
 
index d3b92af..296c5ec 100644
@@ -10,9 +10,6 @@
 using namespace LayerTestsDefinitions;
 
 namespace {
-const std::vector<InferenceEngine::Precision> inputPrecisions = {
-        InferenceEngine::Precision::FP32
-};
 
 const std::vector<InferenceEngine::Precision> netPrecisions = {
         InferenceEngine::Precision::FP32
@@ -20,7 +17,6 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
 
 INSTANTIATE_TEST_CASE_P(NoReshape, ExecGraphUniqueNodeNames,
                         ::testing::Combine(
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(InferenceEngine::SizeVector({1, 2, 5, 5})),
                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/ngraph_conversion_tests/conv_bias_fusion.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/ngraph_conversion_tests/conv_bias_fusion.cpp
new file mode 100644
index 0000000..555252b
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include "ngraph_conversion_tests/conv_bias_fusion.hpp"
+
+using namespace NGraphConversionTestsDefinitions;
+
+namespace {
+
+INSTANTIATE_TEST_CASE_P(Basic, ConvBiasFusion, ::testing::Values("CPU"), ConvBiasFusion::getTestCaseName);
+
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/other_tests/add_output.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/other_tests/add_output.cpp
new file mode 100644
index 0000000..008d758
--- /dev/null
@@ -0,0 +1,16 @@
+// Copyright (C) 2020 Intel Corporation
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "other/add_output.hpp"
+
+const auto addOutputParams =
+    ::testing::Combine(::testing::Values("Memory_1"), ::testing::Values(CommonTestUtils::DEVICE_CPU));
+
+INSTANTIATE_TEST_CASE_P(AddOutputBasic, AddOutputTestsCommonClass, addOutputParams,
+                        AddOutputTestsCommonClass::getTestCaseName);
+
+TEST_P(AddOutputTestsCommonClass, basic) {
+    run_test();
+}
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/add.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/add.cpp
new file mode 100644
index 0000000..744bfce
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <cstddef>
+#include <vector>
+#include <map>
+
+#include "single_layer_tests/add.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+        InferenceEngine::Precision::FP32,
+        InferenceEngine::Precision::FP16
+};
+
+const std::vector<std::vector<std::size_t>> inputShapes = {
+        {std::vector<std::size_t>({1, 30}), std::vector<std::size_t>({1, 30})}
+};
+
+INSTANTIATE_TEST_CASE_P(CompareWithRefs, AddLayerTest,
+        ::testing::Combine(
+                ::testing::ValuesIn(netPrecisions),
+                ::testing::Values(inputShapes),
+                ::testing::Values(CommonTestUtils::DEVICE_CPU),
+                ::testing::Values(std::map<std::string, std::string>({}))),
+                AddLayerTest::getTestCaseName);
+
+}  // namespace
index 3ea47a2..1865952 100644
@@ -13,20 +13,15 @@ namespace {
 
 batchToSpaceParamsTuple bts_only_test_cases[] = {
         batchToSpaceParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, {4, 1, 1, 1},
-                                InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32,
-                                CommonTestUtils::DEVICE_CPU),
+                                InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_CPU),
         batchToSpaceParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, {4, 3, 1, 1},
-                                InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32,
-                                CommonTestUtils::DEVICE_CPU),
+                                InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_CPU),
         batchToSpaceParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, {4, 1, 2, 2},
-                                InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32,
-                                CommonTestUtils::DEVICE_CPU),
+                                InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_CPU),
         batchToSpaceParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, {8, 1, 1, 2},
-                                InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32,
-                                CommonTestUtils::DEVICE_CPU),
+                                InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_CPU),
         batchToSpaceParamsTuple({1, 1, 3, 2, 2}, {0, 0, 1, 0, 3}, {0, 0, 2, 0, 0}, {12, 1, 2, 1, 2},
-                                InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32,
-                                CommonTestUtils::DEVICE_CPU),
+                                InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_CPU),
 };
 
 INSTANTIATE_TEST_CASE_P(smoke_MKLDNN, BatchToSpaceLayerTest, ::testing::ValuesIn(bts_only_test_cases),
index 48d0a95..314953b 100644
@@ -20,21 +20,14 @@ std::vector<std::vector<std::vector<size_t>>> inShapes = {
         {{10, 10, 10, 10}, {10, 10, 10, 10}, {10, 10, 10, 10}, {10, 10, 10, 10}, {10, 10, 10, 10}}
 };
 
-std::vector<InferenceEngine::Precision> inputPrecisions = {InferenceEngine::Precision::FP32,
-//         InferenceEngine::Precision::U8, // TODO: Preferable primitive descriptor is not set.
-//         InferenceEngine::Precision::I8  // TODO: Preferable primitive descriptor is not set.
-};
-
 
 std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
                                                          InferenceEngine::Precision::FP16};
 
-
 INSTANTIATE_TEST_CASE_P(NoReshape, ConcatLayerTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(axes),
                                 ::testing::ValuesIn(inShapes),
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
                         ConcatLayerTest::getTestCaseName);
index 7d8b5f5..7c9d83c 100644
@@ -11,14 +11,6 @@ using namespace LayerTestsDefinitions;
 
 namespace {
 
-// Common params
-const std::vector<InferenceEngine::Precision> inputPrecisions = {
-        InferenceEngine::Precision::FP32,
-//         InferenceEngine::Precision::FP16, // "[NOT_IMPLEMENTED] Input image format FP16 is not supported yet...
-        InferenceEngine::Precision::U8,
-//         InferenceEngine::Precision::I8 // Too much cases
-};
-
 const std::vector<InferenceEngine::Precision> netPrecisions = {
         InferenceEngine::Precision::FP32,
         InferenceEngine::Precision::FP16
@@ -35,7 +27,7 @@ const std::vector<std::vector<ptrdiff_t>> padEnds = {{0, 0},
                                                      {0, 3}};
 const std::vector<std::vector<size_t >> dilations = {{1, 1},
                                                             {3, 1}};
-const std::vector<size_t> numOutCannels = {1, 5};
+const std::vector<size_t> numOutChannels = {1, 5};
 const std::vector<ngraph::op::PadType> padTypes = {
         ngraph::op::PadType::EXPLICIT,
         ngraph::op::PadType::VALID
@@ -47,7 +39,7 @@ const auto conv2DParams_ExplicitPadding = ::testing::Combine(
         ::testing::ValuesIn(padBegins),
         ::testing::ValuesIn(padEnds),
         ::testing::ValuesIn(dilations),
-        ::testing::ValuesIn(numOutCannels),
+        ::testing::ValuesIn(numOutChannels),
         ::testing::Values(ngraph::op::PadType::EXPLICIT)
 );
 const auto conv2DParams_AutoPadValid = ::testing::Combine(
@@ -56,14 +48,13 @@ const auto conv2DParams_AutoPadValid = ::testing::Combine(
         ::testing::Values(std::vector<ptrdiff_t>({0, 0})),
         ::testing::Values(std::vector<ptrdiff_t>({0, 0})),
         ::testing::ValuesIn(dilations),
-        ::testing::ValuesIn(numOutCannels),
+        ::testing::ValuesIn(numOutChannels),
         ::testing::Values(ngraph::op::PadType::VALID)
 );
 
 INSTANTIATE_TEST_CASE_P(Convolution2D_ExplicitPadding, ConvolutionLayerTest,
                         ::testing::Combine(
                                 conv2DParams_ExplicitPadding,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
@@ -72,7 +63,6 @@ INSTANTIATE_TEST_CASE_P(Convolution2D_ExplicitPadding, ConvolutionLayerTest,
 INSTANTIATE_TEST_CASE_P(Convolution2D_AutoPadValid, ConvolutionLayerTest,
                         ::testing::Combine(
                                 conv2DParams_AutoPadValid,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
@@ -110,7 +100,6 @@ const auto conv3DParams_AutoPadValid = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(Convolution3D_ExplicitPadding, ConvolutionLayerTest,
                         ::testing::Combine(
                                 conv3DParams_ExplicitPadding,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 10, 10, 10})),
                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
@@ -119,7 +108,6 @@ INSTANTIATE_TEST_CASE_P(Convolution3D_ExplicitPadding, ConvolutionLayerTest,
 INSTANTIATE_TEST_CASE_P(Convolution3D_AutoPadValid, ConvolutionLayerTest,
                         ::testing::Combine(
                                 conv3DParams_AutoPadValid,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 10, 10, 10})),
                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/group_convolution.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/group_convolution.cpp
new file mode 100644
index 0000000..752b8d6
--- /dev/null
@@ -0,0 +1,108 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "single_layer_tests/group_convolution.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+        InferenceEngine::Precision::FP32
+};
+
+/* ============= 2D GroupConvolution ============= */
+const std::vector<std::vector<size_t >> kernels = {{3, 3}};
+const std::vector<std::vector<size_t >> strides = {{1, 1}};
+const std::vector<std::vector<ptrdiff_t>> padBegins = {{0, 0}};
+const std::vector<std::vector<ptrdiff_t>> padEnds = {{0, 0}};
+const std::vector<std::vector<size_t >> dilations = {{1, 1}};
+const std::vector<size_t> numOutChannels = {8, 16};
+const std::vector<size_t> numGroups = {2, 8};
+
+const auto groupConv2DParams_ExplicitPadding = ::testing::Combine(
+        ::testing::ValuesIn(kernels),
+        ::testing::ValuesIn(strides),
+        ::testing::ValuesIn(padBegins),
+        ::testing::ValuesIn(padEnds),
+        ::testing::ValuesIn(dilations),
+        ::testing::ValuesIn(numOutChannels),
+        ::testing::ValuesIn(numGroups),
+        ::testing::Values(ngraph::op::PadType::EXPLICIT)
+);
+const auto groupConv2DParams_AutoPadValid = ::testing::Combine(
+        ::testing::ValuesIn(kernels),
+        ::testing::ValuesIn(strides),
+        ::testing::Values(std::vector<ptrdiff_t>({0, 0})),
+        ::testing::Values(std::vector<ptrdiff_t>({0, 0})),
+        ::testing::ValuesIn(dilations),
+        ::testing::ValuesIn(numOutChannels),
+        ::testing::ValuesIn(numGroups),
+        ::testing::Values(ngraph::op::PadType::VALID)
+);
+
+INSTANTIATE_TEST_CASE_P(GroupConvolution2D_ExplicitPadding, GroupConvolutionLayerTest,
+                        ::testing::Combine(
+                                groupConv2DParams_ExplicitPadding,
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values(std::vector<size_t >({1, 16, 30, 30})),
+                                ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                        GroupConvolutionLayerTest::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(GroupConvolution2D_AutoPadValid, GroupConvolutionLayerTest,
+                        ::testing::Combine(
+                                groupConv2DParams_AutoPadValid,
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values(std::vector<size_t >({1, 16, 30, 30})),
+                                ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                        GroupConvolutionLayerTest::getTestCaseName);
+
+/* ============= 3D GroupConvolution ============= */
+const std::vector<std::vector<size_t >> kernels3d = {{3, 3, 3}};
+const std::vector<std::vector<ptrdiff_t>> paddings3d = {{0, 0, 0}};
+
+const std::vector<std::vector<size_t >> strides3d = {{1, 1, 1}};
+const std::vector<std::vector<size_t >> dilations3d = {{1, 1, 1}};
+
+const auto groupConv3DParams_ExplicitPadding = ::testing::Combine(
+        ::testing::ValuesIn(kernels3d),
+        ::testing::ValuesIn(strides3d),
+        ::testing::ValuesIn(paddings3d),
+        ::testing::ValuesIn(paddings3d),
+        ::testing::ValuesIn(dilations3d),
+        ::testing::Values(4),
+        ::testing::Values(2),
+        ::testing::Values(ngraph::op::PadType::EXPLICIT)
+);
+const auto groupConv3DParams_AutoPadValid = ::testing::Combine(
+        ::testing::ValuesIn(kernels3d),
+        ::testing::ValuesIn(strides3d),
+        ::testing::Values(std::vector<ptrdiff_t>({0, 0, 0})),
+        ::testing::Values(std::vector<ptrdiff_t>({0, 0, 0})),
+        ::testing::ValuesIn(dilations3d),
+        ::testing::Values(4),
+        ::testing::Values(2),
+        ::testing::Values(ngraph::op::PadType::VALID)
+);
+
+INSTANTIATE_TEST_CASE_P(GroupConvolution3D_ExplicitPadding, GroupConvolutionLayerTest,
+                        ::testing::Combine(
+                                groupConv3DParams_ExplicitPadding,
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values(std::vector<size_t >({1, 4, 10, 10, 10})),
+                                ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                        GroupConvolutionLayerTest::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(GroupConvolution3D_AutoPadValid, GroupConvolutionLayerTest,
+                        ::testing::Combine(
+                                groupConv3DParams_AutoPadValid,
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values(std::vector<size_t >({1, 4, 10, 10, 10})),
+                                ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                        GroupConvolutionLayerTest::getTestCaseName);
+
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/lrn.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/lrn.cpp
new file mode 100644
index 0000000..8ba02c8
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright (C) 2020 Intel Corporation
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "single_layer_tests/lrn.hpp"
+
+#include <vector>
+
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+// Common params
+
+const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
+                                                               InferenceEngine::Precision::FP16};
+
+const double alpha = 9.9e-05;
+const size_t beta = 2;
+const size_t bias = 1;
+const size_t size = 5;
+
+INSTANTIATE_TEST_CASE_P(LrnCheck, LrnLayerTest,
+                        ::testing::Combine(::testing::Values(alpha),
+                                           ::testing::Values(beta),
+                                           ::testing::Values(bias),
+                                           ::testing::Values(size),
+                                           ::testing::ValuesIn(netPrecisions),
+                                           ::testing::Values(std::vector<size_t>({10, 10, 3, 2})),
+                                           ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                        LrnLayerTest::getTestCaseName);
+
+}  // namespace
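
For orientation, the four scalars above plug into the usual across-channel LRN definition: out[c] = in[c] / (bias + alpha/size * S)^beta, where S sums in[k]^2 over a window of 'size' channels centred on c. Below is a plain-C++ reference sketch of that formula, an illustration under the standard definition rather than the MKLDNN kernel:

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Reference across-channel LRN: out[c] = in[c] / (bias + alpha/size * S)^beta,
    // where S sums in[k]^2 over a window of `size` channels centred on c.
    std::vector<float> lrn_reference(const std::vector<float>& in, double alpha,
                                     double beta, double bias, std::size_t size) {
        const std::ptrdiff_t half = static_cast<std::ptrdiff_t>(size / 2);
        const std::ptrdiff_t c_count = static_cast<std::ptrdiff_t>(in.size());
        std::vector<float> out(in.size());
        for (std::ptrdiff_t c = 0; c < c_count; ++c) {
            double sum_sq = 0.0;
            for (std::ptrdiff_t k = c - half; k <= c + half; ++k)
                if (k >= 0 && k < c_count)
                    sum_sq += static_cast<double>(in[k]) * in[k];
            out[c] = static_cast<float>(
                in[c] / std::pow(bias + alpha / size * sum_sq, beta));
        }
        return out;
    }
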
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/multiply.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/multiply.cpp
new file mode 100644
index 0000000..70b3c60
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include "single_layer_tests/multiply.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+
+    std::vector<std::vector<std::vector<size_t>>> inShapes = {
+            {{2}},
+            {{1, 1, 1, 3}},
+            {{1, 2, 4}},
+            {{1, 4, 4}},
+            {{1, 4, 4, 1}},
+            {{1, 1, 1, 1, 1, 1, 3}},
+            {{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}}
+    };
+
+    std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
+                                                             InferenceEngine::Precision::FP16,
+    };
+
+    INSTANTIATE_TEST_CASE_P(multiply, MultiplyLayerTest,
+                            ::testing::Combine(
+                                    ::testing::ValuesIn(inShapes),
+                                    ::testing::ValuesIn(netPrecisions),
+                                    ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                            MultiplyLayerTest::getTestCaseName);
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/mvn.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/mvn.cpp
new file mode 100644
index 0000000..cd78920
--- /dev/null
@@ -0,0 +1,51 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "single_layer_tests/mvn.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+const std::vector<std::vector<size_t>> inputShapes = {
+    {1, 32, 17},
+    {1, 37, 9},
+    {1, 16, 5, 8},
+    {2, 19, 5, 10},
+    {7, 32, 2, 8},
+    {5, 8, 3, 5},
+    {4, 41, 6, 9},
+    {1, 32, 8, 1, 6},
+    {1, 9, 1, 15, 9},
+    {6, 64, 6, 1, 18},
+    {2, 31, 2, 9, 1},
+    {10, 16, 5, 10, 6}
+};
+
+const std::vector<bool> acrossChannels = {
+    true,
+    false
+};
+
+const std::vector<bool> normalizeVariance = {
+    true,
+    false
+};
+
+const std::vector<double> epsilon = {
+    0.000000001
+};
+
+const auto MvnCases = ::testing::Combine(
+    ::testing::ValuesIn(inputShapes),
+    ::testing::Values(InferenceEngine::Precision::FP32),
+    ::testing::ValuesIn(acrossChannels),
+    ::testing::ValuesIn(normalizeVariance),
+    ::testing::ValuesIn(epsilon),
+    ::testing::Values(CommonTestUtils::DEVICE_CPU)
+);
+
+INSTANTIATE_TEST_CASE_P(smoke_MKLDNN_TestsMVN, MvnLayerTest, MvnCases, MvnLayerTest::getTestCaseName);
+
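
The MVN instantiation above sweeps both layer switches plus epsilon. As a reference for what each combination computes per normalization slice (acrossChannels only changes which axes form the slice), here is a sketch under the usual MVN definition, mean subtraction with optional variance division, rather than the MKLDNN kernel:

    #include <cmath>
    #include <vector>

    // Reference MVN over one flattened slice: subtract the mean, and when
    // normalize_variance is set, divide by sqrt(variance + eps).
    void mvn_slice(std::vector<float>& x, bool normalize_variance, double eps) {
        double mean = 0.0;
        for (float v : x) mean += v;
        mean /= x.size();
        double var = 0.0;
        for (float v : x) var += (v - mean) * (v - mean);
        var /= x.size();
        const double denom = normalize_variance ? std::sqrt(var + eps) : 1.0;
        for (float& v : x) v = static_cast<float>((v - mean) / denom);
    }
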
index ec5e739..1968599 100644
@@ -49,7 +49,6 @@ const auto maxPool_ExplicitPad_FloorRounding_Params = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(MaxPool_ExplicitPad_FloorRounding, PoolingLayerTest,
                         ::testing::Combine(
                                 maxPool_ExplicitPad_FloorRounding_Params,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
@@ -71,7 +70,6 @@ const auto maxPool_ExplicitPad_CeilRounding_Params = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(MaxPool_ExplicitPad_CeilRounding, PoolingLayerTest,
                         ::testing::Combine(
                                 maxPool_ExplicitPad_CeilRounding_Params,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
@@ -96,7 +94,6 @@ const auto avgPoolExplicitPadCeilRoundingParams = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(AvgPool_ExplicitPad_CeilRounding, PoolingLayerTest,
                         ::testing::Combine(
                                 avgPoolExplicitPadCeilRoundingParams,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
@@ -118,7 +115,6 @@ const auto avgPoolExplicitPadFloorRoundingParams = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(AvgPool_ExplicitPad_FloorRounding, PoolingLayerTest,
                         ::testing::Combine(
                                 avgPoolExplicitPadFloorRoundingParams,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
@@ -141,7 +137,6 @@ const auto allPools_ValidPad_Params = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(MAX_and_AVGPool_ValidPad, PoolingLayerTest,
                         ::testing::Combine(
                                 allPools_ValidPad_Params,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
index d0976a7..66be13c 100644
 using namespace LayerTestsDefinitions;
 
 namespace {
-// Common params
-const std::vector<InferenceEngine::Precision> inputPrecisions = {
-        InferenceEngine::Precision::FP32,
-        InferenceEngine::Precision::U8
-};
-
 const std::vector<InferenceEngine::Precision> netPrecisions = {
         InferenceEngine::Precision::FP32,
         InferenceEngine::Precision::FP16
@@ -24,7 +18,6 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
 INSTANTIATE_TEST_CASE_P(ReshapeCheckDynBatch, ReshapeLayerTest,
         ::testing::Combine(
                 ::testing::Values(true),
-                ::testing::ValuesIn(inputPrecisions),
                 ::testing::ValuesIn(netPrecisions),
                 ::testing::Values(std::vector<size_t>({30, 30, 30, 30})),
                 ::testing::Values(std::vector<size_t>({30, 30, 30, 30})),
@@ -35,7 +28,6 @@ INSTANTIATE_TEST_CASE_P(ReshapeCheckDynBatch, ReshapeLayerTest,
 INSTANTIATE_TEST_CASE_P(ReshapeCheck, ReshapeLayerTest,
         ::testing::Combine(
                 ::testing::Values(true),
-                ::testing::ValuesIn(inputPrecisions),
                 ::testing::ValuesIn(netPrecisions),
                 ::testing::Values(std::vector<size_t>({10, 10, 10, 10})),
                 ::testing::Values(std::vector<size_t>({10, 0, 100})),
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/select.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/select.cpp
new file mode 100644
index 0000000..8094975
--- /dev/null
@@ -0,0 +1,86 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "single_layer_tests/select.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+const std::vector<InferenceEngine::Precision> inputPrecision = {
+    InferenceEngine::Precision::I8,
+    InferenceEngine::Precision::I16,
+    InferenceEngine::Precision::I32,
+    InferenceEngine::Precision::FP32
+    // CPU plug-in doesn't support I64 and U64 precisions at the moment
+    // InferenceEngine::Precision::I64
+};
+
+const std::vector<std::vector<std::vector<size_t>>> noneShapes = {
+    {{1}, {1}, {1}},
+    {{8}, {8}, {8}},
+    {{4, 5}, {4, 5}, {4, 5}},
+    {{3, 4, 5}, {3, 4, 5}, {3, 4, 5}},
+    {{2, 3, 4, 5}, {2, 3, 4, 5}, {2, 3, 4, 5}},
+    {{2, 3, 4, 5, 6}, {2, 3, 4, 5, 6}, {2, 3, 4, 5, 6}}
+};
+
+const auto noneCases = ::testing::Combine(
+    ::testing::ValuesIn(noneShapes),
+    ::testing::ValuesIn(inputPrecision),
+    ::testing::Values(ngraph::op::AutoBroadcastSpec::NONE),
+    ::testing::Values(CommonTestUtils::DEVICE_CPU)
+);
+
+const std::vector<std::vector<std::vector<size_t>>> numpyShapes = {
+    {{1}, {1}, {1}},
+    {{1}, {16}, {1}},
+    {{1}, {1}, {16}},
+    {{1}, {8}, {8}},
+    {{8}, {1}, {8}},
+    {{8}, {8}, {8}},
+    {{4, 1}, {1}, {4, 8}},
+    {{3, 8}, {8}, {3, 1}},
+    {{8, 1}, {8, 1}, {8, 1}},
+    {{1}, {5, 8}, {5, 8}},
+    {{8, 1, 1}, {8, 1, 1}, {2, 5}},
+    {{8, 1}, {6, 8, 1}, {6, 1, 1}},
+    {{5, 1}, {8, 1, 7}, {5, 7}},
+    {{2, 8, 1}, {2, 8, 9}, {2, 1, 9}},
+    {{1, 4}, {8, 1, 1, 1}, {4}},
+    {{5, 4, 1}, {8, 5, 1, 1}, {4, 1}},
+    {{1, 4}, {6, 1, 8, 1}, {6, 1, 8, 4}},
+    {{7, 3, 1, 8}, {7, 1, 1, 8}, {3, 2, 8}},
+    {{1, 3, 1}, {8, 2, 3, 1}, {3, 9}},
+    {{5, 1, 8}, {2, 1, 9, 8}, {2, 5, 9, 8}},
+    {{6, 1, 1, 8}, {6, 7, 1, 8}, {2, 1}},
+    {{5, 1, 1, 1}, {5, 7, 8, 6}, {1, 8, 6}},
+    {{8, 1, 5}, {8, 1, 1, 1, 1}, {8, 7, 5}},
+    {{8, 1, 1, 9}, {4, 8, 1, 1, 1}, {1, 1, 9}},
+    {{5, 1, 2, 1}, {8, 1, 9, 1, 1}, {5, 1, 2, 1}},
+    {{8, 1}, {2, 1, 1, 8, 1}, {9, 1, 1}},
+    {{8, 5, 5, 5, 1}, {8, 1, 1, 1, 8}, {5, 5, 5, 8}},
+    {{4}, {8, 5, 6, 1, 1}, {2, 4}},
+    {{9, 9, 2, 8, 1}, {9, 1, 2, 8, 1}, {9, 1, 1, 1}},
+    {{5, 3, 3}, {8, 1, 1, 3, 3}, {5, 1, 3}},
+    {{5, 1, 8, 1}, {5, 5, 1, 8, 1}, {1}},
+    {{3}, {6, 8, 1, 1, 3}, {6, 1, 5, 3, 3}},
+    {{5, 1}, {3, 1, 4, 1, 8}, {1, 4, 5, 8}},
+    {{2, 1, 5}, {8, 6, 2, 3, 1}, {5}},
+    {{6}, {2, 1, 9, 8, 6}, {2, 4, 9, 8, 6}},
+    {{5, 7, 1, 8, 1}, {5, 7, 1, 8, 4}, {8, 1}},
+    {{7, 6, 5, 8}, {4, 7, 6, 5, 8}, {6, 1, 8}}
+};
+
+const auto numpyCases = ::testing::Combine(
+    ::testing::ValuesIn(numpyShapes),
+    ::testing::ValuesIn(inputPrecision),
+    ::testing::Values(ngraph::op::AutoBroadcastSpec::NUMPY),
+    ::testing::Values(CommonTestUtils::DEVICE_CPU)
+);
+
+INSTANTIATE_TEST_CASE_P(smoke_MKLDNN_TestsSelect_none, SelectLayerTest, noneCases, SelectLayerTest::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(smoke_MKLDNN_TestsSelect_numpy, SelectLayerTest, numpyCases, SelectLayerTest::getTestCaseName);
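
The numpyShapes list pairs condition/then/else shapes that must be multidirectionally broadcastable. Here is a small standalone checker for the NUMPY rule used here (right-align the shapes; each aligned triple of dimensions must match or contain 1s); it is a sketch of the rule, not the plugin's validation code:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <utility>
    #include <vector>

    // NUMPY-style multidirectional broadcast of two shapes; returns {ok, result}.
    std::pair<bool, std::vector<std::size_t>> broadcast(std::vector<std::size_t> a,
                                                        std::vector<std::size_t> b) {
        if (a.size() < b.size()) std::swap(a, b);
        b.insert(b.begin(), a.size() - b.size(), std::size_t{1});  // left-pad with 1s
        std::vector<std::size_t> out(a.size());
        for (std::size_t i = 0; i < a.size(); ++i) {
            if (a[i] != b[i] && a[i] != 1 && b[i] != 1) return {false, {}};
            out[i] = std::max(a[i], b[i]);
        }
        return {true, out};
    }

    int main() {
        // One of the tuples above: {5, 1, 8} vs {2, 1, 9, 8} vs {2, 5, 9, 8}.
        auto ab = broadcast({5, 1, 8}, {2, 1, 9, 8});
        assert(ab.first);
        auto abc = broadcast(ab.second, {2, 5, 9, 8});
        assert(abc.first && (abc.second == std::vector<std::size_t>{2, 5, 9, 8}));
        return 0;
    }
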
index 09ddf01..cefc8de 100644
@@ -15,10 +15,6 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
     InferenceEngine::Precision::FP32,
 };
 
-const std::vector<InferenceEngine::Precision> inputPrecisions = {
-    InferenceEngine::Precision::FP32,
-};
-
 const std::vector<InferenceEngine::Layout> inputLayouts2D = {
     InferenceEngine::Layout::NC,
 };
@@ -33,7 +29,6 @@ const std::vector<size_t> axis2D = {
 
 const auto params2D = testing::Combine(
     testing::ValuesIn(netPrecisions),
-    testing::ValuesIn(inputPrecisions),
     testing::ValuesIn(inputLayouts2D),
     testing::ValuesIn(inputShapes2D),
     testing::ValuesIn(axis2D),
index 5b696fa..0037c23 100644
@@ -13,20 +13,15 @@ namespace {
 
 spaceToBatchParamsTuple stb_only_test_cases[] = {
         spaceToBatchParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, {1, 1, 2, 2},
-                                InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32,
-                                CommonTestUtils::DEVICE_CPU),
+                                InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_CPU),
         spaceToBatchParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, {1, 3, 2, 2},
-                                InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32,
-                                CommonTestUtils::DEVICE_CPU),
+                                InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_CPU),
         spaceToBatchParamsTuple({1, 1, 2, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, {1, 1, 4, 4},
-                                InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32,
-                                CommonTestUtils::DEVICE_CPU),
+                                InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_CPU),
         spaceToBatchParamsTuple({1, 1, 2, 2}, {0, 0, 0, 2}, {0, 0, 0, 0}, {2, 1, 2, 4},
-                                InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32,
-                                CommonTestUtils::DEVICE_CPU),
+                                InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_CPU),
         spaceToBatchParamsTuple({1, 1, 3, 2, 2}, {0, 0, 1, 0, 3}, {0, 0, 2, 0, 0}, {1, 1, 3, 2, 1},
-                                InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32,
-                                CommonTestUtils::DEVICE_CPU),
+                                InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_CPU),
 };
 
 INSTANTIATE_TEST_CASE_P(
index f495ffe..f57fe1b 100644 (file)
 using namespace LayerTestsDefinitions;
 
 namespace {
-// Common params
-const std::vector<InferenceEngine::Precision> inputPrecisions = {
-        InferenceEngine::Precision::FP32,
-        InferenceEngine::Precision::U8,
-        InferenceEngine::Precision::I8
-};
 
 const std::vector<InferenceEngine::Precision> netPrecisions = {
         InferenceEngine::Precision::FP32,
@@ -26,7 +20,6 @@ INSTANTIATE_TEST_CASE_P(NumSplitsCheck, SplitLayerTest,
                         ::testing::Combine(
                                 ::testing::Values(1),
                                 ::testing::Values(0, 1, 2, 3),
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({30, 30, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp
new file mode 100644 (file)
index 0000000..ee1bdc9
--- /dev/null
@@ -0,0 +1,49 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+#include <vector>
+#include "subgraph_tests/reshape_squeeze_reshape_relu.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+    std::vector<std::vector<std::vector<size_t>>> inputs{
+            {{1, 1, 3}, {0, 1}},
+            {{1, 1, 3}, {0}},
+            {{1, 1, 3}, {1}},
+            {{1, 3, 1}, {0, 2}},
+            {{1, 3, 1}, {0}},
+            {{1, 3, 1}, {2}},
+            {{3, 1, 1}, {1, 2}},
+            {{3, 1, 1}, {1}},
+            {{3, 1, 1}, {2}},
+            {{4, 1, 3, 1}, {1, 3}},
+            {{4, 1, 1, 3}, {1, 2}},
+            {{1, 4, 1, 3}, {0, 2}},
+            {{1, 3, 5, 2, 1}, {0, 4}},
+            {{3, 1, 2, 4, 4, 3}, {1}},
+            {{1, 1, 1, 1, 1, 3}, {0, 1, 2, 3, 4}},
+            {{1, 1, 1, 1, 1, 3}, {1, 3}},
+            {{1}, {0}},
+    };
+
+    std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
+                                                             InferenceEngine::Precision::FP16,
+    };
+
+    INSTANTIATE_TEST_CASE_P(reshape_squeeze_reshape_relu, ReshapeSqueezeReshapeRelu,
+                            ::testing::Combine(
+                                    ::testing::ValuesIn(inputs),
+                                    ::testing::ValuesIn(netPrecisions),
+                                    ::testing::Values(CommonTestUtils::DEVICE_CPU),
+                                    ::testing::Values(true)),
+                            ReshapeSqueezeReshapeRelu::getTestCaseName);
+
+    INSTANTIATE_TEST_CASE_P(reshape_unsqueeze_reshape_relu, ReshapeSqueezeReshapeRelu,
+                            ::testing::Combine(
+                                    ::testing::ValuesIn(inputs),
+                                    ::testing::ValuesIn(netPrecisions),
+                                    ::testing::Values(CommonTestUtils::DEVICE_CPU),
+                                    ::testing::Values(false)),
+                            ReshapeSqueezeReshapeRelu::getTestCaseName);
+}  // namespace
index ab13a9b..31705a0 100644 (file)
 using namespace LayerTestsDefinitions;
 
 namespace {
-const std::vector<InferenceEngine::Precision> inputPrecisions = {
-        InferenceEngine::Precision::FP32,
-        InferenceEngine::Precision::U8,
-        InferenceEngine::Precision::I8
-};
 
 const std::vector<InferenceEngine::Precision> netPrecisions = {
         InferenceEngine::Precision::FP32,
@@ -23,7 +18,6 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
 
 INSTANTIATE_TEST_CASE_P(NoReshape, SplitConvConcat,
                         ::testing::Combine(
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(InferenceEngine::SizeVector({1, 6, 40, 40})),
                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
diff --git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/cpu_test_utils.hpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/cpu_test_utils.hpp
new file mode 100644 (file)
index 0000000..e7bed01
--- /dev/null
@@ -0,0 +1,120 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <string>
+#include "network_serializer.h"
+#include "ie_system_conf.h"
+
+namespace CPUTestUtils {
+
+typedef enum {
+    nchw,
+    nChw8c,
+    nChw16c,
+    ncdhw,
+    nCdhw8c,
+    nCdhw16c,
+    undef
+} cpu_memory_format_t;
+
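+// Converts a cpu_memory_format_t tag to its canonical string name; asserts on an unknown value.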
+const char *cpu_fmt2str(cpu_memory_format_t v) {
+    if (v == nchw) return "nchw";
+    if (v == nChw8c) return "nChw8c";
+    if (v == nChw16c) return "nChw16c";
+    if (v == ncdhw) return "ncdhw";
+    if (v == nCdhw8c) return "nCdhw8c";
+    if (v == nCdhw16c) return "nCdhw16c";
+    assert(!"unknown fmt");
+    return "undef";
+}
+
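+// Parses a format name (with or without the "mkldnn_" prefix) back into the enum.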
+cpu_memory_format_t cpu_str2fmt(const char *str) {
+#define CASE(_fmt) do { \
+    if (!strcmp(#_fmt, str) \
+            || !strcmp("mkldnn_" #_fmt, str)) \
+        return _fmt; \
+} while (0)
+    CASE(nchw);
+    CASE(nChw8c);
+    CASE(nChw16c);
+    CASE(ncdhw);
+    CASE(nCdhw8c);
+    CASE(nCdhw16c);
+#undef CASE
+    assert(!"unknown memory format");
+    return undef;
+}
+
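+// Serializes formats as a comma-separated "cpu:<fmt>" list (the string form stored in rt_info below).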
+std::string fmts2str(const std::vector<cpu_memory_format_t> &fmts) {
+    std::string str;
+    for (auto &fmt : fmts) {
+        ((str += "cpu:") += cpu_fmt2str(fmt)) += ",";
+    }
+    str.erase(str.end() - 1);
+    return str;
+}
+
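+// Serializes implementation priorities the same way, as a comma-separated "cpu:<impl>" list.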
+std::string impls2str(const std::vector<std::string> &priority) {
+    std::string str;
+    for (auto &impl : priority) {
+        ((str += "cpu:") += impl) += ",";
+    }
+    str.erase(str.end() - 1);
+    return str;
+}
+
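+// Walks the executable network's runtime graph and, for every node of the given type,
+// checks the producers' "outputLayouts" against inputMemoryFormats, the node's own
+// "outputLayouts" against outputMemoryFormats, and its "primitiveType" against selectedType.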
+IE_SUPPRESS_DEPRECATED_START
+inline void CheckCPUImpl(InferenceEngine::ExecutableNetwork &execNet, std::string nodeType, std::vector<cpu_memory_format_t> inputMemoryFormats,
+                         std::vector<cpu_memory_format_t> outputMemoryFormats, std::string selectedType) {
+    InferenceEngine::CNNNetwork execGraphInfo = execNet.GetExecGraphInfo();
+    auto nodes = InferenceEngine::Serialization::TopologicalSort(execGraphInfo);
+    for (auto &node : nodes) {
+        if (node->type == nodeType) {
+            ASSERT_LE(inputMemoryFormats.size(), node->insData.size());
+            ASSERT_LE(outputMemoryFormats.size(), node->outData.size());
+            for (int i = 0; i < inputMemoryFormats.size(); i++) {
+                for (auto &parentNode : nodes) {
+                    for (int j = 0; j < parentNode->outData.size(); j++) {
+                        if (parentNode->outData[j]->getName() == node->insData[i].lock()->getName()) {
+                            auto actualInputMemoryFormat = parentNode->params.find("outputLayouts");
+                            ASSERT_NE(actualInputMemoryFormat, parentNode->params.end());
+                            ASSERT_EQ(inputMemoryFormats[i], cpu_str2fmt(actualInputMemoryFormat->second.c_str()));
+                        }
+                    }
+                }
+            }
+            for (int i = 0; i < outputMemoryFormats.size(); i++) {
+                auto actualOutputMemoryFormat = node->params.find("outputLayouts");
+                ASSERT_NE(actualOutputMemoryFormat, node->params.end());
+                ASSERT_EQ(outputMemoryFormats[i], cpu_str2fmt(actualOutputMemoryFormat->second.c_str()));
+            }
+
+            auto primType = node->params.find("primitiveType");
+            ASSERT_NE(primType, node->params.end());
+            ASSERT_EQ(selectedType, primType->second);
+        }
+    }
+}
+IE_SUPPRESS_DEPRECATED_END
+
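+// Packs the format and priority hints into an rt_info-compatible map under the
+// "InputMemoryFormats", "OutputMemoryFormats" and "PrimitivesPriority" keys.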
+std::map<std::string, std::shared_ptr<ngraph::Variant>> setCPUInfo(std::vector<cpu_memory_format_t> inFmts, std::vector<cpu_memory_format_t> outFmts,
+                                      std::vector<std::string> priority) {
+    std::map<std::string, std::shared_ptr<ngraph::Variant>> cpuInfo;
+
+    if (!inFmts.empty()) {
+        cpuInfo.insert({"InputMemoryFormats", InferenceEngine::Parameter(fmts2str(inFmts)).asVariant()});
+    }
+    if (!outFmts.empty()) {
+        cpuInfo.insert({"OutputMemoryFormats", InferenceEngine::Parameter(fmts2str(outFmts)).asVariant()});
+    }
+    if (!priority.empty()) {
+        cpuInfo.insert({"PrimitivesPriority", InferenceEngine::Parameter(impls2str(priority)).asVariant()});
+    }
+
+    return cpuInfo;
+}
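+
+// Usage sketch (this is how group_convolution.cpp drives these helpers):
+//   node->get_rt_info() = setCPUInfo({nChw8c}, {nChw8c}, {"jit_avx2"});
+//   /* ...build the function, compile and run the network... */
+//   CheckCPUImpl(execNet, "Convolution", {nChw8c}, {nChw8c}, "jit_avx2_FP32");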
+
+}  // namespace CPUTestUtils
\ No newline at end of file
diff --git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/group_convolution.cpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/group_convolution.cpp
new file mode 100644 (file)
index 0000000..5cb93e4
--- /dev/null
@@ -0,0 +1,592 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <single_layer_tests/group_convolution.hpp>
+#include "cpu_test_utils.hpp"
+
+using namespace InferenceEngine;
+using namespace CPUTestUtils;
+
+namespace CPULayerTestsDefinitions {
+
+typedef std::tuple<
+        std::vector<cpu_memory_format_t>,
+        std::vector<cpu_memory_format_t>,
+        std::vector<std::string>,
+        std::string> groupConvCPUSpecificParams;
+
+typedef std::tuple<
+        groupConvLayerTestParamsSet,
+        groupConvCPUSpecificParams> groupConvLayerCPUTestParamsSet;
+
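+// Wraps the shared GroupConvolution test with CPU-specific expectations: requested
+// input/output memory formats, primitive priority and the expected "selectedType".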
+class GroupConvolutionLayerCPUTest : public testing::WithParamInterface<groupConvLayerCPUTestParamsSet>,
+                                     public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(testing::TestParamInfo<groupConvLayerCPUTestParamsSet> obj) {
+        groupConvLayerTestParamsSet basicParamsSet;
+        groupConvCPUSpecificParams cpuParams;
+        std::tie(basicParamsSet, cpuParams) = obj.param;
+
+        std::ostringstream result;
+        result << LayerTestsDefinitions::GroupConvolutionLayerTest::getTestCaseName(testing::TestParamInfo<groupConvLayerTestParamsSet>(
+                basicParamsSet, 0));
+
+        std::vector<cpu_memory_format_t> inFmts, outFmts;
+        std::vector<std::string> priority;
+        std::string selectedType;
+        std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
+
+        result << "_inFmts=" << CPUTestUtils::fmts2str(inFmts);
+        result << "_outFmts=" << CPUTestUtils::fmts2str(outFmts);
+        result << "_primitive=" << selectedType;
+
+        return result.str();
+    }
+
+protected:
+    void SetUp() {
+        groupConvLayerTestParamsSet basicParamsSet;
+        groupConvCPUSpecificParams cpuParams;
+        std::tie(basicParamsSet, cpuParams) = this->GetParam();
+
+        std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
+
+        groupConvSpecificParams groupConvParams;
+        std::vector<size_t> inputShape;
+        auto netPrecision   = InferenceEngine::Precision::UNSPECIFIED;
+        std::tie(groupConvParams, netPrecision, inputShape, targetDevice) = basicParamsSet;
+
+        ngraph::op::PadType padType;
+        InferenceEngine::SizeVector kernel, stride, dilation;
+        std::vector<ptrdiff_t> padBegin, padEnd;
+        size_t convOutChannels, numGroups;
+        std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, numGroups, padType) = groupConvParams;
+
+        auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+        auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
+        auto paramOuts = ngraph::helpers::convert2OutputVector(
+                ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
+        auto groupConv = std::dynamic_pointer_cast<ngraph::opset1::GroupConvolution>(
+                ngraph::builder::makeGroupConvolution(paramOuts[0], ngPrc, kernel, stride, padBegin,
+                                                      padEnd, dilation, padType, convOutChannels, numGroups));
+        groupConv->get_rt_info() = CPUTestUtils::setCPUInfo(inFmts, outFmts, priority);
+        ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(groupConv)};
+        function = std::make_shared<ngraph::Function>(results, params, "groupConvolution");
+    }
+
+    std::vector<cpu_memory_format_t> inFmts, outFmts;
+    std::vector<std::string> priority;
+    std::string selectedType;
+};
+
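+// Runs the accuracy comparison against the reference, then verifies the plugin
+// actually selected the expected primitive and memory layouts.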
+TEST_P(GroupConvolutionLayerCPUTest, CompareWithRefs) {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+
+    Run();
+    CPUTestUtils::CheckCPUImpl(executableNetwork, "Convolution", inFmts, outFmts, selectedType);
+}
+
+namespace {
+
+/* CPU PARAMS */
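+// Each entry: {input formats}, {output formats}, {primitive priority}, expected selectedType.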
+const auto cpuParams_ref_2D = groupConvCPUSpecificParams{{nchw}, {nchw}, {"ref_any"}, "ref_any_FP32"};
+const auto cpuParams_ref_3D = groupConvCPUSpecificParams{{ncdhw}, {ncdhw}, {"ref_any"}, "ref_any_FP32"};
+
+const auto cpuParams_gemm_2D = groupConvCPUSpecificParams{{nchw}, {nchw}, {"gemm_any"}, "jit_gemm_FP32"};
+const auto cpuParams_gemm_3D = groupConvCPUSpecificParams{{ncdhw}, {ncdhw}, {"gemm_any"}, "jit_gemm_FP32"};
+
+const auto cpuParams_sse42_2D = groupConvCPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_sse42"}, "jit_sse42_FP32"};
+const auto cpuParams_sse42_3D = groupConvCPUSpecificParams{{nCdhw8c}, {nCdhw8c}, {"jit_sse42"}, "jit_sse42_FP32"};
+const auto cpuParams_sse42_dw_2D = groupConvCPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_sse42_dw"}, "jit_sse42_dw_FP32"};
+const auto cpuParams_sse42_dw_3D = groupConvCPUSpecificParams{{nCdhw8c}, {nCdhw8c}, {"jit_sse42_dw"}, "jit_sse42_dw_FP32"};
+
+const auto cpuParams_avx2_2D = groupConvCPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_avx2"}, "jit_avx2_FP32"};
+const auto cpuParams_avx2_3D = groupConvCPUSpecificParams{{nCdhw8c}, {nCdhw8c}, {"jit_avx2"}, "jit_avx2_FP32"};
+const auto cpuParams_avx2_dw_2D = groupConvCPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_avx2_dw"}, "jit_avx2_dw_FP32"};
+const auto cpuParams_avx2_dw_3D = groupConvCPUSpecificParams{{nCdhw8c}, {nCdhw8c}, {"jit_avx2_dw"}, "jit_avx2_dw_FP32"};
+
+const auto cpuParams_avx512_2D = groupConvCPUSpecificParams{{nChw16c}, {nChw16c}, {"jit_avx512"}, "jit_avx512_FP32"};
+const auto cpuParams_avx512_3D = groupConvCPUSpecificParams{{nCdhw16c}, {nCdhw16c}, {"jit_avx512"}, "jit_avx512_FP32"};
+const auto cpuParams_avx512_dw_2D = groupConvCPUSpecificParams{{nChw16c}, {nChw16c}, {"jit_avx512_dw"}, "jit_avx512_dw_FP32"};
+const auto cpuParams_avx512_dw_3D = groupConvCPUSpecificParams{{nCdhw16c}, {nCdhw16c}, {"jit_avx512_dw"}, "jit_avx512_dw_FP32"};
+/* ========== */
+
+/* GROUP CONV TEST UTILS */
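+// Drops parameter sets whose selected primitive requires an ISA extension the host CPU lacks.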
+std::vector<groupConvCPUSpecificParams> filterCPUInfoForDevice(std::vector<groupConvCPUSpecificParams> CPUParams) {
+    std::vector<groupConvCPUSpecificParams> resCPUParams;
+    const int selectedTypeIndex = 3;
+
+    for (auto param : CPUParams) {
+        auto selectedTypeStr = std::get<selectedTypeIndex>(param);
+
+        if (selectedTypeStr.find("jit") != std::string::npos && !with_cpu_x86_sse42())
+            continue;
+        if (selectedTypeStr.find("sse42") != std::string::npos && !with_cpu_x86_sse42())
+            continue;
+        if (selectedTypeStr.find("avx2") != std::string::npos && !with_cpu_x86_avx2())
+            continue;
+        if (selectedTypeStr.find("avx512") != std::string::npos && !with_cpu_x86_avx512f())
+            continue;
+
+        resCPUParams.push_back(param);
+    }
+
+    return resCPUParams;
+}
+
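+// Same ISA filtering, applied to fully combined parameter sets (used by the single test cases below).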
+std::vector<groupConvLayerCPUTestParamsSet> filterParamsSetForDevice(std::vector<groupConvLayerCPUTestParamsSet> paramsSet) {
+    std::vector<groupConvLayerCPUTestParamsSet> resParamsSet;
+    const int cpuParamsIndex = 1;
+    const int selectedTypeIndex = 3;
+
+    for (auto param : paramsSet) {
+        auto cpuParams = std::get<cpuParamsIndex>(param);
+        auto selectedTypeStr = std::get<selectedTypeIndex>(cpuParams);
+
+        if (selectedTypeStr.find("jit") != std::string::npos && !with_cpu_x86_sse42())
+            continue;
+        if (selectedTypeStr.find("sse42") != std::string::npos && !with_cpu_x86_sse42())
+            continue;
+        if (selectedTypeStr.find("avx2") != std::string::npos && !with_cpu_x86_avx2())
+            continue;
+        if (selectedTypeStr.find("avx512") != std::string::npos && !with_cpu_x86_avx512f())
+            continue;
+
+        resParamsSet.push_back(param);
+    }
+
+    return resParamsSet;
+}
+/* ===================== */
+
+/* COMMON PARAMS */
+/* ============= GroupConvolution params (planar layout) ============= */
+const SizeVector numOutChannels_Planar = {6};
+const SizeVector numGroups_Planar = {2, 3};
+
+/* ============= GroupConvolution params (blocked layout) ============= */
+const SizeVector numOutChannels_Blocked = {64};
+const SizeVector numGroups_Blocked = {2, 4};
+
+/* ============= GroupConvolution params (DW) ============= */
+const SizeVector numOutChannels_DW = {32};
+const SizeVector numGroups_DW = {32};
+
+/* ============= GroupConvolution params (2D) ============= */
+const std::vector<SizeVector> kernels2d = {{3, 3}, {1, 1}};
+const std::vector<SizeVector> strides2d = {{1, 1}, {2, 2}};
+const std::vector<std::vector<ptrdiff_t>> padBegins2d = {{0, 0}, {1, 1}};
+const std::vector<std::vector<ptrdiff_t>> padEnds2d = {{0, 0}};
+const std::vector<SizeVector> dilations2d = {{1, 1}, {2, 2}};
+
+/* ============= GroupConvolution params (3D) ============= */
+const std::vector<SizeVector> kernels3d = {{3, 3, 3}, {1, 1, 1}};
+const std::vector<SizeVector> strides3d = {{1, 1, 1}, {2, 2, 2}};
+const std::vector<std::vector<ptrdiff_t>> padBegins3d = {{0, 0, 0}, {1, 1, 1}};
+const std::vector<std::vector<ptrdiff_t>> padEnds3d = {{0, 0, 0}};
+const std::vector<SizeVector> dilations3d = {{1, 1, 1}, {2, 2, 2}};
+/* ============= */
+
+
+/* INSTANCES */
+/* ============= GroupConvolution (Planar 2D) ============= */
+const auto groupConvParams_ExplicitPadding_Planar_2D = ::testing::Combine(
+        ::testing::ValuesIn(kernels2d),
+        ::testing::ValuesIn(strides2d),
+        ::testing::ValuesIn(padBegins2d),
+        ::testing::ValuesIn(padEnds2d),
+        ::testing::ValuesIn(dilations2d),
+        ::testing::ValuesIn(numOutChannels_Planar),
+        ::testing::ValuesIn(numGroups_Planar),
+        ::testing::Values(ngraph::op::PadType::EXPLICIT)
+);
+
+const std::vector<groupConvCPUSpecificParams> CPUParams_Planar_2D = {
+        cpuParams_gemm_2D
+};
+
+INSTANTIATE_TEST_CASE_P(GroupConv_2D_Planar_FP32, GroupConvolutionLayerCPUTest,
+                        ::testing::Combine(
+                                ::testing::Combine(
+                                        groupConvParams_ExplicitPadding_Planar_2D,
+                                        ::testing::Values(Precision::FP32),
+                                        ::testing::Values(std::vector<size_t >({2, 12, 7, 7})),
+                                        ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                                ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Planar_2D))),
+                        GroupConvolutionLayerCPUTest::getTestCaseName);
+
+/* ============= GroupConvolution (Planar 3D) ============= */
+const auto groupConvParams_ExplicitPadding_Planar_3D = ::testing::Combine(
+        ::testing::ValuesIn(kernels3d),
+        ::testing::ValuesIn(strides3d),
+        ::testing::ValuesIn(padBegins3d),
+        ::testing::ValuesIn(padEnds3d),
+        ::testing::ValuesIn(dilations3d),
+        ::testing::ValuesIn(numOutChannels_Planar),
+        ::testing::ValuesIn(numGroups_Planar),
+        ::testing::Values(ngraph::op::PadType::EXPLICIT)
+);
+
+const std::vector<groupConvCPUSpecificParams> CPUParams_Planar_3D = {
+        cpuParams_gemm_3D
+};
+
+INSTANTIATE_TEST_CASE_P(GroupConv_3D_Planar_FP32, GroupConvolutionLayerCPUTest,
+                        ::testing::Combine(
+                                ::testing::Combine(
+                                        groupConvParams_ExplicitPadding_Planar_3D,
+                                        ::testing::Values(Precision::FP32),
+                                        ::testing::Values(std::vector<size_t >({2, 12, 7, 7, 7})),
+                                        ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                                ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Planar_3D))),
+                        GroupConvolutionLayerCPUTest::getTestCaseName);
+
+/* ============= GroupConvolution (Blocked 2D) ============= */
+const auto groupConvParams_ExplicitPadding_Blocked_2D = ::testing::Combine(
+        ::testing::ValuesIn(kernels2d),
+        ::testing::ValuesIn(strides2d),
+        ::testing::ValuesIn(padBegins2d),
+        ::testing::ValuesIn(padEnds2d),
+        ::testing::ValuesIn(dilations2d),
+        ::testing::ValuesIn(numOutChannels_Blocked),
+        ::testing::ValuesIn(numGroups_Blocked),
+        ::testing::Values(ngraph::op::PadType::EXPLICIT)
+);
+
+const std::vector<groupConvCPUSpecificParams> CPUParams_Blocked_2D = {
+        cpuParams_sse42_2D,
+        cpuParams_avx2_2D,
+        cpuParams_avx512_2D
+};
+
+INSTANTIATE_TEST_CASE_P(GroupConv_2D_Blocked_FP32, GroupConvolutionLayerCPUTest,
+                        ::testing::Combine(
+                                ::testing::Combine(
+                                        groupConvParams_ExplicitPadding_Blocked_2D,
+                                        ::testing::Values(Precision::FP32),
+                                        ::testing::Values(std::vector<size_t >({2, 64, 7, 7})),
+                                        ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                                ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Blocked_2D))),
+                        GroupConvolutionLayerCPUTest::getTestCaseName);
+
+/* ============= GroupConvolution (Blocked 3D) ============= */
+const auto groupConvParams_ExplicitPadding_Blocked_3D = ::testing::Combine(
+        ::testing::ValuesIn(kernels3d),
+        ::testing::ValuesIn(strides3d),
+        ::testing::ValuesIn(padBegins3d),
+        ::testing::ValuesIn(padEnds3d),
+        ::testing::ValuesIn(dilations3d),
+        ::testing::ValuesIn(numOutChannels_Blocked),
+        ::testing::ValuesIn(numGroups_Blocked),
+        ::testing::Values(ngraph::op::PadType::EXPLICIT)
+);
+
+const std::vector<groupConvCPUSpecificParams> CPUParams_Blocked_3D = {
+//        cpuParams_sse42_3D, // jit_sse42 is not supported for 3D
+        cpuParams_avx2_3D,
+        cpuParams_avx512_3D
+};
+
+INSTANTIATE_TEST_CASE_P(GroupConv_3D_Blocked_FP32, GroupConvolutionLayerCPUTest,
+                        ::testing::Combine(
+                                ::testing::Combine(
+                                        groupConvParams_ExplicitPadding_Blocked_3D,
+                                        ::testing::Values(Precision::FP32),
+                                        ::testing::Values(std::vector<size_t >({2, 64, 7, 7, 7})),
+                                        ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                                ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Blocked_3D))),
+                        GroupConvolutionLayerCPUTest::getTestCaseName);
+
+/* ============= GroupConvolution (DW 2D) ============= */
+const auto groupConvParams_ExplicitPadding_DW_2D = ::testing::Combine(
+        ::testing::ValuesIn(kernels2d),
+        ::testing::ValuesIn(strides2d),
+        ::testing::ValuesIn(padBegins2d),
+        ::testing::ValuesIn(padEnds2d),
+        ::testing::ValuesIn(dilations2d),
+        ::testing::ValuesIn(numOutChannels_DW),
+        ::testing::ValuesIn(numGroups_DW),
+        ::testing::Values(ngraph::op::PadType::EXPLICIT)
+);
+
+const std::vector<groupConvCPUSpecificParams> CPUParams_DW_2D = {
+        cpuParams_sse42_dw_2D,
+        cpuParams_avx2_dw_2D,
+        cpuParams_avx512_dw_2D
+};
+
+INSTANTIATE_TEST_CASE_P(GroupConv_2D_DW_FP32, GroupConvolutionLayerCPUTest,
+                        ::testing::Combine(
+                                ::testing::Combine(
+                                        groupConvParams_ExplicitPadding_DW_2D,
+                                        ::testing::Values(Precision::FP32),
+                                        ::testing::Values(std::vector<size_t >({2, 32, 7, 7})),
+                                        ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                                ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_DW_2D))),
+                        GroupConvolutionLayerCPUTest::getTestCaseName);
+
+/* ============= GroupConvolution (DW 3D) ============= */
+const auto groupConvParams_ExplicitPadding_DW_3D = ::testing::Combine(
+        ::testing::ValuesIn(kernels3d),
+        ::testing::ValuesIn(strides3d),
+        ::testing::ValuesIn(padBegins3d),
+        ::testing::ValuesIn(padEnds3d),
+        ::testing::ValuesIn(dilations3d),
+        ::testing::ValuesIn(numOutChannels_DW),
+        ::testing::ValuesIn(numGroups_DW),
+        ::testing::Values(ngraph::op::PadType::EXPLICIT)
+);
+
+const std::vector<groupConvCPUSpecificParams> CPUParams_DW_3D = {
+        cpuParams_sse42_dw_3D,
+        cpuParams_avx2_dw_3D,
+        cpuParams_avx512_dw_3D
+};
+
+INSTANTIATE_TEST_CASE_P(GroupConv_3D_DW_FP32, GroupConvolutionLayerCPUTest,
+                        ::testing::Combine(
+                                ::testing::Combine(
+                                        groupConvParams_ExplicitPadding_DW_3D,
+                                        ::testing::Values(Precision::FP32),
+                                        ::testing::Values(std::vector<size_t >({2, 32, 7, 7, 7})),
+                                        ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                                ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_DW_3D))),
+                        GroupConvolutionLayerCPUTest::getTestCaseName);
+/* ========= */
+
+
+/* ============= SINGLE TEST CASES ============= */
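+// Builds one complete parameter set from per-group sizes:
+// inChannels = groups * inGroupSize, outChannels = groups * outGroupSize.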
+groupConvLayerCPUTestParamsSet makeSingleGroupConvCPUTestCase(SizeVector kernels, SizeVector strides, SizeVector dilations,
+                                                        std::vector<ptrdiff_t> padBegins, std::vector<ptrdiff_t> padEnds, ngraph::op::PadType padType,
+                                                        int groups, int mb, SizeVector spDims, int inGroupSize, int outGroupSize,
+                                                        groupConvCPUSpecificParams CPUParams) {
+    int inChannels = groups * inGroupSize;
+    int outChannels = groups * outGroupSize;
+
+    SizeVector inputShapes;
+    inputShapes.push_back(mb);
+    inputShapes.push_back(inChannels);
+    inputShapes.insert(inputShapes.end(), spDims.begin(), spDims.end());
+
+    groupConvSpecificParams specificParams(kernels, strides, padBegins, padEnds, dilations, outChannels, groups, padType);
+    groupConvLayerTestParamsSet basicParamsSet(specificParams, Precision::FP32, inputShapes, CommonTestUtils::DEVICE_CPU);
+    return groupConvLayerCPUTestParamsSet(basicParamsSet, CPUParams);
+}
+
+/* ============= GEMM GroupConvolution ============= */
+const std::vector<groupConvLayerCPUTestParamsSet> gemmGroupConvTestCases = {
+        //  1. is_depthwise (true, false)
+        //  2. jcp.im2col_sz (=0,>0)
+        //  3. is_blocking_applicable (true, false)
+
+        //  is_depthwise == false, im2col_sz > 0
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 5}, 2, 2, cpuParams_gemm_2D),
+        //  is_depthwise == true
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 5}, 1, 1, cpuParams_gemm_2D),
+        //  im2col_sz == 0, is_blocking_applicable == true
+        makeSingleGroupConvCPUTestCase({1, 1}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 5}, 2, 2, cpuParams_gemm_2D),
+        //  is_blocking_applicable == false ((jcp.im2col_sz == 0) && (jcp.ic / jcp.oc >= 42))
+        makeSingleGroupConvCPUTestCase({1, 1}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 5}, 42, 1, cpuParams_gemm_2D),
+
+        //  "hard" cases
+        makeSingleGroupConvCPUTestCase({3, 3}, {2, 2}, {1, 1}, {1, 1}, {1, 1}, ngraph::op::PadType::EXPLICIT, 3, 2, {129, 129}, 4, 2, cpuParams_gemm_2D),
+        makeSingleGroupConvCPUTestCase({2, 4}, {1, 2}, {3, 2}, {2, 1}, {1, 0}, ngraph::op::PadType::EXPLICIT, 2, 1, {10, 10}, 3, 3, cpuParams_gemm_2D),
+        makeSingleGroupConvCPUTestCase({3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, ngraph::op::PadType::EXPLICIT,
+                3, 2, {33, 33, 33}, 4, 2, cpuParams_gemm_3D),
+        makeSingleGroupConvCPUTestCase({2, 3, 4}, {1, 2, 2}, {3, 1, 2}, {2, 2, 1}, {1, 1, 0}, ngraph::op::PadType::EXPLICIT,
+                2, 1, {10, 10, 10}, 3, 3, cpuParams_gemm_3D),
+};
+
+INSTANTIATE_TEST_CASE_P(GEMM_GroupConv, GroupConvolutionLayerCPUTest, ::testing::ValuesIn(filterParamsSetForDevice(gemmGroupConvTestCases)));
+
+/* ============= JIT SSE42 GroupConvolution ============= */
+const std::vector<groupConvLayerCPUTestParamsSet> JIT_SSE42_GroupConvTestCases = {
+        //  1. jcp.ur_w (=3,<3)
+        //  2. jcp.ur_w_tail (=0,>0)
+        //  3. jcp.kw (>7,<=7)
+        //  4. jcp.nb_oc = jcp.oc / jcp.oc_block;
+        //  5. jcp.nb_ic = jcp.ic / jcp.ic_block;
+        //  6. ocb_work
+
+        //  jcp.ur_w == 3, jcp.ur_w_tail == 2
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 10}, 8, 8, cpuParams_sse42_2D),
+        //  jcp.ur_w < 3 (jcp.ur_w == jcp.ow)
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 4}, 8, 8, cpuParams_sse42_2D),
+        //  jcp.ur_w == 3, jcp.ur_w_tail == 0
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 11}, 8, 8, cpuParams_sse42_2D),
+        //  jcp.kw > 7
+        makeSingleGroupConvCPUTestCase({3, 8}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 10}, 8, 8, cpuParams_sse42_2D),
+        //  jcp.nb_oc == 2
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 5}, 8, 16, cpuParams_sse42_2D),
+        //  jcp.nb_ic == 2
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 5}, 16, 8, cpuParams_sse42_2D),
+        //  ocb_work > 1 (ocb_work == 2)
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 5}, 8, 40, cpuParams_sse42_2D),
+        //  jcp.nb_ic == 2, ocb_work == 2
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 5}, 16, 40, cpuParams_sse42_2D),
+
+        //  "hard" cases
+        makeSingleGroupConvCPUTestCase({3, 3}, {2, 2}, {1, 1}, {1, 1}, {1, 1}, ngraph::op::PadType::EXPLICIT, 3, 2, {129, 129}, 8, 8, cpuParams_sse42_2D),
+        makeSingleGroupConvCPUTestCase({2, 4}, {1, 2}, {3, 2}, {2, 1}, {1, 0}, ngraph::op::PadType::EXPLICIT, 2, 1, {10, 10}, 8, 8, cpuParams_sse42_2D),
+
+        //  jit_sse42 is not supported for 3D
+        //  makeSingleGroupConvCPUTestCase({3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, ngraph::op::PadType::EXPLICIT,
+        //                              3, 2, {33, 33, 33}, 8, 8, cpuParams_sse42_3D),
+        //  makeSingleGroupConvCPUTestCase({2, 3, 4}, {1, 2, 2}, {3, 1, 2}, {2, 2, 1}, {1, 1, 0}, ngraph::op::PadType::EXPLICIT,
+        //                              2, 1, {10, 10, 10}, 8, 8, cpuParams_sse42_3D),
+};
+
+INSTANTIATE_TEST_CASE_P(JIT_SSE42_GroupConv, GroupConvolutionLayerCPUTest, ::testing::ValuesIn(filterParamsSetForDevice(JIT_SSE42_GroupConvTestCases)));
+
+/* ============= JIT AVX2 GroupConvolution ============= */
+const std::vector<groupConvLayerCPUTestParamsSet> JIT_AVX2_GroupConvTestCases = {
+        //  1. jcp.ur_w (=3,<3)
+        //  2. jcp.ur_w_tail (=0,>0)
+        //  3. jcp.kw (>7,<=7)
+        //  4. jcp.nb_oc = jcp.oc / jcp.oc_block;
+        //  5. jcp.nb_ic = jcp.ic / jcp.ic_block;
+        //  6. ocb_work
+
+        //  jcp.ur_w == 3, jcp.ur_w_tail == 2
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 10}, 8, 8, cpuParams_avx2_2D),
+        //  jcp.ur_w < 3 (jcp.ur_w == jcp.ow)
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 4}, 8, 8, cpuParams_avx2_2D),
+        //  jcp.ur_w == 3, jcp.ur_w_tail == 0
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 11}, 8, 8, cpuParams_avx2_2D),
+        //  jcp.kw > 7
+        makeSingleGroupConvCPUTestCase({3, 8}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 10}, 8, 8, cpuParams_avx2_2D),
+        //  jcp.nb_oc == 2
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 5}, 8, 16, cpuParams_avx2_2D),
+        //  jcp.nb_ic == 2
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 5}, 16, 8, cpuParams_avx2_2D),
+        //  ocb_work > 1 (ocb_work == 2)
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 5}, 8, 40, cpuParams_avx2_2D),
+        //  jcp.nb_ic == 2, ocb_work == 2
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 5}, 16, 40, cpuParams_avx2_2D),
+
+        //  "hard" cases
+        makeSingleGroupConvCPUTestCase({3, 3}, {2, 2}, {1, 1}, {1, 1}, {1, 1}, ngraph::op::PadType::EXPLICIT, 3, 2, {129, 129}, 8, 8, cpuParams_avx2_2D),
+        makeSingleGroupConvCPUTestCase({2, 4}, {1, 2}, {3, 2}, {2, 1}, {1, 0}, ngraph::op::PadType::EXPLICIT, 2, 1, {10, 10}, 8, 8, cpuParams_avx2_2D),
+        makeSingleGroupConvCPUTestCase({3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, ngraph::op::PadType::EXPLICIT,
+                                    3, 2, {33, 33, 33}, 8, 8, cpuParams_avx2_3D),
+        makeSingleGroupConvCPUTestCase({2, 3, 4}, {1, 2, 2}, {3, 1, 2}, {2, 2, 1}, {1, 1, 0}, ngraph::op::PadType::EXPLICIT,
+                                    2, 1, {10, 10, 10}, 8, 8, cpuParams_avx2_3D),
+};
+
+INSTANTIATE_TEST_CASE_P(JIT_AVX2_GroupConv, GroupConvolutionLayerCPUTest, ::testing::ValuesIn(filterParamsSetForDevice(JIT_AVX2_GroupConvTestCases)));
+
+/* ============= JIT AVX512 GroupConvolution ============= */
+const std::vector<groupConvLayerCPUTestParamsSet> JIT_AVX512_GroupConvTestCases = {
+        //  1. "blocked to blocked" or "planar to blocked"
+        //  2. jcp.nb_ic, jcp.nb_oc
+
+        //  blocked to blocked
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 5}, 16, 16, cpuParams_avx512_2D),
+        //  jcp.nb_ic == 2
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 5}, 32, 16, cpuParams_avx512_2D),
+        //  jcp.nb_oc == 2
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 5}, 16, 32, cpuParams_avx512_2D),
+
+        //  "hard" cases
+        makeSingleGroupConvCPUTestCase({3, 3}, {2, 2}, {1, 1}, {1, 1}, {1, 1}, ngraph::op::PadType::EXPLICIT, 3, 2, {129, 129}, 16, 16, cpuParams_avx512_2D),
+        makeSingleGroupConvCPUTestCase({2, 4}, {1, 2}, {3, 2}, {2, 1}, {1, 0}, ngraph::op::PadType::EXPLICIT, 2, 1, {10, 10}, 16, 16, cpuParams_avx512_2D),
+        makeSingleGroupConvCPUTestCase({3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, ngraph::op::PadType::EXPLICIT,
+                                    3, 2, {33, 33, 33}, 16, 16, cpuParams_avx512_3D),
+        makeSingleGroupConvCPUTestCase({2, 3, 4}, {1, 2, 2}, {3, 1, 2}, {2, 2, 1}, {1, 1, 0}, ngraph::op::PadType::EXPLICIT,
+                                    2, 1, {10, 10, 10}, 16, 16, cpuParams_avx512_3D),
+};
+
+INSTANTIATE_TEST_CASE_P(JIT_AVX512_GroupConv, GroupConvolutionLayerCPUTest, ::testing::ValuesIn(filterParamsSetForDevice(JIT_AVX512_GroupConvTestCases)));
+
+/* ============= JIT SSE42 DW GroupConvolution ============= */
+const std::vector<groupConvLayerCPUTestParamsSet> JIT_SSE42_DW_GroupConvTestCases = {
+        //  1. jcp.ngroups % simd_w (=0,!=0)
+        //  2. jcp.nb_ch
+        //  3. jcp.nb_ch_blocking (=2,<2)
+        //  4. jcp.ur_w == 3
+
+        //  jcp.ngroups % simd_w == 0, jcp.nb_ch == 1, jcp.nb_ch_blocking == 1 (jcp.ngroups == 8)
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 8, 1, {5, 5}, 1, 1, cpuParams_sse42_dw_2D),
+        //  jcp.ngroups % simd_w == 0, jcp.nb_ch == 2, jcp.nb_ch_blocking == 2 (jcp.ngroups == 16)
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 16, 1, {5, 5}, 1, 1, cpuParams_sse42_dw_2D),
+        //  jcp.ngroups % simd_w != 0, jcp.nb_ch == 3, jcp.nb_ch_blocking == 2 (jcp.ngroups == 17); TODO: channel padding is not supported for SSE42
+        //  makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 17, 1, {5, 5}, 1, 1, cpuParams_sse42_dw_2D),
+        //  jcp.ow > jcp.ur_w (jcp.ow == 7)
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 8, 1, {5, 9}, 1, 1, cpuParams_sse42_dw_2D),
+
+        //  "hard" cases
+        makeSingleGroupConvCPUTestCase({3, 3}, {2, 2}, {1, 1}, {1, 1}, {1, 1}, ngraph::op::PadType::EXPLICIT, 8, 2, {129, 129}, 1, 1, cpuParams_sse42_dw_2D),
+        makeSingleGroupConvCPUTestCase({2, 4}, {1, 2}, {3, 2}, {2, 1}, {1, 0}, ngraph::op::PadType::EXPLICIT, 8, 1, {10, 10}, 1, 1, cpuParams_sse42_dw_2D),
+        makeSingleGroupConvCPUTestCase({3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, ngraph::op::PadType::EXPLICIT,
+                                    8, 2, {33, 33, 33}, 1, 1, cpuParams_sse42_dw_3D),
+        makeSingleGroupConvCPUTestCase({2, 3, 4}, {1, 2, 2}, {3, 1, 2}, {2, 2, 1}, {1, 1, 0}, ngraph::op::PadType::EXPLICIT,
+                                    8, 1, {10, 10, 10}, 1, 1, cpuParams_sse42_dw_3D),
+};
+
+INSTANTIATE_TEST_CASE_P(JIT_SSE42_DW_GroupConv, GroupConvolutionLayerCPUTest, ::testing::ValuesIn(filterParamsSetForDevice(JIT_SSE42_DW_GroupConvTestCases)));
+
+/* ============= JIT AVX2 DW GroupConvolution ============= */
+const std::vector<groupConvLayerCPUTestParamsSet> JIT_AVX2_DW_GroupConvTestCases = {
+        //  1. jcp.ngroups % simd_w (=0,!=0)
+        //  2. jcp.nb_ch
+        //  3. jcp.nb_ch_blocking (=3,<3)
+        //  4. jcp.ur_w == 4
+
+        //  jcp.ngroups % simd_w == 0, jcp.nb_ch == 1, jcp.nb_ch_blocking == 1 (jcp.ngroups == 8)
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 8, 1, {5, 5}, 1, 1, cpuParams_avx2_dw_2D),
+        //  jcp.ngroups % simd_w == 0, jcp.nb_ch == 3, jcp.nb_ch_blocking == 3 (jcp.ngroups == 24)
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 24, 1, {5, 5}, 1, 1, cpuParams_avx2_dw_2D),
+        //  jcp.ngroups % simd_w != 0, jcp.nb_ch == 4, jcp.nb_ch_blocking == 3 (jcp.ngroups == 25)
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 25, 1, {5, 5}, 1, 1, cpuParams_avx2_dw_2D),
+        //  jcp.ow > jcp.ur_w (jcp.ow == 7)
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 8, 1, {5, 9}, 1, 1, cpuParams_avx2_dw_2D),
+
+        //  "hard" cases
+        makeSingleGroupConvCPUTestCase({3, 3}, {2, 2}, {1, 1}, {1, 1}, {1, 1}, ngraph::op::PadType::EXPLICIT, 8, 2, {129, 129}, 1, 1, cpuParams_avx2_dw_2D),
+        makeSingleGroupConvCPUTestCase({2, 4}, {1, 2}, {3, 2}, {2, 1}, {1, 0}, ngraph::op::PadType::EXPLICIT, 8, 1, {10, 10}, 1, 1, cpuParams_avx2_dw_2D),
+        makeSingleGroupConvCPUTestCase({3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, ngraph::op::PadType::EXPLICIT,
+                                    8, 2, {33, 33, 33}, 1, 1, cpuParams_avx2_dw_3D),
+        makeSingleGroupConvCPUTestCase({2, 3, 4}, {1, 2, 2}, {3, 1, 2}, {2, 2, 1}, {1, 1, 0}, ngraph::op::PadType::EXPLICIT,
+                                    8, 1, {10, 10, 10}, 1, 1, cpuParams_avx2_dw_3D),
+};
+
+INSTANTIATE_TEST_CASE_P(JIT_AVX2_DW_GroupConv, GroupConvolutionLayerCPUTest, ::testing::ValuesIn(filterParamsSetForDevice(JIT_AVX2_DW_GroupConvTestCases)));
+
+/* ============= JIT AVX512 DW GroupConvolution ============= */
+const std::vector<groupConvLayerCPUTestParamsSet> JIT_AVX512_DW_GroupConvTestCases = {
+        //  1. jcp.ngroups % simd_w (=0,!=0)
+        //  2. jcp.nb_ch
+        //  3. jcp.nb_ch_blocking (=4,<4)
+        //  4. jcp.ur_w == 6
+
+        //  jcp.ngroups % simd_w == 0, jcp.nb_ch == 1, jcp.nb_ch_blocking == 1 (jcp.ngroups == 16)
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 16, 1, {5, 5}, 1, 1, cpuParams_avx512_dw_2D),
+        //  jcp.ngroups % simd_w == 0, jcp.nb_ch == 4, jcp.nb_ch_blocking == 4 (jcp.ngroups == 64)
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 64, 1, {5, 5}, 1, 1, cpuParams_avx512_dw_2D),
+        //  jcp.ngroups % simd_w != 0, jcp.nb_ch == 5, jcp.nb_ch_blocking == 4 (jcp.ngroups == 65)
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 65, 1, {5, 5}, 1, 1, cpuParams_avx512_dw_2D),
+        //  jcp.ow > jcp.ur_w (jcp.ow == 7)
+        makeSingleGroupConvCPUTestCase({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 8, 1, {5, 9}, 1, 1, cpuParams_avx512_dw_2D),
+
+        //  "hard" cases
+        makeSingleGroupConvCPUTestCase({3, 3}, {2, 2}, {1, 1}, {1, 1}, {1, 1}, ngraph::op::PadType::EXPLICIT, 16, 2, {129, 129}, 1, 1, cpuParams_avx512_dw_2D),
+        makeSingleGroupConvCPUTestCase({2, 4}, {1, 2}, {3, 2}, {2, 1}, {1, 0}, ngraph::op::PadType::EXPLICIT, 16, 1, {10, 10}, 1, 1, cpuParams_avx512_dw_2D),
+        makeSingleGroupConvCPUTestCase({3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, ngraph::op::PadType::EXPLICIT,
+                                    16, 2, {33, 33, 33}, 1, 1, cpuParams_avx512_dw_3D),
+        makeSingleGroupConvCPUTestCase({2, 3, 4}, {1, 2, 2}, {3, 1, 2}, {2, 2, 1}, {1, 1, 0}, ngraph::op::PadType::EXPLICIT,
+                                    16, 1, {10, 10, 10}, 1, 1, cpuParams_avx512_dw_3D),
+};
+
+INSTANTIATE_TEST_CASE_P(JIT_AVX512_DW_GroupConv, GroupConvolutionLayerCPUTest, ::testing::ValuesIn(filterParamsSetForDevice(JIT_AVX512_DW_GroupConvTestCases)));
+
+/* ============= JIT SSE42 1x1 Convolution (not supported with groups) ============= */
+/* ============= JIT AVX2 1x1 Convolution (not supported with groups) ============= */
+/* ============= JIT AVX512 1x1 Convolution (not supported with groups) ============= */
+/* ============= JIT AVX2 PLANAR Convolution (not supported with groups) ============= */
+/* ============= JIT AVX512 PLANAR Convolution (not supported with groups) ============= */
+/* ============================================= */
+
+} // namespace
+
+} // namespace CPULayerTestsDefinitions
diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/other_tests/add_output.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/other_tests/add_output.cpp
new file mode 100644 (file)
index 0000000..b98bb42
--- /dev/null
@@ -0,0 +1,15 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "other/add_output.hpp"
+
+const auto addOutputParams =
+    ::testing::Combine(::testing::Values("Memory_1"), ::testing::Values(CommonTestUtils::DEVICE_GNA));
+
+INSTANTIATE_TEST_CASE_P(AddOutputBasic, AddOutputTestsCommonClass, addOutputParams,
+                        AddOutputTestsCommonClass::getTestCaseName);
+
+TEST_P(AddOutputTestsCommonClass, basic) {
+    run_test();
+}
index 4c829be..f481963 100644 (file)
@@ -20,12 +20,6 @@ std::vector<std::vector<std::vector<size_t>>> inShapes = {
         {{10, 10, 10, 10}, {10, 10, 10, 10}, {10, 10, 10, 10}, {10, 10, 10, 10}, {10, 10, 10, 10}}
 };
 
-std::vector<InferenceEngine::Precision> inputPrecisions = {InferenceEngine::Precision::FP32,
-//         InferenceEngine::Precision::U8, // TODO: Preferable primitive descriptor is not set.
-//         InferenceEngine::Precision::I8  // TODO: Issue: 26570
-};
-
-
 std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
                                                          InferenceEngine::Precision::FP16};
 
@@ -34,7 +28,6 @@ INSTANTIATE_TEST_CASE_P(DISABLED_NoReshape, ConcatLayerTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(axes),
                                 ::testing::ValuesIn(inShapes),
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(CommonTestUtils::DEVICE_GNA)),
                         ConcatLayerTest::getTestCaseName);
index 47550d8..0d89cba 100644 (file)
@@ -11,16 +11,6 @@ using namespace LayerTestsDefinitions;
 
 namespace {
 
-// Common params
-const std::vector<InferenceEngine::Precision> inputPrecisions = {
-        InferenceEngine::Precision::FP32,
-// TODO: Issue: 26570
-//      InferenceEngine::Precision::FP16,
-        InferenceEngine::Precision::U8,
-// TODO: Issue: 26570
-//      InferenceEngine::Precision::I8  // Too much cases
-};
-
 const std::vector<InferenceEngine::Precision> netPrecisions = {
         InferenceEngine::Precision::FP32,
         InferenceEngine::Precision::FP16
@@ -66,7 +56,6 @@ const auto conv2DParams_AutoPadValid = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(DISABLED_Convolution2D_ExplicitPadding, ConvolutionLayerTest,
                         ::testing::Combine(
                                 conv2DParams_ExplicitPadding,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 1})),
                                 ::testing::Values(CommonTestUtils::DEVICE_GNA)),
@@ -75,7 +64,6 @@ INSTANTIATE_TEST_CASE_P(DISABLED_Convolution2D_ExplicitPadding, ConvolutionLayer
 INSTANTIATE_TEST_CASE_P(DISABLED_Convolution2D_AutoPadValid, ConvolutionLayerTest,
                         ::testing::Combine(
                                 conv2DParams_AutoPadValid,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 1})),
                                 ::testing::Values(CommonTestUtils::DEVICE_GNA)),
diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/multiply.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/multiply.cpp
new file mode 100644 (file)
index 0000000..832ef00
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include "single_layer_tests/multiply.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+
+std::vector<std::vector<std::vector<size_t>>> inShapes = {
+        {{2}},
+        {{1, 1, 1, 3}},
+        {{1, 2, 4}},
+        {{1, 4, 4}},
+        {{1, 4, 4, 1}},
+        {{1, 1, 1, 1, 1, 1, 3}},
+        {{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}}
+};
+
+std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
+                                                         InferenceEngine::Precision::FP16,
+};
+
+INSTANTIATE_TEST_CASE_P(multiply, MultiplyLayerTest,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(inShapes),
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values(CommonTestUtils::DEVICE_GNA)),
+                        MultiplyLayerTest::getTestCaseName);
+}  // namespace
index 958ef75..aebed11 100644 (file)
@@ -12,28 +12,19 @@ using namespace NGraphFunctions;
 using namespace LayerTestsDefinitions;
 
 namespace {
-// Common params
-const std::vector<InferenceEngine::Precision> inputPrecisions = {
-        InferenceEngine::Precision::FP32,
-// TODO: Issue: 26570
-//      InferenceEngine::Precision::FP16,
-        InferenceEngine::Precision::U8,
-// TODO: Issue: 26570
-//      InferenceEngine::Precision::I8
-};
 
 const std::vector<InferenceEngine::Precision> netPrecisions = {
         InferenceEngine::Precision::FP16
 };
 
 const std::vector<std::vector<size_t >> kernels = {{3, 3},
-                                                          {3, 5}};
+                                                   {3, 5}};
 const std::vector<std::vector<size_t >> strides = {{1, 1},
-                                                          {1, 2}};
+                                                   {1, 2}};
 const std::vector<std::vector<size_t >> padBegins = {{0, 0},
-                                                            {0, 2}};
+                                                     {0, 2}};
 const std::vector<std::vector<size_t >> padEnds = {{0, 0},
-                                                          {0, 2}};
+                                                   {0, 2}};
 const std::vector<ngraph::op::RoundingType> roundingTypes = {ngraph::op::RoundingType::CEIL,
                                                              ngraph::op::RoundingType::FLOOR};
 /* ========== Max Pooling ========== */
@@ -53,9 +44,8 @@ const auto maxPool_ExplicitPad_FloorRounding_Params = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(DISABLED_MaxPool_ExplicitPad_FloorRounding, PoolingLayerTest,
                         ::testing::Combine(
                                 maxPool_ExplicitPad_FloorRounding_Params,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
-                                ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
+                                ::testing::Values(std::vector<size_t>({1, 3, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_GNA)),
                         PoolingLayerTest::getTestCaseName);
 
@@ -75,9 +65,8 @@ const auto maxPool_ExplicitPad_CeilRounding_Params = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(DISABLED_MaxPool_ExplicitPad_CeilRounding, PoolingLayerTest,
                         ::testing::Combine(
                                 maxPool_ExplicitPad_CeilRounding_Params,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
-                                ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
+                                ::testing::Values(std::vector<size_t>({1, 3, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_GNA)),
                         PoolingLayerTest::getTestCaseName);
 
@@ -100,9 +89,8 @@ const auto avgPoolExplicitPadCeilRoundingParams = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(DISABLED_AvgPool_ExplicitPad_CeilRounding, PoolingLayerTest,
                         ::testing::Combine(
                                 avgPoolExplicitPadCeilRoundingParams,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
-                                ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
+                                ::testing::Values(std::vector<size_t>({1, 3, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_GNA)),
                         PoolingLayerTest::getTestCaseName);
 /* ========== Explicit Pad Floor Rounding ========== */
@@ -122,9 +110,8 @@ const auto avgPoolExplicitPadFloorRoundingParams = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(DISABLED_AvgPool_ExplicitPad_FloorRounding, PoolingLayerTest,
                         ::testing::Combine(
                                 avgPoolExplicitPadFloorRoundingParams,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
-                                ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
+                                ::testing::Values(std::vector<size_t>({1, 3, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_GNA)),
                         PoolingLayerTest::getTestCaseName);
 
@@ -145,9 +132,8 @@ const auto allPools_ValidPad_Params = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(DISABLED_MAX_and_AVGPool_ValidPad, PoolingLayerTest,
                         ::testing::Combine(
                                 allPools_ValidPad_Params,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
-                                ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
+                                ::testing::Values(std::vector<size_t>({1, 3, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_GNA)),
                         PoolingLayerTest::getTestCaseName);
 }  // namespace
\ No newline at end of file
index 8d69d39..47e5752 100644 (file)
 using namespace LayerTestsDefinitions;
 
 namespace {
-// Common params
-const std::vector<InferenceEngine::Precision> inputPrecisions = {
-        InferenceEngine::Precision::FP32,
-        InferenceEngine::Precision::U8
-// TODO: Issue: 26570
-//      InferenceEngine::Precision::I8
-};
-
 const std::vector<InferenceEngine::Precision> netPrecisions = {
         InferenceEngine::Precision::FP32,
         InferenceEngine::Precision::FP16
@@ -28,7 +20,6 @@ INSTANTIATE_TEST_CASE_P(DISABLED_NumSplitsCheck, SplitLayerTest,
                         ::testing::Combine(
                                 ::testing::Values(1),
                                 ::testing::Values(0, 1),
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_GNA)),
diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp
new file mode 100644 (file)
index 0000000..f0ca9ec
--- /dev/null
@@ -0,0 +1,49 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+#include <vector>
+#include "subgraph_tests/reshape_squeeze_reshape_relu.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+    std::vector<std::vector<std::vector<size_t>>> inputs{
+            {{1, 1, 3}, {0, 1}},
+            {{1, 1, 3}, {0}},
+            {{1, 1, 3}, {1}},
+            {{1, 3, 1}, {0, 2}},
+            {{1, 3, 1}, {0}},
+            {{1, 3, 1}, {2}},
+            {{3, 1, 1}, {1, 2}},
+            {{3, 1, 1}, {1}},
+            {{3, 1, 1}, {2}},
+            {{4, 1, 3, 1}, {1, 3}},
+            {{4, 1, 1, 3}, {1, 2}},
+            {{1, 4, 1, 3}, {0, 2}},
+            {{1, 3, 5, 2, 1}, {0, 4}},
+            {{3, 1, 2, 4, 4, 3}, {1}},
+            {{1, 1, 1, 1, 1, 3}, {0, 1, 2, 3, 4}},
+            {{1, 1, 1, 1, 1, 3}, {1, 3}},
+            {{1}, {0}},
+    };
+
+    std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
+                                                             InferenceEngine::Precision::FP16,
+    };
+
+    INSTANTIATE_TEST_CASE_P(reshape_squeeze_reshape_relu, ReshapeSqueezeReshapeRelu,
+                            ::testing::Combine(
+                                    ::testing::ValuesIn(inputs),
+                                    ::testing::ValuesIn(netPrecisions),
+                                    ::testing::Values(CommonTestUtils::DEVICE_GNA),
+                                    ::testing::Values(true)),
+                            ReshapeSqueezeReshapeRelu::getTestCaseName);
+
+    INSTANTIATE_TEST_CASE_P(reshape_unsqueeze_reshape_relu, ReshapeSqueezeReshapeRelu,
+                            ::testing::Combine(
+                                    ::testing::ValuesIn(inputs),
+                                    ::testing::ValuesIn(netPrecisions),
+                                    ::testing::Values(CommonTestUtils::DEVICE_GNA),
+                                    ::testing::Values(false)),
+                            ReshapeSqueezeReshapeRelu::getTestCaseName);
+}  // namespace
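Each entry in `inputs` pairs a tensor shape with the axes the subgraph squeezes (or, in the second instantiation, unsqueezes). Assuming standard Squeeze semantics, where only size-1 axes may be removed, a hypothetical helper showing the shape arithmetic:

#include <cassert>
#include <cstddef>
#include <set>
#include <vector>

// Illustration only: drop the listed axes, which must all have extent 1.
std::vector<size_t> squeezeShape(const std::vector<size_t>& shape,
                                 const std::set<size_t>& axes) {
    std::vector<size_t> out;
    for (size_t i = 0; i < shape.size(); ++i) {
        if (axes.count(i)) {
            assert(shape[i] == 1 && "only size-1 axes can be squeezed");
            continue;
        }
        out.push_back(shape[i]);
    }
    return out;
}
// e.g. squeezeShape({1, 3, 1}, {0, 2}) yields {3}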
index d361c60..5ffac1c 100644 (file)
@@ -8,13 +8,6 @@
 #include "common_test_utils/test_constants.hpp"
 
 using namespace LayerTestsDefinitions;
-
-const std::vector<InferenceEngine::Precision> inputPrecisions = {
-        InferenceEngine::Precision::FP32,
-        InferenceEngine::Precision::U8
-//        InferenceEngine::Precision::I8 : Not supported by PLUGIN
-};
-
 const std::vector<InferenceEngine::Precision> netPrecisions = {
         InferenceEngine::Precision::FP32,
         InferenceEngine::Precision::FP16
@@ -22,7 +15,6 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
 // TODO: Issue: 26421 (Concat issue)
 INSTANTIATE_TEST_CASE_P(DISABLED_ReshapeNoReshape, SplitConvConcat,
                         ::testing::Combine(
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 6, 40, 40})),
                                 ::testing::Values(CommonTestUtils::DEVICE_GNA)),
index 75f288e..b67bc3f 100644 (file)
@@ -27,7 +27,8 @@ const std::vector<ActivationTypes> activationTypes = {
         Exp,
         Log,
         Sign,
-        Abs
+        Abs,
+        Gelu
 };
 
 const auto basicCases = ::testing::Combine(
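The GPU activation instances gain Gelu. Assuming the exact erf-based definition of the operation, a one-line reference sketch of the added activation:

#include <cmath>

// gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))  -- assumed exact (erf) variant
float geluRef(float x) {
    return 0.5f * x * (1.0f + std::erf(x / std::sqrt(2.0f)));
}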
index ee7bf5e..9277843 100644 (file)
@@ -19,10 +19,6 @@ std::vector<std::vector<std::vector<size_t>>> inShapes = {
         {{10, 10, 10, 10}, {10, 10, 10, 10}, {10, 10, 10, 10}, {10, 10, 10, 10}},
         {{10, 10, 10, 10}, {10, 10, 10, 10}, {10, 10, 10, 10}, {10, 10, 10, 10}, {10, 10, 10, 10}}
 };
-std::vector<InferenceEngine::Precision> inputPrecisions = {InferenceEngine::Precision::FP32,
-                                                           InferenceEngine::Precision::FP16,
-                                                           InferenceEngine::Precision::U8
-};
 std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
                                                          InferenceEngine::Precision::FP16};
 
@@ -31,7 +27,6 @@ INSTANTIATE_TEST_CASE_P(NoReshape, ConcatLayerTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(axes),
                                 ::testing::ValuesIn(inShapes),
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(CommonTestUtils::DEVICE_GPU)),
                         ConcatLayerTest::getTestCaseName);
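For the concat cases above, the usual Concat rule applies: the output keeps every dimension except the concat axis, which becomes the sum of the inputs' extents there. A hypothetical helper:

#include <cstddef>
#include <vector>

// Illustration only: all inputs are assumed to agree on the non-concat axes.
std::vector<size_t> concatOutShape(const std::vector<std::vector<size_t>>& shapes,
                                   size_t axis) {
    std::vector<size_t> out = shapes.front();
    for (size_t i = 1; i < shapes.size(); ++i)
        out[axis] += shapes[i][axis];
    return out;
}
// e.g. five {10, 10, 10, 10} inputs concatenated on axis 1 -> {10, 50, 10, 10}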
index 23853c7..42198d3 100644 (file)
 using namespace LayerTestsDefinitions;
 
 namespace {
-
-// Common params
-const std::vector<InferenceEngine::Precision> inputPrecisions = {
-        InferenceEngine::Precision::FP32,
-        InferenceEngine::Precision::FP16,
-        InferenceEngine::Precision::U8
-};
-
 const std::vector<InferenceEngine::Precision> netPrecisions = {
         InferenceEngine::Precision::FP32,
         InferenceEngine::Precision::FP16
@@ -61,7 +53,6 @@ const auto conv2DParams_AutoPadValid = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(Convolution2D_ExplicitPadding, ConvolutionLayerTest,
                         ::testing::Combine(
                                 conv2DParams_ExplicitPadding,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_GPU)),
@@ -70,7 +61,6 @@ INSTANTIATE_TEST_CASE_P(Convolution2D_ExplicitPadding, ConvolutionLayerTest,
 INSTANTIATE_TEST_CASE_P(Convolution2D_AutoPadValid, ConvolutionLayerTest,
                         ::testing::Combine(
                                 conv2DParams_AutoPadValid,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_GPU)),
@@ -111,7 +101,6 @@ const auto conv3DParams_FP16 = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(Convolution3D_FP32, ConvolutionLayerTest,
                         ::testing::Combine(
                                 conv3DParams_FP32,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::Values(InferenceEngine::Precision::FP32),
                                 ::testing::Values(std::vector<size_t >({1, 3, 10, 10, 10})),
                                 ::testing::Values(CommonTestUtils::DEVICE_GPU)),
@@ -120,23 +109,9 @@ INSTANTIATE_TEST_CASE_P(Convolution3D_FP32, ConvolutionLayerTest,
 INSTANTIATE_TEST_CASE_P(Convolution3D_FP16, ConvolutionLayerTest,
                         ::testing::Combine(
                                 conv3DParams_FP16,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::Values(InferenceEngine::Precision::FP16),
                                 ::testing::Values(std::vector<size_t >({1, 3, 10, 10, 10})),
                                 ::testing::Values(CommonTestUtils::DEVICE_GPU)),
                         ConvolutionLayerTest::getTestCaseName);
 
-const std::vector<std::vector<size_t >> targetReshapeShapes = {{2, 3, 10, 10, 10},
-                                                                      {1, 3, 12, 12, 12},
-                                                                      {2, 3, 12, 13, 14}};
-const auto convReshape = ::testing::Combine(
-        ::testing::Values(std::vector<size_t >({3, 3, 3})),
-        ::testing::Values(std::vector<size_t >({1, 1, 1})),
-        ::testing::Values(std::vector<ptrdiff_t>({0, 0, 0})),
-        ::testing::Values(std::vector<ptrdiff_t>({0, 0, 0})),
-        ::testing::Values(std::vector<size_t >({1, 1, 1})),
-        ::testing::Values(5),
-        ::testing::ValuesIn(padTypes)
-);
-
 }  // namespace
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/group_convolution.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/group_convolution.cpp
new file mode 100644 (file)
index 0000000..1af41c0
--- /dev/null
@@ -0,0 +1,108 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "single_layer_tests/group_convolution.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+        InferenceEngine::Precision::FP32
+};
+
+/* ============= 2D GroupConvolution ============= */
+const std::vector<std::vector<size_t >> kernels = {{3, 3}};
+const std::vector<std::vector<size_t >> strides = {{1, 1}};
+const std::vector<std::vector<ptrdiff_t>> padBegins = {{0, 0}};
+const std::vector<std::vector<ptrdiff_t>> padEnds = {{0, 0}};
+const std::vector<std::vector<size_t >> dilations = {{1, 1}};
+const std::vector<size_t> numOutChannels = {8, 16};
+const std::vector<size_t> numGroups = {2, 8};
+
+const auto groupConv2DParams_ExplicitPadding = ::testing::Combine(
+        ::testing::ValuesIn(kernels),
+        ::testing::ValuesIn(strides),
+        ::testing::ValuesIn(padBegins),
+        ::testing::ValuesIn(padEnds),
+        ::testing::ValuesIn(dilations),
+        ::testing::ValuesIn(numOutChannels),
+        ::testing::ValuesIn(numGroups),
+        ::testing::Values(ngraph::op::PadType::EXPLICIT)
+);
+const auto groupConv2DParams_AutoPadValid = ::testing::Combine(
+        ::testing::ValuesIn(kernels),
+        ::testing::ValuesIn(strides),
+        ::testing::Values(std::vector<ptrdiff_t>({0, 0})),
+        ::testing::Values(std::vector<ptrdiff_t>({0, 0})),
+        ::testing::ValuesIn(dilations),
+        ::testing::ValuesIn(numOutChannels),
+        ::testing::ValuesIn(numGroups),
+        ::testing::Values(ngraph::op::PadType::VALID)
+);
+
+INSTANTIATE_TEST_CASE_P(GroupConvolution2D_ExplicitPadding, GroupConvolutionLayerTest,
+                        ::testing::Combine(
+                                groupConv2DParams_ExplicitPadding,
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values(std::vector<size_t >({1, 16, 30, 30})),
+                                ::testing::Values(CommonTestUtils::DEVICE_GPU)),
+                        GroupConvolutionLayerTest::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(GroupConvolution2D_AutoPadValid, GroupConvolutionLayerTest,
+                        ::testing::Combine(
+                                groupConv2DParams_AutoPadValid,
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values(std::vector<size_t >({1, 16, 30, 30})),
+                                ::testing::Values(CommonTestUtils::DEVICE_GPU)),
+                        GroupConvolutionLayerTest::getTestCaseName);
+
+/* ============= 3D GroupConvolution ============= */
+const std::vector<std::vector<size_t >> kernels3d = {{3, 3, 3}};
+const std::vector<std::vector<ptrdiff_t>> paddings3d = {{0, 0, 0}};
+
+const std::vector<std::vector<size_t >> strides3d = {{1, 1, 1}};
+const std::vector<std::vector<size_t >> dilations3d = {{1, 1, 1}};
+
+const auto groupConv3DParams_ExplicitPadding = ::testing::Combine(
+        ::testing::ValuesIn(kernels3d),
+        ::testing::ValuesIn(strides3d),
+        ::testing::ValuesIn(paddings3d),
+        ::testing::ValuesIn(paddings3d),
+        ::testing::ValuesIn(dilations3d),
+        ::testing::Values(4),
+        ::testing::Values(2),
+        ::testing::Values(ngraph::op::PadType::EXPLICIT)
+);
+const auto groupConv3DParams_AutoPadValid = ::testing::Combine(
+        ::testing::ValuesIn(kernels3d),
+        ::testing::ValuesIn(strides3d),
+        ::testing::Values(std::vector<ptrdiff_t>({0, 0, 0})),
+        ::testing::Values(std::vector<ptrdiff_t>({0, 0, 0})),
+        ::testing::ValuesIn(dilations3d),
+        ::testing::Values(4),
+        ::testing::Values(2),
+        ::testing::Values(ngraph::op::PadType::VALID)
+);
+
+INSTANTIATE_TEST_CASE_P(GroupConvolution3D_ExplicitPadding, GroupConvolutionLayerTest,
+                        ::testing::Combine(
+                                groupConv3DParams_ExplicitPadding,
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values(std::vector<size_t >({1, 4, 10, 10, 10})),
+                                ::testing::Values(CommonTestUtils::DEVICE_GPU)),
+                        GroupConvolutionLayerTest::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(GroupConvolution3D_AutoPadValid, GroupConvolutionLayerTest,
+                        ::testing::Combine(
+                                groupConv3DParams_AutoPadValid,
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values(std::vector<size_t >({1, 4, 10, 10, 10})),
+                                ::testing::Values(CommonTestUtils::DEVICE_GPU)),
+                        GroupConvolutionLayerTest::getTestCaseName);
+
+}  // namespace
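The new GroupConvolution instances feed a {1, 16, 30, 30} input with numGroups of 2 or 8. Assuming the usual grouped-convolution rules, in which input and output channels must divide evenly across groups, a hypothetical helper for the VALID-padding output shape used by groupConv2DParams_AutoPadValid:

#include <cassert>
#include <cstddef>
#include <vector>

// Illustration only: stride 1, dilation 1, no padding (PadType::VALID).
std::vector<size_t> groupConvOutShape(const std::vector<size_t>& in,  // {N, C, H, W}
                                      size_t outChannels, size_t groups,
                                      size_t kH, size_t kW) {
    assert(in[1] % groups == 0 && outChannels % groups == 0);
    return {in[0], outChannels, in[2] - kH + 1, in[3] - kW + 1};
}
// e.g. groupConvOutShape({1, 16, 30, 30}, 8, 2, 3, 3) yields {1, 8, 28, 28}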
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/lrn.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/lrn.cpp
new file mode 100644 (file)
index 0000000..3d08b0c
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright (C) 2020 Intel Corporation
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "single_layer_tests/lrn.hpp"
+
+#include <vector>
+
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+const std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
+                                                               InferenceEngine::Precision::FP16};
+
+const double alpha = 9.9e-05;
+const size_t beta = 2;
+const size_t bias = 1;
+const size_t size = 5;
+
+INSTANTIATE_TEST_CASE_P(LrnCheck, LrnLayerTest,
+                        ::testing::Combine(::testing::Values(alpha),
+                                           ::testing::Values(beta),
+                                           ::testing::Values(bias),
+                                           ::testing::Values(size),
+                                           ::testing::ValuesIn(netPrecisions),
+                                           ::testing::Values(std::vector<size_t>({10, 10, 3, 2})),
+                                           ::testing::Values(CommonTestUtils::DEVICE_GPU)),
+                        LrnLayerTest::getTestCaseName);
+
+}  // namespace
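The LRN instance pins alpha, beta, bias, and size to single values. Assuming the standard across-channel definition (the same form as ONNX LRN), a sketch of the computation these parameters feed:

#include <algorithm>
#include <cmath>
#include <vector>

// Illustration only: y[c] = x[c] / pow(bias + (alpha / size) * sum(x[j]^2), beta),
// where j runs over a window of `size` channels centered on c.
float lrnRef(const std::vector<float>& x, size_t c,
             double alpha, double beta, double bias, size_t size) {
    const size_t half = size / 2;
    const size_t lo = c >= half ? c - half : 0;
    const size_t hi = std::min(x.size() - 1, c + half);
    double sum = 0.0;
    for (size_t j = lo; j <= hi; ++j) sum += double(x[j]) * double(x[j]);
    return float(x[c] / std::pow(bias + (alpha / size) * sum, beta));
}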
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/multiply.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/multiply.cpp
new file mode 100644 (file)
index 0000000..1ee879b
--- /dev/null
@@ -0,0 +1,32 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include "single_layer_tests/multiply.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+
+    std::vector<std::vector<std::vector<size_t>>> inShapes = {
+            {{2}},
+            {{1, 1, 1, 3}},
+            {{1, 2, 4}},
+            {{1, 4, 4}},
+            {{1, 4, 4, 1}},
+            {{1, 2, 3, 4, 5}},
+    };
+
+    std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
+                                                             InferenceEngine::Precision::FP16,
+    };
+
+    INSTANTIATE_TEST_CASE_P(multiply, MultiplyLayerTest,
+                            ::testing::Combine(
+                                    ::testing::ValuesIn(inShapes),
+                                    ::testing::ValuesIn(netPrecisions),
+                                    ::testing::Values(CommonTestUtils::DEVICE_GPU)),
+                            MultiplyLayerTest::getTestCaseName);
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/mvn.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/mvn.cpp
new file mode 100644 (file)
index 0000000..8cbd56f
--- /dev/null
@@ -0,0 +1,51 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "single_layer_tests/mvn.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+const std::vector<std::vector<size_t>> inputShapes = {
+    {1, 32, 17},
+    {1, 37, 9},
+    {1, 16, 5, 8},
+    {2, 19, 5, 10},
+    {7, 32, 2, 8},
+    {5, 8, 3, 5},
+    {4, 41, 6, 9},
+    {1, 32, 8, 1, 6},
+    {1, 9, 1, 15, 9},
+    {6, 64, 6, 1, 18},
+    {2, 31, 2, 9, 1},
+    {10, 16, 5, 10, 6}
+};
+
+const std::vector<bool> acrossChannels = {
+    true,
+    false
+};
+
+const std::vector<bool> normalizeVariance = {
+    true,
+    false
+};
+
+const std::vector<double> epsilon = {
+    0.000000001
+};
+
+const auto MvnCases = ::testing::Combine(
+    ::testing::ValuesIn(inputShapes),
+    ::testing::Values(InferenceEngine::Precision::FP32),
+    ::testing::ValuesIn(acrossChannels),
+    ::testing::ValuesIn(normalizeVariance),
+    ::testing::ValuesIn(epsilon),
+    ::testing::Values(CommonTestUtils::DEVICE_GPU)
+);
+
+INSTANTIATE_TEST_CASE_P(smoke_CLDNN_TestsMVN, MvnLayerTest, MvnCases, MvnLayerTest::getTestCaseName);
+
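The MVN cases sweep acrossChannels and normalizeVariance with a fixed epsilon. Assuming standard MVN semantics, each normalization slice is mean-centered and, when normalizeVariance is set, divided by the standard deviation; a sketch:

#include <cmath>
#include <vector>

// Illustration only: one slice is either a single channel or, with
// acrossChannels, all channels of one batch element.
void mvnRef(std::vector<float>& slice, bool normalizeVariance, double eps) {
    double mean = 0.0;
    for (float v : slice) mean += v;
    mean /= slice.size();
    double var = 0.0;
    for (float v : slice) var += (v - mean) * (v - mean);
    var /= slice.size();
    const double denom = normalizeVariance ? std::sqrt(var + eps) : 1.0;
    for (float& v : slice) v = float((v - mean) / denom);
}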
index eda432e..8ddd708 100644 (file)
@@ -12,13 +12,6 @@ using namespace ngraph::helpers;
 using namespace LayerTestsDefinitions;
 
 namespace {
-// Common params
-const std::vector<InferenceEngine::Precision> inputPrecisions = {
-        InferenceEngine::Precision::FP32,
-        InferenceEngine::Precision::FP16,
-        InferenceEngine::Precision::U8,
-//         InferenceEngine::Precision::I8 // Too much cases
-};
 
 const std::vector<InferenceEngine::Precision> netPrecisions = {
         InferenceEngine::Precision::FP32,
@@ -52,7 +45,6 @@ const auto maxPool_ExplicitPad_FloorRounding_Params = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(MaxPool_ExplicitPad_FloorRounding, PoolingLayerTest,
                         ::testing::Combine(
                                 maxPool_ExplicitPad_FloorRounding_Params,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 50, 50})),
                                 ::testing::Values(CommonTestUtils::DEVICE_GPU)),
@@ -75,7 +67,6 @@ const auto maxPool_ExplicitPad_CeilRounding_Params = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(MaxPool_ExplicitPad_CeilRounding, PoolingLayerTest,
                         ::testing::Combine(
                                 maxPool_ExplicitPad_CeilRounding_Params,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 50, 50})),
                                 ::testing::Values(CommonTestUtils::DEVICE_GPU)),
@@ -100,7 +91,6 @@ const auto avgPoolExplicitPadCeilRoundingParams = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(AvgPool_ExplicitPad_CeilRounding, PoolingLayerTest,
                         ::testing::Combine(
                                 avgPoolExplicitPadCeilRoundingParams,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_GPU)),
@@ -123,7 +113,6 @@ const auto avgPoolExplicitPadFloorRoundingParams = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(AvgPool_ExplicitPad_FloorRounding, PoolingLayerTest,
                         ::testing::Combine(
                                 avgPoolExplicitPadFloorRoundingParams,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_GPU)),
@@ -146,7 +135,6 @@ const auto allPools_ValidPad_Params = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(MAX_and_AVGPool_ValidPad, PoolingLayerTest,
                         ::testing::Combine(
                                 allPools_ValidPad_Params,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 50, 50})),
                                 ::testing::Values(CommonTestUtils::DEVICE_GPU)),
index 6f858ac..035548c 100644 (file)
@@ -9,12 +9,6 @@
 using namespace LayerTestsDefinitions;
 
 namespace {
-// Common params
-const std::vector<InferenceEngine::Precision> inputPrecisions = {
-            InferenceEngine::Precision::FP32,
-            InferenceEngine::Precision::U8
-};
-
 const std::vector<InferenceEngine::Precision> netPrecisions = {
             InferenceEngine::Precision::FP32,
             InferenceEngine::Precision::FP16
@@ -24,7 +18,6 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
 INSTANTIATE_TEST_CASE_P(DISABLED_ReshapeCheckDynBatch, ReshapeLayerTest,
         ::testing::Combine(
                 ::testing::Values(true),
-                ::testing::ValuesIn(inputPrecisions),
                 ::testing::ValuesIn(netPrecisions),
                 ::testing::Values(std::vector<size_t>({1, 16, 16, 16})),
                 ::testing::Values(std::vector<size_t>({1, 0, 256})),
@@ -35,7 +28,6 @@ INSTANTIATE_TEST_CASE_P(DISABLE_ReshapeCheckDynBatch, ReshapeLayerTest,
 INSTANTIATE_TEST_CASE_P(ReshapeCheck, ReshapeLayerTest,
         ::testing::Combine(
                 ::testing::Values(true),
-                ::testing::ValuesIn(inputPrecisions),
                 ::testing::ValuesIn(netPrecisions),
                 ::testing::Values(std::vector<size_t>({10, 10, 10, 10})),
                 ::testing::Values(std::vector<size_t>({10, 0, 100})),
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/select.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/select.cpp
new file mode 100644 (file)
index 0000000..320c6d1
--- /dev/null
@@ -0,0 +1,66 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "single_layer_tests/select.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+const std::vector<InferenceEngine::Precision> inputPrecision = {
+    InferenceEngine::Precision::U8,
+    InferenceEngine::Precision::I16,
+    InferenceEngine::Precision::FP32
+};
+
+const std::vector<std::vector<std::vector<size_t>>> noneShapes = {
+    {{1}, {1}, {1}},
+    {{8}, {8}, {8}},
+    {{4, 5}, {4, 5}, {4, 5}},
+    {{3, 4, 5}, {3, 4, 5}, {3, 4, 5}},
+    {{2, 3, 4, 5}, {2, 3, 4, 5}, {2, 3, 4, 5}}
+};
+
+const auto noneCases = ::testing::Combine(
+    ::testing::ValuesIn(noneShapes),
+    ::testing::ValuesIn(inputPrecision),
+    ::testing::Values(ngraph::op::AutoBroadcastSpec::NONE),
+    ::testing::Values(CommonTestUtils::DEVICE_GPU)
+);
+
+const std::vector<std::vector<std::vector<size_t>>> numpyShapes = {
+    {{1}, {16}, {1}},
+    {{1}, {1}, {16}},
+    {{1}, {8}, {8}},
+    {{8}, {1}, {8}},
+    {{8}, {8}, {8}},
+    {{4, 1}, {1}, {4, 8}},
+    {{3, 8}, {8}, {3, 1}},
+    {{8, 1}, {8, 1}, {8, 1}},
+    {{1}, {5, 8}, {5, 8}},
+    {{8, 1, 1}, {8, 1, 1}, {2, 5}},
+    {{8, 1}, {6, 8, 1}, {6, 1, 1}},
+    {{5, 1}, {8, 1, 7}, {5, 7}},
+    {{2, 8, 1}, {2, 8, 9}, {2, 1, 9}},
+    {{1, 4}, {8, 1, 1, 1}, {4}},
+    {{5, 4, 1}, {8, 5, 1, 1}, {4, 1}},
+    {{1, 4}, {6, 1, 8, 1}, {6, 1, 8, 4}},
+    {{7, 3, 1, 8}, {7, 1, 1, 8}, {3, 2, 8}},
+    {{1, 3, 1}, {8, 2, 3, 1}, {3, 9}},
+    {{5, 1, 8}, {2, 1, 9, 8}, {2, 5, 9, 8}},
+    {{6, 1, 1, 8}, {6, 7, 1, 8}, {2, 1}},
+    {{5, 1, 1, 1}, {5, 7, 8, 6}, {1, 8, 6}}
+};
+
+const auto numpyCases = ::testing::Combine(
+    ::testing::ValuesIn(numpyShapes),
+    ::testing::ValuesIn(inputPrecision),
+    ::testing::Values(ngraph::op::AutoBroadcastSpec::NUMPY),
+    ::testing::Values(CommonTestUtils::DEVICE_GPU)
+);
+
+INSTANTIATE_TEST_CASE_P(smoke_CLDNN_TestsSelect_none, SelectLayerTest, noneCases, SelectLayerTest::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(smoke_CLDNN_TestsSelect_numpy, SelectLayerTest, numpyCases, SelectLayerTest::getTestCaseName);
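numpyShapes exercises NUMPY auto-broadcast, which aligns shapes from the right and requires each aligned dimension pair to be equal or to contain a 1. A hypothetical checker for the broadcast result of two shapes (Select applies this pairwise across its three inputs):

#include <algorithm>
#include <cstddef>
#include <vector>

// Illustration only: assumes the shapes are compatible, as in numpyShapes.
std::vector<size_t> broadcastShapes(std::vector<size_t> a, std::vector<size_t> b) {
    if (a.size() < b.size()) std::swap(a, b);
    b.insert(b.begin(), a.size() - b.size(), 1);  // left-pad with ones
    std::vector<size_t> out(a.size());
    for (size_t i = 0; i < a.size(); ++i)
        out[i] = std::max(a[i], b[i]);
    return out;
}
// e.g. broadcastShapes({8, 1}, {6, 8, 1}) yields {6, 8, 1}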
index 85e2ea9..f2a85ba 100644 (file)
 using namespace LayerTestsDefinitions;
 
 namespace {
-// Common params
-const std::vector<InferenceEngine::Precision> inputPrecisions = {
-        InferenceEngine::Precision::FP32,
-        InferenceEngine::Precision::FP16,
-        InferenceEngine::Precision::U8,
-};
 
 const std::vector<InferenceEngine::Precision> netPrecisions = {
         InferenceEngine::Precision::FP32,
@@ -26,7 +20,6 @@ INSTANTIATE_TEST_CASE_P(NumSplitsCheck, SplitLayerTest,
                         ::testing::Combine(
                                 ::testing::Values(1),
                                 ::testing::Values(0, 1, 2, 3),
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({30, 30, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_GPU)),
index 348bf9b..ee39ed6 100644 (file)
@@ -14,32 +14,25 @@ namespace {
 stridedSliceParamsTuple ss_only_test_cases[] = {
         stridedSliceParamsTuple({ 2, 2, 2, 2 }, { 0, 0, 0, 0 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 },
                        {1, 1, 1, 1}, {1, 1, 1, 1},  {1, 1, 1, 1},  {1, 1, 1, 1},  {1, 1, 1, 1},
-                                InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32,
-                                CommonTestUtils::DEVICE_GPU),
+                                InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
         stridedSliceParamsTuple({ 2, 2, 2, 2 }, { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 },
                        {0, 0, 0, 0}, {1, 1, 1, 1},  {1, 1, 1, 1},  {1, 1, 1, 1},  {1, 1, 1, 1},
-                                InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32,
-                                CommonTestUtils::DEVICE_GPU),
+                                InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
         stridedSliceParamsTuple({ 2, 2, 2, 2 }, { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 },
                        {0, 0, 0, 0}, {0, 0, 0, 0},  {1, 1, 1, 1},  {1, 1, 1, 1},  {1, 1, 1, 1},
-                                InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32,
-                                CommonTestUtils::DEVICE_GPU),
+                                InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
         stridedSliceParamsTuple({ 2, 2, 4, 3 }, { 0, 0, 0, 0 }, { 2, 2, 4, 3 }, { 1, 1, 2, 1 },
                        {1, 1, 1, 1}, {1, 1, 1, 1},  {1, 1, 1, 1},  {1, 1, 1, 1},  {1, 1, 1, 1},
-                                InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32,
-                                CommonTestUtils::DEVICE_GPU),
+                                InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
         stridedSliceParamsTuple({ 2, 2, 4, 2 }, { 1, 0, 0, 1 }, { 2, 2, 4, 2 }, { 1, 1, 2, 1 },
                        {0, 1, 1, 0}, {1, 1, 0, 0},  {1, 1, 1, 1},  {1, 1, 1, 1},  {1, 1, 1, 1},
-                                InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32,
-                                CommonTestUtils::DEVICE_GPU),
+                                InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
         stridedSliceParamsTuple({ 1, 2, 4, 2 }, { 1, 0, 0, 0 }, { 1, 2, 4, 2 }, { 1, 1, -2, -1 },
                        {1, 1, 1, 1}, {1, 1, 1, 1},  {1, 1, 1, 1},  {1, 1, 1, 1},  {1, 1, 1, 1},
-                                InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32,
-                                CommonTestUtils::DEVICE_GPU),
+                                InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
         stridedSliceParamsTuple({ 2, 2, 4, 2 }, { 1, 0, 0, 0 }, { 1, 2, 4, 2 }, { 1, 1, -2, -1 },
                        {0, 1, 1, 1}, {1, 1, 1, 1},  {1, 1, 1, 1},  {1, 1, 1, 1},  {1, 1, 1, 1},
-                                InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32,
-                                CommonTestUtils::DEVICE_GPU),
+                                InferenceEngine::Precision::FP32, CommonTestUtils::DEVICE_GPU),
 };
 
 INSTANTIATE_TEST_CASE_P(
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp
new file mode 100644 (file)
index 0000000..0613596
--- /dev/null
@@ -0,0 +1,49 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+#include <vector>
+#include "subgraph_tests/reshape_squeeze_reshape_relu.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+    std::vector<std::vector<std::vector<size_t>>> inputs_squeeze {
+            {{1, 1, 3}, {0, 1}},
+            {{1, 1, 3}, {1}},
+            {{1, 3, 1}, {0, 2}},
+            {{3, 1, 1}, {1}},
+            {{1, 4, 1, 3}, {0, 2}},
+            {{3, 1, 2, 4, 4, 3}, {1}},
+            {{1, 1, 1, 1, 1, 3}, {0, 1, 2, 3, 4}},
+            {{1}, {0}},
+    };
+
+    std::vector<std::vector<std::vector<size_t>>> inputs_unsqueeze{
+            {{1}, {0}},
+            {{1}, {0, 1}},
+            {{1}, {0, 1, 2}},
+            {{1, 2, 3}, {0}},
+            {{1, 1, 3}, {1, 2}},
+            {{1, 4, 1, 3}, {0, 2}},
+    };
+
+    std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
+                                                             InferenceEngine::Precision::FP16,
+    };
+
+    INSTANTIATE_TEST_CASE_P(reshape_squeeze_reshape_relu, ReshapeSqueezeReshapeRelu,
+                            ::testing::Combine(
+                                    ::testing::ValuesIn(inputs_squeeze),
+                                    ::testing::ValuesIn(netPrecisions),
+                                    ::testing::Values(CommonTestUtils::DEVICE_GPU),
+                                    ::testing::Values(true)),
+                            ReshapeSqueezeReshapeRelu::getTestCaseName);
+
+    INSTANTIATE_TEST_CASE_P(reshape_unsqueeze_reshape_relu, ReshapeSqueezeReshapeRelu,
+                            ::testing::Combine(
+                                    ::testing::ValuesIn(inputs_unsqueeze),
+                                    ::testing::ValuesIn(netPrecisions),
+                                    ::testing::Values(CommonTestUtils::DEVICE_GPU),
+                                    ::testing::Values(false)),
+                            ReshapeSqueezeReshapeRelu::getTestCaseName);
+}  // namespace
index 8a9d3b2..12a7f0b 100644 (file)
 using namespace LayerTestsDefinitions;
 
 namespace {
-const std::vector<InferenceEngine::Precision> inputPrecisions = {
-        InferenceEngine::Precision::FP32,
-        InferenceEngine::Precision::FP16,
-        InferenceEngine::Precision::U8
-};
-
 const std::vector<InferenceEngine::Precision> netPrecisions = {
         InferenceEngine::Precision::FP32,
         InferenceEngine::Precision::FP16
@@ -23,7 +17,6 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
 
 INSTANTIATE_TEST_CASE_P(NoReshape, SplitConvConcat,
                         ::testing::Combine(
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 6, 40, 40})),
                                 ::testing::Values(CommonTestUtils::DEVICE_GPU)),
index 0bdf012..73aea95 100644 (file)
@@ -4,7 +4,7 @@
 
 #include "vpu/ngraph/operations/dynamic_shape_resolver.hpp"
 
-#include "ngraph/op/parameter.hpp"
+#include "ngraph/opsets/opset3.hpp"
 #include "ngraph/function.hpp"
 
 #include "cpp/ie_cnn_network.h"
@@ -20,12 +20,12 @@ class DynamicShapeResolverTests : public CommonTestUtils::TestsCommon {
 public:
     void SetUp() override {
         const auto tensorType  = ngraph::element::f16;
-        const auto shapeType   = ngraph::element::u64;
+        const auto shapeType   = ngraph::element::i64;
         const auto tensorShape = std::initializer_list<std::size_t>{1, 800};
 
-        const auto tensor = std::make_shared<ngraph::op::Parameter>(tensorType, ngraph::Shape{tensorShape});
-        const auto shape  = std::make_shared<ngraph::op::Parameter>(shapeType, ngraph::Shape{tensorShape.size()});
-        auto dynamicShapeResolver = std::make_shared<ngraph::op::DynamicShapeResolver>(tensor, shape);
+        const auto tensor = std::make_shared<ngraph::opset3::Parameter>(tensorType, ngraph::Shape{tensorShape});
+        const auto shape  = std::make_shared<ngraph::opset3::Parameter>(shapeType, ngraph::Shape{tensorShape.size()});
+        auto dynamicShapeResolver = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(tensor, shape);
         dynamicShapeResolver->set_friendly_name(s_FriendlyName);
         const auto function = std::make_shared<ngraph::Function>(ngraph::NodeVector{dynamicShapeResolver}, ngraph::ParameterVector{tensor, shape});
 
@@ -35,7 +35,7 @@ public:
 
 protected:
     InferenceEngine::CNNLayerPtr getDynamicShapeResolverLayer() const {
-        return cnnNetwork.getLayerByName(s_FriendlyName.c_str());
+        return cnnNetwork.getLayerByName(s_FriendlyName);
     }
     InferenceEngine::CNNNetwork cnnNetwork;
 
@@ -44,10 +44,10 @@ private:
         cnnNetwork.begin();
     }
 
-    static const std::string s_FriendlyName;
+    static const char s_FriendlyName[];
 };
 
-const std::string DynamicShapeResolverTests::s_FriendlyName = "DSR";
+const char DynamicShapeResolverTests::s_FriendlyName[] = "DSR";
 
 TEST_F(DynamicShapeResolverTests, NGraphFunctionCanBeConvertedToCNNNetwork) {
     ASSERT_EQ(cnnNetwork.getInputsInfo().size(), 2);
index 84d2112..672a893 100644 (file)
@@ -2,41 +2,46 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include <ngraph/op/parameter.hpp>
+#include <ngraph/opsets/opset3.hpp>
 #include <ngraph/function.hpp>
 
 #include <gtest/gtest.h>
 #include <common_test_utils/test_common.hpp>
+#include <common_test_utils/test_constants.hpp>
 #include <details/ie_exception.hpp>
+#include <ie_core.hpp>
+#include <ngraph/ops.hpp>
 
 #include "vpu/ngraph/operations/dynamic_shape_resolver.hpp"
 
+#include "functional_test_utils/plugin_cache.hpp"
+#include "functional_test_utils/layer_test_utils.hpp"
+
 namespace {
 
 using DataType  = ngraph::element::Type_t;
 using DimsType  = ngraph::element::Type_t;
 using DataShape = ngraph::Shape;
 
-class DynamicShapeResolverTests : public CommonTestUtils::TestsCommon, public testing::WithParamInterface<std::tuple<DataType, DimsType, DataShape>> {
+class DynamicShapeResolverTests : public CommonTestUtils::TestsCommon, public testing::WithParamInterface<std::tuple<DataType, DataShape>> {
 public:
     void SetUp() override {
         const auto& parameters = GetParam();
         const auto& dataType   = std::get<0>(parameters);
-        const auto& dimsType   = std::get<1>(parameters);
-        const auto& dataShape  = std::get<2>(parameters);
+        const auto& dataShape  = std::get<1>(parameters);
 
-        data = std::make_shared<ngraph::op::Parameter>(dataType, dataShape);
-        dims = std::make_shared<ngraph::op::Parameter>(dimsType, ngraph::Shape{dataShape.size()});
+        data = std::make_shared<ngraph::opset3::Parameter>(dataType, dataShape);
+        dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataShape.size()});
     }
 
 protected:
-    std::shared_ptr<ngraph::op::Parameter> data;
-    std::shared_ptr<ngraph::op::Parameter> dims;
+    std::shared_ptr<ngraph::opset3::Parameter> data;
+    std::shared_ptr<ngraph::opset3::Parameter> dims;
 };
 
 TEST_P(DynamicShapeResolverTests, CanValidateAndInferTypes) {
-    std::shared_ptr<ngraph::op::DynamicShapeResolver> dynamicShapeResolver;
-    ASSERT_NO_THROW(dynamicShapeResolver = std::make_shared<ngraph::op::DynamicShapeResolver>(data, dims));
+    std::shared_ptr<ngraph::vpu::op::DynamicShapeResolver> dynamicShapeResolver;
+    ASSERT_NO_THROW(dynamicShapeResolver = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims));
     ASSERT_NO_THROW(std::make_shared<ngraph::Function>(ngraph::NodeVector{dynamicShapeResolver}, ngraph::ParameterVector{data, dims}));
 }
 
@@ -74,9 +79,17 @@ std::set<ngraph::element::Type_t> allNGraphIntegralNumberTypes() {
     };
 }
 
+std::set<ngraph::element::Type_t> allNGraphStaticTypes() {
+    auto staticTypes = std::set<ngraph::element::Type_t>{};
+    const auto& allTypes = allNGraphTypes();
+    const auto& allDynamicTypes = std::set<ngraph::element::Type_t>{ngraph::element::dynamic};
+    std::set_difference(allTypes.cbegin(), allTypes.cend(), allDynamicTypes.cbegin(), allDynamicTypes.cend(),
+        std::inserter(staticTypes, staticTypes.begin()));
+    return staticTypes;
+}
+
 INSTANTIATE_TEST_CASE_P(NGraph, DynamicShapeResolverTests, testing::Combine(
-    testing::ValuesIn(allNGraphTypes()),
-    testing::ValuesIn(allNGraphIntegralNumberTypes()),
+    testing::ValuesIn(allNGraphStaticTypes()),
     testing::Values(DataShape{1, 800}, DataShape{1, 1})));
 
 
@@ -93,18 +106,29 @@ public:
         const auto& dataPartialShape  = std::get<2>(parameters);
         const auto& dimsPartialShape  = std::get<3>(parameters);
 
-        data = std::make_shared<ngraph::op::Parameter>(dataType, dataPartialShape);
-        dims = std::make_shared<ngraph::op::Parameter>(dimsType, dimsPartialShape);
+        data = std::make_shared<ngraph::opset3::Parameter>(dataType, dataPartialShape);
+        dims = std::make_shared<ngraph::opset3::Parameter>(dimsType, dimsPartialShape);
     }
 
 protected:
-    std::shared_ptr<ngraph::op::Parameter> data;
-    std::shared_ptr<ngraph::op::Parameter> dims;
+    std::shared_ptr<ngraph::opset3::Parameter> data;
+    std::shared_ptr<ngraph::opset3::Parameter> dims;
 };
 
-class DynamicShapeResolverNegativeTestsDimsType : public DynamicShapeResolverNegativeTests {};
+using DynamicShapeResolverNegativeTestsDataType = DynamicShapeResolverNegativeTests;
+TEST_P(DynamicShapeResolverNegativeTestsDataType, ThrowsOnInvalidDataType) {
+    ASSERT_THROW(std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims), ngraph::ngraph_error);
+}
+
+INSTANTIATE_TEST_CASE_P(NGraph, DynamicShapeResolverNegativeTestsDataType, testing::Combine(
+    testing::Values(ngraph::element::dynamic),
+    testing::Values(ngraph::element::i64),
+    testing::Values(DataPartialShape{1, 800}),
+    testing::Values(DataPartialShape{2})));
+
+using DynamicShapeResolverNegativeTestsDimsType = DynamicShapeResolverNegativeTests;
 TEST_P(DynamicShapeResolverNegativeTestsDimsType, ThrowsOnInvalidDimsType) {
-    ASSERT_THROW(std::make_shared<ngraph::op::DynamicShapeResolver>(data, dims), ngraph::ngraph_error);
+    ASSERT_THROW(std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims), ngraph::ngraph_error);
 }
 
 std::set<ngraph::element::Type_t> allNGraphNotIntegralTypes() {
@@ -117,19 +141,19 @@ std::set<ngraph::element::Type_t> allNGraphNotIntegralTypes() {
 }
 
 INSTANTIATE_TEST_CASE_P(NGraph, DynamicShapeResolverNegativeTestsDimsType, testing::Combine(
-    testing::ValuesIn(allNGraphTypes()),
+    testing::ValuesIn(allNGraphStaticTypes()),
     testing::ValuesIn(allNGraphNotIntegralTypes()),
     testing::Values(DataPartialShape{1, 800}),
     testing::Values(DataPartialShape{2})));
 
-class DynamicShapeResolverNegativeTestsDataShape : public DynamicShapeResolverNegativeTests {};
+using DynamicShapeResolverNegativeTestsDataShape = DynamicShapeResolverNegativeTests;
 TEST_P(DynamicShapeResolverNegativeTestsDataShape, ThrowsOnInvalidDataShape) {
-    ASSERT_THROW(std::make_shared<ngraph::op::DynamicShapeResolver>(data, dims), ngraph::ngraph_error);
+    ASSERT_THROW(std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims), ngraph::ngraph_error);
 }
 
 INSTANTIATE_TEST_CASE_P(NGraph, DynamicShapeResolverNegativeTestsDataShape, testing::Combine(
-    testing::ValuesIn(allNGraphTypes()),
-    testing::ValuesIn(allNGraphIntegralNumberTypes()),
+    testing::ValuesIn(allNGraphStaticTypes()),
+    testing::Values(ngraph::element::i64),
     testing::Values(
         DataPartialShape::dynamic(),
         DataPartialShape{{1, ngraph::Dimension::dynamic()}},
@@ -137,14 +161,14 @@ INSTANTIATE_TEST_CASE_P(NGraph, DynamicShapeResolverNegativeTestsDataShape, test
         DataPartialShape{{ngraph::Dimension::dynamic(), ngraph::Dimension::dynamic()}}),
     testing::Values(DataShape{2})));
 
-class DynamicShapeResolverNegativeTestsDimsShape : public DynamicShapeResolverNegativeTests {};
+using DynamicShapeResolverNegativeTestsDimsShape = DynamicShapeResolverNegativeTests;
 TEST_P(DynamicShapeResolverNegativeTestsDimsShape, ThrowsOnInvalidDimsShape) {
-    ASSERT_THROW(std::make_shared<ngraph::op::DynamicShapeResolver>(data, dims), ngraph::ngraph_error);
+    ASSERT_THROW(std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims), ngraph::ngraph_error);
 }
 
 INSTANTIATE_TEST_CASE_P(NGraph, DynamicShapeResolverNegativeTestsDimsShape, testing::Combine(
     testing::ValuesIn(allNGraphTypes()),
-    testing::ValuesIn(allNGraphIntegralNumberTypes()),
+    testing::Values(ngraph::element::i64),
     testing::Values(DataShape{1, 800}),
     testing::Values(
         DataPartialShape::dynamic(),
@@ -155,4 +179,107 @@ INSTANTIATE_TEST_CASE_P(NGraph, DynamicShapeResolverNegativeTestsDimsShape, test
         DataPartialShape{1},
         DataPartialShape{3})));
 
+typedef std::vector<int32_t> InputData;
+
+typedef std::tuple<
+        InputData,
+        std::string> dsrParams;
+
+class DynamicShapeResolverPluginTests : public testing::WithParamInterface<dsrParams>, public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<dsrParams> &obj) {
+        InputData inputData;
+        std::string targetDevice;
+        std::tie(inputData,
+                 targetDevice) = obj.param;
+
+        std::ostringstream result;
+        const char separator = '_';
+        result << "inputData=" << CommonTestUtils::vec2str(inputData) << separator;
+        result << "targetDevice=" << targetDevice;
+        return result.str();
+    }
+
+protected:
+    void SetUp() override {
+        std::tie(inputData, targetDevice) = GetParam();
+
+        const auto& inPrecision = ::ngraph::element::Type(::ngraph::element::Type_t::i32);
+
+        const auto& tensor = std::make_shared<ngraph::op::Parameter>(inPrecision, ngraph::Shape{inputData.size()});
+        const auto& nonZero = std::make_shared<ngraph::op::NonZero>(tensor);
+        const auto& gatherIndices = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
+                                                                               ngraph::Shape{1},
+                                                                               std::vector<int64_t>{0});
+        const auto& gatherAxis = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
+                                                                            ngraph::Shape{1},
+                                                                            std::vector<int64_t>{1});
+        const auto& gather = std::make_shared<ngraph::opset1::Gather>(nonZero->output(0), gatherIndices, gatherAxis);
+
+        function = std::make_shared<ngraph::Function>(ngraph::NodeVector{gather}, ngraph::ParameterVector{tensor});
+    }
+
+protected:
+    InputData inputData;
+};
+
+TEST_P(DynamicShapeResolverPluginTests, DynamicNetworkWithStaticOutput) {
+    // TODO: reimplement with a normal reference function
+    // Currently the network gets the index of the first non-zero element
+    int32_t refOutput{};
+    for (size_t i = 0; i < inputData.size(); i++) {
+        if (inputData[i] != 0) {
+            refOutput = static_cast<int32_t>(i);
+            break;
+        }
+    }
+
+    InferenceEngine::CNNNetwork cnnNet(function);
+
+    for (const auto& outputInfo : cnnNet.getOutputsInfo()) {
+        outputInfo.second->setPrecision(InferenceEngine::Precision::I32);
+    }
+
+    auto ie = PluginCache::get().ie();
+    InferenceEngine::ExecutableNetwork execNet;
+    ASSERT_NO_THROW(execNet = ie->LoadNetwork(cnnNet, targetDevice));
+    auto req = execNet.CreateInferRequest();
+
+    for (const auto &inputItem : cnnNet.getInputsInfo()) {
+        auto blob = make_blob_with_precision(inputItem.second->getTensorDesc());
+        blob->allocate();
+        std::copy_n(inputData.begin(), inputData.size(), blob->buffer().as<int32_t*>());
+        req.SetBlob(inputItem.first, blob);
+    }
+
+    ASSERT_NO_THROW(req.Infer());
+
+    for (const auto &output : cnnNet.getOutputsInfo()) {
+        auto outBlob = req.GetBlob(output.first);
+        auto outBuffer = outBlob->cbuffer().as<int32_t*>();
+
+        ASSERT_EQ(outBlob->size(), 1);
+        ASSERT_EQ(refOutput, outBuffer[0]);
+    }
+}
+
+const std::vector<InputData> inputDatas = {
+    {1, 0, 0, 0, 0},
+    {0, 1, 0, 1, 0},
+    {0, 0, 42, 0, 1},
+    {0, 0, 0, 0, -42}
+};
+
+const auto basicCases = ::testing::Combine(
+        ::testing::ValuesIn(inputDatas),
+        ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)
+);
+
+INSTANTIATE_TEST_CASE_P(DynamicShapeResolverPluginTests, DynamicShapeResolverPluginTests,
+                        basicCases,
+                        DynamicShapeResolverPluginTests::getTestCaseName);
+
 }  // namespace
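The plugin test above builds a NonZero -> Gather(axis 1, index 0) network: for a 1-D input, NonZero emits a [1 x count] tensor of non-zero positions, and gathering index 0 along axis 1 selects the first of them, which is what the inline reference loop computes. An equivalent plain-C++ sketch (hypothetical helper name):

#include <cstddef>
#include <cstdint>
#include <vector>

int32_t firstNonZeroIndex(const std::vector<int32_t>& data) {
    std::vector<int64_t> positions;  // what NonZero would emit for rank-1 input
    for (size_t i = 0; i < data.size(); ++i)
        if (data[i] != 0) positions.push_back(static_cast<int64_t>(i));
    // Gather(axis 1, index 0) picks the first recorded position.
    return positions.empty() ? 0 : static_cast<int32_t>(positions.front());
}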
index 45ba484..e1e856f 100644 (file)
@@ -6,7 +6,7 @@
 
 #include <common_test_utils/test_common.hpp>
 
-#include <ngraph/op/parameter.hpp>
+#include <ngraph/opsets/opset3.hpp>
 #include <ngraph/function.hpp>
 
 #include <details/ie_exception.hpp>
@@ -27,10 +27,10 @@ public:
         const auto& tensorType  = std::get<0>(parameters);
         const auto& tensorShape = std::get<1>(parameters);
 
-        m_param = std::make_shared<ngraph::op::Parameter>(tensorType, tensorShape);
+        m_param = std::make_shared<ngraph::opset3::Parameter>(tensorType, tensorShape);
     }
 protected:
-    std::shared_ptr<ngraph::op::Parameter> m_param;
+    std::shared_ptr<ngraph::opset3::Parameter> m_param;
 };
 
 std::vector<ngraph::PartialShape> testStaticShapes {
@@ -68,8 +68,8 @@ std::vector<ngraph::element::Type> testNGraphNumericTypes {
 //
 
 TEST_P(StaticShapeNonZeroTests, CanValidateAndInferTypes) {
-    std::shared_ptr<ngraph::op::StaticShapeNonZero> op;
-    ASSERT_NO_THROW(op = std::make_shared<ngraph::op::StaticShapeNonZero>(m_param));
+    std::shared_ptr<ngraph::vpu::op::StaticShapeNonZero> op;
+    ASSERT_NO_THROW(op = std::make_shared<ngraph::vpu::op::StaticShapeNonZero>(m_param));
     ASSERT_NO_THROW(std::make_shared<ngraph::Function>(
             ngraph::OutputVector{op->output(0), op->output(1)},
             ngraph::ParameterVector{m_param}));
@@ -84,10 +84,10 @@ INSTANTIATE_TEST_CASE_P(NGraph, StaticShapeNonZeroTests, testing::Combine(
 // Negative tests
 //
 
-class StaticShapeNonZeroTestsNegativeDataType : public StaticShapeNonZeroTests {};
+using StaticShapeNonZeroTestsNegativeDataType = StaticShapeNonZeroTests;
 TEST_P(StaticShapeNonZeroTestsNegativeDataType, ThrowsOnInvalidDataType) {
-    std::shared_ptr<ngraph::op::StaticShapeNonZero> op;
-    ASSERT_THROW(op = std::make_shared<ngraph::op::StaticShapeNonZero>(m_param),
+    std::shared_ptr<ngraph::vpu::op::StaticShapeNonZero> op;
+    ASSERT_THROW(op = std::make_shared<ngraph::vpu::op::StaticShapeNonZero>(m_param),
                  ngraph::NodeValidationFailure);
 }
 
@@ -96,10 +96,10 @@ INSTANTIATE_TEST_CASE_P(NGraph, StaticShapeNonZeroTestsNegativeDataType, testing
         testing::ValuesIn(testStaticShapes))
 );
 
-class StaticShapeNonZeroTestsNegativeDataShape : public StaticShapeNonZeroTests {};
+using StaticShapeNonZeroTestsNegativeDataShape = StaticShapeNonZeroTests;
 TEST_P(StaticShapeNonZeroTestsNegativeDataShape, ThrowsOnInvalidDataShape) {
-    std::shared_ptr<ngraph::op::StaticShapeNonZero> op;
-    ASSERT_THROW(op = std::make_shared<ngraph::op::StaticShapeNonZero>(m_param),
+    std::shared_ptr<ngraph::vpu::op::StaticShapeNonZero> op;
+    ASSERT_THROW(op = std::make_shared<ngraph::vpu::op::StaticShapeNonZero>(m_param),
                  ngraph::NodeValidationFailure);
 }
 
diff --git a/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_binary_elementwise.cpp b/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_binary_elementwise.cpp
new file mode 100644 (file)
index 0000000..668d71d
--- /dev/null
@@ -0,0 +1,345 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <ngraph/opsets/opset3.hpp>
+#include <ngraph/shape.hpp>
+#include <ngraph/type/element_type.hpp>
+
+#include <common_test_utils/test_common.hpp>
+
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape_binary_elementwise.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape.hpp>
+#include <vpu/utils/error.hpp>
+
+#include <ngraph_functions/utils/ngraph_helpers.hpp>
+
+namespace {
+
+using DataType = ngraph::element::Type_t;
+using DataDims = ngraph::Shape;
+using refFunction = std::function<std::shared_ptr<ngraph::Function> (const DataType&, const ngraph::NodeTypeInfo&, const DataDims&, const DataDims&)>;
+using EltwiseParams = std::tuple<DataDims, DataDims, refFunction>;
+
+class DynamicToStaticShapeEltwise: public CommonTestUtils::TestsCommon, public testing::WithParamInterface<std::tuple<ngraph::element::Type_t,
+        ngraph::NodeTypeInfo, EltwiseParams>> {
+public:
+    void SetUp() override {
+        const auto& dataType = std::get<0>(GetParam());
+        const auto& eltwiseType = std::get<1>(GetParam());
+        const auto& eltwiseParams = std::get<2>(GetParam());
+
+        const auto& input0_shape = std::get<0>(eltwiseParams);
+        const auto& input1_shape = std::get<1>(eltwiseParams);
+
+        ngraph::helpers::CompareFunctions(*transform(dataType, eltwiseType, input0_shape, input1_shape),
+                                          *std::get<2>(eltwiseParams)(dataType, eltwiseType, input0_shape, input1_shape));
+    }
+
+protected:
+    std::shared_ptr<const ngraph::Function> transform(
+        const ngraph::element::Type_t& dataType,
+        const ngraph::NodeTypeInfo& eltwiseType,
+        const ngraph::Shape& dataDims0,
+        const ngraph::Shape& dataDims1) const {
+        const auto input0 = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims0);
+        const auto input1 = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims1);
+
+        const auto input0_dsr = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims0.size()});
+        const auto input1_dsr = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims1.size()});
+
+        const auto dsr0 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(input0, input0_dsr);
+        const auto dsr1 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(input1, input1_dsr);
+
+        const auto eltwise = ngraph::helpers::getNodeSharedPtr(eltwiseType, {dsr0, dsr1});
+
+        const auto function = std::make_shared<ngraph::Function>(
+            ngraph::NodeVector{eltwise},
+            ngraph::ParameterVector{input0, input1, input0_dsr, input1_dsr},
+            "Actual");
+
+        eltwise->set_output_type(0, eltwise->get_input_element_type(0), ngraph::PartialShape::dynamic(eltwise->get_output_partial_shape(0).rank()));
+
+        const auto transformations = vpu::Transformations{{eltwiseType, vpu::dynamicToStaticShapeBinaryEltwise}};
+        vpu::DynamicToStaticShape(transformations).transform(*function);
+        return function;
+    }
+
+public:
+    static
+    std::shared_ptr<ngraph::Function> reference_simple(
+        const ngraph::element::Type_t& dataType,
+        const ngraph::NodeTypeInfo& eltwiseType,
+        const ngraph::Shape& dataDims0,
+        const ngraph::Shape& dataDims1) {
+        // Data flow subgraph
+        const auto input0 = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims0);
+        const auto input1 = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims1);
+
+        const auto input0_dsr = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims0.size()});
+        const auto input1_dsr = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims1.size()});
+
+        const auto dsr0 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(input0, input0_dsr);
+        const auto dsr1 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(input1, input1_dsr);
+
+        const auto eltwise = ngraph::helpers::getNodeSharedPtr(eltwiseType, {dsr0, dsr1});
+
+        // Shape infer subgraph
+        const auto maximum = std::make_shared<ngraph::opset3::Maximum>(input0_dsr, input1_dsr);
+        const auto dsr_final = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(eltwise, maximum);
+
+        const auto function = std::make_shared<ngraph::Function>(
+            ngraph::NodeVector{dsr_final},
+            ngraph::ParameterVector{input0, input1, input0_dsr, input1_dsr},
+            "Actual");
+
+        return function;
+    }
+
+    static
+    std::shared_ptr<ngraph::Function> reference_broadcast_left(
+        const ngraph::element::Type_t& dataType,
+        const ngraph::NodeTypeInfo& eltwiseType,
+        const ngraph::Shape& dataDims0,
+        const ngraph::Shape& dataDims1) {
+        // Data flow subgraph
+        const auto input0 = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims0);
+        const auto input1 = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims1);
+
+        const auto input0_dsr = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims0.size()});
+        const auto input1_dsr = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims1.size()});
+
+        const auto dsr0 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(input0, input0_dsr);
+        const auto dsr1 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(input1, input1_dsr);
+
+        const auto eltwise = ngraph::helpers::getNodeSharedPtr(eltwiseType, {dsr0, dsr1});
+
+        // Shape infer subgraph
+        const auto broadcast_const = ngraph::opset3::Constant::create(ngraph::element::i64, {dataDims1.size() - dataDims0.size()}, {1});
+        const auto concat = std::make_shared<ngraph::opset3::Concat>(ngraph::OutputVector{broadcast_const, input0_dsr}, 0);
+        const auto maximum = std::make_shared<ngraph::opset3::Maximum>(concat, input1_dsr);
+        const auto dsr_final = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(eltwise, maximum);
+
+        const auto function = std::make_shared<ngraph::Function>(
+            ngraph::NodeVector{dsr_final},
+            ngraph::ParameterVector{input0, input1, input0_dsr, input1_dsr},
+            "Actual");
+
+        return function;
+    }
+
+    static
+    std::shared_ptr<ngraph::Function> reference_broadcast_right(
+        const ngraph::element::Type_t& dataType,
+        const ngraph::NodeTypeInfo& eltwiseType,
+        const ngraph::Shape& dataDims0,
+        const ngraph::Shape& dataDims1) {
+        // Data flow subgraph
+        const auto input0 = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims0);
+        const auto input1 = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims1);
+
+        const auto input0_dsr = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims0.size()});
+        const auto input1_dsr = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims1.size()});
+
+        const auto dsr0 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(input0, input0_dsr);
+        const auto dsr1 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(input1, input1_dsr);
+
+        const auto eltwise = ngraph::helpers::getNodeSharedPtr(eltwiseType, {dsr0, dsr1});
+
+        // Shape infer subgraph
+        const auto broadcast_const = ngraph::opset3::Constant::create(ngraph::element::i64, {dataDims0.size() - dataDims1.size()}, {1});
+        const auto concat = std::make_shared<ngraph::opset3::Concat>(ngraph::OutputVector{broadcast_const, input1_dsr}, 0);
+        const auto maximum = std::make_shared<ngraph::opset3::Maximum>(input0_dsr, concat);
+        const auto dsr_final = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(eltwise, maximum);
+
+        const auto function = std::make_shared<ngraph::Function>(
+            ngraph::NodeVector{dsr_final},
+            ngraph::ParameterVector{input0, input1, input0_dsr, input1_dsr},
+            "Actual");
+
+        return function;
+    }
+};
+
+class DynamicToStaticShapeEltwiseSingleDSR: public CommonTestUtils::TestsCommon, public testing::WithParamInterface<std::tuple<ngraph::element::Type_t,
+        ngraph::NodeTypeInfo, EltwiseParams>> {
+public:
+    void SetUp() override {
+        const auto& dataType = std::get<0>(GetParam());
+        const auto& eltwiseType = std::get<1>(GetParam());
+        const auto& eltwiseParams = std::get<2>(GetParam());
+
+        const auto& input0_shape = std::get<0>(eltwiseParams);
+        const auto& input1_shape = std::get<1>(eltwiseParams);
+
+        ngraph::helpers::CompareFunctions(*transform(dataType, eltwiseType, input0_shape, input1_shape),
+                                          *std::get<2>(eltwiseParams)(dataType, eltwiseType, input0_shape, input1_shape));
+    }
+
+protected:
+    std::shared_ptr<const ngraph::Function> transform(
+        const ngraph::element::Type_t& dataType,
+        const ngraph::NodeTypeInfo& eltwiseType,
+        const ngraph::Shape& dataDims0,
+        const ngraph::Shape& dataDims1) const {
+        const auto input0 = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims0);
+        const auto input1 = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims1);
+
+        const auto input0_dsr = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims0.size()});
+
+        const auto dsr0 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(input0, input0_dsr);
+
+        const auto eltwise = ngraph::helpers::getNodeSharedPtr(eltwiseType, {dsr0, input1});
+
+        const auto function = std::make_shared<ngraph::Function>(
+            ngraph::NodeVector{eltwise},
+            ngraph::ParameterVector{input0, input1, input0_dsr},
+            "Actual");
+
+        eltwise->set_output_type(0, eltwise->get_input_element_type(0), ngraph::PartialShape::dynamic(eltwise->get_output_partial_shape(0).rank()));
+
+        const auto transformations = vpu::Transformations{{eltwiseType, vpu::dynamicToStaticShapeBinaryEltwise}};
+        vpu::DynamicToStaticShape(transformations).transform(*function);
+        return function;
+    }
+
+public:
+    static
+    std::shared_ptr<ngraph::Function> reference_simple(
+        const ngraph::element::Type_t& dataType,
+        const ngraph::NodeTypeInfo& eltwiseType,
+        const ngraph::Shape& dataDims0,
+        const ngraph::Shape& dataDims1) {
+        // Data flow subgraph
+        const auto input0 = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims0);
+        const auto input1 = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims1);
+
+        const auto input0_dsr = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims0.size()});
+        const auto input1_const = ngraph::opset3::Constant::create(ngraph::element::i64, {dataDims1.size()}, dataDims1);
+
+        const auto dsr0 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(input0, input0_dsr);
+
+        const auto eltwise = ngraph::helpers::getNodeSharedPtr(eltwiseType, {dsr0, input1});
+
+        // Shape infer subgraph
+        const auto maximum = std::make_shared<ngraph::opset3::Maximum>(input0_dsr, input1_const);
+        const auto dsr_final = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(eltwise, maximum);
+
+        const auto function = std::make_shared<ngraph::Function>(
+            ngraph::NodeVector{dsr_final},
+            ngraph::ParameterVector{input0, input1, input0_dsr},
+            "Actual");
+
+        return function;
+    }
+
+    static
+    std::shared_ptr<ngraph::Function> reference_broadcast_left(
+        const ngraph::element::Type_t& dataType,
+        const ngraph::NodeTypeInfo& eltwiseType,
+        const ngraph::Shape& dataDims0,
+        const ngraph::Shape& dataDims1) {
+        // Data flow subgraph
+        const auto input0 = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims0);
+        const auto input1 = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims1);
+
+        const auto input0_dsr = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims0.size()});
+        const auto input1_const = ngraph::opset3::Constant::create(ngraph::element::i64, {dataDims1.size()}, dataDims1);
+
+        const auto dsr0 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(input0, input0_dsr);
+
+        const auto eltwise = ngraph::helpers::getNodeSharedPtr(eltwiseType, {dsr0, input1});
+
+        // Shape infer subgraph
+        const auto broadcast_const = ngraph::opset3::Constant::create(ngraph::element::i64, {dataDims1.size() - dataDims0.size()}, {1});
+        const auto concat = std::make_shared<ngraph::opset3::Concat>(ngraph::OutputVector{broadcast_const, input0_dsr}, 0);
+        const auto maximum = std::make_shared<ngraph::opset3::Maximum>(concat, input1_const);
+        const auto dsr_final = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(eltwise, maximum);
+
+        const auto function = std::make_shared<ngraph::Function>(
+            ngraph::NodeVector{dsr_final},
+            ngraph::ParameterVector{input0, input1, input0_dsr},
+            "Actual");
+
+        return function;
+    }
+
+    static
+    std::shared_ptr<ngraph::Function> reference_broadcast_right(
+        const ngraph::element::Type_t& dataType,
+        const ngraph::NodeTypeInfo& eltwiseType,
+        const ngraph::Shape& dataDims0,
+        const ngraph::Shape& dataDims1) {
+        // Data flow subgraph
+        const auto input0 = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims0);
+        const auto input1 = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims1);
+
+        const auto input0_dsr = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims0.size()});
+        const auto input1_const = ngraph::opset3::Constant::create(ngraph::element::i64, {dataDims1.size()}, dataDims1);
+
+        const auto dsr0 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(input0, input0_dsr);
+
+        const auto eltwise = ngraph::helpers::getNodeSharedPtr(eltwiseType, {dsr0, input1});
+
+        // Shape infer subgraph
+        const auto broadcast_const = ngraph::opset3::Constant::create(ngraph::element::i64, {dataDims0.size() - dataDims1.size()}, {1});
+        const auto concat = std::make_shared<ngraph::opset3::Concat>(ngraph::OutputVector{broadcast_const, input1_const}, 0);
+        const auto maximum = std::make_shared<ngraph::opset3::Maximum>(input0_dsr, concat);
+        const auto dsr_final = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(eltwise, maximum);
+
+        const auto function = std::make_shared<ngraph::Function>(
+            ngraph::NodeVector{dsr_final},
+            ngraph::ParameterVector{input0, input1, input0_dsr},
+            "Actual");
+
+        return function;
+    }
+};
+
+TEST_P(DynamicToStaticShapeEltwise, CompareFunctions) {
+}
+
+INSTANTIATE_TEST_CASE_P(EltwiseBroadcast, DynamicToStaticShapeEltwise, testing::Combine(
+    testing::Values(
+        ngraph::element::f16,
+        ngraph::element::f32,
+        ngraph::element::i32,
+        ngraph::element::i64,
+        ngraph::element::u8),
+    testing::Values(
+        ngraph::opset3::Add::type_info,
+        ngraph::opset3::Divide::type_info,
+//        ngraph::opset3::Equal::type_info,  // disabled: the operation's default broadcast value needs to be fixed
+        ngraph::opset3::Power::type_info,
+        ngraph::opset3::Multiply::type_info,
+        ngraph::opset3::Subtract::type_info),
+    testing::Values(
+        EltwiseParams{DataDims{1000}, DataDims{1}, DynamicToStaticShapeEltwise::reference_simple},
+        EltwiseParams{DataDims{1000, 1, 1}, DataDims{1000, 1, 1}, DynamicToStaticShapeEltwise::reference_simple},
+        EltwiseParams{DataDims{2, 1000}, DataDims{3, 1, 1}, DynamicToStaticShapeEltwise::reference_broadcast_left},
+        EltwiseParams{DataDims{1000, 64}, DataDims{1}, DynamicToStaticShapeEltwise::reference_broadcast_right})));
+
+TEST_P(DynamicToStaticShapeEltwiseSingleDSR, CompareFunctions) {
+}
+
+INSTANTIATE_TEST_CASE_P(EltwiseBroadcastSingleDSR, DynamicToStaticShapeEltwiseSingleDSR, testing::Combine(
+    testing::Values(
+        ngraph::element::f16,
+        ngraph::element::f32,
+        ngraph::element::i32,
+        ngraph::element::i64,
+        ngraph::element::u8),
+    testing::Values(
+        ngraph::opset3::Add::type_info,
+        ngraph::opset3::Divide::type_info,
+//        ngraph::opset3::Equal::type_info,  // disabled: the operation's default broadcast value needs to be fixed
+        ngraph::opset3::Power::type_info,
+        ngraph::opset3::Multiply::type_info,
+        ngraph::opset3::Subtract::type_info),
+    testing::Values(
+        EltwiseParams{DataDims{1000}, DataDims{1}, DynamicToStaticShapeEltwiseSingleDSR::reference_simple},
+        EltwiseParams{DataDims{1000, 1, 1}, DataDims{1000, 1, 1}, DynamicToStaticShapeEltwiseSingleDSR::reference_simple},
+        EltwiseParams{DataDims{2, 1000}, DataDims{3, 1, 1}, DynamicToStaticShapeEltwiseSingleDSR::reference_broadcast_left},
+        EltwiseParams{DataDims{1000, 64}, DataDims{1}, DynamicToStaticShapeEltwiseSingleDSR::reference_broadcast_right})));
+}  // namespace
\ No newline at end of file
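The reference graphs above encode numpy-style broadcast semantics in the shape-infer subgraph: the lower-rank shape is left-padded with 1s (the `broadcast_const`/`Concat` pair), then `Maximum` produces the output shape. A minimal host-side sketch of the same rule; `broadcastShape` is an illustrative name, not part of the patch:

```cpp
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

// Left-pad the lower-rank shape with 1s, then take the elementwise maximum --
// the scalar analogue of the Concat(ones, dims) -> Maximum subgraph above.
std::vector<int64_t> broadcastShape(std::vector<int64_t> a, std::vector<int64_t> b) {
    if (a.size() < b.size()) std::swap(a, b);
    b.insert(b.begin(), a.size() - b.size(), 1);  // the "broadcast_const" padding
    std::vector<int64_t> out(a.size());
    for (std::size_t i = 0; i < a.size(); ++i)
        out[i] = std::max(a[i], b[i]);            // the Maximum node
    return out;
}
// e.g. broadcastShape({2, 1000}, {3, 1, 1}) == {3, 2, 1000}, matching the
// reference_broadcast_left test case above.
```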
diff --git a/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_clamp.cpp b/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_clamp.cpp
new file mode 100644 (file)
index 0000000..576ee66
--- /dev/null
@@ -0,0 +1,88 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <common_test_utils/test_common.hpp>
+#include <ngraph_functions/utils/ngraph_helpers.hpp>
+#include <ngraph/opsets/opset3.hpp>
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape_unary_elementwise.hpp>
+#include <vpu/utils/error.hpp>
+#include <numeric>
+#include <queue>
+#include <random>
+
+namespace {
+
+using DataType = ngraph::element::Type_t;
+using DataDims = ngraph::Shape;
+
+class DynamicToStaticShapeClamp : public CommonTestUtils::TestsCommon,
+        public testing::WithParamInterface<std::tuple<DataType, DataDims>> {
+public:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& dataType = std::get<0>(GetParam());
+        const auto& dataDims = std::get<1>(GetParam());
+
+        ngraph::helpers::CompareFunctions(*transform(dataType, dataDims), *reference(dataType, dataDims));
+    }
+
+protected:
+    std::shared_ptr<const ngraph::Function> transform(
+            const ngraph::element::Type_t& dataType,
+            const ngraph::Shape& dataDims) const {
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims);
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims.size()});
+
+        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+
+        const auto node = std::make_shared<ngraph::opset3::Clamp>(dsr, 0., 6.);
+
+        auto outputShape = node->get_output_partial_shape(0);
+        const auto function = std::make_shared<ngraph::Function>(
+                ngraph::NodeVector{node},
+                ngraph::ParameterVector{data, dims},
+                "Actual");
+        node->set_output_type(0, dsr->get_input_element_type(0), ngraph::PartialShape::dynamic(outputShape.rank()));
+
+        const auto transformations = vpu::Transformations{{ngraph::opset3::Clamp::type_info, vpu::dynamicToStaticUnaryElementwise}};
+        vpu::DynamicToStaticShape(transformations).transform(*function);
+        return function;
+    }
+
+    std::shared_ptr<const ngraph::Function> reference(
+            const ngraph::element::Type_t& dataType,
+            const ngraph::Shape& dataDims) const {
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims);
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims.size()});
+
+        const auto dsr0 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+        const auto node = std::make_shared<ngraph::opset3::Clamp>(dsr0, 0., 6.);
+
+        const auto dsr1 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(node, dims);
+        return std::make_shared<ngraph::Function>(
+                ngraph::NodeVector{dsr1},
+                ngraph::ParameterVector{data, dims},
+                "Expected");
+    }
+};
+
+TEST_P(DynamicToStaticShapeClamp, CompareFunctions) {
+}
+
+INSTANTIATE_TEST_CASE_P(NGraph, DynamicToStaticShapeClamp, testing::Combine(
+    testing::Values(
+        ngraph::element::f16,
+        ngraph::element::f32,
+        ngraph::element::i32,
+        ngraph::element::i64,
+        ngraph::element::u8),
+    testing::Values(
+        DataDims{1000},
+        DataDims{4, 1000},
+        DataDims{3, 128, 256},
+        DataDims{2, 3, 128, 256})));
+
+}  // namespace
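Clamp is shape-preserving, which is why `vpu::dynamicToStaticUnaryElementwise` applies here: the output's dynamic shape is exactly the input's dims tensor, so the expected graph only re-attaches a `DynamicShapeResolver`. A sketch of that invariant under the same assumption; the helper name is illustrative:

```cpp
#include <memory>
#include <ngraph/node.hpp>
#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>

// For any shape-preserving (unary elementwise) op, reuse the input's dims
// tensor verbatim to resolve the output's dynamic shape.
std::shared_ptr<ngraph::Node> resolveShapePreserving(
        const ngraph::Output<ngraph::Node>& op,
        const ngraph::Output<ngraph::Node>& dims) {
    return std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(op, dims);
}
```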
diff --git a/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_convert.cpp b/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_convert.cpp
new file mode 100644 (file)
index 0000000..c222af0
--- /dev/null
@@ -0,0 +1,88 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <common_test_utils/test_common.hpp>
+#include <ngraph_functions/utils/ngraph_helpers.hpp>
+#include <ngraph/opsets/opset3.hpp>
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape_unary_elementwise.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape.hpp>
+#include <vpu/utils/error.hpp>
+#include <numeric>
+#include <queue>
+#include <random>
+
+namespace {
+
+using DataType = ngraph::element::Type_t;
+using DataDims = ngraph::Shape;
+
+class DynamicToStaticShapeConvert : public CommonTestUtils::TestsCommon,
+        public testing::WithParamInterface<std::tuple<DataType, DataDims>> {
+public:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& dataType = std::get<0>(GetParam());
+        const auto& dataDims = std::get<1>(GetParam());
+
+        ngraph::helpers::CompareFunctions(*transform(dataType, dataDims), *reference(dataType, dataDims));
+    }
+
+protected:
+    std::shared_ptr<const ngraph::Function> transform(
+            const ngraph::element::Type_t& dataType,
+            const ngraph::Shape& dataDims) const {
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims);
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims.size()});
+
+        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+
+        const auto convert = std::make_shared<ngraph::opset3::Convert>(dsr, dataType);
+
+        auto outputShape = convert->get_output_partial_shape(0);
+        const auto function = std::make_shared<ngraph::Function>(
+                ngraph::NodeVector{convert},
+                ngraph::ParameterVector{data, dims},
+                "Actual");
+        convert->set_output_type(0, dsr->get_input_element_type(0), ngraph::PartialShape::dynamic(outputShape.rank()));
+
+        const auto transformations = vpu::Transformations{{ngraph::opset3::Convert::type_info, vpu::dynamicToStaticUnaryElementwise}};
+        vpu::DynamicToStaticShape(transformations).transform(*function);
+        return function;
+    }
+
+    std::shared_ptr<const ngraph::Function> reference(
+            const ngraph::element::Type_t& dataType,
+            const ngraph::Shape& dataDims) const {
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims);
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims.size()});
+
+        const auto dsr0 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+        const auto convert = std::make_shared<ngraph::opset3::Convert>(dsr0, dataType);
+
+        const auto dsr1 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(convert, dims);
+        return std::make_shared<ngraph::Function>(
+                ngraph::NodeVector{dsr1},
+                ngraph::ParameterVector{data, dims},
+                "Expected");
+    }
+};
+
+TEST_P(DynamicToStaticShapeConvert, CompareFunctions) {
+}
+
+INSTANTIATE_TEST_CASE_P(NGraph, DynamicToStaticShapeConvert, testing::Combine(
+    testing::Values(
+        ngraph::element::f16,
+        ngraph::element::f32,
+        ngraph::element::i32,
+        ngraph::element::i64,
+        ngraph::element::u8),
+    testing::Values(
+        DataDims{1000},
+        DataDims{4, 1000},
+        DataDims{3, 128, 256},
+        DataDims{2, 3, 128, 256})));
+
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_non_max_suppression.cpp b/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_non_max_suppression.cpp
new file mode 100644 (file)
index 0000000..06e7440
--- /dev/null
@@ -0,0 +1,138 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <common_test_utils/test_common.hpp>
+#include <ngraph_functions/utils/ngraph_helpers.hpp>
+#include <ngraph/opsets/opset3.hpp>
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape_non_max_suppression.hpp>
+#include <vpu/utils/error.hpp>
+#include <numeric>
+#include <queue>
+#include <random>
+
+namespace {
+
+using DataType = ngraph::element::Type_t;
+using DataDims = ngraph::Shape;
+
+struct NonMaxSuppressionTestCase {
+    int64_t num_batches, num_boxes, num_classes, max_output_boxes_per_class;
+    float iou_threshold, score_threshold;
+};
+
+
+class DynamicToStaticShapeNonMaxSuppression : public CommonTestUtils::TestsCommon,
+        public testing::WithParamInterface<std::tuple<DataType, DataType, NonMaxSuppressionTestCase>> {
+public:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& float_type = std::get<0>(parameters);
+        const auto& integer_type = std::get<1>(parameters);
+        const auto& nms_setup = std::get<2>(parameters);
+
+        ngraph::helpers::CompareFunctions(*transform(float_type, integer_type, nms_setup),
+                *reference(float_type, integer_type, nms_setup));
+    }
+
+protected:
+    std::shared_ptr<const ngraph::Function> transform(
+            const ngraph::element::Type_t& float_type,
+            const ngraph::element::Type_t& integer_type,
+            const NonMaxSuppressionTestCase& nms_setup) const {
+        const auto boxes = std::make_shared<ngraph::opset3::Parameter>(
+                float_type, ngraph::PartialShape{nms_setup.num_batches, nms_setup.num_boxes, 4});
+        const auto scores = std::make_shared<ngraph::opset3::Parameter>(
+                float_type, ngraph::PartialShape{nms_setup.num_batches, nms_setup.num_classes, nms_setup.num_boxes});
+        const auto max_output_boxes_per_class = ngraph::opset3::Constant::create(integer_type, {}, std::vector<int64_t>{nms_setup.max_output_boxes_per_class});
+        const auto iou_threshold = ngraph::opset3::Constant::create(float_type, ngraph::Shape{}, std::vector<float>{nms_setup.iou_threshold});
+        const auto score_threshold = ngraph::opset3::Constant::create(float_type, ngraph::Shape{}, std::vector<float>{nms_setup.score_threshold});
+
+
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{3});
+        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(scores, dims);
+
+        const auto node = std::make_shared<ngraph::opset3::NonMaxSuppression>(
+                boxes, dsr, max_output_boxes_per_class, iou_threshold, score_threshold);
+
+        auto outputShape = node->get_output_partial_shape(0);
+        const auto function = std::make_shared<ngraph::Function>(
+                ngraph::NodeVector{node},
+                ngraph::ParameterVector{boxes, scores, dims},
+                "Actual");
+        node->set_output_type(0, dsr->get_input_element_type(0), ngraph::PartialShape::dynamic(outputShape.rank()));
+
+        const auto transformations = vpu::Transformations{{node->type_info, vpu::dynamicToStaticNonMaxSuppression}};
+        vpu::DynamicToStaticShape(transformations).transform(*function);
+        return function;
+    }
+
+    std::shared_ptr<const ngraph::Function> reference(
+            const ngraph::element::Type_t& float_type,
+            const ngraph::element::Type_t& integer_type,
+            const NonMaxSuppressionTestCase& nms_setup) const {
+        const auto boxes = std::make_shared<ngraph::opset3::Parameter>(
+                float_type, ngraph::PartialShape{nms_setup.num_batches, nms_setup.num_boxes, 4});
+        const auto scores = std::make_shared<ngraph::opset3::Parameter>(
+                float_type, ngraph::PartialShape{nms_setup.num_batches, nms_setup.num_classes, nms_setup.num_boxes});
+        const auto max_output_boxes_per_class = ngraph::opset3::Constant::create(integer_type, {}, std::vector<int64_t>{nms_setup.max_output_boxes_per_class});
+        const auto iou_threshold = ngraph::opset3::Constant::create(float_type, {}, std::vector<float>{nms_setup.iou_threshold});
+        const auto score_threshold = ngraph::opset3::Constant::create(float_type, {}, std::vector<float>{nms_setup.score_threshold});
+
+
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{3});
+        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(scores, dims);
+
+        const auto node = std::make_shared<ngraph::opset3::NonMaxSuppression>(
+                boxes, dsr, max_output_boxes_per_class, iou_threshold, score_threshold);
+
+        const auto num_classes_index = ngraph::opset3::Constant::create(ngraph::element::i64, {1}, std::vector<int64_t>{1});
+        const auto num_classes_axis = ngraph::opset3::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{0});
+        const auto num_classes = std::make_shared<ngraph::opset3::Gather>(dims, num_classes_index, num_classes_axis);
+
+        const auto num_boxes_index = ngraph::opset3::Constant::create(ngraph::element::i64, {1}, std::vector<int64_t>{2});
+        const auto num_boxes_axis = ngraph::opset3::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{0});
+        const auto num_boxes = std::make_shared<ngraph::opset3::Gather>(dims, num_boxes_index, num_boxes_axis);
+
+        const auto max_boxes = std::make_shared<ngraph::opset3::Convert>(
+                std::make_shared<ngraph::opset3::Unsqueeze>(max_output_boxes_per_class,
+                ngraph::opset3::Constant::create(ngraph::element::i64, {1}, std::vector<int64_t>{0})), dims->get_element_type());
+
+        const auto boxes_overall = std::make_shared<ngraph::opset3::Multiply>(max_boxes, num_classes);
+        const auto selected_boxes = std::make_shared<ngraph::opset3::Minimum>(num_boxes, boxes_overall);
+
+        const auto triplets_const = std::make_shared<ngraph::opset3::Constant>(
+                dims->get_element_type(), ngraph::Shape{1}, std::vector<int64_t>{3});
+
+        const auto final_shape = std::make_shared<ngraph::opset3::Concat>(
+                ngraph::OutputVector{selected_boxes, triplets_const}, 0);
+
+        const auto dsr1 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(node, final_shape);
+        return std::make_shared<ngraph::Function>(
+                ngraph::NodeVector{dsr1},
+                ngraph::ParameterVector{boxes, scores, dims},
+                "Expected");
+    }
+};
+
+TEST_P(DynamicToStaticShapeNonMaxSuppression, CompareFunctions) {
+}
+
+INSTANTIATE_TEST_CASE_P(NGraph, DynamicToStaticShapeNonMaxSuppression, testing::Combine(
+    testing::Values(
+        ngraph::element::f16,
+        ngraph::element::f32),
+    testing::Values(
+        ngraph::element::i32,
+        ngraph::element::i64,
+        ngraph::element::u8),
+    testing::Values(
+        // num_batches, num_boxes, num_classes, max_output_boxes_per_class, iou_threshold, score_threshold
+        NonMaxSuppressionTestCase{1, 10, 5, 10, 0., 0.},
+        NonMaxSuppressionTestCase{2, 100, 5, 10, 0., 0.},
+        NonMaxSuppressionTestCase{3, 10, 5, 2, 0.5, 0.},
+        NonMaxSuppressionTestCase{1, 1000, 1, 2000, 0.5, 0.})));
+
+}  // namespace
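The shape-infer subgraph in `reference` bounds the number of selected boxes by `min(num_boxes, max_output_boxes_per_class * num_classes)` and pairs it with the constant 3, since opset3 NonMaxSuppression emits (batch, class, box) index triplets. The same arithmetic as a scalar sketch, for illustration only:

```cpp
#include <algorithm>
#include <cstdint>
#include <utility>

// Mirrors Minimum(num_boxes, Multiply(max_output_boxes_per_class, num_classes))
// -> Concat(selected, 3): the dynamic NMS output shape is {selected, 3}.
std::pair<int64_t, int64_t> nmsOutputShape(int64_t num_boxes, int64_t num_classes,
                                           int64_t max_output_boxes_per_class) {
    const int64_t selected = std::min(num_boxes, max_output_boxes_per_class * num_classes);
    return {selected, 3};
}
```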
index 51c090d..07486aa 100644 (file)
@@ -3,17 +3,14 @@
 //
 
 #include "vpu/ngraph/transformations/dynamic_to_static_shape_nonzero.hpp"
+#include "vpu/ngraph/transformations/dynamic_to_static_shape.hpp"
 #include "vpu/ngraph/operations/static_shape_nonzero.hpp"
 #include "vpu/ngraph/operations/dynamic_shape_resolver.hpp"
 
-#include "../utils/ngraph_utils.h"
-
+#include <ngraph_functions/utils/ngraph_helpers.hpp>
 #include <ngraph/function.hpp>
-#include <ngraph/opsets/opset1.hpp>
 #include <ngraph/opsets/opset3.hpp>
 
-#include <cpp/ie_cnn_network.h>
-
 #include <common_test_utils/test_common.hpp>
 #include <gtest/gtest.h>
 
@@ -27,51 +24,48 @@ namespace {
 using TensorType  = ngraph::element::Type_t;
 using TensorShape = ngraph::Shape;
 
-class DynamicToStaticShapeNonZeroTests
-        : public CommonTestUtils::TestsCommon,
-          public testing::WithParamInterface<std::tuple<TensorType, TensorShape>> {
+class DynamicToStaticShapeNonZeroTests : public CommonTestUtils::TestsCommon, public testing::WithParamInterface<std::tuple<TensorType, TensorShape>> {
 public:
     void prepareFunctions() {
         const auto& parameters = GetParam();
         const auto& tensorType = std::get<0>(parameters);
         const auto& tensorShape = std::get<1>(parameters);
 
-        // Create a function with only opset3::NonZero
+        // Create a function with only op::NonZero
         // And then run conversion pass
         {
-            const auto input = std::make_shared<ngraph::op::Parameter>(tensorType, tensorShape);
+            const auto input = std::make_shared<ngraph::opset3::Parameter>(tensorType, tensorShape);
 
             const auto nonZero = std::make_shared<ngraph::opset3::NonZero>(input);
             nonZero->set_friendly_name(s_FriendlyName);
 
-            m_resfunction = std::make_shared<ngraph::Function>(
-                    ngraph::NodeVector{nonZero}, ngraph::ParameterVector{input});
-            ngraph::pass::DynamicToStaticShapeNonZero().run_on_function(m_resfunction);
+            actual = std::make_shared<ngraph::Function>(ngraph::NodeVector{nonZero}, ngraph::ParameterVector{input});
+            const auto transformation = vpu::Transformations{{ngraph::opset3::NonZero::type_info, vpu::dynamicToStaticShapeNonZero}};
+            vpu::DynamicToStaticShape(transformation).transform(*actual);
         }
 
         // Create a reference function
         {
             const auto input = std::make_shared<ngraph::opset1::Parameter>(tensorType, tensorShape);
 
-            const auto staticShapeNonZero = std::make_shared<ngraph::op::StaticShapeNonZero>(input);
-            staticShapeNonZero->set_friendly_name(s_FriendlyName + "/static_shape");
-            const auto dynamicShapeResolver = std::make_shared<ngraph::op::DynamicShapeResolver>(
+            const auto staticShapeNonZero = std::make_shared<ngraph::vpu::op::StaticShapeNonZero>(input);
+            staticShapeNonZero->set_friendly_name(std::string(s_FriendlyName) + "/static_shape");
+            const auto dynamicShapeResolver = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(
                     staticShapeNonZero->output(0), staticShapeNonZero->output(1));
-            dynamicShapeResolver->set_friendly_name(s_FriendlyName + "/resolve_shape");
+            dynamicShapeResolver->set_friendly_name(std::string(s_FriendlyName) + "/resolve_shape");
 
-            m_refFunction = std::make_shared<ngraph::Function>(
-                    ngraph::NodeVector{dynamicShapeResolver}, ngraph::ParameterVector{input});
+            expected = std::make_shared<ngraph::Function>(ngraph::NodeVector{dynamicShapeResolver}, ngraph::ParameterVector{input});
         }
     }
 
     void compareFunctions() {
-        FuncTestUtils::CompareFunctions(m_resfunction, m_refFunction);
+        ASSERT_NO_THROW(ngraph::helpers::CompareFunctions(*actual, *expected));
 
-        auto actualResultNode = m_resfunction->get_output_op(0);
+        auto actualResultNode = actual->get_output_op(0);
         auto actualResolverNode = actualResultNode->input(0).get_source_output().get_node_shared_ptr();
         auto actualNonZeroNode = actualResolverNode->input(0).get_source_output().get_node_shared_ptr();
 
-        auto expectedResultNode = m_refFunction->get_output_op(0);
+        auto expectedResultNode = expected->get_output_op(0);
         auto expectedResolverNode = expectedResultNode->input(0).get_source_output().get_node_shared_ptr();
         auto expectedNonZeroNode = expectedResolverNode->input(0).get_source_output().get_node_shared_ptr();
 
@@ -80,31 +74,31 @@ public:
     }
 
 protected:
-    std::shared_ptr<ngraph::Function> m_resfunction;
-    std::shared_ptr<ngraph::Function> m_refFunction;
+    std::shared_ptr<ngraph::Function> actual;
+    std::shared_ptr<ngraph::Function> expected;
 
-    static const std::string s_FriendlyName;
+    static const char s_FriendlyName[];
 };
 
-const std::string DynamicToStaticShapeNonZeroTests::s_FriendlyName = "non_zero";
+const char DynamicToStaticShapeNonZeroTests::s_FriendlyName[] = "NonZero";
 
-TEST_P(DynamicToStaticShapeNonZeroTests, inferAndValidate) {
+TEST_P(DynamicToStaticShapeNonZeroTests, CompareFunctions) {
     prepareFunctions();
     compareFunctions();
 }
 
 INSTANTIATE_TEST_CASE_P(NGraph, DynamicToStaticShapeNonZeroTests, testing::Combine(
-        testing::Values(
-                ngraph::element::f16,
-                ngraph::element::f32,
-                ngraph::element::i32,
-                ngraph::element::i64,
-                ngraph::element::u8),
-        testing::Values(
-                TensorShape{1000},
-                TensorShape{4, 1000},
-                TensorShape{3, 128, 256},
-                TensorShape{2, 3, 128, 256})
+    testing::Values(
+        ngraph::element::f16,
+        ngraph::element::f32,
+        ngraph::element::i32,
+        ngraph::element::i64,
+        ngraph::element::u8),
+    testing::Values(
+        TensorShape{1000},
+        TensorShape{4, 1000},
+        TensorShape{3, 128, 256},
+        TensorShape{2, 3, 128, 256})
 ));
 
 }  // namespace
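The pass under test replaces a dynamically shaped `NonZero` with the `StaticShapeNonZero` + `DynamicShapeResolver` pair: output(0) carries indices padded to a static upper bound, output(1) the runtime shape. For an input of rank R and N total elements, {R, N} is a safe static bound (an assumption on StaticShapeNonZero's layout; the patch does not spell it out):

```cpp
#include <ngraph/shape.hpp>

// Worst case, every element is non-zero: NonZero then returns N coordinates
// of R components each, so {R, N} is a static upper bound on output(0).
ngraph::Shape nonZeroUpperBound(const ngraph::Shape& input_shape) {
    return ngraph::Shape{input_shape.size(), ngraph::shape_size(input_shape)};
}
```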
diff --git a/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_roialign.cpp b/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_roialign.cpp
new file mode 100644 (file)
index 0000000..af4cec8
--- /dev/null
@@ -0,0 +1,297 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <common_test_utils/test_common.hpp>
+#include <ngraph_functions/utils/ngraph_helpers.hpp>
+#include <ngraph/opsets/opset3.hpp>
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape_roialign.hpp>
+#include <vpu/utils/error.hpp>
+#include <numeric>
+#include <queue>
+#include <random>
+
+namespace {
+
+using DataType = ngraph::element::Type_t;
+using DataDims = ngraph::Shape;
+
+struct ROIAlignTestCase {
+    ngraph::Shape data_shape;
+    uint64_t num_rois, pooled_h, pooled_w, sampling_ratio;
+    float spatial_scale;
+    std::string mode;
+};
+
+
+class DynamicToStaticShapeROIAlignDataDSR : public CommonTestUtils::TestsCommon,
+        public testing::WithParamInterface<std::tuple<DataType, DataType, ROIAlignTestCase>> {
+public:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& float_type = std::get<0>(parameters);
+        const auto& integer_type = std::get<1>(parameters);
+        const auto& roialign_setup = std::get<2>(parameters);
+
+        ngraph::helpers::CompareFunctions(*transform(float_type, integer_type, roialign_setup),
+                *reference(float_type, integer_type, roialign_setup));
+    }
+
+protected:
+    std::shared_ptr<const ngraph::Function> transform(
+            const ngraph::element::Type_t& float_type,
+            const ngraph::element::Type_t& integer_type,
+            const ROIAlignTestCase& roialign_setup) const {
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(float_type, roialign_setup.data_shape);
+        const auto boxes = std::make_shared<ngraph::opset3::Parameter>(float_type, ngraph::Shape{roialign_setup.num_rois, 4});
+        const auto rois = std::make_shared<ngraph::opset3::Parameter>(integer_type, ngraph::Shape{roialign_setup.num_rois});
+
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{data->get_shape().size()});
+        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+
+        const auto node = std::make_shared<ngraph::opset3::ROIAlign>(dsr, boxes, rois,
+                roialign_setup.pooled_h, roialign_setup.pooled_w, roialign_setup.sampling_ratio, roialign_setup.spatial_scale, roialign_setup.mode);
+
+        auto outputShape = node->get_output_partial_shape(0);
+        const auto function = std::make_shared<ngraph::Function>(
+                ngraph::NodeVector{node},
+                ngraph::ParameterVector{data, boxes, rois, dims},
+                "Actual");
+        node->set_output_type(0, dsr->get_input_element_type(0), ngraph::PartialShape::dynamic(outputShape.rank()));
+
+        const auto transformations = vpu::Transformations{{node->type_info, vpu::dynamicToStaticShapeROIAlign}};
+        vpu::DynamicToStaticShape(transformations).transform(*function);
+        return function;
+    }
+
+    std::shared_ptr<const ngraph::Function> reference(
+            const ngraph::element::Type_t& float_type,
+            const ngraph::element::Type_t& integer_type,
+            const ROIAlignTestCase& roialign_setup) const {
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(float_type, roialign_setup.data_shape);
+        const auto boxes = std::make_shared<ngraph::opset3::Parameter>(float_type, ngraph::Shape{roialign_setup.num_rois, 4});
+        const auto rois = std::make_shared<ngraph::opset3::Parameter>(integer_type, ngraph::Shape{roialign_setup.num_rois});
+
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{data->get_shape().size()});
+        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+
+        const auto node = std::make_shared<ngraph::opset3::ROIAlign>(dsr, boxes, rois,
+                roialign_setup.pooled_h, roialign_setup.pooled_w, roialign_setup.sampling_ratio, roialign_setup.spatial_scale, roialign_setup.mode);
+
+
+        const auto c_index = ngraph::opset3::Constant::create(dims->get_element_type(), ngraph::Shape{1}, std::vector<uint64_t>{1});
+        const auto c_axis = ngraph::opset3::Constant::create(dims->get_element_type(), ngraph::Shape{1}, std::vector<uint64_t>{0});
+        const auto c = std::make_shared<ngraph::opset3::Gather>(dims, c_index, c_axis);
+
+        const auto num_rois = ngraph::opset3::Constant::create(dims->get_element_type(), ngraph::Shape{1}, std::vector<uint64_t>{roialign_setup.num_rois});
+        const auto pooled_h = ngraph::opset3::Constant::create(dims->get_element_type(), ngraph::Shape{1}, std::vector<uint64_t>{roialign_setup.pooled_h});
+        const auto pooled_w = ngraph::opset3::Constant::create(dims->get_element_type(), ngraph::Shape{1}, std::vector<uint64_t>{roialign_setup.pooled_w});
+
+        const auto output_shape = std::make_shared<ngraph::opset3::Concat>(ngraph::OutputVector{num_rois, c, pooled_h, pooled_w}, 0);
+        const auto dsr1 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(node, output_shape);
+        return std::make_shared<ngraph::Function>(
+                ngraph::NodeVector{dsr1},
+                ngraph::ParameterVector{data, boxes, rois, dims},
+                "Expected");
+    }
+};
+
+TEST_P(DynamicToStaticShapeROIAlignDataDSR, CompareFunctions) {
+}
+
+INSTANTIATE_TEST_CASE_P(NGraph, DynamicToStaticShapeROIAlignDataDSR, testing::Combine(
+    testing::Values(
+        ngraph::element::f16,
+        ngraph::element::f32),
+    testing::Values(
+        ngraph::element::i32,
+        ngraph::element::i64,
+        ngraph::element::u8),
+    // data_shape, num_rois, pooled_h, pooled_w, sampling_ratio, spatial_scale, mode
+    testing::Values(
+        ROIAlignTestCase{{7, 256, 200, 200}, 1000, 6, 6, 2, 16., "avg"},
+        ROIAlignTestCase{{7, 256, 200, 200}, 1000, 7, 6, 2, 16., "max"})));
+
+class DynamicToStaticShapeROIAlignROIDSR : public CommonTestUtils::TestsCommon,
+        public testing::WithParamInterface<std::tuple<DataType, DataType, ROIAlignTestCase>> {
+public:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& float_type = std::get<0>(parameters);
+        const auto& integer_type = std::get<1>(parameters);
+        const auto& roialign_setup = std::get<2>(parameters);
+
+        ngraph::helpers::CompareFunctions(*transform(float_type, integer_type, roialign_setup),
+                                          *reference(float_type, integer_type, roialign_setup));
+    }
+
+protected:
+    std::shared_ptr<const ngraph::Function> transform(
+            const ngraph::element::Type_t& float_type,
+            const ngraph::element::Type_t& integer_type,
+            const ROIAlignTestCase& roialign_setup) const {
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(float_type, roialign_setup.data_shape);
+        const auto boxes = std::make_shared<ngraph::opset3::Parameter>(float_type, ngraph::Shape{roialign_setup.num_rois, 4});
+        const auto rois = std::make_shared<ngraph::opset3::Parameter>(integer_type, ngraph::Shape{roialign_setup.num_rois});
+
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{rois->get_shape().size()});
+        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(rois, dims);
+
+        const auto node = std::make_shared<ngraph::opset3::ROIAlign>(data, boxes, dsr,
+                roialign_setup.pooled_h, roialign_setup.pooled_w, roialign_setup.sampling_ratio, roialign_setup.spatial_scale, roialign_setup.mode);
+
+        auto outputShape = node->get_output_partial_shape(0);
+        const auto function = std::make_shared<ngraph::Function>(
+                ngraph::NodeVector{node},
+                ngraph::ParameterVector{data, boxes, rois, dims},
+                "Actual");
+        node->set_output_type(0, dsr->get_input_element_type(0), ngraph::PartialShape::dynamic(outputShape.rank()));
+
+        const auto transformations = vpu::Transformations{{node->type_info, vpu::dynamicToStaticShapeROIAlign}};
+        vpu::DynamicToStaticShape(transformations).transform(*function);
+        return function;
+    }
+
+    std::shared_ptr<const ngraph::Function> reference(
+            const ngraph::element::Type_t& float_type,
+            const ngraph::element::Type_t& integer_type,
+            const ROIAlignTestCase& roialign_setup) const {
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(float_type, roialign_setup.data_shape);
+        const auto boxes = std::make_shared<ngraph::opset3::Parameter>(float_type, ngraph::Shape{roialign_setup.num_rois, 4});
+        const auto rois = std::make_shared<ngraph::opset3::Parameter>(integer_type, ngraph::Shape{roialign_setup.num_rois});
+
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{rois->get_shape().size()});
+        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(rois, dims);
+
+        const auto node = std::make_shared<ngraph::opset3::ROIAlign>(data, boxes, dsr,
+                roialign_setup.pooled_h, roialign_setup.pooled_w, roialign_setup.sampling_ratio, roialign_setup.spatial_scale, roialign_setup.mode);
+
+        const auto data_shape = ngraph::opset3::Constant::create(dims->get_element_type(), ngraph::Shape{4}, roialign_setup.data_shape);
+        const auto c_index = ngraph::opset3::Constant::create(dims->get_element_type(), ngraph::Shape{1}, std::vector<uint64_t>{1});
+        const auto c_axis = ngraph::opset3::Constant::create(dims->get_element_type(), ngraph::Shape{1}, std::vector<uint64_t>{0});
+        const auto c = std::make_shared<ngraph::opset3::Gather>(data_shape, c_index, c_axis);
+
+        const auto pooled_h = ngraph::opset3::Constant::create(dims->get_element_type(), ngraph::Shape{1}, std::vector<uint64_t>{roialign_setup.pooled_h});
+        const auto pooled_w = ngraph::opset3::Constant::create(dims->get_element_type(), ngraph::Shape{1}, std::vector<uint64_t>{roialign_setup.pooled_w});
+
+        const auto output_shape = std::make_shared<ngraph::opset3::Concat>(ngraph::OutputVector{dims, c, pooled_h, pooled_w}, 0);
+        const auto dsr1 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(node, output_shape);
+        return std::make_shared<ngraph::Function>(
+                ngraph::NodeVector{dsr1},
+                ngraph::ParameterVector{data, boxes, rois, dims},
+                "Expected");
+    }
+};
+
+TEST_P(DynamicToStaticShapeROIAlignROIDSR, CompareFunctions) {
+}
+
+INSTANTIATE_TEST_CASE_P(NGraph, DynamicToStaticShapeROIAlignROIDSR, testing::Combine(
+        testing::Values(
+                ngraph::element::f16,
+                ngraph::element::f32),
+        testing::Values(
+                ngraph::element::i32,
+                ngraph::element::i64,
+                ngraph::element::u8),
+        // data_shape, num_rois, pooled_h, pooled_w, sampling_ratio, spatial_scale, mode
+        testing::Values(
+                ROIAlignTestCase{{7, 256, 200, 200}, 1000, 6, 6, 2, 16., "avg"},
+                ROIAlignTestCase{{7, 256, 200, 200}, 1000, 7, 6, 2, 16., "max"})));
+
+
+class DynamicToStaticShapeROIAlign : public CommonTestUtils::TestsCommon,
+        public testing::WithParamInterface<std::tuple<DataType, DataType, ROIAlignTestCase>> {
+public:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& float_type = std::get<0>(parameters);
+        const auto& integer_type = std::get<1>(parameters);
+        const auto& roialign_setup = std::get<2>(parameters);
+
+        ngraph::helpers::CompareFunctions(*transform(float_type, integer_type, roialign_setup),
+                                          *reference(float_type, integer_type, roialign_setup));
+    }
+
+protected:
+    std::shared_ptr<const ngraph::Function> transform(
+            const ngraph::element::Type_t& float_type,
+            const ngraph::element::Type_t& integer_type,
+            const ROIAlignTestCase& roialign_setup) const {
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(float_type, roialign_setup.data_shape);
+        const auto boxes = std::make_shared<ngraph::opset3::Parameter>(float_type, ngraph::Shape{roialign_setup.num_rois, 4});
+        const auto rois = std::make_shared<ngraph::opset3::Parameter>(integer_type, ngraph::Shape{roialign_setup.num_rois});
+
+        const auto roi_dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{rois->get_shape().size()});
+        const auto roi_dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(rois, roi_dims);
+
+        const auto data_dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{data->get_shape().size()});
+        const auto data_dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, data_dims);
+
+        const auto node = std::make_shared<ngraph::opset3::ROIAlign>(data_dsr, boxes, roi_dsr,
+                roialign_setup.pooled_h, roialign_setup.pooled_w, roialign_setup.sampling_ratio, roialign_setup.spatial_scale, roialign_setup.mode);
+
+        auto outputShape = node->get_output_partial_shape(0);
+        const auto function = std::make_shared<ngraph::Function>(
+                ngraph::NodeVector{node},
+                ngraph::ParameterVector{data, boxes, rois, roi_dims, data_dims},
+                "Actual");
+        node->set_output_type(0, data_dsr->get_input_element_type(0), ngraph::PartialShape::dynamic(outputShape.rank()));
+
+        const auto transformations = vpu::Transformations{{node->type_info, vpu::dynamicToStaticShapeROIAlign}};
+        vpu::DynamicToStaticShape(transformations).transform(*function);
+        return function;
+    }
+
+    std::shared_ptr<const ngraph::Function> reference(
+            const ngraph::element::Type_t& float_type,
+            const ngraph::element::Type_t& integer_type,
+            const ROIAlignTestCase& roialign_setup) const {
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(float_type, roialign_setup.data_shape);
+        const auto boxes = std::make_shared<ngraph::opset3::Parameter>(float_type, ngraph::Shape{roialign_setup.num_rois, 4});
+        const auto rois = std::make_shared<ngraph::opset3::Parameter>(integer_type, ngraph::Shape{roialign_setup.num_rois});
+
+        const auto roi_dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{rois->get_shape().size()});
+        const auto roi_dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(rois, roi_dims);
+
+        const auto data_dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{data->get_shape().size()});
+        const auto data_dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, data_dims);
+
+        const auto node = std::make_shared<ngraph::opset3::ROIAlign>(data_dsr, boxes, roi_dsr,
+                roialign_setup.pooled_h, roialign_setup.pooled_w, roialign_setup.sampling_ratio, roialign_setup.spatial_scale, roialign_setup.mode);
+
+        const auto c_index = ngraph::opset3::Constant::create(data_dims->get_element_type(), ngraph::Shape{1}, std::vector<uint64_t>{1});
+        const auto c_axis = ngraph::opset3::Constant::create(data_dims->get_element_type(), ngraph::Shape{1}, std::vector<uint64_t>{0});
+        const auto c = std::make_shared<ngraph::opset3::Gather>(data_dims, c_index, c_axis);
+
+        const auto pooled_h = ngraph::opset3::Constant::create(data_dims->get_element_type(), ngraph::Shape{1}, std::vector<uint64_t>{roialign_setup.pooled_h});
+        const auto pooled_w = ngraph::opset3::Constant::create(data_dims->get_element_type(), ngraph::Shape{1}, std::vector<uint64_t>{roialign_setup.pooled_w});
+
+        const auto output_shape = std::make_shared<ngraph::opset3::Concat>(ngraph::OutputVector{roi_dims, c, pooled_h, pooled_w}, 0);
+        const auto dsr1 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(node, output_shape);
+        return std::make_shared<ngraph::Function>(
+                ngraph::NodeVector{dsr1},
+                ngraph::ParameterVector{data, boxes, rois, roi_dims, data_dims},
+                "Expected");
+    }
+};
+
+TEST_P(DynamicToStaticShapeROIAlign, CompareFunctions) {
+}
+
+INSTANTIATE_TEST_CASE_P(NGraph, DynamicToStaticShapeROIAlign, testing::Combine(
+        testing::Values(
+                ngraph::element::f16,
+                ngraph::element::f32),
+        testing::Values(
+                ngraph::element::i32,
+                ngraph::element::i64,
+                ngraph::element::u8),
+        // data_shape, num_rois, pooled_h, pooled_w, sampling_ratio, spatial_scale, mode
+        testing::Values(
+                ROIAlignTestCase{{7, 256, 200, 200}, 1000, 6, 6, 2, 16., "avg"},
+                ROIAlignTestCase{{7, 256, 200, 200}, 1000, 7, 6, 2, 16., "max"})));
+
+}  // namespace
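All three ROIAlign fixtures reconstruct the same output shape, {num_rois, C, pooled_h, pooled_w}; they differ only in which components are gathered from a runtime dims tensor and which are constants. With everything static, the shape each reference subgraph concatenates reduces to this sketch (the helper name is illustrative):

```cpp
#include <cstddef>
#include <ngraph/shape.hpp>

// The static form of the shape the reference subgraphs above assemble.
ngraph::Shape roiAlignOutputShape(std::size_t num_rois, std::size_t channels,
                                  std::size_t pooled_h, std::size_t pooled_w) {
    return ngraph::Shape{num_rois, channels, pooled_h, pooled_w};
}
```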
diff --git a/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_scatter.cpp b/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_scatter.cpp
new file mode 100644 (file)
index 0000000..81c1297
--- /dev/null
@@ -0,0 +1,108 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <common_test_utils/test_common.hpp>
+#include <ngraph_functions/utils/ngraph_helpers.hpp>
+#include <ngraph/opsets/opset3.hpp>
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape_unary_elementwise.hpp>
+#include <vpu/utils/error.hpp>
+#include <numeric>
+#include <queue>
+#include <random>
+
+namespace {
+
+using DataType = ngraph::element::Type_t;
+
+
+struct ScatterTestCase {
+    ngraph::NodeTypeInfo scatter_type_info;
+    ngraph::Shape data_shape, indices_shape, updates_shape;
+    int64_t axis;
+};
+
+class DynamicToStaticShapeScatter : public CommonTestUtils::TestsCommon,
+        public testing::WithParamInterface<std::tuple<DataType, DataType, ScatterTestCase>> {
+public:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& numeric_type = std::get<0>(parameters);
+        const auto& integer_type = std::get<1>(parameters);
+        const auto& scatter_setup = std::get<2>(parameters);
+
+        ngraph::helpers::CompareFunctions(*transform(numeric_type, integer_type, scatter_setup),
+                *reference(numeric_type, integer_type, scatter_setup));
+    }
+
+protected:
+    std::shared_ptr<const ngraph::Function> transform(
+            const ngraph::element::Type_t& numeric_type,
+            const ngraph::element::Type_t& integer_type,
+            const ScatterTestCase& scatter_setup) const {
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(numeric_type, scatter_setup.data_shape);
+        const auto indices = std::make_shared<ngraph::opset3::Parameter>(integer_type, scatter_setup.indices_shape);
+        const auto updates = std::make_shared<ngraph::opset3::Parameter>(numeric_type, scatter_setup.updates_shape);
+        const auto axis = std::make_shared<ngraph::opset3::Constant>(integer_type, ngraph::Shape{1}, std::vector<int64_t>{scatter_setup.axis});
+
+
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{scatter_setup.data_shape.size()});
+        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+
+        const auto node = ngraph::helpers::getNodeSharedPtr(scatter_setup.scatter_type_info, {dsr, indices, updates, axis});
+
+        auto outputShape = node->get_output_partial_shape(0);
+        const auto function = std::make_shared<ngraph::Function>(
+                ngraph::NodeVector{node},
+                ngraph::ParameterVector{data, indices, updates, dims},
+                "Actual");
+        node->set_output_type(0, dsr->get_input_element_type(0), ngraph::PartialShape::dynamic(outputShape.rank()));
+
+        const auto transformations = vpu::Transformations{{scatter_setup.scatter_type_info, vpu::dynamicToStaticUnaryElementwise}};
+        vpu::DynamicToStaticShape(transformations).transform(*function);
+        return function;
+    }
+
+    std::shared_ptr<const ngraph::Function> reference(
+            const ngraph::element::Type_t& numeric_type,
+            const ngraph::element::Type_t& integer_type,
+            const ScatterTestCase& scatter_setup) const {
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(numeric_type, scatter_setup.data_shape);
+        const auto indices = std::make_shared<ngraph::opset3::Parameter>(integer_type, scatter_setup.indices_shape);
+        const auto updates = std::make_shared<ngraph::opset3::Parameter>(numeric_type, scatter_setup.updates_shape);
+        const auto axis = std::make_shared<ngraph::opset3::Constant>(integer_type, ngraph::Shape{1}, std::vector<int64_t>{scatter_setup.axis});
+
+
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{scatter_setup.data_shape.size()});
+        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+
+        const auto node = ngraph::helpers::getNodeSharedPtr(scatter_setup.scatter_type_info, {dsr, indices, updates, axis});
+
+        const auto dsr1 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(node, dims);
+        return std::make_shared<ngraph::Function>(
+                ngraph::NodeVector{dsr1},
+                ngraph::ParameterVector{data, indices, updates, dims},
+                "Expected");
+    }
+};
+
+TEST_P(DynamicToStaticShapeScatter, CompareFunctions) {
+}
+
+INSTANTIATE_TEST_CASE_P(NGraph, DynamicToStaticShapeScatter, testing::Combine(
+    testing::Values(
+        ngraph::element::f16,
+        ngraph::element::f32,
+        ngraph::element::i32,
+        ngraph::element::i64,
+        ngraph::element::u8),
+    testing::Values(
+        ngraph::element::i32,
+        ngraph::element::i64,
+        ngraph::element::u8),
+    testing::Values(
+        ScatterTestCase{ngraph::opset3::ScatterUpdate::type_info, {1000, 256, 10, 15}, {125, 20}, {1000, 125, 20, 10, 15}, 1})));
+
+}  // namespace
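ScatterUpdate can reuse `vpu::dynamicToStaticUnaryElementwise` because writing `updates` at `indices` never changes the data extent: the output shape equals the data shape, so the data's dims tensor also serves the output's resolver. A trivial sketch under that assumed semantics:

```cpp
#include <ngraph/shape.hpp>

// ScatterUpdate only overwrites selected positions; the extent is unchanged.
ngraph::Shape scatterUpdateOutputShape(const ngraph::Shape& data_shape) {
    return data_shape;
}
```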
diff --git a/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_squeeze.cpp b/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_squeeze.cpp
new file mode 100644 (file)
index 0000000..ecf3d19
--- /dev/null
@@ -0,0 +1,115 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <ngraph/type/element_type.hpp>
+#include <ngraph/shape.hpp>
+#include <common_test_utils/test_common.hpp>
+#include <ngraph/op/parameter.hpp>
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+#include <numeric>
+#include <random>
+#include <ngraph/ops.hpp>
+#include <ngraph/opsets/opset3.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape_squeeze.hpp>
+#include <queue>
+#include <ngraph_functions/utils/ngraph_helpers.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape.hpp>
+#include <vpu/utils/error.hpp>
+
+namespace {
+
+using DataType = ngraph::element::Type_t;
+using DataDims = ngraph::Shape;
+using axis_vec = std::vector<int64_t>;
+
+struct SqueezeTestCase {
+    DataDims input_shape;
+    axis_vec squeeze_axes;
+    axis_vec gather_indices;
+};
+
+class DynamicToStaticShapeSqueeze : public CommonTestUtils::TestsCommon,
+        public testing::WithParamInterface<std::tuple<DataType, SqueezeTestCase>> {
+public:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& dataType = std::get<0>(parameters);
+        const auto& squeeze_test_case = std::get<1>(parameters);
+
+        const auto& input_shape = squeeze_test_case.input_shape;
+        const auto& squeeze_axes = squeeze_test_case.squeeze_axes;
+        const auto& gather_indices = squeeze_test_case.gather_indices;
+
+        ngraph::helpers::CompareFunctions(*transform(dataType, input_shape, squeeze_axes),
+                *reference(dataType, input_shape, squeeze_axes, gather_indices));
+    }
+
+protected:
+    std::shared_ptr<const ngraph::Function> transform(
+        const ngraph::element::Type_t& data_type,
+        const ngraph::Shape& input_shape,
+        const std::vector<std::int64_t>& squeeze_axes) const {
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(data_type, input_shape);
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{input_shape.size()});
+
+        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+
+        const auto axes = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64, ngraph::Shape{squeeze_axes.size()}, squeeze_axes);
+        const auto node = std::make_shared<ngraph::opset3::Squeeze>(dsr, axes);
+
+        const auto function = std::make_shared<ngraph::Function>(
+            ngraph::NodeVector{node},
+            ngraph::ParameterVector{data, dims},
+            "Actual");
+        node->set_output_type(0, dsr->get_input_element_type(0), ngraph::PartialShape::dynamic(node->get_output_partial_shape(0).rank()));
+
+        const auto transformations = vpu::Transformations{{node->type_info, vpu::dynamicToStaticShapeSqueeze}};
+        vpu::DynamicToStaticShape(transformations).transform(*function);
+        return function;
+    }
+
+    std::shared_ptr<const ngraph::Function> reference(
+            const ngraph::element::Type_t& data_type,
+            const ngraph::Shape& input_shape,
+            const std::vector<std::int64_t>& squeeze_axes,
+            const std::vector<std::int64_t>& gather_indices) const {
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(data_type, input_shape);
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{input_shape.size()});
+
+        const auto dsr0 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+
+        const auto axes = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64, ngraph::Shape{squeeze_axes.size()}, squeeze_axes);
+        const auto squeeze = std::make_shared<ngraph::opset3::Squeeze>(dsr0, axes);
+
+        const auto gather_axis_const = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64, ngraph::Shape{1}, std::vector<int64_t>{0});
+        const auto gather_indices_const = std::make_shared<ngraph::opset3::Constant>(
+                ngraph::element::i64, ngraph::Shape{gather_indices.size()}, gather_indices);
+
+        const auto gather = std::make_shared<ngraph::opset3::Gather>(dims, gather_indices_const, gather_axis_const);
+        const auto dsr1 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(squeeze, gather);
+        return std::make_shared<ngraph::Function>(
+            ngraph::NodeVector{dsr1},
+            ngraph::ParameterVector{data, dims},
+            "Expected");
+    }
+};
+
+TEST_P(DynamicToStaticShapeSqueeze, CompareFunctions) {
+}
+
+INSTANTIATE_TEST_CASE_P(NGraph, DynamicToStaticShapeSqueeze, testing::Combine(
+    testing::Values(
+        ngraph::element::f16,
+        ngraph::element::f32,
+        ngraph::element::i32,
+        ngraph::element::i64,
+        ngraph::element::u8),
+    testing::Values(
+        // input_shape, squeeze_axes, gather_indices
+        SqueezeTestCase{DataDims{1, 1, 1000}, axis_vec{-2}, axis_vec{0, 2}},
+        SqueezeTestCase{DataDims{1, 1000, 1}, axis_vec{0, 2}, axis_vec{1}},
+        SqueezeTestCase{DataDims{1, 1, 1}, axis_vec{1}, axis_vec{0, 2}},
+        SqueezeTestCase{DataDims{1000, 1, 1}, axis_vec{2}, axis_vec{0, 1}})));
+
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_transpose.cpp b/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_transpose.cpp
new file mode 100644 (file)
index 0000000..aa501d4
--- /dev/null
@@ -0,0 +1,120 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <ngraph/type/element_type.hpp>
+#include <ngraph/shape.hpp>
+#include <common_test_utils/test_common.hpp>
+#include <ngraph/op/parameter.hpp>
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+#include <numeric>
+#include <random>
+#include <ngraph/opsets/opset3.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape_transpose.hpp>
+#include <queue>
+#include <ngraph_functions/utils/ngraph_helpers.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape.hpp>
+#include <vpu/utils/error.hpp>
+
+namespace {
+
+using DataType = ngraph::element::Type_t;
+using DataDims = ngraph::Shape;
+
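+// Keeps the outermost dimension static and marks the remaining ones dynamic;
+// shapes of rank 0 or 1 become a fully dynamic 1-D shape.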
+ngraph::PartialShape makeDynamicShape(const ngraph::PartialShape& shape) {
+    if (shape.is_dynamic()) {
+        return shape;
+    }
+
+    const auto& numDimensions = shape.rank().get_length();
+    if (numDimensions <= 1) {
+        return ngraph::PartialShape{{ngraph::Dimension::dynamic()}};
+    }
+
+    auto dynamicShape = shape;
+    for (auto i = numDimensions - 1; i > 0; --i) {
+        dynamicShape[i] = ngraph::Dimension::dynamic();
+    }
+
+    return dynamicShape;
+}
+
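+// The expected graph computes the transposed dims by permuting the dims tensor with
+// ScatterElementsUpdate(dims, transposition, dims, axis = 0).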
+class DynamicToStaticShapeTranspose : public CommonTestUtils::TestsCommon, public testing::WithParamInterface<std::tuple<DataType, DataDims>> {
+public:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& dataType = std::get<0>(parameters);
+        const auto& dataDims = std::get<1>(parameters);
+
+        auto permutation = std::vector<std::int64_t>(dataDims.size());
+        std::iota(permutation.begin(), permutation.end(), 0);
+        std::shuffle(permutation.begin(), permutation.end(), std::mt19937());
+
+        ngraph::helpers::CompareFunctions(*transform(dataType, dataDims, permutation), *reference(dataType, dataDims, permutation));
+    }
+
+protected:
+    std::shared_ptr<const ngraph::Function> transform(
+        const ngraph::element::Type_t& dataType,
+        const ngraph::Shape& dataDims,
+        const std::vector<std::int64_t>& permutation) const {
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims);
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims.size()});
+        const auto transposition = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64, ngraph::Shape{data->get_shape().size()}, permutation);
+
+        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+        const auto transpose = std::make_shared<ngraph::opset3::Transpose>(dsr, transposition);
+
+        auto outputShape = transpose->get_output_partial_shape(0);
+        const auto function = std::make_shared<ngraph::Function>(
+            ngraph::NodeVector{transpose},
+            ngraph::ParameterVector{data, dims},
+            "Actual");
+        transpose->set_output_type(0, dsr->get_input_element_type(0), makeDynamicShape(transposition->get_output_partial_shape(0)));
+
+        const auto transformations = vpu::Transformations{{ngraph::opset3::Transpose::type_info, vpu::dynamicToStaticShapeTranspose}};
+        vpu::DynamicToStaticShape(transformations).transform(*function);
+        return function;
+    }
+
+    std::shared_ptr<const ngraph::Function> reference(
+        const ngraph::element::Type_t& dataType,
+        const ngraph::Shape& dataDims,
+        const std::vector<std::int64_t>& permutation) const {
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims);
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims.size()});
+        const auto transposition = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64, ngraph::Shape{data->get_shape().size()}, permutation);
+
+        const auto dsr0 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+        const auto transpose = std::make_shared<ngraph::opset3::Transpose>(dsr0, transposition);
+
+        const auto axis = std::make_shared<ngraph::opset3::Constant>(
+            ngraph::element::u64,
+            ngraph::Shape{std::initializer_list<std::size_t>{1}},
+            std::vector<std::size_t>{0});
+        const auto scatterElementsUpdate = std::make_shared<ngraph::opset3::ScatterElementsUpdate>(dims, transposition, dims, axis);
+        const auto dsr1 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(transpose, scatterElementsUpdate);
+        return std::make_shared<ngraph::Function>(
+            ngraph::NodeVector{dsr1},
+            ngraph::ParameterVector{data, dims},
+            "Expected");
+    }
+};
+
+TEST_P(DynamicToStaticShapeTranspose, CompareFunctions) {
+}
+
+INSTANTIATE_TEST_CASE_P(NGraph, DynamicToStaticShapeTranspose, testing::Combine(
+    testing::Values(
+        ngraph::element::f16,
+        ngraph::element::f32,
+        ngraph::element::i32,
+        ngraph::element::i64,
+        ngraph::element::u8),
+    testing::Values(
+        DataDims{1000},
+        DataDims{4, 1000},
+        DataDims{3, 128, 256},
+        DataDims{2, 3, 128, 256})));
+
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_unary_elementwise.cpp b/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_unary_elementwise.cpp
new file mode 100644 (file)
index 0000000..6263fc7
--- /dev/null
@@ -0,0 +1,97 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <common_test_utils/test_common.hpp>
+#include <ngraph_functions/utils/ngraph_helpers.hpp>
+#include <ngraph/opsets/opset3.hpp>
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape_unary_elementwise.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape.hpp>
+#include <vpu/utils/error.hpp>
+#include <numeric>
+#include <queue>
+#include <random>
+
+namespace {
+
+using DataType = ngraph::element::Type_t;
+using DataDims = ngraph::Shape;
+
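+// Unary elementwise operations preserve their input shape, so the expected graph
+// simply re-wraps the result in a DynamicShapeResolver fed by the same dims input.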
+class DynamicToStaticShapeUnaryElementwise : public CommonTestUtils::TestsCommon,
+        public testing::WithParamInterface<std::tuple<DataType, DataDims, ngraph::NodeTypeInfo>> {
+public:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& dataType = std::get<0>(parameters);
+        const auto& dataDims = std::get<1>(parameters);
+        const auto& type_info = std::get<2>(parameters);
+
+        ngraph::helpers::CompareFunctions(*transform(dataType, dataDims, type_info), *reference(dataType, dataDims, type_info));
+    }
+
+protected:
+    std::shared_ptr<const ngraph::Function> transform(
+            const ngraph::element::Type_t& dataType,
+            const ngraph::Shape& dataDims,
+            const ngraph::NodeTypeInfo type_info) const {
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims);
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims.size()});
+
+        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+
+        const auto node = ngraph::helpers::getNodeSharedPtr(type_info, {dsr});
+
+        auto outputShape = node->get_output_partial_shape(0);
+        const auto function = std::make_shared<ngraph::Function>(
+                ngraph::NodeVector{node},
+                ngraph::ParameterVector{data, dims},
+                "Actual");
+        node->set_output_type(0, dsr->get_input_element_type(0), ngraph::PartialShape::dynamic(outputShape.rank()));
+
+        const auto transformations = vpu::Transformations{{type_info, vpu::dynamicToStaticUnaryElementwise}};
+        vpu::DynamicToStaticShape(transformations).transform(*function);
+        return function;
+    }
+
+    std::shared_ptr<const ngraph::Function> reference(
+            const ngraph::element::Type_t& dataType,
+            const ngraph::Shape& dataDims,
+            const ngraph::NodeTypeInfo type_info) const {
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims);
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims.size()});
+
+        const auto dsr0 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+        const auto node = ngraph::helpers::getNodeSharedPtr(type_info, {dsr0});
+
+        const auto dsr1 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(node, dims);
+        return std::make_shared<ngraph::Function>(
+                ngraph::NodeVector{dsr1},
+                ngraph::ParameterVector{data, dims},
+                "Expected");
+    }
+};
+
+TEST_P(DynamicToStaticShapeUnaryElementwise, CompareFunctions) {
+}
+
+INSTANTIATE_TEST_CASE_P(NGraph, DynamicToStaticShapeUnaryElementwise, testing::Combine(
+    testing::Values(
+        ngraph::element::f16,
+        ngraph::element::f32,
+        ngraph::element::i32,
+        ngraph::element::i64,
+        ngraph::element::u8),
+    testing::Values(
+        DataDims{1000},
+        DataDims{4, 1000},
+        DataDims{3, 128, 256},
+        DataDims{2, 3, 128, 256}),
+    testing::Values(
+        ngraph::opset3::Floor::type_info,
+        ngraph::opset3::Log::type_info,
+        ngraph::opset3::Relu::type_info,
+        ngraph::opset3::Sigmoid::type_info,
+        ngraph::opset3::Sqrt::type_info)));
+
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_unsqueeze.cpp b/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_unsqueeze.cpp
new file mode 100644 (file)
index 0000000..530c387
--- /dev/null
@@ -0,0 +1,128 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <ngraph/type/element_type.hpp>
+#include <ngraph/shape.hpp>
+#include <common_test_utils/test_common.hpp>
+#include <ngraph/op/parameter.hpp>
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+#include <numeric>
+#include <random>
+#include <ngraph/ops.hpp>
+#include <ngraph/opsets/opset3.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape_unsqueeze.hpp>
+#include <queue>
+#include <ngraph_functions/utils/ngraph_helpers.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape.hpp>
+#include <vpu/utils/error.hpp>
+
+namespace {
+
+using DataType = ngraph::element::Type_t;
+using DataDims = ngraph::Shape;
+using axis_vec = std::vector<int64_t>;
+constexpr int64_t NEW_DIM = -100;
+
+struct UnsqueezeTestCase {
+    DataDims input_shape;
+    axis_vec unsqueeze_axes;
+    axis_vec concat_indices;
+};
+
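+// NEW_DIM entries in concat_indices mark positions where Unsqueeze inserts a new
+// dimension; the expected graph splits the dims tensor into per-dimension pieces and
+// concatenates them back with a constant at each NEW_DIM position.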
+class DynamicToStaticShapeUnsqueeze : public CommonTestUtils::TestsCommon,
+public testing::WithParamInterface<std::tuple<DataType, UnsqueezeTestCase>> {
+public:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& dataType = std::get<0>(parameters);
+        const auto& unsqueeze_test_case = std::get<1>(parameters);
+
+        const auto& input_shape = unsqueeze_test_case.input_shape;
+        const auto& unsqueeze_axes = unsqueeze_test_case.unsqueeze_axes;
+        const auto& concat_indices = unsqueeze_test_case.concat_indices;
+
+        ngraph::helpers::CompareFunctions(*transform(dataType, input_shape, unsqueeze_axes),
+                *reference(dataType, input_shape, unsqueeze_axes, concat_indices));
+    }
+
+protected:
+    std::shared_ptr<const ngraph::Function> transform(
+        const ngraph::element::Type_t& data_type,
+        const ngraph::Shape& input_shape,
+        const std::vector<std::int64_t>& unsqueeze_axes) const {
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(data_type, input_shape);
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{input_shape.size()});
+
+        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+
+        const auto axes = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64, ngraph::Shape{unsqueeze_axes.size()}, unsqueeze_axes);
+        const auto node = std::make_shared<ngraph::opset3::Unsqueeze>(dsr, axes);
+
+        const auto function = std::make_shared<ngraph::Function>(
+            ngraph::NodeVector{node},
+            ngraph::ParameterVector{data, dims},
+            "Actual");
+        node->set_output_type(0, dsr->get_input_element_type(0),
+                ngraph::PartialShape::dynamic(node->get_output_partial_shape(0).rank() + unsqueeze_axes.size()));
+
+        const auto transformations = vpu::Transformations{{node->type_info, vpu::dynamicToStaticShapeUnsqueeze}};
+        vpu::DynamicToStaticShape(transformations).transform(*function);
+        return function;
+    }
+
+    std::shared_ptr<const ngraph::Function> reference(
+            const ngraph::element::Type_t& data_type,
+            const ngraph::Shape& input_shape,
+            const std::vector<std::int64_t>& unsqueeze_axes,
+            const std::vector<std::int64_t>& concat_indices) const {
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(data_type, input_shape);
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{input_shape.size()});
+
+        const auto dsr0 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+
+        const auto axes = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64, ngraph::Shape{unsqueeze_axes.size()}, unsqueeze_axes);
+        const auto unsqueeze = std::make_shared<ngraph::opset3::Unsqueeze>(dsr0, axes);
+
+        const auto split_axis = std::make_shared<ngraph::opset3::Constant>(
+                ngraph::element::i64, ngraph::Shape{}, std::vector<int64_t>{0});
+        const auto split = std::make_shared<ngraph::opset3::Split>(dims, split_axis, input_shape.size());
+
+        ngraph::OutputVector new_shape;
+        for (const auto & i : concat_indices) {
+            if (i == NEW_DIM) {
+                const auto new_dim = std::make_shared<ngraph::opset3::Constant>(
+                        split->get_input_element_type(0), ngraph::Shape{1}, std::vector<int64_t>{0});
+                new_shape.push_back(new_dim->output(0));
+            } else {
+                new_shape.push_back(split->output(i));
+            }
+        }
+
+        const auto concat = std::make_shared<ngraph::opset3::Concat>(new_shape, 0);
+        const auto dsr1 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(unsqueeze, concat);
+        return std::make_shared<ngraph::Function>(
+            ngraph::NodeVector{dsr1},
+            ngraph::ParameterVector{data, dims},
+            "Expected");
+    }
+};
+
+TEST_P(DynamicToStaticShapeUnsqueeze, CompareFunctions) {
+}
+
+INSTANTIATE_TEST_CASE_P(NGraph, DynamicToStaticShapeUnsqueeze, testing::Combine(
+    testing::Values(
+        ngraph::element::f16,
+        ngraph::element::f32,
+        ngraph::element::i32,
+        ngraph::element::i64,
+        ngraph::element::u8),
+    testing::Values(
+        // input_shape, unsqueeze_axes, concat_indices
+        UnsqueezeTestCase{DataDims{10, 100, 1000}, axis_vec{-1, -3}, axis_vec{0, 1, NEW_DIM, 2, NEW_DIM}},
+        UnsqueezeTestCase{DataDims{10, 100, 1000}, axis_vec{0}, axis_vec{NEW_DIM, 0, 1, 2}},
+        UnsqueezeTestCase{DataDims{10}, axis_vec{1}, axis_vec{0, NEW_DIM}},
+        UnsqueezeTestCase{DataDims{10}, axis_vec{0}, axis_vec{NEW_DIM, 0}})));
+
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/myriad/ngraph/utils/ngraph_utils.h b/inference-engine/tests/functional/plugin/myriad/ngraph/utils/ngraph_utils.h
deleted file mode 100644 (file)
index dd5cf87..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (C) 2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ngraph/function.hpp>
-
-#include <gtest/gtest.h>
-
-#include <memory>
-#include <queue>
-#include <string>
-
-namespace FuncTestUtils {
-
-using ComparingNodesPair = typename std::pair<std::shared_ptr<ngraph::Node>, std::shared_ptr<ngraph::Node>>;
-using ComparingNodesBFSQueue = typename std::queue<ComparingNodesPair>;
-
-//
-// This function compares two nGraph functions and requires them to have exactly one output
-// Check nodes types
-// Check number of inputs
-// Check shapes of each Node
-//
-void CompareFunctions(const std::shared_ptr<ngraph::Function>& fActual,
-                      const std::shared_ptr<ngraph::Function>& fExpected) {
-    const auto fActualResults = fActual->get_results();
-    const auto fExpectedResults = fExpected->get_results();
-
-    ASSERT_EQ(fActualResults.size(), 1);
-    ASSERT_EQ(fExpectedResults.size(), 1);
-
-    const auto typeInfoToStr = [](const ngraph::Node::type_info_t& typeInfo) {
-        return std::string(typeInfo.name) + "/" + std::to_string(typeInfo.version);
-    };
-
-    ComparingNodesBFSQueue comparingNodes;
-    comparingNodes.push({fActualResults[0], fExpectedResults[0]});
-    while (!comparingNodes.empty()) {
-        const auto node1 = comparingNodes.front().first;
-        const auto node2 = comparingNodes.front().second;
-        comparingNodes.pop();
-
-        ASSERT_EQ(node1->get_type_info(), node2->get_type_info())
-                                    << "Functions compare: data types must be equal "
-                                    << typeInfoToStr(node1->get_type_info()) << " != "
-                                    << typeInfoToStr(node2->get_type_info());
-
-        ASSERT_EQ(node1->inputs().size(), node2->inputs().size())
-                                    << "Functions compare: numbers of inputs are different: "
-                                    << node1->inputs().size() << " and " << node2->inputs().size();
-
-        for (int i = 0; i < node1->inputs().size(); ++i) {
-            const auto partialShape1 = node1->input(i).get_partial_shape();
-            const auto partialShape2 = node2->input(i).get_partial_shape();
-            ASSERT_TRUE(partialShape1.relaxes(partialShape2) && partialShape1.refines(partialShape2))
-                                        << "Functions compare: Different shape detected "
-                                        << partialShape1 << " and " << partialShape2;
-
-            comparingNodes.push({node1->input_value(i).get_node_shared_ptr(),
-                                 node2->input_value(i).get_node_shared_ptr()});
-        }
-    }
-}
-
-}  // namespace FuncTestUtils
index 48a66d8..1236363 100644 (file)
@@ -18,9 +18,6 @@ std::vector<std::vector<std::vector<size_t>>> inShapes = {
         {{10, 10, 10, 10}, {10, 10, 10, 10}, {10, 10, 10, 10}, {10, 10, 10, 10}},
         {{10, 10, 10, 10}, {10, 10, 10, 10}, {10, 10, 10, 10}, {10, 10, 10, 10}, {10, 10, 10, 10}}
 };
-std::vector<InferenceEngine::Precision> inputPrecisions = {InferenceEngine::Precision::FP32,
-                                                           InferenceEngine::Precision::FP16,
-                                                           InferenceEngine::Precision::U8};
 std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP16};
 
 
@@ -28,7 +25,6 @@ INSTANTIATE_TEST_CASE_P(Axis_1_and_3, ConcatLayerTest,
                         ::testing::Combine(
                                 ::testing::Values(1, 3),
                                 ::testing::ValuesIn(inShapes),
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
                         ConcatLayerTest::getTestCaseName);
@@ -39,7 +35,6 @@ INSTANTIATE_TEST_CASE_P(Axis_2, ConcatLayerTest,
                         ::testing::Combine(
                                 ::testing::Values(2),
                                 ::testing::ValuesIn(inShapes),
-                                ::testing::Values(InferenceEngine::Precision::FP16),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
                         ConcatLayerTest::getTestCaseName);
index 75ab4d5..4afe815 100644 (file)
@@ -11,13 +11,6 @@ using namespace LayerTestsDefinitions;
 
 namespace {
 
-// Common params
-const std::vector<InferenceEngine::Precision> inputPrecisions = {
-        InferenceEngine::Precision::FP32,
-         InferenceEngine::Precision::FP16,
-        InferenceEngine::Precision::U8,
-};
-
 const std::vector<InferenceEngine::Precision> netPrecisions = {
         InferenceEngine::Precision::FP16
 };
@@ -61,7 +54,6 @@ const auto conv2DParams_AutoPadValid = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(Convolution2D_ExplicitPadding, ConvolutionLayerTest,
                         ::testing::Combine(
                                 conv2DParams_ExplicitPadding,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
@@ -70,7 +62,6 @@ INSTANTIATE_TEST_CASE_P(Convolution2D_ExplicitPadding, ConvolutionLayerTest,
 INSTANTIATE_TEST_CASE_P(Convolution2D_AutoPadValid, ConvolutionLayerTest,
                         ::testing::Combine(
                                 conv2DParams_AutoPadValid,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
diff --git a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/maximum.cpp b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/maximum.cpp
new file mode 100644 (file)
index 0000000..f1e3145
--- /dev/null
@@ -0,0 +1,32 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include "single_layer_tests/maximum.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+
+std::vector<std::vector<std::vector<size_t>>> inShapes = {
+        {{2}},
+        {{1, 1, 1, 3}},
+        {{1, 2, 4}},
+        {{1, 4, 4}},
+        {{1, 4, 4, 1}},
+};
+
+std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::I32,
+                                                         InferenceEngine::Precision::FP16,
+};
+
+INSTANTIATE_TEST_CASE_P(maximum, MaximumLayerTest,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(inShapes),
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
+                        MaximumLayerTest::getTestCaseName);
+
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/multiply.cpp b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/multiply.cpp
new file mode 100644 (file)
index 0000000..93df0f9
--- /dev/null
@@ -0,0 +1,31 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include "single_layer_tests/multiply.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+
+std::vector<std::vector<std::vector<size_t>>> inShapes = {
+        {{2}},
+        {{1, 1, 1, 3}},
+        {{1, 2, 4}},
+        {{1, 4, 4}},
+        {{1, 4, 4, 1}},
+};
+
+std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
+                                                         InferenceEngine::Precision::FP16,
+};
+
+INSTANTIATE_TEST_CASE_P(multiply, MultiplyLayerTest,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(inShapes),
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
+                        MultiplyLayerTest::getTestCaseName);
+}  // namespace
index 57acc05..3facc4c 100644 (file)
@@ -28,17 +28,11 @@ const std::vector<InferenceEngine::Precision> inputPrecisions = {
         InferenceEngine::Precision::U8,
 };
 
-const std::vector<InferenceEngine::Precision> netPrecisions = {
-        InferenceEngine::Precision::FP16
-};
-
 // Enable this when #-29056 is ready
 INSTANTIATE_TEST_CASE_P(DISABLED_nonzero, NonZeroLayerTest,
         ::testing::Combine(
                 ::testing::ValuesIn(inShapes),
                 ::testing::ValuesIn(inputPrecisions),
-                ::testing::ValuesIn(netPrecisions),
-                ::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
-                ::testing::Values(ConfigMap({{VPU_CONFIG_KEY(DETECT_NETWORK_BATCH), CONFIG_VALUE(NO)}}))),
+                ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
          NonZeroLayerTest::getTestCaseName);
 }  // namespace
index 674cf2a..9f0e487 100644 (file)
@@ -12,12 +12,6 @@ using namespace ngraph::helpers;
 using namespace LayerTestsDefinitions;
 
 namespace {
-// Common params
-const std::vector<InferenceEngine::Precision> inputPrecisions = {
-        InferenceEngine::Precision::FP32,
-        InferenceEngine::Precision::FP16,
-        InferenceEngine::Precision::U8,
-};
 
 const std::vector<InferenceEngine::Precision> netPrecisions = {
         InferenceEngine::Precision::FP16
@@ -50,7 +44,6 @@ const auto maxPool_ExplicitPad_FloorRounding_Params = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(MaxPool_ExplicitPad_FloorRounding, PoolingLayerTest,
                         ::testing::Combine(
                                 maxPool_ExplicitPad_FloorRounding_Params,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 50, 50})),
                                 ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
@@ -74,7 +67,6 @@ const auto maxPool_ExplicitPad_CeilRounding_Params = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(MaxPool_ExplicitPad_CeilRounding, PoolingLayerTest,
                         ::testing::Combine(
                                 maxPool_ExplicitPad_CeilRounding_Params,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 50, 50})),
                                 ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
@@ -100,7 +92,6 @@ const auto avgPoolExplicitPadCeilRoundingParams = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(AvgPool_ExplicitPad_CeilRounding, PoolingLayerTest,
                         ::testing::Combine(
                                 avgPoolExplicitPadCeilRoundingParams,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
@@ -123,7 +114,6 @@ const auto avgPoolExplicitPadFloorRoundingParams = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(AvgPool_ExplicitPad_FloorRounding, PoolingLayerTest,
                         ::testing::Combine(
                                 avgPoolExplicitPadFloorRoundingParams,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
@@ -147,7 +137,6 @@ const auto allPools_ValidPad_Params = ::testing::Combine(
 INSTANTIATE_TEST_CASE_P(MAX_and_AVGPool_ValidPad, PoolingLayerTest,
                         ::testing::Combine(
                                 allPools_ValidPad_Params,
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 3, 50, 50})),
                                 ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
index a411374..3cad2f5 100644 (file)
 using namespace LayerTestsDefinitions;
 
 namespace {
-// Common params
-const std::vector<InferenceEngine::Precision> inputPrecisions = {
-        InferenceEngine::Precision::FP32,
-        InferenceEngine::Precision::FP16,
-        InferenceEngine::Precision::U8,
-};
-
 const std::vector<InferenceEngine::Precision> netPrecisions = {
         InferenceEngine::Precision::FP16
 };
@@ -29,7 +22,6 @@ INSTANTIATE_TEST_CASE_P(NumSplitsCheck, SplitLayerTest,
                                 //  Failed to infer shapes for Split layer (Split_2) with error:
                                 //  The sum of the dimensions on the axis(0) is not equal out_sizes: [30]
                                 ::testing::Values(1, 2, 3),
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({30, 30, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
diff --git a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/subgraph_tests/reshape_squeeze_reshape_relu.cpp
new file mode 100644 (file)
index 0000000..b5df338
--- /dev/null
@@ -0,0 +1,43 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+#include <vector>
+#include "subgraph_tests/reshape_squeeze_reshape_relu.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+    std::vector<std::vector<std::vector<size_t>>> inputs{
+            {{1, 1, 3}, {0, 1}},
+            {{1, 3, 1}, {0, 2}},
+            {{1, 3, 1}, {0}},
+            {{3, 1, 1}, {1, 2}},
+            {{3, 1, 1}, {2}},
+            {{4, 1, 3, 1, 1, 3}, {1, 3}},
+            {{4, 3}, {1, 2}},
+            {{4, 3}, {0}},
+            {{1}, {0}},
+    };
+
+    std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
+                                                             InferenceEngine::Precision::FP16,
+    };
+
+    INSTANTIATE_TEST_CASE_P(DISABLED_reshape_squeeze_reshape_relu, ReshapeSqueezeReshapeRelu,
+                            ::testing::Combine(
+                                    ::testing::ValuesIn(inputs),
+                                    ::testing::ValuesIn(netPrecisions),
+                                    ::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
+                                    ::testing::Values(true)),
+                            ReshapeSqueezeReshapeRelu::getTestCaseName);
+
+    INSTANTIATE_TEST_CASE_P(DISABLED_reshape_unsqueeze_reshape_relu, ReshapeSqueezeReshapeRelu,
+                            ::testing::Combine(
+                                    ::testing::ValuesIn(inputs),
+                                    ::testing::ValuesIn(netPrecisions),
+                                    ::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
+                                    ::testing::Values(false)),
+                            ReshapeSqueezeReshapeRelu::getTestCaseName);
+}  // namespace
index c6769c0..8519d9f 100644 (file)
 using namespace LayerTestsDefinitions;
 
 namespace {
-const std::vector<InferenceEngine::Precision> inputPrecisions = {
-        InferenceEngine::Precision::FP32,
-        InferenceEngine::Precision::FP16,
-        InferenceEngine::Precision::U8
-};
 
 const std::vector<InferenceEngine::Precision> netPrecisions = {
         InferenceEngine::Precision::FP16
@@ -22,7 +17,6 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
 
 INSTANTIATE_TEST_CASE_P(NoReshape, SplitConvConcat,
                         ::testing::Combine(
-                                ::testing::ValuesIn(inputPrecisions),
                                 ::testing::ValuesIn(netPrecisions),
                                 ::testing::Values(std::vector<size_t >({1, 6, 40, 40})),
                                 ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)),
diff --git a/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_binary_elementwise.cpp b/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_binary_elementwise.cpp
new file mode 100644 (file)
index 0000000..3a38bf5
--- /dev/null
@@ -0,0 +1,111 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <functional_test_utils/layer_test_utils.hpp>
+#include <ngraph_functions/builders.hpp>
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+
+namespace {
+
+using DataType = ngraph::element::Type_t;
+using DataDims = ngraph::Shape;
+
+using Parameters = std::tuple<
+    DataType,
+    DataDims,
+    DataDims,
+    ngraph::NodeTypeInfo,
+    LayerTestsUtils::TargetDevice
+>;
+
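+// DSR_BinaryElementwise wraps both eltwise inputs in DynamicShapeResolver nodes driven
+// by constant shapes; DSR_BinaryElementwiseSingleDSR wraps only the first input.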
+class DSR_BinaryElementwise : public testing::WithParamInterface<Parameters>,
+        public LayerTestsUtils::LayerTestsCommon {
+protected:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& dataType = std::get<0>(parameters);
+        const auto& dataDims0 = std::get<1>(parameters);
+        const auto& dataDims1 = std::get<2>(parameters);
+        const auto& eltwiseType = std::get<3>(parameters);
+        targetDevice = std::get<4>(parameters);
+
+        const auto input0 = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims0);
+        const auto input1 = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims1);
+
+        const auto input0_const = ngraph::opset3::Constant::create(ngraph::element::i64, {dataDims0.size()}, dataDims0);
+        const auto input1_const = ngraph::opset3::Constant::create(ngraph::element::i64, {dataDims1.size()}, dataDims1);
+
+        const auto dsr0 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(input0, input0_const);
+        const auto dsr1 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(input1, input1_const);
+
+        const auto eltwise = ngraph::helpers::getNodeSharedPtr(eltwiseType, {dsr0, dsr1});
+
+        function = std::make_shared<ngraph::Function>(
+            ngraph::NodeVector{eltwise},
+            ngraph::ParameterVector{input0, input1},
+            eltwiseType.name);
+    }
+};
+
+class DSR_BinaryElementwiseSingleDSR : public testing::WithParamInterface<Parameters>,
+        public LayerTestsUtils::LayerTestsCommon {
+protected:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& dataType = std::get<0>(parameters);
+        const auto& dataDims0 = std::get<1>(parameters);
+        const auto& dataDims1 = std::get<2>(parameters);
+        const auto& eltwiseType = std::get<3>(parameters);
+        targetDevice = std::get<4>(parameters);
+
+        const auto input0 = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims0);
+        const auto input1 = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims1);
+
+        const auto input0_const = ngraph::opset3::Constant::create(ngraph::element::i64, {dataDims0.size()}, dataDims0);
+        const auto dsr0 = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(input0, input0_const);
+
+        const auto eltwise = ngraph::helpers::getNodeSharedPtr(eltwiseType, {dsr0, input1});
+
+        function = std::make_shared<ngraph::Function>(
+            ngraph::NodeVector{eltwise},
+            ngraph::ParameterVector{input0, input1},
+            eltwiseType.name);
+    }
+};
+
+TEST_P(DSR_BinaryElementwise, CompareWithReference) {
+    Run();
+}
+
+INSTANTIATE_TEST_CASE_P(DISABLED_DynamicBinaryElementwise, DSR_BinaryElementwise,
+    ::testing::Combine(
+        ::testing::Values(ngraph::element::f16, ngraph::element::f32, ngraph::element::i32),
+        ::testing::Values(ngraph::Shape{1}, ngraph::Shape{1, 1}, ngraph::Shape{1, 1, 1}),
+        ::testing::Values(ngraph::Shape{100}, ngraph::Shape{100, 1}, ngraph::Shape{100, 100}),
+        ::testing::Values(ngraph::opset3::Add::type_info,
+                          ngraph::opset3::Multiply::type_info,
+                          ngraph::opset3::Divide::type_info,
+                          ngraph::opset3::Subtract::type_info,
+//                        ngraph::opset3::Equal::type_info, operation broadcast default value needs to be fixed
+                          ngraph::opset3::Power::type_info),
+        ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)));
+
+TEST_P(DSR_BinaryElementwiseSingleDSR, CompareWithReference) {
+    Run();
+}
+
+INSTANTIATE_TEST_CASE_P(DISABLED_DynamicBinaryElementwiseSingleDSR, DSR_BinaryElementwiseSingleDSR,
+    ::testing::Combine(
+        ::testing::Values(ngraph::element::f16, ngraph::element::f32, ngraph::element::i32),
+        ::testing::Values(ngraph::Shape{1}, ngraph::Shape{1, 1}, ngraph::Shape{1, 1, 1}),
+        ::testing::Values(ngraph::Shape{100}, ngraph::Shape{100, 1}, ngraph::Shape{100, 100}),
+        ::testing::Values(ngraph::opset3::Add::type_info,
+                          ngraph::opset3::Multiply::type_info,
+                          ngraph::opset3::Divide::type_info,
+                          ngraph::opset3::Subtract::type_info,
+//                        ngraph::opset3::Equal::type_info, operation broadcast default value needs to be fixed
+                          ngraph::opset3::Power::type_info),
+        ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)));
+
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_clamp.cpp b/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_clamp.cpp
new file mode 100644 (file)
index 0000000..b1dcfb1
--- /dev/null
@@ -0,0 +1,50 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <functional_test_utils/layer_test_utils.hpp>
+#include <ngraph_functions/builders.hpp>
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+
+namespace {
+
+using DataType = ngraph::element::Type_t;
+using DataDims = ngraph::Shape;
+
+using Parameters = std::tuple<
+    DataType,
+    DataDims,
+    LayerTestsUtils::TargetDevice
+>;
+
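+// Applies Clamp(0, 6) to a tensor whose shape is supplied at runtime through a
+// DynamicShapeResolver.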
+class DSR_Clamp : public testing::WithParamInterface<Parameters>,
+        public LayerTestsUtils::LayerTestsCommon {
+protected:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& dataType = std::get<0>(parameters);
+        const auto& dataDims = std::get<1>(parameters);
+        targetDevice = std::get<2>(parameters);
+
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims);
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims.size()});
+        const auto dsr  = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+
+        const auto node = std::make_shared<ngraph::opset3::Clamp>(dsr, 0., 6.);
+
+        const auto result = std::make_shared<ngraph::opset3::Result>(node);
+        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{data, dims}, "DSR-Clamp");
+    }
+};
+
+TEST_P(DSR_Clamp, CompareWithReference) {
+    Run();
+}
+
+INSTANTIATE_TEST_CASE_P(DISABLED_DynamicClamp, DSR_Clamp,
+    ::testing::Combine(
+        ::testing::Values(ngraph::element::f16, ngraph::element::f32, ngraph::element::i32),
+        ::testing::Values(ngraph::Shape{1, 800}),
+        ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)));
+
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_convert.cpp b/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_convert.cpp
new file mode 100644 (file)
index 0000000..bf56e85
--- /dev/null
@@ -0,0 +1,50 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <functional_test_utils/layer_test_utils.hpp>
+#include <ngraph_functions/builders.hpp>
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+
+namespace {
+
+using DataType = ngraph::element::Type_t;
+using DataDims = ngraph::Shape;
+
+using Parameters = std::tuple<
+    DataType,
+    DataDims,
+    LayerTestsUtils::TargetDevice
+>;
+
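+// Applies Convert to the same element type, exercising the dynamic-shape path without
+// changing the data.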
+class DSR_Convert : public testing::WithParamInterface<Parameters>,
+        public LayerTestsUtils::LayerTestsCommon {
+protected:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& dataType = std::get<0>(parameters);
+        const auto& dataDims = std::get<1>(parameters);
+        targetDevice = std::get<2>(parameters);
+
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims);
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims.size()});
+        const auto dsr  = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+
+        const auto node = std::make_shared<ngraph::opset3::Convert>(dsr, dataType);
+
+        const auto result = std::make_shared<ngraph::opset3::Result>(node);
+        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{data, dims}, "DSR-Convert");
+    }
+};
+
+TEST_P(DSR_Convert, CompareWithReference) {
+    Run();
+}
+
+INSTANTIATE_TEST_CASE_P(DISABLED_DynamicConvert, DSR_Convert,
+    ::testing::Combine(
+        ::testing::Values(ngraph::element::f16, ngraph::element::f32, ngraph::element::i32),
+        ::testing::Values(ngraph::Shape{1, 800}),
+        ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)));
+
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_non_max_suppression.cpp b/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_non_max_suppression.cpp
new file mode 100644 (file)
index 0000000..6f36943
--- /dev/null
@@ -0,0 +1,81 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <functional_test_utils/layer_test_utils.hpp>
+#include <ngraph_functions/builders.hpp>
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+
+namespace {
+
+using DataType = ngraph::element::Type_t;
+using DataDims = ngraph::Shape;
+
+struct NonMaxSuppressionTestCase {
+    int64_t num_batches, num_boxes, num_classes, max_output_boxes_per_class;
+    float iou_threshold, score_threshold;
+};
+
+using Parameters = std::tuple<
+    DataType,
+    DataType,
+    NonMaxSuppressionTestCase,
+    LayerTestsUtils::TargetDevice
+>;
+
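+// Only the scores input is made dynamic through DynamicShapeResolver; boxes and the
+// threshold constants stay static.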
+class DSR_NonMaxSuppression : public testing::WithParamInterface<Parameters>,
+        public LayerTestsUtils::LayerTestsCommon {
+protected:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& float_type = std::get<0>(parameters);
+        const auto& integer_type = std::get<1>(parameters);
+        const auto& nms_setup = std::get<2>(parameters);
+        targetDevice = std::get<3>(parameters);
+
+        const auto boxes = std::make_shared<ngraph::opset3::Parameter>(
+                float_type, ngraph::PartialShape{nms_setup.num_batches, nms_setup.num_boxes, 4});
+        const auto scores = std::make_shared<ngraph::opset3::Parameter>(
+                float_type, ngraph::PartialShape{nms_setup.num_batches, nms_setup.num_classes, nms_setup.num_boxes});
+        const auto max_output_boxes_per_class = std::make_shared<ngraph::opset3::Constant>(
+                integer_type, ngraph::Shape{}, std::vector<int64_t>{nms_setup.max_output_boxes_per_class});
+        const auto iou_threshold = std::make_shared<ngraph::opset3::Constant>(
+                float_type, ngraph::Shape{}, std::vector<float>{nms_setup.iou_threshold});
+        const auto score_threshold = std::make_shared<ngraph::opset3::Constant>(
+                float_type, ngraph::Shape{}, std::vector<float>{nms_setup.score_threshold});
+
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{3});
+        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(scores, dims);
+
+        const auto node = std::make_shared<ngraph::opset3::NonMaxSuppression>(
+                boxes, dsr, max_output_boxes_per_class, iou_threshold, score_threshold);
+
+        const auto result = std::make_shared<ngraph::opset3::Result>(node);
+        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
+                ngraph::ParameterVector{boxes, scores, dims}, "DSR-NMS");
+    }
+};
+
+TEST_P(DSR_NonMaxSuppression, CompareWithReference) {
+    Run();
+}
+
+INSTANTIATE_TEST_CASE_P(DISABLED_DynamicNonMaxSuppression, DSR_NonMaxSuppression,
+    ::testing::Combine(
+         ::testing::Values(
+                    ngraph::element::f16,
+                    ngraph::element::f32),
+         ::testing::Values(
+                    ngraph::element::i32,
+                    ngraph::element::i64,
+                    ngraph::element::u8),
+         ::testing::Values(
+                    // num_batches, num_boxes, num_classes, max_output_boxes_per_class, iou_threshold, score_threshold
+                    NonMaxSuppressionTestCase{1, 10, 5, 10, 0., 0.},
+                    NonMaxSuppressionTestCase{2, 100, 5, 10, 0., 0.},
+                    NonMaxSuppressionTestCase{3, 10, 5, 2, 0.5, 0.},
+                    NonMaxSuppressionTestCase{1, 1000, 1, 2000, 0.5, 0.}),
+         ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)));
+
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_roialign.cpp b/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_roialign.cpp
new file mode 100644 (file)
index 0000000..7295f1e
--- /dev/null
@@ -0,0 +1,165 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <functional_test_utils/layer_test_utils.hpp>
+#include <ngraph_functions/builders.hpp>
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+
+namespace {
+
+using DataType = ngraph::element::Type_t;
+using DataDims = ngraph::Shape;
+
+struct ROIAlignTestCase {
+    ngraph::Shape data_shape;
+    uint64_t num_rois, pooled_h, pooled_w, sampling_ratio;
+    float spatial_scale;
+    std::string mode;
+};
+
+using Parameters = std::tuple<
+    DataType,
+    DataType,
+    ROIAlignTestCase,
+    LayerTestsUtils::TargetDevice
+>;
+
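+// Three variants are covered below: DSR on the data input (DSR_ROIAlignDataDSR),
+// DSR on the ROI indices (DSR_ROIAlignROIDSR), and DSR on both inputs (DSR_ROIAlign).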
+class DSR_ROIAlignDataDSR : public testing::WithParamInterface<Parameters>,
+        public LayerTestsUtils::LayerTestsCommon {
+protected:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& float_type = std::get<0>(parameters);
+        const auto& integer_type = std::get<1>(parameters);
+        const auto& roialign_setup = std::get<2>(parameters);
+        targetDevice = std::get<3>(parameters);
+
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(float_type, roialign_setup.data_shape);
+        const auto boxes = std::make_shared<ngraph::opset3::Parameter>(float_type, ngraph::Shape{roialign_setup.num_rois, 4});
+        const auto rois = std::make_shared<ngraph::opset3::Parameter>(integer_type, ngraph::Shape{roialign_setup.num_rois});
+
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{data->get_shape().size()});
+        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+
+        const auto node = std::make_shared<ngraph::opset3::ROIAlign>(dsr, boxes, rois,
+                roialign_setup.pooled_h, roialign_setup.pooled_w, roialign_setup.sampling_ratio, roialign_setup.spatial_scale, roialign_setup.mode);
+
+        const auto result = std::make_shared<ngraph::opset3::Result>(node);
+        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{data, dims, boxes, rois}, "DSR-ROIAlign");
+    }
+};
+
+TEST_P(DSR_ROIAlignDataDSR, CompareWithReference) {
+    Run();
+}
+
+INSTANTIATE_TEST_CASE_P(DISABLED_DynamicROIAlignDataDSR, DSR_ROIAlignDataDSR,
+    ::testing::Combine(
+        ::testing::Values(
+                    ngraph::element::f16,
+                    ngraph::element::f32),
+        ::testing::Values(
+                    ngraph::element::i32,
+                    ngraph::element::i64,
+                    ngraph::element::u8),
+        //data_shape, num_rois, pooled_h, pooled_w, sampling_ratio, spatial_scale, mode
+        ::testing::Values(
+                    ROIAlignTestCase{{7, 256, 200, 200}, 1000, 6, 6, 2, 16., "avg"},
+                    ROIAlignTestCase{{7, 256, 200, 200}, 1000, 7, 6, 2, 16., "max"}),
+        ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)));
+
+class DSR_ROIAlignROIDSR : public testing::WithParamInterface<Parameters>,
+        public LayerTestsUtils::LayerTestsCommon {
+protected:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& float_type = std::get<0>(parameters);
+        const auto& integer_type = std::get<1>(parameters);
+        const auto& roialign_setup = std::get<2>(parameters);
+        targetDevice = std::get<3>(parameters);
+
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(float_type, roialign_setup.data_shape);
+        const auto boxes = std::make_shared<ngraph::opset3::Parameter>(float_type, ngraph::Shape{roialign_setup.num_rois, 4});
+        const auto rois = std::make_shared<ngraph::opset3::Parameter>(integer_type, ngraph::Shape{roialign_setup.num_rois});
+
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{rois->get_shape().size()});
+        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(rois, dims);
+
+        const auto node = std::make_shared<ngraph::opset3::ROIAlign>(data, boxes, dsr,
+                roialign_setup.pooled_h, roialign_setup.pooled_w, roialign_setup.sampling_ratio, roialign_setup.spatial_scale, roialign_setup.mode);
+
+        const auto result = std::make_shared<ngraph::opset3::Result>(node);
+        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{data, dims, boxes, rois}, "DSR-ROIAlign");
+    }
+};
+
+TEST_P(DSR_ROIAlignROIDSR, CompareWithReference) {
+    Run();
+}
+
+INSTANTIATE_TEST_CASE_P(DISABLED_DynamicROIAlign, DSR_ROIAlignROIDSR,
+    ::testing::Combine(
+        ::testing::Values(
+                    ngraph::element::f16,
+                    ngraph::element::f32),
+        ::testing::Values(
+                    ngraph::element::i32,
+                    ngraph::element::i64,
+                    ngraph::element::u8),
+        //data_shape, num_rois, pooled_h, pooled_w, sampling_ratio, spatial_scale, mode
+        ::testing::Values(
+                    ROIAlignTestCase{{7, 256, 200, 200}, 1000, 6, 6, 2, 16., "avg"},
+                    ROIAlignTestCase{{7, 256, 200, 200}, 1000, 7, 6, 2, 16., "max"}),
+        ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)));
+
+class DSR_ROIAlign : public testing::WithParamInterface<Parameters>,
+        public LayerTestsUtils::LayerTestsCommon {
+protected:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& float_type = std::get<0>(parameters);
+        const auto& integer_type = std::get<1>(parameters);
+        const auto& roialign_setup = std::get<2>(parameters);
+        targetDevice = std::get<3>(parameters);
+
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(float_type, roialign_setup.data_shape);
+        const auto boxes = std::make_shared<ngraph::opset3::Parameter>(float_type, ngraph::Shape{roialign_setup.num_rois, 4});
+        const auto rois = std::make_shared<ngraph::opset3::Parameter>(integer_type, ngraph::Shape{roialign_setup.num_rois});
+
+        const auto roi_dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{rois->get_shape().size()});
+        const auto roi_dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(rois, roi_dims);
+
+        const auto data_dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{data->get_shape().size()});
+        const auto data_dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, data_dims);
+
+        const auto node = std::make_shared<ngraph::opset3::ROIAlign>(data_dsr, boxes, roi_dsr,
+                roialign_setup.pooled_h, roialign_setup.pooled_w, roialign_setup.sampling_ratio, roialign_setup.spatial_scale, roialign_setup.mode);
+
+        const auto result = std::make_shared<ngraph::opset3::Result>(node);
+        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
+                ngraph::ParameterVector{data, data_dims, boxes, rois, roi_dims}, "DSR-ROIAlign");
+    }
+};
+
+TEST_P(DSR_ROIAlign, CompareWithReference) {
+    Run();
+}
+
+INSTANTIATE_TEST_CASE_P(DISABLED_DynamicROIAlign, DSR_ROIAlign,
+    ::testing::Combine(
+        ::testing::Values(
+                    ngraph::element::f16,
+                    ngraph::element::f32),
+        ::testing::Values(
+                    ngraph::element::i32,
+                    ngraph::element::i64,
+                    ngraph::element::u8),
+        //data_shape, num_rois, pooled_h, pooled_w, sampling_ratio, spatial_scale, mode
+        ::testing::Values(
+                    ROIAlignTestCase{{7, 256, 200, 200}, 1000, 6, 6, 2, 16., "avg"},
+                    ROIAlignTestCase{{7, 256, 200, 200}, 1000, 7, 6, 2, 16., "max"}),
+        ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)));
+
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_scatter.cpp b/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_scatter.cpp
new file mode 100644 (file)
index 0000000..bc875a6
--- /dev/null
@@ -0,0 +1,74 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <functional_test_utils/layer_test_utils.hpp>
+#include <ngraph_functions/builders.hpp>
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+
+namespace {
+
+using DataType = ngraph::element::Type_t;
+
+
+struct ScatterTestCase {
+    ngraph::NodeTypeInfo scatter_type_info;
+    ngraph::Shape data_shape, indices_shape, updates_shape;
+    int64_t axis;
+};
+
+using Parameters = std::tuple<
+    DataType,
+    DataType,
+    ScatterTestCase,
+    LayerTestsUtils::TargetDevice
+>;
+
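+// The scatter operation under test is selected by its NodeTypeInfo and receives
+// a dynamically shaped data input produced by DynamicShapeResolver.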
+class DSR_Scatter : public testing::WithParamInterface<Parameters>,
+        public LayerTestsUtils::LayerTestsCommon {
+protected:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& numeric_type = std::get<0>(parameters);
+        const auto& integer_type = std::get<1>(parameters);
+        const auto& scatter_setup = std::get<2>(parameters);
+        targetDevice = std::get<3>(parameters);
+
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(numeric_type, scatter_setup.data_shape);
+        const auto indices = std::make_shared<ngraph::opset3::Parameter>(integer_type, scatter_setup.indices_shape);
+        const auto updates = std::make_shared<ngraph::opset3::Parameter>(numeric_type, scatter_setup.updates_shape);
+        const auto axis = std::make_shared<ngraph::opset3::Constant>(integer_type, ngraph::Shape{1}, std::vector<int64_t>{scatter_setup.axis});
+
+
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{scatter_setup.data_shape.size()});
+        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+
+        const auto node = ngraph::helpers::getNodeSharedPtr(scatter_setup.scatter_type_info, {dsr, indices, updates, axis});
+
+        const auto result = std::make_shared<ngraph::opset3::Result>(node);
+        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
+                ngraph::ParameterVector{data, indices, updates, dims}, scatter_setup.scatter_type_info.name);
+    }
+};
+
+TEST_P(DSR_Scatter, CompareWithReference) {
+    Run();
+}
+
+INSTANTIATE_TEST_CASE_P(DISABLED_DynamicScatter, DSR_Scatter,
+    ::testing::Combine(
+        ::testing::Values(
+                    ngraph::element::f16,
+                    ngraph::element::f32,
+                    ngraph::element::i32,
+                    ngraph::element::i64,
+                    ngraph::element::u8),
+        ::testing::Values(
+                    ngraph::element::i32,
+                    ngraph::element::i64,
+                    ngraph::element::u8),
+        ::testing::Values(
+                    ScatterTestCase{ngraph::opset3::ScatterUpdate::type_info, {1000, 256, 10, 15}, {125, 20}, {1000, 125, 20, 10, 15}, 1}),
+        ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)));
+
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_squeeze.cpp b/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_squeeze.cpp
new file mode 100644 (file)
index 0000000..729abed
--- /dev/null
@@ -0,0 +1,66 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <functional_test_utils/layer_test_utils.hpp>
+#include <ngraph_functions/builders.hpp>
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+
+namespace {
+
+using DataType = ngraph::element::Type_t;
+using DataDims = ngraph::Shape;
+using axis_vec = std::vector<int64_t>;
+
+struct SqueezeTestCase {
+    DataDims input_shape;
+    axis_vec squeeze_axes;
+};
+
+using Parameters = std::tuple<
+    DataType,
+    SqueezeTestCase,
+    LayerTestsUtils::TargetDevice
+>;
+
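+// Squeeze consumes a dynamically shaped input produced by DynamicShapeResolver;
+// the axes to squeeze are supplied as an i64 constant.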
+class DSR_Squeeze : public testing::WithParamInterface<Parameters>, public LayerTestsUtils::LayerTestsCommon {
+protected:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& data_type = std::get<0>(parameters);
+        const auto& squeeze_test_case = std::get<1>(parameters);
+
+        const auto& input_shape = squeeze_test_case.input_shape;
+        const auto& squeeze_axes = squeeze_test_case.squeeze_axes;
+
+        targetDevice = std::get<2>(parameters);
+
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(data_type, input_shape);
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{input_shape.size()});
+
+        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+
+        const auto axes = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64, ngraph::Shape{squeeze_axes.size()}, squeeze_axes);
+        const auto node = std::make_shared<ngraph::opset3::Squeeze>(dsr, axes);
+
+        const auto result = std::make_shared<ngraph::opset3::Result>(node);
+        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{data, dims}, "DSR-Squeeze");
+    }
+};
+
+TEST_P(DSR_Squeeze, CompareWithReference) {
+    Run();
+}
+
+INSTANTIATE_TEST_CASE_P(DISABLED_DynamicSqueeze, DSR_Squeeze,
+    ::testing::Combine(
+        ::testing::Values(ngraph::element::f16, ngraph::element::f32, ngraph::element::i32),
+        ::testing::Values(
+                // input_shape, squeeze_axis
+                SqueezeTestCase{DataDims{1, 1, 1000}, axis_vec{-2}},
+                SqueezeTestCase{DataDims{1, 1000, 1}, axis_vec{0, 2}},
+                SqueezeTestCase{DataDims{1, 1, 1}, axis_vec{1}},
+                SqueezeTestCase{DataDims{1000, 1, 1}, axis_vec{2}}),
+        ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)));
+
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_transpose.cpp b/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_transpose.cpp
new file mode 100644 (file)
index 0000000..8198d7a
--- /dev/null
@@ -0,0 +1,53 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <algorithm>  // std::shuffle
+#include <numeric>    // std::iota
+#include <random>     // std::mt19937
+
+#include <functional_test_utils/layer_test_utils.hpp>
+#include <ngraph_functions/builders.hpp>
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+
+namespace {
+
+using DataType = ngraph::element::Type_t;
+using DataDims = ngraph::Shape;
+
+using Parameters = std::tuple<
+    DataType,
+    DataDims,
+    LayerTestsUtils::TargetDevice
+>;
+
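+// Transposes a DynamicShapeResolver output using a shuffled permutation; the
+// default-seeded std::mt19937 makes the permutation deterministic across runs.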
+class DSR_Transpose : public testing::WithParamInterface<Parameters>, public LayerTestsUtils::LayerTestsCommon {
+protected:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& dataType = std::get<0>(parameters);
+        const auto& dataDims = std::get<1>(parameters);
+        targetDevice = std::get<2>(parameters);
+
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims);
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims.size()});
+        const auto dsr  = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+
+        auto permutation = std::vector<std::int64_t>(dataDims.size());
+        std::iota(permutation.begin(), permutation.end(), 0);
+        std::shuffle(permutation.begin(), permutation.end(), std::mt19937());
+        const auto transposition = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64, ngraph::Shape{dataDims.size()}, permutation);
+        const auto transpose = std::make_shared<ngraph::opset3::Transpose>(dsr, transposition);
+
+        const auto result = std::make_shared<ngraph::opset3::Result>(transpose);
+        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{data, dims}, "DSR-Transpose");
+    }
+};
+
+TEST_P(DSR_Transpose, CompareWithReference) {
+    Run();
+}
+
+INSTANTIATE_TEST_CASE_P(DISABLED_DynamicTranspose, DSR_Transpose,
+    ::testing::Combine(
+        ::testing::Values(ngraph::element::f16, ngraph::element::f32, ngraph::element::i32),
+        ::testing::Values(ngraph::Shape{1, 800}),
+        ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)));
+
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_unary_elementwise.cpp b/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_unary_elementwise.cpp
new file mode 100644 (file)
index 0000000..075d298
--- /dev/null
@@ -0,0 +1,57 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <functional_test_utils/layer_test_utils.hpp>
+#include <ngraph_functions/builders.hpp>
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+
+namespace {
+
+using DataType = ngraph::element::Type_t;
+using DataDims = ngraph::Shape;
+
+using Parameters = std::tuple<
+    DataType,
+    DataDims,
+    ngraph::NodeTypeInfo,
+    LayerTestsUtils::TargetDevice
+>;
+
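+// The unary operation under test is instantiated from its NodeTypeInfo and
+// applied directly to the DynamicShapeResolver output.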
+class DSR_UnaryElementwise : public testing::WithParamInterface<Parameters>,
+        public LayerTestsUtils::LayerTestsCommon {
+protected:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& dataType = std::get<0>(parameters);
+        const auto& dataDims = std::get<1>(parameters);
+        const auto& type_info = std::get<2>(parameters);
+        targetDevice = std::get<3>(parameters);
+
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(dataType, dataDims);
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{dataDims.size()});
+        const auto dsr  = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+
+        const auto node = ngraph::helpers::getNodeSharedPtr(type_info, {dsr});
+
+        const auto result = std::make_shared<ngraph::opset3::Result>(node);
+        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{data, dims}, type_info.name);
+    }
+};
+
+TEST_P(DSR_UnaryElementwise, CompareWithReference) {
+    Run();
+}
+
+INSTANTIATE_TEST_CASE_P(DISABLED_DynamicUnaryElementwise, DSR_UnaryElementwise,
+    ::testing::Combine(
+        ::testing::Values(ngraph::element::f16, ngraph::element::f32, ngraph::element::i32),
+        ::testing::Values(ngraph::Shape{1, 800}),
+        ::testing::Values(ngraph::opset3::Floor::type_info,
+                          ngraph::opset3::Log::type_info,
+                          ngraph::opset3::Relu::type_info,
+                          ngraph::opset3::Sigmoid::type_info,
+                          ngraph::opset3::Sqrt::type_info),
+        ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)));
+
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_unsqueeze.cpp b/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_unsqueeze.cpp
new file mode 100644 (file)
index 0000000..df85d68
--- /dev/null
@@ -0,0 +1,67 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <functional_test_utils/layer_test_utils.hpp>
+#include <ngraph_functions/builders.hpp>
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+
+
+namespace {
+
+using DataType = ngraph::element::Type_t;
+using DataDims = ngraph::Shape;
+using axis_vec = std::vector<int64_t>;
+
+struct UnsqueezeTestCase {
+    DataDims input_shape;
+    axis_vec unsqueeze_axes;
+};
+
+using Parameters = std::tuple<
+    DataType,
+    UnsqueezeTestCase,
+    LayerTestsUtils::TargetDevice
+>;
+
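+// Unsqueeze consumes a dynamically shaped input produced by DynamicShapeResolver;
+// the axes to insert are supplied as an i64 constant.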
+class DSR_Unsqueeze : public testing::WithParamInterface<Parameters>, public LayerTestsUtils::LayerTestsCommon {
+protected:
+    void SetUp() override {
+        const auto& parameters = GetParam();
+        const auto& data_type = std::get<0>(parameters);
+        const auto& unsqueeze_test_case = std::get<1>(parameters);
+
+        const auto& input_shape = unsqueeze_test_case.input_shape;
+        const auto& unsqueeze_axes = unsqueeze_test_case.unsqueeze_axes;
+
+        targetDevice = std::get<2>(parameters);
+
+        const auto data = std::make_shared<ngraph::opset3::Parameter>(data_type, input_shape);
+        const auto dims = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::i64, ngraph::Shape{input_shape.size()});
+
+        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);
+
+        const auto axes = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64, ngraph::Shape{unsqueeze_axes.size()}, unsqueeze_axes);
+        const auto node = std::make_shared<ngraph::opset3::Unsqueeze>(dsr, axes);
+
+        const auto result = std::make_shared<ngraph::opset3::Result>(node);
+        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{data, dims}, "DSR-Unsqueeze");
+    }
+};
+
+TEST_P(DSR_Unsqueeze, CompareWithReference) {
+    Run();
+}
+
+INSTANTIATE_TEST_CASE_P(DISABLED_DynamicUnsqueeze, DSR_Unsqueeze,
+    ::testing::Combine(
+        ::testing::Values(ngraph::element::f16, ngraph::element::f32, ngraph::element::i32),
+        ::testing::Values(
+                // input_shape, unsqueeze_axis
+                UnsqueezeTestCase{DataDims{10, 100, 1000}, axis_vec{-1, -3}},
+                UnsqueezeTestCase{DataDims{10, 100, 1000}, axis_vec{0}},
+                UnsqueezeTestCase{DataDims{10}, axis_vec{1}},
+                UnsqueezeTestCase{DataDims{10}, axis_vec{0}}),
+        ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)));
+
+}  // namespace
index 1af0d1d..2dd87fc 100644 (file)
@@ -20,7 +20,7 @@ typedef std::tuple<
         InferenceEngine::SizeVector,
         std::string> basicParams;
 
-class ExecGraphUniqueNodeNames : public LayerTestsUtils::LayerTestsCommonClass<LayerTestsUtils::basicParams> {
+class ExecGraphUniqueNodeNames : public LayerTestsUtils::LayerTestsCommonDeprecated<LayerTestsUtils::basicParams> {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<LayerTestsUtils::basicParams> obj);
 
diff --git a/inference-engine/tests/functional/plugin/shared/include/ngraph_conversion_tests/conv_bias_fusion.hpp b/inference-engine/tests/functional/plugin/shared/include/ngraph_conversion_tests/conv_bias_fusion.hpp
new file mode 100644 (file)
index 0000000..deb707b
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright (C) 2020 Intel Corporation
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <tuple>
+#include <string>
+#include <functional>
+#include <vector>
+#include <memory>
+
+#include "ie_core.hpp"
+#include "ngraph/opsets/opset1.hpp"
+
+#include "functional_test_utils/blob_utils.hpp"
+#include "common_test_utils/common_utils.hpp"
+#include "functional_test_utils/layer_test_utils.hpp"
+
+namespace NGraphConversionTestsDefinitions {
+
+class ConvBiasFusion : public CommonTestUtils::TestsCommon, public testing::WithParamInterface<std::string> {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<std::string> & obj);
+};
+}  // namespace NGraphConversionTestsDefinitions
diff --git a/inference-engine/tests/functional/plugin/shared/include/other/add_output.hpp b/inference-engine/tests/functional/plugin/shared/include/other/add_output.hpp
new file mode 100644 (file)
index 0000000..b1fc8d2
--- /dev/null
@@ -0,0 +1,26 @@
+// Copyright (C) 2020 Intel Corporation
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <gtest/gtest.h>
+
+#include <map>
+
+#include "common_test_utils/common_layers_params.hpp"
+#include "common_test_utils/common_utils.hpp"
+#include "common_test_utils/test_common.hpp"
+#include "common_test_utils/test_constants.hpp"
+#include "common_test_utils/xml_net_builder/ir_net.hpp"
+#include "common_test_utils/xml_net_builder/xml_filler.hpp"
+#include "ie_core.hpp"
+
+class AddOutputTestsCommonClass : public CommonTestUtils::TestsCommon,
+                                  public testing::WithParamInterface<std::tuple<std::string, std::string>> {
+private:
+    static std::string generate_model();
+
+public:
+    static std::string getTestCaseName(testing::TestParamInfo<std::tuple<std::string, std::string>> obj);
+    void run_test();
+};
index 4044822..945f9be 100644 (file)
@@ -39,7 +39,8 @@ static std::map<ngraph::helpers::ActivationTypes, std::string> activationNames =
         {ngraph::helpers::ActivationTypes::Exp,       "Exp"},
         {ngraph::helpers::ActivationTypes::Log,       "Log"},
         {ngraph::helpers::ActivationTypes::Sign,      "Sign"},
-        {ngraph::helpers::ActivationTypes::Abs,       "Abs"}
+        {ngraph::helpers::ActivationTypes::Abs,       "Abs"},
+        {ngraph::helpers::ActivationTypes::Gelu,      "Gelu"}
 };
 
 typedef std::tuple<
@@ -50,7 +51,7 @@ typedef std::tuple<
         std::string> activationParams;
 
 class ActivationLayerTest
-        : public LayerTestsUtils::LayerTestsCommonClass<activationParams> {
+        : public LayerTestsUtils::LayerTestsCommonDeprecated<activationParams> {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<activationParams> &obj);
 
diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/add.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/add.hpp
new file mode 100644 (file)
index 0000000..4906a7d
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include <map>
+#include <memory>
+#include "ngraph_functions/builders.hpp"
+#include "ngraph_functions/utils/ngraph_helpers.hpp"
+
+#include "functional_test_utils/layer_test_utils.hpp"
+
+namespace LayerTestsDefinitions {
+    typedef std::tuple<
+            InferenceEngine::Precision,         // Network precision
+            std::vector<std::vector<size_t>>,   // Input shapes
+            std::string,                        // Device name
+            std::map<std::string, std::string>  // Config
+            > addParams;
+
+class AddLayerTest : public testing::WithParamInterface<addParams>,
+                     public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(testing::TestParamInfo<addParams> obj);
+protected:
+    void SetUp() override;
+};
+
+}  // namespace LayerTestsDefinitions
index 3700af2..5a1d08f 100644 (file)
@@ -18,11 +18,11 @@ using batchToSpaceParamsTuple = typename std::tuple<
         std::vector<size_t>,               // crops begin
         std::vector<size_t>,               // crops end
         std::vector<size_t>,               // Input shapes
-        InferenceEngine::Precision,        // Input precision
         InferenceEngine::Precision,        // Network precision
         std::string>;                      // Device name>;
 
-class BatchToSpaceLayerTest : public LayerTestsUtils::LayerTestsCommonClass<batchToSpaceParamsTuple> {
+class BatchToSpaceLayerTest : public testing::WithParamInterface<batchToSpaceParamsTuple>,
+                              public LayerTestsUtils::LayerTestsCommon {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<batchToSpaceParamsTuple> &obj);
 
index 6310e82..53f0b61 100644 (file)
@@ -18,12 +18,11 @@ namespace LayerTestsDefinitions {
 using concatParamsTuple = typename std::tuple<
         size_t,                            // Concat axis
         std::vector<std::vector<size_t>>,  // Input shapes
-        InferenceEngine::Precision,        // Input precision
         InferenceEngine::Precision,        // Network precision
         std::string>;                      // Device name
 
-class ConcatLayerTest
-        : public LayerTestsUtils::LayerTestsCommonClass<concatParamsTuple> {
+class ConcatLayerTest : public testing::WithParamInterface<concatParamsTuple>,
+                        public LayerTestsUtils::LayerTestsCommon {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<concatParamsTuple> &obj);
 
index 296e35f..d7caea2 100644 (file)
 #include "ngraph_functions/utils/ngraph_helpers.hpp"
 
 typedef std::tuple<
-        InferenceEngine::SizeVector,
-        InferenceEngine::SizeVector,
-        std::vector<ptrdiff_t>,
-        std::vector<ptrdiff_t>,
-        InferenceEngine::SizeVector,
-        size_t,
-        ngraph::op::PadType> convSpecificParams;
+        InferenceEngine::SizeVector,    // Kernel size
+        InferenceEngine::SizeVector,    // Strides
+        std::vector<ptrdiff_t>,         // Pad begin
+        std::vector<ptrdiff_t>,         // Pad end
+        InferenceEngine::SizeVector,    // Dilation
+        size_t,                         // Num out channels
+        ngraph::op::PadType             // Padding type
+> convSpecificParams;
 typedef std::tuple<
         convSpecificParams,
-        InferenceEngine::Precision,
-        InferenceEngine::Precision,
-        InferenceEngine::SizeVector,
-        LayerTestsUtils::TargetDevice> convLayerTestParamsSet;
+        InferenceEngine::Precision,     // Net precision
+        InferenceEngine::SizeVector,    // Input shapes
+        LayerTestsUtils::TargetDevice   // Device name
+> convLayerTestParamsSet;
 namespace LayerTestsDefinitions {
 
 
-class ConvolutionLayerTest : public testing::WithParamInterface<convLayerTestParamsSet>, public LayerTestsUtils::FuncTestsCommon {
+class ConvolutionLayerTest : public testing::WithParamInterface<convLayerTestParamsSet>,
+                             public LayerTestsUtils::LayerTestsCommon {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<convLayerTestParamsSet> obj);
 
diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/group_convolution.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/group_convolution.hpp
new file mode 100644 (file)
index 0000000..ea389d4
--- /dev/null
@@ -0,0 +1,42 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <tuple>
+#include <vector>
+#include <string>
+#include <memory>
+
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "ngraph_functions/builders.hpp"
+#include "ngraph_functions/utils/ngraph_helpers.hpp"
+
+typedef std::tuple<
+        InferenceEngine::SizeVector,
+        InferenceEngine::SizeVector,
+        std::vector<ptrdiff_t>,
+        std::vector<ptrdiff_t>,
+        InferenceEngine::SizeVector,
+        size_t,
+        size_t,
+        ngraph::op::PadType> groupConvSpecificParams;
+typedef std::tuple<
+        groupConvSpecificParams,
+        InferenceEngine::Precision,
+        InferenceEngine::SizeVector,
+        LayerTestsUtils::TargetDevice> groupConvLayerTestParamsSet;
+
+namespace LayerTestsDefinitions {
+
+class GroupConvolutionLayerTest : public testing::WithParamInterface<groupConvLayerTestParamsSet>,
+                                  public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(testing::TestParamInfo<groupConvLayerTestParamsSet> obj);
+
+protected:
+    void SetUp() override;
+};
+
+}  // namespace LayerTestsDefinitions
\ No newline at end of file
diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/lrn.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/lrn.hpp
new file mode 100644 (file)
index 0000000..cb785b2
--- /dev/null
@@ -0,0 +1,40 @@
+// Copyright (C) 2020 Intel Corporation
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include <memory>
+
+#include "ngraph_functions/builders.hpp"
+#include "ngraph_functions/utils/ngraph_helpers.hpp"
+
+#include "functional_test_utils/layer_test_utils.hpp"
+
+namespace LayerTestsDefinitions {
+
+typedef std::tuple<
+        double,                        // Alpha
+        size_t,                        // Beta
+        size_t,                        // Bias
+        size_t,                        // Size,
+        InferenceEngine::Precision,    // Network precision
+        InferenceEngine::SizeVector,   // Input shapes
+        std::string                    // Device name
+> lrnLayerTestParamsSet;
+
+class LrnLayerTest
+        : public testing::WithParamInterface<lrnLayerTestParamsSet>,
+          public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(testing::TestParamInfo<lrnLayerTestParamsSet> obj);
+
+protected:
+    void SetUp() override;
+};
+
+}  // namespace LayerTestsDefinitions
diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/maximum.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/maximum.hpp
new file mode 100644 (file)
index 0000000..c3a027e
--- /dev/null
@@ -0,0 +1,31 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#pragma once
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include <memory>
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "ngraph_functions/builders.hpp"
+#include "ngraph_functions/utils/ngraph_helpers.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+namespace LayerTestsDefinitions {
+
+using MaximumParamsTuple = typename std::tuple<
+        std::vector<std::vector<size_t>>, //input shapes
+        InferenceEngine::Precision,       //Network precision
+        std::string>;                     //Device name
+
+class MaximumLayerTest:
+        public testing::WithParamInterface<MaximumParamsTuple>,
+        public LayerTestsUtils::LayerTestsCommon{
+public:
+    std::shared_ptr<ngraph::Function> fn;
+    static std::string getTestCaseName(const testing::TestParamInfo<MaximumParamsTuple>& obj);
+protected:
+    void SetUp() override;
+};
+}  // namespace LayerTestsDefinitions
diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/multiply.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/multiply.hpp
new file mode 100644 (file)
index 0000000..18ce004
--- /dev/null
@@ -0,0 +1,31 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#pragma once
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include <memory>
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "ngraph_functions/builders.hpp"
+#include "ngraph_functions/utils/ngraph_helpers.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+namespace LayerTestsDefinitions {
+
+using MultiplyParamsTuple = typename std::tuple<
+        std::vector<std::vector<size_t>>, //input shapes
+        InferenceEngine::Precision,       //Network precision
+        std::string>;                     //Device name
+
+class MultiplyLayerTest:
+        public testing::WithParamInterface<MultiplyParamsTuple>,
+        public LayerTestsUtils::LayerTestsCommon{
+public:
+    std::shared_ptr<ngraph::Function> fn;
+    static std::string getTestCaseName(const testing::TestParamInfo<MultiplyParamsTuple> &obj);
+protected:
+    void SetUp() override;
+};
+}  // namespace LayerTestsDefinitions
diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/mvn.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/mvn.hpp
new file mode 100644 (file)
index 0000000..88bc54a
--- /dev/null
@@ -0,0 +1,31 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <tuple>
+#include <string>
+
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "ngraph_functions/builders.hpp"
+
+namespace LayerTestsDefinitions {
+
+typedef std::tuple<
+        InferenceEngine::SizeVector, // Input shapes
+        InferenceEngine::Precision,  // Input precision
+        bool,                        // Across channels
+        bool,                        // Normalize variance
+        double,                      // Epsilon
+        std::string> mvnParams;      // Device name
+
+class MvnLayerTest : public testing::WithParamInterface<mvnParams>, public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(testing::TestParamInfo<mvnParams> obj);
+
+protected:
+    void SetUp() override;
+};
+
+}  // namespace LayerTestsDefinitions
\ No newline at end of file
index 399ff1a..0b38f40 100644 (file)
@@ -20,14 +20,12 @@ namespace LayerTestsDefinitions {
 using ConfigMap = typename std::map<std::string, std::string>;
 
 using NonZeroLayerTestParamsSet = typename std::tuple<
-        InferenceEngine::SizeVector,          // Input shapes
-        InferenceEngine::Precision,           // Input precision
-        InferenceEngine::Precision,           // Network precision
-        std::string,                          // Device name
-        ConfigMap>;                           // Config map
-
-class NonZeroLayerTest
-        : public LayerTestsUtils::LayerTestsCommonClass<NonZeroLayerTestParamsSet> {
+    InferenceEngine::SizeVector,          // Input shapes
+    InferenceEngine::Precision,           // Input precision
+    LayerTestsUtils::TargetDevice>;       // Device name
+
+class NonZeroLayerTest : public testing::WithParamInterface<NonZeroLayerTestParamsSet>,
+                         public LayerTestsUtils::LayerTestsCommon {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<NonZeroLayerTestParamsSet> obj);
 
index 01e0344..d9c3bc5 100644 (file)
 namespace LayerTestsDefinitions {
 
 typedef std::tuple<
-        ngraph::helpers::PoolingTypes,
-        InferenceEngine::SizeVector,
-        InferenceEngine::SizeVector,
-        InferenceEngine::SizeVector,
-        InferenceEngine::SizeVector,
-        ngraph::op::RoundingType,
-        ngraph::op::PadType,
-        bool> poolSpecificParams;
+        ngraph::helpers::PoolingTypes,  // Pooling type, max or avg
+        std::vector<size_t>,            // Kernel size
+        std::vector<size_t>,            // Stride
+        std::vector<size_t>,            // Pad begin
+        std::vector<size_t>,            // Pad end
+        ngraph::op::RoundingType,       // Rounding type
+        ngraph::op::PadType,            // Pad type
+        bool                            // Exclude pad
+> poolSpecificParams;
 typedef std::tuple<
         poolSpecificParams,
-        InferenceEngine::Precision,
-        InferenceEngine::Precision,
-        InferenceEngine::SizeVector,
-        std::string> poolLayerTestParamsSet;
+        InferenceEngine::Precision,     // Net precision
+        std::vector<size_t>,            // Input shape
+        std::string                     // Device name
+> poolLayerTestParamsSet;
 
-class PoolingLayerTest
-        : public LayerTestsUtils::LayerTestsCommonClass<poolLayerTestParamsSet> {
+class PoolingLayerTest : public testing::WithParamInterface<poolLayerTestParamsSet>,
+                         public LayerTestsUtils::LayerTestsCommon {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<poolLayerTestParamsSet> obj);
 
index 76c7365..2775f4a 100644 (file)
 #include "functional_test_utils/layer_test_utils.hpp"
 
 namespace LayerTestsDefinitions {
-    typedef std::tuple<
-            bool,                               // SpecialZero
-            InferenceEngine::Precision,         // Input precision
-            InferenceEngine::Precision,         // Network precision
-            std::vector<size_t>,                // Input shapes
-            std::vector<size_t>,                // OutForm Shapes
-            std::string,                        // Device name
-            std::map<std::string, std::string>  // Config
-            > reshapeParams;
+typedef std::tuple<
+        bool,                               // SpecialZero
+        InferenceEngine::Precision,         // Network precision
+        std::vector<size_t>,                // Input shapes
+        std::vector<size_t>,                // OutForm Shapes
+        std::string,                        // Device name
+        std::map<std::string, std::string>  // Config
+> reshapeParams;
 
-class ReshapeLayerTest
-        : public LayerTestsUtils::LayerTestsCommonClass<reshapeParams> {
+class ReshapeLayerTest : public testing::WithParamInterface<reshapeParams>,
+                         public LayerTestsUtils::LayerTestsCommon {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<reshapeParams> obj);
+
 protected:
     void SetUp() override;
 };
diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/select.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/select.hpp
new file mode 100644 (file)
index 0000000..f3eca6d
--- /dev/null
@@ -0,0 +1,34 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include <functional_test_utils/layer_test_utils.hpp>
+
+#include "ngraph_functions/select.hpp"
+
+namespace LayerTestsDefinitions {
+
+typedef std::tuple<
+        std::vector<std::vector<size_t>>,  // mask, then, else shapes
+        InferenceEngine::Precision,        // then, else precision
+        ngraph::op::AutoBroadcastSpec,     // broadcast
+        std::string> selectTestParams;   // Device name
+
+class SelectLayerTest : public LayerTestsUtils::LayerTestsCommonDeprecated<selectTestParams> {
+public:
+    NGraphFunctions::Select layer;
+    std::vector<std::vector<size_t>> inputShapes;
+    ngraph::op::AutoBroadcastSpec broadcast;
+
+    static std::string getTestCaseName(const testing::TestParamInfo <selectTestParams> &obj);
+
+protected:
+    void SetUp() override;
+};
+
+}  // namespace LayerTestsDefinitions
\ No newline at end of file
index 93613fd..57fbd66 100644 (file)
 
 namespace LayerTestsDefinitions {
 
-using softMaxLayerTestParams =
-    std::tuple<
+using softMaxLayerTestParams = std::tuple<
         InferenceEngine::Precision,         // netPrecision
-        InferenceEngine::Precision,         // inputPrecision
         InferenceEngine::Layout,            // inputLayout
         InferenceEngine::SizeVector,        // inputShape
         size_t,                             // axis
         std::string,                        // targetDevice
         std::map<std::string, std::string>  // config
-    >;
+>;
 
-class SoftMaxLayerTest :
-        public LayerTestsUtils::LayerTestsCommonClass<softMaxLayerTestParams> {
+class SoftMaxLayerTest : public testing::WithParamInterface<softMaxLayerTestParams>,
+                         public LayerTestsUtils::LayerTestsCommon {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<softMaxLayerTestParams> obj);
 
index 34ad87f..0a45f48 100644 (file)
@@ -18,11 +18,11 @@ using spaceToBatchParamsTuple = typename std::tuple<
         std::vector<size_t>,               // pads_begin
         std::vector<size_t>,               // pads_end
         std::vector<size_t>,               // Input shapes
-        InferenceEngine::Precision,        // Input precision
         InferenceEngine::Precision,        // Network precision
         std::string>;                      // Device name>;
 
-class SpaceToBatchLayerTest : public LayerTestsUtils::LayerTestsCommonClass<spaceToBatchParamsTuple> {
+class SpaceToBatchLayerTest : public testing::WithParamInterface<spaceToBatchParamsTuple>,
+                              public LayerTestsUtils::LayerTestsCommon {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<spaceToBatchParamsTuple> &obj);
 
index 617377c..a36574f 100644 (file)
 namespace LayerTestsDefinitions {
 
 typedef std::tuple<
-        size_t,
-        size_t,
-        InferenceEngine::Precision,
-        InferenceEngine::Precision,
-        InferenceEngine::SizeVector,
-        std::string> splitParams;
-
-class SplitLayerTest
-        : public LayerTestsUtils::LayerTestsCommonClass<splitParams> {
+        size_t,                         // Num splits
+        size_t,                         // Axis
+        InferenceEngine::Precision,     // Net precision
+        std::vector<size_t>,            // Input shapes
+        std::string                     // Target device name
+> splitParams;
+
+class SplitLayerTest : public testing::WithParamInterface<splitParams>,
+                       public LayerTestsUtils::LayerTestsCommon {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<splitParams> obj);
 
index 206ff44..3b7d25e 100644 (file)
@@ -23,11 +23,11 @@ using stridedSliceParamsTuple = typename std::tuple<
         std::vector<int64_t>,              // New axis mask
         std::vector<int64_t>,              // Shrink axis mask
         std::vector<int64_t>,              // Ellipsis axis mask
-        InferenceEngine::Precision,        // Input precision
         InferenceEngine::Precision,        // Network precision
         std::string>;                      // Device name>;
 
-class StridedSliceLayerTest : public LayerTestsUtils::LayerTestsCommonClass<stridedSliceParamsTuple> {
+class StridedSliceLayerTest : public testing::WithParamInterface<stridedSliceParamsTuple>,
+                              public LayerTestsUtils::LayerTestsCommon {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<stridedSliceParamsTuple> &obj);
 
diff --git a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/reshape_squeeze_reshape_relu.hpp b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/reshape_squeeze_reshape_relu.hpp
new file mode 100644 (file)
index 0000000..8b97cf7
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+#pragma once
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include <memory>
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "ngraph_functions/builders.hpp"
+#include "ngraph_functions/utils/ngraph_helpers.hpp"
+
+namespace LayerTestsDefinitions {
+
+using ReshapeSqueezeReshapeReluTuple = typename std::tuple<
+        std::vector<std::vector<size_t>>, //input shapes and squeeze_indices
+        InferenceEngine::Precision,       //Network precision
+        std::string,                      //Device name
+        bool>;                            //Squeeze -> true, unsqueeze -> false
+
+class ReshapeSqueezeReshapeRelu
+        : public testing::WithParamInterface<ReshapeSqueezeReshapeReluTuple>,
+          public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<ReshapeSqueezeReshapeReluTuple> &obj);
+protected:
+    void SetUp() override;
+};
+} // namespace LayerTestsDefinitions
index 08593c1..fc0e6ff 100644 (file)
@@ -15,7 +15,8 @@
 
 namespace LayerTestsDefinitions {
 
-class SplitConvConcat : public LayerTestsUtils::LayerTestsCommonClass<LayerTestsUtils::basicParams> {
+class SplitConvConcat : public testing::WithParamInterface<LayerTestsUtils::basicParams>,
+                        public LayerTestsUtils::LayerTestsCommon {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<LayerTestsUtils::basicParams> obj);
 
index ac1c574..7a4b213 100644 (file)
@@ -26,7 +26,7 @@ std::string ExecGraphUniqueNodeNames::getTestCaseName(testing::TestParamInfo<Lay
     InferenceEngine::Precision inputPrecision, netPrecision;
     InferenceEngine::SizeVector inputShapes, newInputShapes;
     std::string targetDevice;
-    std::tie(inputPrecision, netPrecision, inputShapes, targetDevice) = obj.param;
+    std::tie(netPrecision, inputShapes, targetDevice) = obj.param;
 
     std::ostringstream result;
     result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
@@ -38,8 +38,8 @@ std::string ExecGraphUniqueNodeNames::getTestCaseName(testing::TestParamInfo<Lay
 
 void ExecGraphUniqueNodeNames::SetUp() {
     std::vector<size_t> inputShape;
-    InferenceEngine::Precision inputPrecision, netPrecision;
-    std::tie(inputPrecision, netPrecision, inputShape, targetDevice) = this->GetParam();
+    InferenceEngine::Precision netPrecision;
+    std::tie(netPrecision, inputShape, targetDevice) = this->GetParam();
 
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
 
diff --git a/inference-engine/tests/functional/plugin/shared/src/ngraph_conversion_tests/conv_bias_fusion.cpp b/inference-engine/tests/functional/plugin/shared/src/ngraph_conversion_tests/conv_bias_fusion.cpp
new file mode 100644 (file)
index 0000000..2db1729
--- /dev/null
@@ -0,0 +1,45 @@
+// Copyright (C) 2020 Intel Corporation
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "ngraph_conversion_tests/conv_bias_fusion.hpp"
+
+namespace NGraphConversionTestsDefinitions {
+
+
+std::string ConvBiasFusion::getTestCaseName(const testing::TestParamInfo<std::string> & obj) {
+    return "Device=" + obj.param;
+}
+
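+// Builds Convolution followed by a bias Add, compiles it, and verifies via the
+// executable graph that the plugin fused the two nodes: the fused layer must
+// report both original names in its "originalLayersNames" attribute.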
+TEST_P(ConvBiasFusion, ConvBiasFusion) {
+    std::string device = this->GetParam();
+    std::shared_ptr<ngraph::Function> f(nullptr);
+    {
+        auto input = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64});
+        auto weights = ngraph::opset1::Constant::create(ngraph::element::f32, ngraph::Shape{6, 3, 1, 1}, {1});
+        auto biases = ngraph::opset1::Constant::create(ngraph::element::f32, ngraph::Shape{6, 1, 1}, {1});
+        auto conv = std::make_shared<ngraph::opset1::Convolution>(input, weights, ngraph::Strides{1, 1},
+                ngraph::CoordinateDiff{0, 0}, ngraph::CoordinateDiff{0, 0}, ngraph::Strides{1, 1});
+        auto add = std::make_shared<ngraph::opset1::Add>(conv, biases);
+
+        input->set_friendly_name("parameter");
+        conv->set_friendly_name("conv");
+        add->set_friendly_name("add");
+
+        f = std::make_shared<ngraph::Function>(ngraph::NodeVector{add}, ngraph::ParameterVector{input});
+    }
+
+    auto network = InferenceEngine::CNNNetwork(f);
+
+    InferenceEngine::Core ie;
+    InferenceEngine::ExecutableNetwork exeNetwork = ie.LoadNetwork(network, device);
+    auto net = exeNetwork.GetExecGraphInfo();
+
+    IE_SUPPRESS_DEPRECATED_START
+    auto add_layer = net.getLayerByName("add");
+    ASSERT_EQ(add_layer->params["originalLayersNames"], "add,conv");
+    IE_SUPPRESS_DEPRECATED_END
+}
+
+}  // namespace NGraphConversionTestsDefinitions
\ No newline at end of file
index 9147242..69ae287 100644 (file)
@@ -12,7 +12,32 @@ void PluginSpecificConversion::SetUp() {
 }
 
 std::string PluginSpecificConversion::getTestCaseName(const testing::TestParamInfo<std::string> & obj) {
-    return obj.param;
+    return "Device=" + obj.param;
+}
+
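+// Regression check: adding an output to a network that was already loaded and
+// then loading it again must not throw.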
+TEST_P(PluginSpecificConversion, addOutputAfterLoadNetwork) {
+    std::shared_ptr<ngraph::Function> f(nullptr);
+
+    {
+        auto input = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 300, 300});
+        auto const1 = ngraph::opset1::Constant::create(ngraph::element::f32, ngraph::Shape{1, 1, 1, 1}, {1});
+        auto add1 = std::make_shared<ngraph::opset1::Add>(input, const1);
+        add1->set_friendly_name("add1");
+        auto const2 = ngraph::opset1::Constant::create(ngraph::element::f32, ngraph::Shape{1, 1, 1, 1}, {1});
+        auto add2 = std::make_shared<ngraph::opset1::Add>(add1, const2);
+        f = std::make_shared<ngraph::Function>(ngraph::NodeVector{add2}, ngraph::ParameterVector{input});
+    }
+
+    auto network = InferenceEngine::CNNNetwork(f);
+
+    try {
+        InferenceEngine::Core ie;
+        InferenceEngine::ExecutableNetwork exeNetwork = ie.LoadNetwork(network, device);
+        network.addOutput("add1");
+        InferenceEngine::ExecutableNetwork exeNetwork2 = ie.LoadNetwork(network, device);
+    } catch (InferenceEngine::details::InferenceEngineException& ex) {
+        FAIL() << ex.what();
+    }
 }
 
 TEST_P(PluginSpecificConversion, GeluConversionTest) {
@@ -34,9 +59,8 @@ TEST_P(PluginSpecificConversion, GeluConversionTest) {
         // Parameter->Activation->Output
         ASSERT_EQ(net.layerCount(), 3);
     } else if (device == "GPU") {
-        // Parameter--->ScaleShift-------------->Eltwise
-        //          `-->ScaleShift->ScaleShift-`
-        ASSERT_EQ(net.layerCount(), 5);
+        // Parameter--->Activation--->
+        ASSERT_EQ(net.layerCount(), 2);
     }
 }
 
diff --git a/inference-engine/tests/functional/plugin/shared/src/other/add_output.cpp b/inference-engine/tests/functional/plugin/shared/src/other/add_output.cpp
new file mode 100644 (file)
index 0000000..3c66b8d
--- /dev/null
@@ -0,0 +1,76 @@
+// Copyright (C) 2020 Intel Corporation
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "other/add_output.hpp"
+
+// TODO: Replace IRBuilder with NGraph when it supports Memory Layer
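+// The generated IR forms a small stateful loop sharing state id "r_1-3":
+//   Memory_1 + Input_2 -> Eltwise_3 (mul) -> Activation_4 (sigmoid) -> Memory_5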
+std::string AddOutputTestsCommonClass::generate_model() {
+    CommonTestUtils::IRBuilder_v6 test_model_builder("model");
+
+    auto precision = InferenceEngine::Precision::FP32;
+
+    auto Memory_1_layer =
+        test_model_builder.AddLayer("Memory_1", "Memory", precision, {{"id", "r_1-3"}, {"index", "1"}, {"size", "2"}})
+            .AddOutPort({1, 200})
+            .getLayer();
+    auto Input_2_layer = test_model_builder.AddLayer("Input_2", "input", precision).AddOutPort({1, 200}).getLayer();
+    auto Eltwise_3_layer = test_model_builder.AddLayer("Eltwise_3", "Eltwise", precision, {{"operation", "mul"}})
+                               .AddInPort({1, 200})
+                               .AddInPort({1, 200})
+                               .AddOutPort({1, 200})
+                               .getLayer();
+
+    auto Activation_4_layer =
+        test_model_builder.AddLayer("Activation_4", "Activation", precision, {{"type", "sigmoid"}})
+            .AddInPort({1, 200})
+            .AddOutPort({1, 200})
+            .getLayer();
+    auto Memory_5_layer =
+        test_model_builder.AddLayer("Memory_5", "Memory", precision, {{"id", "r_1-3"}, {"index", "0"}, {"size", "2"}})
+            .AddInPort({1, 200})
+            .getLayer();
+
+    test_model_builder.AddEdge(Memory_1_layer.out(0), Eltwise_3_layer.in(0));
+    test_model_builder.AddEdge(Input_2_layer.out(0), Eltwise_3_layer.in(1));
+    test_model_builder.AddEdge(Eltwise_3_layer.out(0), Activation_4_layer.in(0));
+    test_model_builder.AddEdge(Activation_4_layer.out(0), Memory_5_layer.in(0));
+
+    auto serial = test_model_builder.serialize();
+
+    return serial;
+}
+
+std::string AddOutputTestsCommonClass::getTestCaseName(
+    testing::TestParamInfo<std::tuple<std::string, std::string>> obj) {
+    std::string layer;
+    std::string engine;
+
+    std::tie(layer, engine) = obj.param;
+    return layer + "_" + engine;
+}
+
+void AddOutputTestsCommonClass::run_test() {
+    std::string layer_name;
+    std::string engine_type;
+
+    std::tie(layer_name, engine_type) = this->GetParam();
+
+    auto model = this->generate_model();
+
+    InferenceEngine::Core ie;
+    InferenceEngine::CNNNetwork network;
+    InferenceEngine::ExecutableNetwork executableNet;
+
+    auto null_blob = CommonTestUtils::getWeightsBlob(0);
+    network = ie.ReadNetwork(model, null_blob);
+    network.addOutput(layer_name);
+    executableNet = ie.LoadNetwork(network, engine_type);
+
+    auto outputs = executableNet.GetOutputsInfo();
+
+    auto layer_output = outputs[layer_name];
+
+    ASSERT_TRUE(layer_output != nullptr) << "layer not found in outputs";
+}
diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/add.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/add.cpp
new file mode 100644 (file)
index 0000000..2453779
--- /dev/null
@@ -0,0 +1,52 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include <memory>
+#include <map>
+#include <ie_core.hpp>
+
+#include "functional_test_utils/blob_utils.hpp"
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "common_test_utils/common_utils.hpp"
+#include "single_layer_tests/add.hpp"
+
+namespace LayerTestsDefinitions {
+std::string AddLayerTest::getTestCaseName(testing::TestParamInfo<addParams> obj) {
+    InferenceEngine::Precision netPrecision;
+    std::vector<InferenceEngine::SizeVector> inputShapes;
+    std::string targetDevice;
+    std::map<std::string, std::string> config;
+    std::tie(netPrecision, inputShapes, targetDevice, config) = obj.param;
+    std::ostringstream result;
+    result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
+    result << "netPRC=" << netPrecision.name() << "_";
+    result << "targetDevice=" << targetDevice;
+    return result.str();
+}
+
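+// Builds a plain two-parameter element-wise Add function; IE_ASSERT guards that
+// exactly two input shapes were supplied.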
+void AddLayerTest::SetUp() {
+    std::vector<InferenceEngine::SizeVector> inputShapes;
+    InferenceEngine::Precision netPrecision;
+    std::tie(netPrecision, inputShapes, targetDevice, configuration) = this->GetParam();
+    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+    auto paramsIn = ngraph::builder::makeParams(ngPrc, {inputShapes});
+    auto paramIn = ngraph::helpers::convert2OutputVector(
+            ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(paramsIn));
+    IE_ASSERT(paramIn.size() == 2);
+    auto add = std::make_shared<ngraph::opset1::Add>(paramsIn[0], paramsIn[1]);
+    ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(add)};
+    function = std::make_shared<ngraph::Function>(results, paramsIn, "Add");
+}
+
+TEST_P(AddLayerTest, CompareWithRefs) {
+    Run();
+
+    if (targetDevice == std::string{CommonTestUtils::DEVICE_GPU}) {
+        PluginCache::get().reset();
+    }
+}
+}  // namespace LayerTestsDefinitions
index 53ef5c0..e735591 100644 (file)
@@ -22,12 +22,11 @@ namespace LayerTestsDefinitions {
 
 std::string BatchToSpaceLayerTest::getTestCaseName(const testing::TestParamInfo<batchToSpaceParamsTuple> &obj) {
     std::vector<size_t> inShapes, blockShape, cropsBegin, cropsEnd;
-    InferenceEngine::Precision inPrc, netPrc;
+    InferenceEngine::Precision netPrc;
     std::string targetName;
-    std::tie(blockShape, cropsBegin, cropsEnd, inShapes, inPrc, netPrc, targetName) = obj.param;
+    std::tie(blockShape, cropsBegin, cropsEnd, inShapes, netPrc, targetName) = obj.param;
     std::ostringstream result;
     result << "IS=" << CommonTestUtils::vec2str(inShapes) << "_";
-    result << "inPRC=" << inPrc.name() << "_";
     result << "netPRC=" << netPrc.name() << "_";
     result << "BS=" << CommonTestUtils::vec2str(blockShape) << "_";
     result << "CB=" << CommonTestUtils::vec2str(cropsBegin) << "_";
@@ -38,20 +37,23 @@ std::string BatchToSpaceLayerTest::getTestCaseName(const testing::TestParamInfo<
 
 void BatchToSpaceLayerTest::SetUp() {
     std::vector<size_t> inputShape, blockShape, cropsBegin, cropsEnd;
-    std::tie(blockShape, cropsBegin, cropsEnd, inputShape, inputPrecision, netPrecision,
-             targetDevice) = this->GetParam();
-
+    InferenceEngine::Precision netPrecision;
+    std::tie(blockShape, cropsBegin, cropsEnd, inputShape, netPrecision, targetDevice) = this->GetParam();
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
     auto paramOuts = ngraph::helpers::convert2OutputVector(
             ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
     auto b2s = ngraph::builder::makeBatchToSpace(paramOuts[0], ngPrc, blockShape, cropsBegin, cropsEnd);
     ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(b2s)};
-    fnPtr = std::make_shared<ngraph::Function>(results, params, "BatchToSpace");
+    function = std::make_shared<ngraph::Function>(results, params, "BatchToSpace");
 }
 
 TEST_P(BatchToSpaceLayerTest, CompareWithRefs) {
-    inferAndValidate();
+    Run();
+
+    if (targetDevice == std::string{CommonTestUtils::DEVICE_GPU}) {
+        PluginCache::get().reset();
+    }
 };
 
 }  // namespace LayerTestsDefinitions
index 791dd0e..251ec74 100644 (file)
@@ -23,13 +23,12 @@ namespace LayerTestsDefinitions {
 std::string ConcatLayerTest::getTestCaseName(const testing::TestParamInfo<concatParamsTuple> &obj) {
     size_t axis;
     std::vector<std::vector<size_t>> inputShapes;
-    InferenceEngine::Precision netPrecision, inputPrecision;
+    InferenceEngine::Precision netPrecision;
     std::string targetName;
-    std::tie(axis, inputShapes, inputPrecision, netPrecision, targetName) = obj.param;
+    std::tie(axis, inputShapes, netPrecision, targetName) = obj.param;
     std::ostringstream result;
     result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
     result << "axis=" << axis << "_";
-    result << "inPRC=" << inputPrecision.name() << "_";
     result << "netPRC=" << netPrecision.name() << "_";
     result << "targetDevice=" << targetName << "_";
     return result.str();
@@ -38,18 +37,23 @@ std::string ConcatLayerTest::getTestCaseName(const testing::TestParamInfo<concat
 void ConcatLayerTest::SetUp() {
     size_t axis;
     std::vector<std::vector<size_t>> inputShape;
-    std::tie(axis, inputShape, inputPrecision, netPrecision, targetDevice) = this->GetParam();
+    InferenceEngine::Precision netPrecision;
+    std::tie(axis, inputShape, netPrecision, targetDevice) = this->GetParam();
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     auto params = ngraph::builder::makeParams(ngPrc, inputShape);
     auto paramOuts = ngraph::helpers::convert2OutputVector(
             ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
     auto concat = std::make_shared<ngraph::opset1::Concat>(paramOuts, axis);
     ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(concat)};
-    fnPtr = std::make_shared<ngraph::Function>(results, params, "concat");
+    function = std::make_shared<ngraph::Function>(results, params, "concat");
 }
 
 
 TEST_P(ConcatLayerTest, CompareWithRefs) {
-    inferAndValidate();
+    Run();
+
+    if (targetDevice == std::string{CommonTestUtils::DEVICE_GPU}) {
+        PluginCache::get().reset();
+    }
 };
 }  // namespace LayerTestsDefinitions
\ No newline at end of file
index 0782701..f701df8 100644 (file)
@@ -22,10 +22,10 @@ namespace LayerTestsDefinitions {
 
 std::string ConvolutionLayerTest::getTestCaseName(testing::TestParamInfo<convLayerTestParamsSet> obj) {
     convSpecificParams convParams;
-    InferenceEngine::Precision inputPrecision, netPrecision;
+    InferenceEngine::Precision netPrecision;
     InferenceEngine::SizeVector inputShapes;
     std::string targetDevice;
-    std::tie(convParams, inputPrecision, netPrecision, inputShapes, targetDevice) = obj.param;
+    std::tie(convParams, netPrecision, inputShapes, targetDevice) = obj.param;
     ngraph::op::PadType padType;
     InferenceEngine::SizeVector kernel, stride, dilation;
     std::vector<ptrdiff_t> padBegin, padEnd;
@@ -35,13 +35,12 @@ std::string ConvolutionLayerTest::getTestCaseName(testing::TestParamInfo<convLay
     std::ostringstream result;
     result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
     result << "K" << CommonTestUtils::vec2str(kernel) << "_";
-    result << "S" << CommonTestUtils::vec2str(stride) << "_";;
-    result << "PB" << CommonTestUtils::vec2str(padBegin) << "_";;
-    result << "PE" << CommonTestUtils::vec2str(padEnd) << "_";;
-    result << "D=" << CommonTestUtils::vec2str(dilation) << "_";;
+    result << "S" << CommonTestUtils::vec2str(stride) << "_";
+    result << "PB" << CommonTestUtils::vec2str(padBegin) << "_";
+    result << "PE" << CommonTestUtils::vec2str(padEnd) << "_";
+    result << "D=" << CommonTestUtils::vec2str(dilation) << "_";
     result << "O=" << convOutChannels << "_";
     result << "AP=" << padType << "_";
-    result << "inPRC=" << inputPrecision.name() << "_";
     result << "netPRC=" << netPrecision.name() << "_";
     result << "targetDevice=" << targetDevice;
     return result.str();
@@ -50,9 +49,8 @@ std::string ConvolutionLayerTest::getTestCaseName(testing::TestParamInfo<convLay
 void ConvolutionLayerTest::SetUp() {
     convSpecificParams convParams;
     std::vector<size_t> inputShape;
-    auto inputPrecision = InferenceEngine::Precision::UNSPECIFIED;
     auto netPrecision   = InferenceEngine::Precision::UNSPECIFIED;
-    std::tie(convParams, inputPrecision, netPrecision, inputShape, targetDevice) = this->GetParam();
+    std::tie(convParams, netPrecision, inputShape, targetDevice) = this->GetParam();
     ngraph::op::PadType padType;
     InferenceEngine::SizeVector kernel, stride, dilation;
     std::vector<ptrdiff_t> padBegin, padEnd;
diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/group_convolution.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/group_convolution.cpp
new file mode 100644 (file)
index 0000000..b511fc7
--- /dev/null
@@ -0,0 +1,78 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <tuple>
+#include <vector>
+#include <string>
+#include <memory>
+#include <functional>
+#include <functional_test_utils/skip_tests_config.hpp>
+
+#include "ie_core.hpp"
+
+#include "common_test_utils/common_utils.hpp"
+#include "functional_test_utils/blob_utils.hpp"
+#include "functional_test_utils/plugin_cache.hpp"
+#include "functional_test_utils/layer_test_utils.hpp"
+
+#include "single_layer_tests/group_convolution.hpp"
+
+namespace LayerTestsDefinitions {
+
+std::string GroupConvolutionLayerTest::getTestCaseName(testing::TestParamInfo<groupConvLayerTestParamsSet> obj) {
+    groupConvSpecificParams groupConvParams;
+    InferenceEngine::Precision netPrecision;
+    InferenceEngine::SizeVector inputShapes;
+    std::string targetDevice;
+    std::tie(groupConvParams, netPrecision, inputShapes, targetDevice) = obj.param;
+    ngraph::op::PadType padType;
+    InferenceEngine::SizeVector kernel, stride, dilation;
+    std::vector<ptrdiff_t> padBegin, padEnd;
+    size_t convOutChannels, numGroups;
+    std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, numGroups, padType) = groupConvParams;
+
+    std::ostringstream result;
+    result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
+    result << "K" << CommonTestUtils::vec2str(kernel) << "_";
+    result << "S" << CommonTestUtils::vec2str(stride) << "_";
+    result << "PB" << CommonTestUtils::vec2str(padBegin) << "_";
+    result << "PE" << CommonTestUtils::vec2str(padEnd) << "_";
+    result << "D=" << CommonTestUtils::vec2str(dilation) << "_";
+    result << "O=" << convOutChannels << "_";
+    result << "G=" << numGroups << "_";
+    result << "AP=" << padType << "_";
+    result << "netPRC=" << netPrecision.name() << "_";
+    result << "targetDevice=" << targetDevice;
+    return result.str();
+}
+
+void GroupConvolutionLayerTest::SetUp() {
+    groupConvSpecificParams groupConvParams;
+    std::vector<size_t> inputShape;
+    auto netPrecision   = InferenceEngine::Precision::UNSPECIFIED;
+    std::tie(groupConvParams, netPrecision, inputShape, targetDevice) = this->GetParam();
+    ngraph::op::PadType padType;
+    InferenceEngine::SizeVector kernel, stride, dilation;
+    std::vector<ptrdiff_t> padBegin, padEnd;
+    size_t convOutChannels, numGroups;
+    std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, numGroups, padType) = groupConvParams;
+    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+    auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
+    auto paramOuts = ngraph::helpers::convert2OutputVector(
+            ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
+    auto groupConv = std::dynamic_pointer_cast<ngraph::opset1::GroupConvolution>(
+            ngraph::builder::makeGroupConvolution(paramOuts[0], ngPrc, kernel, stride, padBegin,
+                                             padEnd, dilation, padType, convOutChannels, numGroups));
+    ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(groupConv)};
+    function = std::make_shared<ngraph::Function>(results, params, "groupConvolution");
+}
+
+TEST_P(GroupConvolutionLayerTest, CompareWithRefs) {
+    Run();
+
+    if (targetDevice == std::string{CommonTestUtils::DEVICE_GPU}) {
+        PluginCache::get().reset();
+    }
+}
+}  // namespace LayerTestsDefinitions
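
The new file above defines only the parameterized fixture; concrete test suites are instantiated per plugin. A minimal sketch of such an instantiation, assuming illustrative parameter values and a CPU target (none of these values are taken from this commit):

    const auto groupConvParams = ::testing::Combine(
            ::testing::Values(InferenceEngine::SizeVector{3, 3}),  // kernel
            ::testing::Values(InferenceEngine::SizeVector{1, 1}),  // stride
            ::testing::Values(std::vector<ptrdiff_t>{0, 0}),       // padBegin
            ::testing::Values(std::vector<ptrdiff_t>{0, 0}),       // padEnd
            ::testing::Values(InferenceEngine::SizeVector{1, 1}),  // dilation
            ::testing::Values(size_t{8}),                          // output channels
            ::testing::Values(size_t{2}),                          // groups
            ::testing::Values(ngraph::op::PadType::EXPLICIT));     // pad type

    INSTANTIATE_TEST_CASE_P(GroupConvolution2D, GroupConvolutionLayerTest,
            ::testing::Combine(
                    groupConvParams,
                    ::testing::Values(InferenceEngine::Precision::FP32),
                    ::testing::Values(InferenceEngine::SizeVector{1, 16, 30, 30}),
                    ::testing::Values(CommonTestUtils::DEVICE_CPU)),
            GroupConvolutionLayerTest::getTestCaseName);

The generator order must match the groupConvLayerTestParamsSet tuple unpacked in SetUp(); getTestCaseName then turns each combination into the readable suffix built above.
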
diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/lrn.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/lrn.cpp
new file mode 100644 (file)
index 0000000..746ce10
--- /dev/null
@@ -0,0 +1,55 @@
+// Copyright (C) 2020 Intel Corporation
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <functional>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include "single_layer_tests/lrn.hpp"
+
+namespace LayerTestsDefinitions {
+
+std::string LrnLayerTest::getTestCaseName(testing::TestParamInfo<lrnLayerTestParamsSet> obj) {
+    double alpha;
+    size_t beta, bias, size;
+    InferenceEngine::Precision  netPrecision;
+    std::vector<size_t> inputShapes;
+    std::string targetDevice;
+    std::tie(alpha, beta, bias, size, netPrecision, inputShapes, targetDevice) = obj.param;
+
+    std::ostringstream result;
+    const char separator = '_';
+    result << "IS=" << CommonTestUtils::vec2str(inputShapes) << separator;
+    result << "Alpha=" << alpha << separator;
+    result << "Beta=" << beta << separator;
+    result << "Bias=" << bias << separator;
+    result << "Size=" << size << separator;
+    result << "netPRC=" << netPrecision.name() << separator;
+    result << "targetDevice=" << targetDevice;
+
+    return result.str();
+}
+
+void LrnLayerTest::SetUp() {
+    std::vector<size_t> inputShapes;
+    auto netPrecision   = InferenceEngine::Precision::UNSPECIFIED;
+    double alpha; size_t beta, bias, size;
+    std::tie(alpha, beta, bias, size, netPrecision, inputShapes, targetDevice) = GetParam();
+
+    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+    auto params = ngraph::builder::makeParams(ngPrc, {inputShapes});
+    auto paramIn =
+        ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
+
+    auto lrn = std::make_shared<ngraph::opset1::LRN>(paramIn[0], alpha, beta, bias, size);
+    ngraph::ResultVector results {std::make_shared<ngraph::opset1::Result>(lrn)};
+    function = std::make_shared<ngraph::Function>(results, params, "lrn");
+}
+
+TEST_P(LrnLayerTest, CompareWithRefs) {
+    Run();
+}
+}  // namespace LayerTestsDefinitions
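
The four scalars tied out of the tuple map onto ngraph::opset1::LRN(data, alpha, beta, bias, size), which normalizes each element over a window of `size` adjacent channels. As a rough scalar sketch of the usual LRN definition (an assumption for illustration; the opset specification is normative):

    #include <cmath>
    #include <cstddef>

    // Toy scalar form of across-channel LRN: x is one element, sumSq is the
    // sum of squares over its channel window. Sketch only, not the path the
    // test's reference implementation actually takes.
    float lrnScalar(float x, float sumSq, double alpha, double beta,
                    double bias, std::size_t size) {
        const double denom = std::pow(bias + (alpha / size) * sumSq, beta);
        return static_cast<float>(x / denom);
    }
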
diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/maximum.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/maximum.cpp
new file mode 100644 (file)
index 0000000..c8e1103
--- /dev/null
@@ -0,0 +1,53 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include <memory>
+#include <functional>
+#include <debug.h>
+#include "ie_core.hpp"
+#include "common_test_utils/common_utils.hpp"
+#include "functional_test_utils/blob_utils.hpp"
+#include "functional_test_utils/precision_utils.hpp"
+#include "functional_test_utils/plugin_cache.hpp"
+#include "functional_test_utils/skip_tests_config.hpp"
+#include "single_layer_tests/maximum.hpp"
+
+namespace LayerTestsDefinitions {
+    std::string MaximumLayerTest::getTestCaseName(const testing::TestParamInfo<MaximumParamsTuple> &obj) {
+        std::vector<std::vector<size_t>> inputShapes;
+        InferenceEngine::Precision netPrecision;
+        std::string targetName;
+        std::tie(inputShapes, netPrecision, targetName) = obj.param;
+        std::ostringstream results;
+
+        results << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
+        results << "netPRC=" << netPrecision.name() << "_";
+        results << "targetDevice=" << targetName << "_";
+        return results.str();
+    }
+
+    void MaximumLayerTest::SetUp() {
+        std::vector<std::vector<size_t>> inputShapes;
+        InferenceEngine::Precision netPrecision;
+        std::tie(inputShapes, netPrecision, targetDevice) = this->GetParam();
+        const std::size_t inputDim = InferenceEngine::details::product(inputShapes[0]);
+        auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+        std::vector<size_t> shapeInput{1, inputDim};
+        auto input = ngraph::builder::makeParams(ngPrc, {shapeInput});
+        auto constMul = ngraph::builder::makeConstant(ngPrc, ngraph::Shape{1}, std::vector<float>{-1.0f});
+        auto max = std::make_shared<ngraph::opset1::Maximum>(input[0], constMul);
+        function = std::make_shared<ngraph::Function>(max, input, "maximum");
+    }
+
+    TEST_P(MaximumLayerTest, CompareWithRefs){
+        Run();
+
+        if (targetDevice == std::string{CommonTestUtils::DEVICE_GPU}) {
+            PluginCache::get().reset();
+        }
+    };
+} // namespace LayerTestsDefinitions
diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/multiply.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/multiply.cpp
new file mode 100644 (file)
index 0000000..a9052f4
--- /dev/null
@@ -0,0 +1,53 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include <memory>
+#include <functional>
+#include <debug.h>
+#include "ie_core.hpp"
+#include "common_test_utils/common_utils.hpp"
+#include "functional_test_utils/blob_utils.hpp"
+#include "functional_test_utils/precision_utils.hpp"
+#include "functional_test_utils/plugin_cache.hpp"
+#include "functional_test_utils/skip_tests_config.hpp"
+#include "single_layer_tests/multiply.hpp"
+
+namespace LayerTestsDefinitions {
+    std::string MultiplyLayerTest::getTestCaseName(const testing::TestParamInfo<MultiplyParamsTuple> &obj) {
+        std::vector<std::vector<size_t>> inputShapes;
+        InferenceEngine::Precision netPrecision;
+        std::string targetName;
+        std::tie(inputShapes, netPrecision, targetName) = obj.param;
+        std::ostringstream results;
+
+        results << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
+        results << "netPRC=" << netPrecision.name() << "_";
+        results << "targetDevice=" << targetName << "_";
+        return results.str();
+    }
+
+    void MultiplyLayerTest::SetUp() {
+        std::vector<std::vector<size_t>> inputShapes;
+        InferenceEngine::Precision netPrecision;
+        std::tie(inputShapes, netPrecision, targetDevice) = this->GetParam();
+        const std::size_t input_dim = InferenceEngine::details::product(inputShapes[0]);
+        auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+        std::vector<size_t> shape_input{1, input_dim};
+        auto input = ngraph::builder::makeParams(ngPrc, {shape_input});
+        auto const_mul = ngraph::builder::makeConstant(ngPrc, ngraph::Shape{1}, std::vector<float>{-1.0f});
+        auto mul = std::make_shared<ngraph::opset1::Multiply>(input[0], const_mul);
+        function = std::make_shared<ngraph::Function>(mul, input, "multiply");
+    }
+
+    TEST_P(MultiplyLayerTest, CompareWithRefs){
+        Run();
+
+        if (targetDevice == std::string{CommonTestUtils::DEVICE_GPU}) {
+            PluginCache::get().reset();
+        }
+    };
+} // namespace LayerTestsDefinitions
diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/mvn.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/mvn.cpp
new file mode 100644 (file)
index 0000000..995a7b0
--- /dev/null
@@ -0,0 +1,63 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include <memory>
+#include <functional>
+#include <functional_test_utils/skip_tests_config.hpp>
+
+#include "ie_core.hpp"
+#include "ngraph_functions/utils/ngraph_helpers.hpp"
+
+#include "common_test_utils/common_utils.hpp"
+#include "functional_test_utils/blob_utils.hpp"
+#include "functional_test_utils/plugin_cache.hpp"
+#include "functional_test_utils/layer_test_utils.hpp"
+
+#include "single_layer_tests/mvn.hpp"
+
+namespace LayerTestsDefinitions {
+
+std::string MvnLayerTest::getTestCaseName(testing::TestParamInfo<mvnParams> obj) {
+    InferenceEngine::SizeVector inputShapes;
+    InferenceEngine::Precision inputPrecision;
+    bool acrossChannels, normalizeVariance;
+    double eps;
+    std::string targetDevice;
+    std::tie(inputShapes, inputPrecision, acrossChannels, normalizeVariance, eps, targetDevice) = obj.param;
+    std::ostringstream result;
+    result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
+    result << "Precision=" << inputPrecision.name() << "_";
+    result << "AcrossChannels=" << (acrossChannels ? "TRUE" : "FALSE") << "_";
+    result << "NormalizeVariance=" << (normalizeVariance ? "TRUE" : "FALSE") << "_";
+    result << "Epsilon=" << eps << "_";
+    result << "TargetDevice=" << targetDevice;
+    return result.str();
+}
+
+void MvnLayerTest::SetUp() {
+    InferenceEngine::SizeVector inputShapes;
+    InferenceEngine::Precision inputPrecision;
+    bool acrossChanels, normalizeVariance;
+    double eps;
+    std::tie(inputShapes, inputPrecision, acrossChanels, normalizeVariance, eps, targetDevice) = this->GetParam();
+    auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
+    auto param = ngraph::builder::makeParams(inType, {inputShapes});
+    auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(param));
+    auto mvn = std::dynamic_pointer_cast<ngraph::op::MVN>(ngraph::builder::makeMVN(paramOuts[0], acrossChanels, normalizeVariance, eps));
+    ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(mvn)};
+    function = std::make_shared<ngraph::Function>(results, param, "mvn");
+}
+
+TEST_P(MvnLayerTest, CompareWithRefs) {
+    Run();
+
+    if (targetDevice == std::string{CommonTestUtils::DEVICE_GPU}) {
+        PluginCache::get().reset();
+    }
+};
+
+}  // namespace LayerTestsDefinitions
\ No newline at end of file
index 5855ba6..b6e9f3b 100644 (file)
@@ -20,33 +20,37 @@ namespace LayerTestsDefinitions {
 
 std::string NonZeroLayerTest::getTestCaseName(testing::TestParamInfo<NonZeroLayerTestParamsSet> obj) {
     std::vector<size_t> inputShape;
-    InferenceEngine::Precision inputPrecision, netPrecision;
+    InferenceEngine::Precision inputPrecision;
     std::string targetDevice;
-    ConfigMap config;
-    std::tie(inputShape, inputPrecision, netPrecision, targetDevice, config) = obj.param;
+    std::tie(inputShape, inputPrecision, targetDevice) = obj.param;
 
     std::ostringstream result;
     result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_";
     result << "inPRC=" << inputPrecision.name() << "_";
-    result << "netPRC=" << netPrecision.name() << "_";
     result << "targetDevice=" << targetDevice;
     return result.str();
 }
 
 void NonZeroLayerTest::SetUp() {
-    std::vector<size_t> inputShape;
-    std::tie(inputShape, inputPrecision, netPrecision, targetDevice, config) = this->GetParam();
+    SetRefMode(LayerTestsUtils::RefMode::CONSTANT_FOLDING);
+    auto inputShape     = std::vector<std::size_t>{};
+    auto inputPrecision = InferenceEngine::Precision::UNSPECIFIED;
+    std::tie(inputShape, inputPrecision, targetDevice) = GetParam();
 
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-    auto paramNode = std::make_shared<ngraph::opset1::Parameter>(ngPrc, ngraph::Shape(inputShape));
+    const auto& precision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
+    const auto& paramNode = std::make_shared<ngraph::opset1::Parameter>(precision, ngraph::Shape(inputShape));
 
     auto nonZeroOp = std::make_shared<ngraph::opset3::NonZero>(paramNode->output(0));
 
     ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(nonZeroOp)};
-    fnPtr = std::make_shared<ngraph::Function>(results, ngraph::ParameterVector{paramNode}, "non_zero");
+    function = std::make_shared<ngraph::Function>(results, ngraph::ParameterVector{paramNode}, "non_zero");
 }
 
-TEST_P(NonZeroLayerTest, CompareWithRefs) {
-    inferAndValidate();
+TEST_P(NonZeroLayerTest, CompareWithReference) {
+    Run();
+
+    if (targetDevice == std::string{CommonTestUtils::DEVICE_GPU}) {
+        PluginCache::get().reset();
+    }
 }
 }  // namespace LayerTestsDefinitions
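
The SetRefMode(CONSTANT_FOLDING) call is what makes NonZero checkable: its output shape depends on the input values, so the reference is obtained by folding the function over the concrete inputs rather than running the nGraph interpreter (see the Validate() rework in layer_test_utils below). A minimal sketch of the pattern for any fixture with the same need, using a hypothetical class name:

    class MyDataDependentShapeTest : public LayerTestsUtils::LayerTestsCommon {
    protected:
        void SetUp() override {
            // Reference outputs come from constant folding rather than the
            // interpreter, because the op's output shape is only known once
            // the inputs are fixed.
            SetRefMode(LayerTestsUtils::RefMode::CONSTANT_FOLDING);
            // ... build `function` from ngraph ops as the tests above do ...
        }
    };
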
index 1edb73d..1038314 100644 (file)
@@ -24,10 +24,10 @@ namespace LayerTestsDefinitions {
 
 std::string PoolingLayerTest::getTestCaseName(testing::TestParamInfo<poolLayerTestParamsSet> obj) {
     poolSpecificParams poolParams;
-    InferenceEngine::Precision inputPrecision, netPrecision;
+    InferenceEngine::Precision netPrecision;
     std::vector<size_t> inputShapes, newInputShapes;
     std::string targetDevice;
-    std::tie(poolParams, inputPrecision, netPrecision, inputShapes, targetDevice) = obj.param;
+    std::tie(poolParams, netPrecision, inputShapes, targetDevice) = obj.param;
     ngraph::helpers::PoolingTypes poolType;
     std::vector<size_t> kernel, stride;
     std::vector<size_t> padBegin, padEnd;
@@ -49,13 +49,12 @@ std::string PoolingLayerTest::getTestCaseName(testing::TestParamInfo<poolLayerTe
     }
     result << "K" << CommonTestUtils::vec2str(kernel) << "_";
     result << "S" << CommonTestUtils::vec2str(stride) << "_";
-    result << "PB" << CommonTestUtils::vec2str(padBegin) << "_";;
+    result << "PB" << CommonTestUtils::vec2str(padBegin) << "_";
     result << "PE" << CommonTestUtils::vec2str(padEnd) << "_";
     if (padType == ngraph::op::PadType::EXPLICIT) {
         result << "Rounding=" << roundingType << "_";
     }
     result << "AutoPad=" << padType << "_";
-    result << "inPRC=" << inputPrecision.name() << "_";
     result << "netPRC=" << netPrecision.name() << "_";
     result << "targetDevice=" << targetDevice;
     return result.str();
@@ -64,7 +63,8 @@ std::string PoolingLayerTest::getTestCaseName(testing::TestParamInfo<poolLayerTe
 void PoolingLayerTest::SetUp() {
     poolSpecificParams poolParams;
     std::vector<size_t> inputShape;
-    std::tie(poolParams, inputPrecision, netPrecision, inputShape, targetDevice) = this->GetParam();
+    InferenceEngine::Precision netPrecision;
+    std::tie(poolParams, netPrecision, inputShape, targetDevice) = this->GetParam();
     ngraph::helpers::PoolingTypes poolType;
     std::vector<size_t> kernel, stride;
     std::vector<size_t> padBegin, padEnd;
@@ -92,10 +92,14 @@ void PoolingLayerTest::SetUp() {
     }
 
     ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(pooling)};
-    fnPtr = std::make_shared<ngraph::Function>(results, params, "pooling");
+    function = std::make_shared<ngraph::Function>(results, params, "pooling");
 }
 
 TEST_P(PoolingLayerTest, CompareWithRefs) {
-    inferAndValidate();
+    Run();
+
+    if (targetDevice == std::string{CommonTestUtils::DEVICE_GPU}) {
+        PluginCache::get().reset();
+    }
 }
 }  // namespace LayerTestsDefinitions
\ No newline at end of file
index e425fda..4909464 100644 (file)
 
 namespace LayerTestsDefinitions {
     std::string ReshapeLayerTest::getTestCaseName(testing::TestParamInfo<reshapeParams> obj) {
-    InferenceEngine::Precision inputPrecision, netPrecision;
+    InferenceEngine::Precision netPrecision;
     InferenceEngine::SizeVector inputShapes, outFormShapes;
     std::string targetDevice;
     std::map<std::string, std::string> config;
     bool specialZero;
-    std::tie(specialZero, inputPrecision, netPrecision, inputShapes, outFormShapes,
-            targetDevice, config) = obj.param;
+    std::tie(specialZero, netPrecision, inputShapes, outFormShapes, targetDevice, config) = obj.param;
     std::ostringstream result;
     result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
     result << "specialZero=" << specialZero << "_";
-    result << "inPRC=" << inputPrecision.name() << "_";
     result << "netPRC=" << netPrecision.name() << "_";
     result << "targetDevice=" << targetDevice;
     return result.str();
@@ -36,8 +34,8 @@ namespace LayerTestsDefinitions {
 void ReshapeLayerTest::SetUp() {
     InferenceEngine::SizeVector inputShapes, outFormShapes;
     bool specialZero;
-    std::tie(specialZero, inputPrecision, netPrecision, inputShapes, outFormShapes,
-            targetDevice, config) = this->GetParam();
+    InferenceEngine::Precision netPrecision;
+    std::tie(specialZero, netPrecision, inputShapes, outFormShapes, targetDevice, configuration) = this->GetParam();
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     auto paramsIn = ngraph::builder::makeParams(ngPrc, {inputShapes});
     auto paramIn = ngraph::helpers::convert2OutputVector(
@@ -47,10 +45,14 @@ void ReshapeLayerTest::SetUp() {
     auto reshape = std::dynamic_pointer_cast<ngraph::opset1::Reshape>(
             std::make_shared<ngraph::opset1::Reshape>(paramIn[0], constNode, specialZero));
     ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(reshape)};
-    fnPtr = std::make_shared<ngraph::Function>(results, paramsIn, "Reshape");
+    function = std::make_shared<ngraph::Function>(results, paramsIn, "Reshape");
 }
 
 TEST_P(ReshapeLayerTest, CompareWithRefsDynamicBath) {
-    inferAndValidate();
+    Run();
+
+    if (targetDevice == std::string{CommonTestUtils::DEVICE_GPU}) {
+        PluginCache::get().reset();
+    }
 }
 }  // namespace LayerTestsDefinitions
\ No newline at end of file
diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/select.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/select.cpp
new file mode 100644 (file)
index 0000000..17a1909
--- /dev/null
@@ -0,0 +1,99 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include <functional>
+
+#include <ie_core.hpp>
+#include <ie_precision.hpp>
+
+#include "functional_test_utils/blob_utils.hpp"
+#include "functional_test_utils/precision_utils.hpp"
+#include "common_test_utils/common_utils.hpp"
+#include "functional_test_utils/skip_tests_config.hpp"
+#include "functional_test_utils/plugin_cache.hpp"
+
+#include "single_layer_tests/select.hpp"
+
+namespace LayerTestsDefinitions {
+
+    std::string SelectLayerTest::getTestCaseName(const testing::TestParamInfo<selectTestParams> &obj) {
+        std::vector<std::vector<size_t>> dataShapes(3);
+        InferenceEngine::Precision dataType;
+        ngraph::op::AutoBroadcastSpec broadcast;
+        std::string targetDevice;
+        std::tie(dataShapes, dataType, broadcast, targetDevice) = obj.param;
+        std::ostringstream result;
+        result << "COND=BOOL_" << CommonTestUtils::vec2str(dataShapes[0]);
+        result << "_THEN=" << dataType.name() << "_" << CommonTestUtils::vec2str(dataShapes[1]);
+        result << "_ELSE=" << dataType.name() << "_" << CommonTestUtils::vec2str(dataShapes[2]);
+        result << "_" << broadcast.m_type;
+        result << "_targetDevice=" << targetDevice;
+        return result.str();
+    }
+
+    void SelectLayerTest::SetUp() {
+        inputShapes.resize(NGraphFunctions::Select::numOfInputs);
+        std::tie(inputShapes, inputPrecision, broadcast, targetDevice) = this->GetParam();
+        layer = NGraphFunctions::Select(FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision), inputShapes, broadcast);
+    }
+
+    TEST_P(SelectLayerTest, CompareWithRefImpl) {
+        SKIP_IF_CURRENT_TEST_IS_DISABLED()
+
+        InferenceEngine::CNNNetwork cnnNet(layer.fnPtr);
+
+        auto outputName = cnnNet.getOutputsInfo().begin()->first;
+
+        auto ie = PluginCache::get().ie();
+        auto execNet = ie->LoadNetwork(cnnNet, targetDevice);
+        auto req = execNet.CreateInferRequest();
+
+        std::vector<InferenceEngine::Blob::Ptr> inBlobs;
+
+        std::vector<uint32_t> range = {2, 30, 30};
+        std::vector<int32_t> startFrom = {0, 0, 30};
+        int i = 0;
+        for (const auto &inputItem : cnnNet.getInputsInfo()) {
+            auto currentBlob = FuncTestUtils::createAndFillBlob(inputItem.second->getTensorDesc(), range[i], startFrom[i]);
+            req.SetBlob(inputItem.first, currentBlob);
+            inBlobs.push_back(currentBlob);
+            i++;
+        }
+
+        std::vector<InferenceEngine::Blob::Ptr> castedBlobs = inBlobs;
+        std::vector<const float *> inRawData;
+        for (size_t i = 0; i < castedBlobs.size(); i++) {
+            castedBlobs[i] = FuncTestUtils::copyBlobWithCast<InferenceEngine::Precision::FP32>(inBlobs[i]);
+            inRawData.push_back(castedBlobs[i]->cbuffer().as<float *>());
+        }
+
+        req.Infer();
+
+        auto outBlob = req.GetBlob(outputName);
+        auto resShape = outBlob->getTensorDesc().getDims();
+        const auto& outPrecision = outBlob->getTensorDesc().getPrecision();
+
+        size_t outElementsCount = std::accumulate(begin(resShape), end(resShape), 1, std::multiplies<size_t>());
+        std::vector<float> refOutData = layer.RefImpl<float>(inRawData, inputShapes, resShape);
+
+        if (outPrecision != InferenceEngine::Precision::I32 && outPrecision != InferenceEngine::Precision::FP32)
+            THROW_IE_EXCEPTION << "Test for select layer doesn't support output precision different from I32 or FP32";
+
+        if (outPrecision == InferenceEngine::Precision::I32) {
+            std::vector<int32_t> convRefOutData(outElementsCount);
+            for (size_t i = 0; i < outElementsCount; i++)
+                convRefOutData[i] = static_cast<int32_t>(refOutData[i]);
+            FuncTestUtils::compareRawBuffers(outBlob->cbuffer().as<int32_t *>(), convRefOutData.data(), outElementsCount, outElementsCount);
+        } else {
+            auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32);
+            FuncTestUtils::compareRawBuffers(outBlob->cbuffer().as<float *>(), refOutData.data(), outElementsCount, outElementsCount, thr);
+        }
+
+        layer.fnPtr.reset();
+    }
+
+}  // namespace LayerTestsDefinitions
index 430d5e7..b8e3744 100644 (file)
@@ -27,7 +27,7 @@ std::string SoftMaxLayerTest::getTestCaseName(testing::TestParamInfo<softMaxLaye
     size_t axis;
     std::string targetDevice;
     std::map<std::string, std::string> config;
-    std::tie(netPrecision, inputPrecision, inputLayout, inputShape, axis, targetDevice, config) = obj.param;
+    std::tie(netPrecision, inputLayout, inputShape, axis, targetDevice, config) = obj.param;
 
     std::ostringstream result;
     result << "netPRC=" << netPrecision.name() << "_";
@@ -42,10 +42,11 @@ std::string SoftMaxLayerTest::getTestCaseName(testing::TestParamInfo<softMaxLaye
 
 void SoftMaxLayerTest::SetUp() {
     InferenceEngine::SizeVector inputShape;
+    InferenceEngine::Precision netPrecision;
     size_t axis;
-    std::tie(netPrecision, inputPrecision, inputLayout, inputShape, axis, targetDevice, config) = GetParam();
-    outputPrecision = inputPrecision;
-    outputLayout = inputLayout;
+
+    std::tie(netPrecision, inLayout, inputShape, axis, targetDevice, configuration) = GetParam();
+    outLayout = inLayout;
 
     const auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
 
@@ -58,11 +59,15 @@ void SoftMaxLayerTest::SetUp() {
 
     const ngraph::ResultVector results {std::make_shared<ngraph::opset1::Result>(softMax)};
 
-    fnPtr = std::make_shared<ngraph::Function>(results, params, "softMax");
+    function = std::make_shared<ngraph::Function>(results, params, "softMax");
 }
 
 TEST_P(SoftMaxLayerTest, CompareWithRefs) {
-    inferAndValidate();
+    Run();
+
+    if (targetDevice == std::string{CommonTestUtils::DEVICE_GPU}) {
+        PluginCache::get().reset();
+    }
 }
 
 }  // namespace LayerTestsDefinitions
index 4fec56a..16f872a 100644 (file)
@@ -22,12 +22,11 @@ namespace LayerTestsDefinitions {
 
 std::string SpaceToBatchLayerTest::getTestCaseName(const testing::TestParamInfo<spaceToBatchParamsTuple> &obj) {
     std::vector<size_t> inShapes, blockShape, padsBegin, padsEnd;
-    InferenceEngine::Precision inPrc, netPrc;
+    InferenceEngine::Precision netPrc;
     std::string targetName;
-    std::tie(blockShape, padsBegin, padsEnd, inShapes, inPrc, netPrc, targetName) = obj.param;
+    std::tie(blockShape, padsBegin, padsEnd, inShapes, netPrc, targetName) = obj.param;
     std::ostringstream result;
     result << "IS=" << CommonTestUtils::vec2str(inShapes) << "_";
-    result << "inPRC=" << inPrc.name() << "_";
     result << "netPRC=" << netPrc.name() << "_";
     result << "BS=" << CommonTestUtils::vec2str(blockShape) << "_";
     result << "PB=" << CommonTestUtils::vec2str(padsBegin) << "_";
@@ -38,8 +37,8 @@ std::string SpaceToBatchLayerTest::getTestCaseName(const testing::TestParamInfo<
 
 void SpaceToBatchLayerTest::SetUp() {
     std::vector<size_t> inputShape, blockShape, padsBegin, padsEnd;
-    std::tie(blockShape, padsBegin, padsEnd, inputShape, inputPrecision, netPrecision,
-             targetDevice) = this->GetParam();
+    InferenceEngine::Precision netPrecision;
+    std::tie(blockShape, padsBegin, padsEnd, inputShape, netPrecision, targetDevice) = this->GetParam();
 
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
@@ -47,11 +46,15 @@ void SpaceToBatchLayerTest::SetUp() {
             ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
     auto s2b = ngraph::builder::makeSpaceToBatch(paramOuts[0], ngPrc, blockShape, padsBegin, padsEnd);
     ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(s2b)};
-    fnPtr = std::make_shared<ngraph::Function>(results, params, "SpaceToBatch");
+    function = std::make_shared<ngraph::Function>(results, params, "SpaceToBatch");
 }
 
 TEST_P(SpaceToBatchLayerTest, CompareWithRefs) {
-    inferAndValidate();
+    Run();
+
+    if (targetDevice == std::string{CommonTestUtils::DEVICE_GPU}) {
+        PluginCache::get().reset();
+    }
 }
 
 }  // namespace LayerTestsDefinitions
index 943bdb2..d3aedb2 100644 (file)
@@ -23,25 +23,26 @@ namespace LayerTestsDefinitions {
 
 std::string SplitLayerTest::getTestCaseName(testing::TestParamInfo<splitParams> obj) {
     size_t numSplits, axis;
-    InferenceEngine::Precision inputPrecision, netPrecision;
+    InferenceEngine::Precision netPrecision;
     InferenceEngine::SizeVector inputShapes;
     std::string targetDevice;
-    std::tie(numSplits, axis, inputPrecision, netPrecision, inputShapes, targetDevice) = obj.param;
+    std::tie(numSplits, axis, netPrecision, inputShapes, targetDevice) = obj.param;
     std::ostringstream result;
     result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
     result << "numSplits=" << numSplits << "_";
     result << "axis=" << axis << "_";
     result << "IS";
-    result << "inPRC=" << inputPrecision.name() << "_";
     result << "netPRC=" << netPrecision.name() << "_";
     result << "targetDevice=" << targetDevice;
     return result.str();
 }
 
 void SplitLayerTest::SetUp() {
+    SetRefMode(LayerTestsUtils::RefMode::CONSTANT_FOLDING);
     size_t axis, numSplits;
     std::vector<size_t> inputShape;
-    std::tie(numSplits, axis, inputPrecision, netPrecision, inputShape, targetDevice) = this->GetParam();
+    InferenceEngine::Precision netPrecision;
+    std::tie(numSplits, axis, netPrecision, inputShape, targetDevice) = this->GetParam();
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
     auto paramOuts = ngraph::helpers::convert2OutputVector(
@@ -52,11 +53,15 @@ void SplitLayerTest::SetUp() {
     for (int i = 0; i < numSplits; i++) {
         results.push_back(std::make_shared<ngraph::opset1::Result>(split));
     }
-    fnPtr = std::make_shared<ngraph::Function>(results, params, "split");
+    function = std::make_shared<ngraph::Function>(results, params, "split");
 }
 
 TEST_P(SplitLayerTest, CompareWithRefs) {
-    inferAndValidate();
+    Run();
+
+    if (targetDevice == std::string{CommonTestUtils::DEVICE_GPU}) {
+        PluginCache::get().reset();
+    }
 };
 
 }  // namespace LayerTestsDefinitions
\ No newline at end of file
index 7bc076b..850866c 100644 (file)
@@ -24,12 +24,12 @@ std::string StridedSliceLayerTest::getTestCaseName(const testing::TestParamInfo<
     InferenceEngine::SizeVector inputShape;
     std::vector<int64_t> begin, end, stride;
     std::vector<int64_t> begin_mask, new_axis_mask, end_mask, shrink_mask, ellipsis_mask;
-    InferenceEngine::Precision inPrc, netPrc;
+    InferenceEngine::Precision netPrc;
     std::string targetName;
-    std::tie(inputShape, begin, end, stride, begin_mask, end_mask, new_axis_mask, shrink_mask, ellipsis_mask, inPrc, netPrc, targetName) = obj.param;
+    std::tie(inputShape, begin, end, stride, begin_mask, end_mask, new_axis_mask, shrink_mask, ellipsis_mask, netPrc,
+             targetName) = obj.param;
     std::ostringstream result;
     result << "inShape=" << CommonTestUtils::vec2str(inputShape) << "_";
-    result << "inPRC=" << inPrc.name() << "_";
     result << "netPRC=" << netPrc.name() << "_";
     result << "begin=" << CommonTestUtils::vec2str(begin) << "_";
     result << "end=" << CommonTestUtils::vec2str(end) << "_";
@@ -47,19 +47,26 @@ void StridedSliceLayerTest::SetUp() {
     InferenceEngine::SizeVector inputShape;
     std::vector<int64_t> begin, end, stride;
     std::vector<int64_t> begin_mask, end_mask, new_axis_mask, shrink_mask, ellipsis_mask;
+    InferenceEngine::Precision netPrecision;
     std::tie(inputShape, begin, end, stride, begin_mask, end_mask, new_axis_mask, shrink_mask, ellipsis_mask,
-             inputPrecision, netPrecision, targetDevice) = this->GetParam();
+             netPrecision, targetDevice) = this->GetParam();
 
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
-    auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
-    auto ss = ngraph::builder::makeStridedSlice(paramOuts[0], begin, end, stride, ngPrc, begin_mask, end_mask, new_axis_mask, shrink_mask, ellipsis_mask);
+    auto paramOuts = ngraph::helpers::convert2OutputVector(
+            ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
+    auto ss = ngraph::builder::makeStridedSlice(paramOuts[0], begin, end, stride, ngPrc, begin_mask, end_mask,
+                                                new_axis_mask, shrink_mask, ellipsis_mask);
     ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(ss)};
-    fnPtr = std::make_shared<ngraph::Function>(results, params, "StridedSlice");
+    function = std::make_shared<ngraph::Function>(results, params, "StridedSlice");
 }
 
 TEST_P(StridedSliceLayerTest, CompareWithRefs) {
-    inferAndValidate();
+    Run();
+
+    if (targetDevice == std::string{CommonTestUtils::DEVICE_GPU}) {
+        PluginCache::get().reset();
+    }
 }
 
 }  // namespace LayerTestsDefinitions
diff --git a/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/reshape_squeeze_reshape_relu.cpp b/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/reshape_squeeze_reshape_relu.cpp
new file mode 100644 (file)
index 0000000..1211b0d
--- /dev/null
@@ -0,0 +1,65 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#include <tuple>
+#include <string>
+#include <vector>
+#include <memory>
+#include <debug.h>
+#include "common_test_utils/common_utils.hpp"
+#include "functional_test_utils/precision_utils.hpp"
+#include "functional_test_utils/skip_tests_config.hpp"
+#include "subgraph_tests/reshape_squeeze_reshape_relu.hpp"
+
+namespace LayerTestsDefinitions {
+    std::string ReshapeSqueezeReshapeRelu::getTestCaseName(const testing::TestParamInfo<ReshapeSqueezeReshapeReluTuple> &obj) {
+        std::vector<std::vector<size_t>> input;
+        InferenceEngine::Precision netPrecision;
+        std::string targetName;
+        bool is_squeeze;
+        std::tie(input, netPrecision, targetName, is_squeeze) = obj.param;
+        std::ostringstream results;
+
+        results << "IS=" << CommonTestUtils::vec2str(input[0]) << "_";
+        results << "indices=" << CommonTestUtils::vec2str(input[1]) << "_";
+        results << "netPRC=" << netPrecision.name() << "_";
+        results << "targetDevice=" << targetName << "_";
+        return results.str();
+    }
+
+    void ReshapeSqueezeReshapeRelu::SetUp() {
+        std::vector<std::vector<size_t>> inputs;
+        InferenceEngine::Precision netPrecision;
+        bool is_squeeze;
+        std::tie(inputs, netPrecision, targetDevice, is_squeeze) = this->GetParam();
+        const std::size_t input_dim = InferenceEngine::details::product(inputs[0]);
+        auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+        std::vector<size_t> shape_input{1, input_dim};
+        auto input = ngraph::builder::makeParams(ngPrc, {shape_input});
+        auto reshape1_pattern = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
+                                                                       ngraph::Shape{inputs[0].size()},
+                                                                       inputs[0]);
+        auto reshape1 = std::make_shared<ngraph::op::v1::Reshape>(input[0], reshape1_pattern, false);
+        auto squeeze = [&]() {
+            if (is_squeeze) {
+                return ngraph::builder::makeSqueeze(reshape1, ngPrc, inputs[1]);
+            }
+            return ngraph::builder::makeUnsqueeze(reshape1, ngPrc, inputs[1]);
+        };
+        auto reshape2_pattern = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
+                                                                       ngraph::Shape{2},
+                                                                       std::vector<size_t>{1, input_dim});
+        auto reshape2 = std::make_shared<ngraph::op::v1::Reshape>(squeeze(), reshape2_pattern, false);
+        auto func = std::make_shared<ngraph::opset1::Relu>(reshape2);
+        if (is_squeeze)
+            function = std::make_shared<ngraph::Function>(func, input, "reshape_squeeze_reshape_relu");
+        else
+            function = std::make_shared<ngraph::Function>(func, input, "reshape_unsqueeze_reshape_relu");
+    }
+
+    TEST_P(ReshapeSqueezeReshapeRelu, CompareWithRefs){
+        Run();
+        if (targetDevice == std::string{CommonTestUtils::DEVICE_GPU}) {
+            PluginCache::get().reset();
+        }
+    };
+} // namespace LayerTestsDefinitions
index 59ed1b4..8a31029 100644 (file)
 namespace LayerTestsDefinitions {
 
 std::string SplitConvConcat::getTestCaseName(testing::TestParamInfo<LayerTestsUtils::basicParams> obj) {
-    InferenceEngine::Precision inputPrecision, netPrecision;
+    InferenceEngine::Precision netPrecision;
     InferenceEngine::SizeVector inputShapes, newInputShapes;
     std::string targetDevice;
-    std::tie(inputPrecision, netPrecision, inputShapes, targetDevice) = obj.param;
+    std::tie(netPrecision, inputShapes, targetDevice) = obj.param;
 
     std::ostringstream result;
     result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
-    result << "inPRC=" << inputPrecision.name() << "_";
     result << "netPRC=" << netPrecision.name() << "_";
     result << "targetDevice=" << targetDevice;
     return result.str();
@@ -37,7 +36,8 @@ std::string SplitConvConcat::getTestCaseName(testing::TestParamInfo<LayerTestsUt
 
 void SplitConvConcat::SetUp() {
     std::vector<size_t> inputShape;
-    std::tie(inputPrecision, netPrecision, inputShape, targetDevice) = this->GetParam();
+    InferenceEngine::Precision netPrecision;
+    std::tie(netPrecision, inputShape, targetDevice) = this->GetParam();
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
 
     auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
@@ -54,11 +54,15 @@ void SplitConvConcat::SetUp() {
     auto concat = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{relu1->output(0), relu2->output(0)}, 1);
 
     ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(concat)};
-    fnPtr = std::make_shared<ngraph::Function>(results, params, "SplitConvConcat");
+    function = std::make_shared<ngraph::Function>(results, params, "SplitConvConcat");
 }
 
 TEST_P(SplitConvConcat, CompareWithRefImpl) {
-    inferAndValidate();
+    Run();
+
+    if (targetDevice == std::string{CommonTestUtils::DEVICE_GPU}) {
+        PluginCache::get().reset();
+    }
 };
 
 }  // namespace LayerTestsDefinitions
\ No newline at end of file
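
With the input precision dropped from basicParams, every instantiation of these shared subgraph tests shrinks to three generators. A hypothetical example (prefix, shape, and device are illustrative, not from this commit):

    INSTANTIATE_TEST_CASE_P(NoReshape, SplitConvConcat,
            ::testing::Combine(
                    ::testing::Values(InferenceEngine::Precision::FP32),
                    ::testing::Values(InferenceEngine::SizeVector({1, 6, 40, 40})),
                    ::testing::Values(CommonTestUtils::DEVICE_CPU)),
            SplitConvConcat::getTestCaseName);
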
index 94e16c9..21b2b0b 100644 (file)
@@ -9,8 +9,4 @@ endif()
 
 if (ENABLE_FUNCTIONAL_TESTS)
     add_subdirectory(functional_test_utils)
-endif()
-
-if(ENABLE_FUZZING)
-    add_subdirectory(fuzz_test_utils)
 endif()
\ No newline at end of file
index 8a4ffd4..efead72 100644 (file)
@@ -18,7 +18,11 @@ function(add_gtest_libraries)
 
     get_target_property(gmock_include_dirs gtest INTERFACE_INCLUDE_DIRECTORIES)
     set_target_properties(gmock PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${gmock_include_dirs};${gmock_SOURCE_DIR}/include")
+
+    set_target_properties(gtest gtest_main gmock gmock_main
+                          PROPERTIES FOLDER thirdparty)
 endfunction()
+
 add_gtest_libraries()
 
 if (MSVC)
index 5ad75af..2ed182e 100644 (file)
@@ -11,6 +11,7 @@ const char DEVICE_GPU[] = "GPU";
 const char DEVICE_HDDL[] = "HDDL";
 const char DEVICE_FPGA[] = "FPGA";
 const char DEVICE_MYRIAD[] = "MYRIAD";
+const char DEVICE_KEEMBAY[] = "KMB";
 const char DEVICE_MULTI[] = "MULTI";
 const char DEVICE_HETERO[] = "HETERO";
 
index 7cd467e..75b9450 100644 (file)
@@ -225,6 +225,12 @@ InferenceEngine::Blob::Ptr inline copyBlobWithCast(const InferenceEngine::Blob::
         case InferenceEngine::Precision::U8:
             newBlob = FuncTestUtils::convertBlobPrecision<InferenceEngine::Precision::U8, targetPRC>(blob);
             break;
+        case InferenceEngine::Precision::I32:
+            newBlob = FuncTestUtils::convertBlobPrecision<InferenceEngine::Precision::I32, targetPRC>(blob);
+            break;
+        case InferenceEngine::Precision::BOOL:
+            newBlob = FuncTestUtils::convertBlobPrecision<InferenceEngine::Precision::BOOL, targetPRC>(blob);
+            break;
         default:
             THROW_IE_EXCEPTION << "Conversion from blob with precision " << blob->getTensorDesc().getPrecision().name()
                                << " not implemented yet!";
@@ -249,6 +255,7 @@ InferenceEngine::Blob::Ptr inline createAndFillBlob(const InferenceEngine::Tenso
         CASE(InferenceEngine::Precision::I64)
         CASE(InferenceEngine::Precision::BIN)
         CASE(InferenceEngine::Precision::I32)
+        CASE(InferenceEngine::Precision::BOOL)
 #undef CASE
         default:
             THROW_IE_EXCEPTION << "Wrong precision specified: " << td.getPrecision().name();
index 3a6480f..e0a6873 100644 (file)
@@ -6,65 +6,88 @@
 
 namespace LayerTestsUtils {
 
-FuncTestsCommon::FuncTestsCommon() {
+LayerTestsCommon::LayerTestsCommon() {
     core = PluginCache::get().ie(targetDevice).get();
 }
 
-void FuncTestsCommon::Run() {
+void LayerTestsCommon::Run() {
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
 
-    Configure();
+    ConfigurePlugin();
     LoadNetwork();
     Infer();
     Validate();
 }
 
-FuncTestsCommon::~FuncTestsCommon() {
+LayerTestsCommon::~LayerTestsCommon() {
     if (!configuration.empty()) {
         PluginCache::get().reset();
     }
 }
 
-InferenceEngine::Blob::Ptr FuncTestsCommon::GenerateInput(const InferenceEngine::InputInfo& info) const {
+InferenceEngine::Blob::Ptr LayerTestsCommon::GenerateInput(const InferenceEngine::InputInfo &info) const {
     return FuncTestUtils::createAndFillBlob(info.getTensorDesc());
 }
 
-void FuncTestsCommon::Compare(const std::vector<std::uint8_t>& expected, const InferenceEngine::Blob::Ptr& actual) {
+void LayerTestsCommon::Compare(const std::vector<std::uint8_t> &expected, const InferenceEngine::Blob::Ptr &actual) {
     ASSERT_EQ(expected.size(), actual->byteSize());
-    const auto& expectedBuffer = expected.data();
+    const auto &expectedBuffer = expected.data();
 
     auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(actual);
     IE_ASSERT(memory);
     const auto lockedMemory = memory->wmap();
-    const auto actualBuffer = lockedMemory.as<const std::uint8_t*>();
+    const auto actualBuffer = lockedMemory.as<const std::uint8_t *>();
 
-    const auto& precision = actual->getTensorDesc().getPrecision();
-    const auto& size = actual->size();
+    const auto &precision = actual->getTensorDesc().getPrecision();
+    const auto &size = actual->size();
     switch (precision) {
         case InferenceEngine::Precision::FP32:
-            Compare(reinterpret_cast<const float*>(expectedBuffer), reinterpret_cast<const float*>(actualBuffer), size, 1e-2f);
+            Compare(reinterpret_cast<const float *>(expectedBuffer), reinterpret_cast<const float *>(actualBuffer),
+                    size, 1e-2f);
             break;
         case InferenceEngine::Precision::I32:
-            Compare(reinterpret_cast<const std::int32_t*>(expectedBuffer), reinterpret_cast<const std::int32_t*>(actualBuffer), size, 0);
+            Compare(reinterpret_cast<const std::int32_t *>(expectedBuffer),
+                    reinterpret_cast<const std::int32_t *>(actualBuffer), size, 0);
             break;
         default:
             FAIL() << "Comparator for " << precision << " precision isn't supported";
     }
 }
 
-void FuncTestsCommon::Configure() const {
+void LayerTestsCommon::ConfigurePlugin() const {
     if (!configuration.empty()) {
         core->SetConfig(configuration, targetDevice);
     }
 }
 
-void FuncTestsCommon::LoadNetwork() {
+void LayerTestsCommon::ConfigureNetwork() const {
+    for (const auto &in : cnnNetwork.getInputsInfo()) {
+        if (inLayout != InferenceEngine::Layout::ANY) {
+            in.second->setLayout(inLayout);
+        }
+        if (inPrc != InferenceEngine::Precision::UNSPECIFIED) {
+            in.second->setPrecision(inPrc);
+        }
+    }
+
+    for (const auto &out : cnnNetwork.getOutputsInfo()) {
+        if (outLayout != InferenceEngine::Layout::ANY) {
+            out.second->setLayout(outLayout);
+        }
+        if (outPrc != InferenceEngine::Precision::UNSPECIFIED) {
+            out.second->setPrecision(outPrc);
+        }
+    }
+}
+
+void LayerTestsCommon::LoadNetwork() {
     cnnNetwork = InferenceEngine::CNNNetwork{function};
+    ConfigureNetwork();
     executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice);
     inferRequest = executableNetwork.CreateInferRequest();
 
-    for (const auto& input : cnnNetwork.getInputsInfo()) {
-        const auto& info = input.second;
+    for (const auto &input : cnnNetwork.getInputsInfo()) {
+        const auto &info = input.second;
 
         auto blob = GenerateInput(*info);
         inferRequest.SetBlob(info->name(), blob);
@@ -72,50 +95,61 @@ void FuncTestsCommon::LoadNetwork() {
     }
 }
 
-void FuncTestsCommon::Infer() {
+void LayerTestsCommon::Infer() {
     inferRequest.Infer();
 }
 
-std::vector<InferenceEngine::Blob::Ptr> FuncTestsCommon::GetOutputs() {
+std::vector<InferenceEngine::Blob::Ptr> LayerTestsCommon::GetOutputs() {
     auto outputs = std::vector<InferenceEngine::Blob::Ptr>{};
-    for (const auto& output : cnnNetwork.getOutputsInfo()) {
-        const auto& name = output.first;
+    for (const auto &output : cnnNetwork.getOutputsInfo()) {
+        const auto &name = output.first;
         outputs.push_back(inferRequest.GetBlob(name));
     }
     return outputs;
 }
 
-void FuncTestsCommon::Validate() {
+void LayerTestsCommon::Validate() {
     // nGraph interpreter does not support f16
     // IE converts f16 to f32
     ngraph::pass::ConvertPrecision<ngraph::element::Type_t::f16, ngraph::element::Type_t::f32>().run_on_function(function);
     function->validate_nodes_and_infer_types();
-
     auto referenceInputs = std::vector<std::vector<std::uint8_t>>(inputs.size());
     for (std::size_t i = 0; i < inputs.size(); ++i) {
-        const auto& input = inputs[i];
-        const auto& inputSize = input->byteSize();
+        const auto &input = inputs[i];
+        const auto &inputSize = input->byteSize();
 
-        auto& referenceInput = referenceInputs[i];
+        auto &referenceInput = referenceInputs[i];
         referenceInput.resize(inputSize);
 
         auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(input);
         IE_ASSERT(memory);
         const auto lockedMemory = memory->wmap();
-        const auto buffer = lockedMemory.as<const std::uint8_t*>();
+        const auto buffer = lockedMemory.as<const std::uint8_t *>();
         std::copy(buffer, buffer + inputSize, referenceInput.data());
     }
+    std::vector<std::vector<std::uint8_t>> expectedOutputs;
+    switch (refMode) {
+        case INTERPRETER:
+            expectedOutputs = ngraph::helpers::interpreterFunction(function, referenceInputs);
+            break;
+        case CONSTANT_FOLDING:
+            const auto &foldedFunc = ngraph::helpers::foldFunction(function, referenceInputs);
+            expectedOutputs = ngraph::helpers::getConstData(foldedFunc);
+            break;
+    }
 
-    const auto& expectedOutputs = ngraph::helpers::interpreterFunction(function, referenceInputs);
-    const auto& actualOutputs = GetOutputs();
+    const auto &actualOutputs = GetOutputs();
     IE_ASSERT(actualOutputs.size() == expectedOutputs.size())
-        << "nGraph interpreter has " << expectedOutputs.size() << " outputs, while IE " << actualOutputs.size();
+    << "nGraph interpreter has " << expectedOutputs.size() << " outputs, while IE " << actualOutputs.size();
 
     for (std::size_t outputIndex = 0; outputIndex < expectedOutputs.size(); ++outputIndex) {
-        const auto& expected = expectedOutputs[outputIndex];
-        const auto& actual = actualOutputs[outputIndex];
+        const auto &expected = expectedOutputs[outputIndex];
+        const auto &actual = actualOutputs[outputIndex];
         Compare(expected, actual);
     }
 }
 
+void LayerTestsCommon::SetRefMode(RefMode mode) {
+    refMode = mode;
+}
 }  // namespace LayerTestsUtils
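
After this rework a derived test no longer touches the CNNNetwork directly: it sets the inPrc/outPrc/inLayout/outLayout members seen in ConfigureNetwork() before Run(), as the reworked SoftMax test does with inLayout. A minimal sketch, assuming those members are accessible to derived fixtures (the class name and values are hypothetical):

    class MyLayerTest : public LayerTestsUtils::LayerTestsCommon {
    protected:
        void SetUp() override {
            // Applied by ConfigureNetwork(), which LoadNetwork() calls before
            // the network is compiled.
            inPrc = InferenceEngine::Precision::U8;
            inLayout = InferenceEngine::Layout::NHWC;
            outPrc = InferenceEngine::Precision::FP32;
            // ... build `function` and set `targetDevice`; the TEST_P body
            // then simply calls Run() ...
        }
    };
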
index 861b87c..60cacea 100644 (file)
 
 namespace LayerTestsUtils {
 typedef std::tuple<
-        InferenceEngine::Precision,  // Input Precision
         InferenceEngine::Precision,  // Network Precision
         InferenceEngine::SizeVector, // Input Shape
         std::string                  // Target Device
 > basicParams;
 
 template<typename paramType>
-class LayerTestsCommonClass : public CommonTestUtils::TestsCommon, public testing::WithParamInterface<paramType> {
+class LayerTestsCommonDeprecated : public CommonTestUtils::TestsCommon, public testing::WithParamInterface<paramType> {
 public:
     InferenceEngine::Precision netPrecision;
     InferenceEngine::Precision inputPrecision;
@@ -47,7 +46,7 @@ public:
     std::shared_ptr<ngraph::Function> fnPtr;
     std::map<std::string, std::string> config;
 
-    LayerTestsCommonClass() {
+    LayerTestsCommonDeprecated() {
         netPrecision = InferenceEngine::Precision::UNSPECIFIED;
         inputPrecision = InferenceEngine::Precision::UNSPECIFIED;
         outputPrecision = InferenceEngine::Precision::UNSPECIFIED;
@@ -58,7 +57,7 @@ public:
     void inline inferAndValidate() {
         // Skip test according to plugin specific disabledTestPatterns() (if any)
         SKIP_IF_CURRENT_TEST_IS_DISABLED()
-        // Create CNNNetwork from ngrpah::Function
+        // Create CNNNetwork from ngraph::Function
         InferenceEngine::CNNNetwork cnnNet(fnPtr);
         // Set target input/output Precisions for the network
         setNetInOutPrecision(cnnNet, inputPrecision, outputPrecision);
@@ -94,11 +93,11 @@ public:
             const auto defLayout = InferenceEngine::TensorDesc::getLayoutByDims(inBlobs[i]->getTensorDesc().getDims());
 
             if (precision == InferenceEngine::Precision::FP32 && layout == defLayout) {
-                inRawData.push_back(inBlobs[i]->cbuffer().template as<const float*>());
+                inRawData.push_back(inBlobs[i]->cbuffer().template as<const float *>());
             } else {
                 auto castedBlob = FuncTestUtils::copyBlobWithCast<InferenceEngine::Precision::FP32>(inBlobs[i]);
                 castedBlob = FuncTestUtils::convertBlobLayout(castedBlob, defLayout);
-                inRawData.push_back(castedBlob->cbuffer().template as<const float*>());
+                inRawData.push_back(castedBlob->cbuffer().template as<const float *>());
                 castedBlobs.push_back(castedBlob);
             }
         }
@@ -115,20 +114,20 @@ public:
             auto currentBlob = req.GetBlob(output.first);
 
             outElementsCount.push_back(
-                std::accumulate(
-                    std::begin(output.second->getDims()), std::end(output.second->getDims()),
-                    size_t {1}, std::multiplies<size_t>()));
+                    std::accumulate(
+                            std::begin(output.second->getDims()), std::end(output.second->getDims()),
+                            size_t{1}, std::multiplies<size_t>()));
 
             const auto precision = currentBlob->getTensorDesc().getPrecision();
             const auto layout = currentBlob->getTensorDesc().getLayout();
             const auto defLayout = InferenceEngine::TensorDesc::getLayoutByDims(currentBlob->getTensorDesc().getDims());
 
             if (precision == InferenceEngine::Precision::FP32 && layout == defLayout) {
-                outBlobsRawData.push_back(currentBlob->cbuffer().template as<float*>());
+                outBlobsRawData.push_back(currentBlob->cbuffer().template as<float *>());
             } else {
                 auto castedBlob = FuncTestUtils::copyBlobWithCast<InferenceEngine::Precision::FP32>(currentBlob);
                 castedBlob = FuncTestUtils::convertBlobLayout(castedBlob, defLayout);
-                outBlobsRawData.push_back(castedBlob->cbuffer().template as<float*>());
+                outBlobsRawData.push_back(castedBlob->cbuffer().template as<float *>());
                 castedBlobs.push_back(castedBlob);
             }
         }
@@ -150,7 +149,7 @@ public:
 
 protected:
     static void setNetInOutPrecision(InferenceEngine::CNNNetwork &cnnNet, InferenceEngine::Precision inPrc,
-                              InferenceEngine::Precision outPrc = InferenceEngine::Precision::UNSPECIFIED) {
+                                     InferenceEngine::Precision outPrc = InferenceEngine::Precision::UNSPECIFIED) {
         if (inPrc != InferenceEngine::Precision::UNSPECIFIED) {
             for (const auto &inputItem : cnnNet.getInputsInfo()) {
                 inputItem.second->setPrecision(inPrc);
@@ -163,15 +162,15 @@ protected:
         }
     }
 
-    static void setNetInOutLayout(InferenceEngine::CNNNetworkcnnNet, InferenceEngine::Layout inputLayout,
+    static void setNetInOutLayout(InferenceEngine::CNNNetwork &cnnNet, InferenceEngine::Layout inputLayout,
                                   InferenceEngine::Layout outputLayout = InferenceEngine::Layout::ANY) {
         if (inputLayout != InferenceEngine::Layout::ANY) {
-            for (const autoinputItem : cnnNet.getInputsInfo()) {
+            for (const auto &inputItem : cnnNet.getInputsInfo()) {
                 inputItem.second->setLayout(inputLayout);
             }
         }
         if (outputLayout != InferenceEngine::Layout::ANY) {
-            for (const autooutput : cnnNet.getOutputsInfo()) {
+            for (const auto &output : cnnNet.getOutputsInfo()) {
                 output.second->setLayout(outputLayout);
             }
         }
@@ -219,21 +218,32 @@ inline std::vector<std::shared_ptr<ngraph::Node>> findTargetNodes(std::shared_pt
 
 using TargetDevice = std::string;
 
-class FuncTestsCommon : public CommonTestUtils::TestsCommon {
+enum RefMode {
+    INTERPRETER,
+    CONSTANT_FOLDING
+};
+
+class LayerTestsCommon : public CommonTestUtils::TestsCommon {
 public:
-    virtual InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const;
+    virtual InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const;
+
     virtual void Run();
-    virtual void Compare(const std::vector<std::uint8_t>& expected, const InferenceEngine::Blob::Ptr& actual);
+
+    virtual void Compare(const std::vector<std::uint8_t> &expected, const InferenceEngine::Blob::Ptr &actual);
+
+    virtual void SetRefMode(RefMode mode);
 
 protected:
-     FuncTestsCommon();
-    ~FuncTestsCommon() override;
+    LayerTestsCommon();
+
+    ~LayerTestsCommon() override;
 
     template<class T>
-    void Compare(const T* expected, const T* actual, std::size_t size, T threshold) {
+    void Compare(const T *expected, const T *actual, std::size_t size, T threshold) {
         for (std::size_t i = 0; i < size; ++i) {
-            const autoref = expected[i];
-            const autores = actual[i];
+            const auto &ref = expected[i];
+            const auto &res = actual[i];
 
             const auto absoluteDifference = std::abs(res - ref);
             if (absoluteDifference <= threshold) {
@@ -242,27 +252,44 @@ protected:
 
             const auto max = std::max(std::abs(res), std::abs(ref));
             ASSERT_TRUE(max != 0 && ((absoluteDifference / max) <= threshold))
-                << "Relative comparison of values expected: " << ref << " and actual: " << res << " at index " << i << " with threshold " << threshold
-                << " failed";
+                                        << "Relative comparison of values expected: " << ref << " and actual: " << res
+                                        << " at index " << i << " with threshold " << threshold
+                                        << " failed";
         }
     }
 
+    RefMode GetRefMode() {
+        return refMode;
+    }
+
     TargetDevice targetDevice;
     std::shared_ptr<ngraph::Function> function;
     std::map<std::string, std::string> configuration;
+    // Non-default layouts/precisions will be applied to the CNNNetwork
+    InferenceEngine::Layout inLayout = InferenceEngine::Layout::ANY;
+    InferenceEngine::Layout outLayout = InferenceEngine::Layout::ANY;
+    InferenceEngine::Precision inPrc = InferenceEngine::Precision::UNSPECIFIED;
+    InferenceEngine::Precision outPrc = InferenceEngine::Precision::UNSPECIFIED;
+    InferenceEngine::ExecutableNetwork executableNetwork;
 
 private:
-    void Configure() const;
+    void ConfigurePlugin() const;
+
+    void ConfigureNetwork() const;
+
     void LoadNetwork();
+
     void Infer();
+
     std::vector<InferenceEngine::Blob::Ptr> GetOutputs();
+
     void Validate();
 
-    InferenceEngine::Corecore = nullptr;
+    InferenceEngine::Core *core = nullptr;
     InferenceEngine::CNNNetwork cnnNetwork;
-    InferenceEngine::ExecutableNetwork executableNetwork;
     InferenceEngine::InferRequest inferRequest;
     std::vector<InferenceEngine::Blob::Ptr> inputs;
+    RefMode refMode = RefMode::INTERPRETER;
 };
 
 }  // namespace LayerTestsUtils
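
For orientation, the renamed LayerTestsCommon base is meant to be subclassed: a derived test fills in the protected `function`, `targetDevice` and, if needed, the new layout/precision members, then calls Run(), which loads the network, infers, and validates against references produced according to GetRefMode(). The Compare template accepts a value whenever the absolute difference is within the threshold and otherwise falls back to a relative check. A minimal sketch of such a subclass follows; the fixture name and the Relu topology are hypothetical and only illustrate the intended flow:

// Hypothetical fixture built on the renamed base class (illustrative sketch only).
class ReluLayerTest : public LayerTestsUtils::LayerTestsCommon {
protected:
    void SetUp() override {
        // Build a one-node ngraph::Function: Parameter -> Relu -> Result.
        auto param = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32,
                                                                 ngraph::Shape{1, 3, 16, 16});
        auto relu = std::make_shared<ngraph::opset1::Relu>(param);
        function = std::make_shared<ngraph::Function>(ngraph::NodeVector{relu},
                                                      ngraph::ParameterVector{param});
        targetDevice = "CPU";
        SetRefMode(LayerTestsUtils::RefMode::INTERPRETER);  // default; CONSTANT_FOLDING also possible
    }
};

TEST_F(ReluLayerTest, CompareWithRefs) {
    Run();  // LoadNetwork -> Infer -> Validate against the reference outputs
}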
index 1f70469..e5ed689 100644 (file)
@@ -156,6 +156,7 @@ namespace FuncTestUtils {
                             " (new, old): " + item.second + ", " + refLayer->params[item.first]);
                 }
             } else {
+                if (item.first == "originalLayersNames") continue;
                 // autob is a WA for nGraph ops
                 if ((item.first != "auto_broadcast" && item.first != "autob") || item.second != "numpy") {
                     success = false;
@@ -316,6 +317,9 @@ namespace FuncTestUtils {
             refNetwork.begin();
             IE_SUPPRESS_DEPRECATED_END
         }
+        if (network.getName() != refNetwork.getName())
+            THROW_IE_EXCEPTION << "CNNNetworks have different names! " << network.getName()
+                               << " and " << refNetwork.getName();
 
         if (network.getBatchSize() != refNetwork.getBatchSize())
             THROW_IE_EXCEPTION << "CNNNetworks have different batch size! " << std::to_string(network.getBatchSize())
index c9c73e2..c4a92d6 100644 (file)
@@ -34,6 +34,20 @@ std::shared_ptr<ngraph::Node> makeConvolution(const ngraph::Output<Node> &in,
                                               const std::vector<float> &filterWeights = {},
                                               const std::vector<float> &biasesWeights = {});
 
+std::shared_ptr<ngraph::Node> makeGroupConvolution(const ngraph::Output<Node> &in,
+                                                   const element::Type &type,
+                                                   const std::vector<size_t> &filterSize,
+                                                   const std::vector<size_t> &strides,
+                                                   const std::vector<ptrdiff_t> &padsBegin,
+                                                   const std::vector<ptrdiff_t> &padsEnd,
+                                                   const std::vector<size_t> &dilations,
+                                                   const op::PadType &autoPad,
+                                                   size_t numOutChannels,
+                                                   size_t numGroups,
+                                                   bool addBiases = false,
+                                                   const std::vector<float> &filterWeights = {},
+                                                   const std::vector<float> &biasesWeights = {});
+
 std::shared_ptr<ngraph::Node> makeSplit(const ngraph::Output<Node> &in,
                                         const element::Type &type,
                                         size_t numSplits,
@@ -65,5 +79,18 @@ std::shared_ptr<ngraph::Node> makeStridedSlice(const ngraph::Output<Node> &in,
                                                const std::vector<int64_t> &new_axis_mask = std::vector<int64_t>{},
                                                const std::vector<int64_t> &shrink_mask = std::vector<int64_t>{},
                                                const std::vector<int64_t> &ellipsis_mask = std::vector<int64_t>{});
+
+std::shared_ptr<ngraph::Node> makeMVN(const ngraph::Output<Node> &in,
+                                      bool acrossChannels,
+                                      bool normalizeVariance,
+                                      double eps);
+
+std::shared_ptr<ngraph::Node> makeSqueeze(const ngraph::Output<Node> &in,
+                                          const element::Type &type,
+                                          const std::vector<size_t> &squeeze_indices);
+
+std::shared_ptr<ngraph::Node> makeUnsqueeze(const ngraph::Output<Node> &in,
+                                            const element::Type &type,
+                                            const std::vector<size_t> &squeeze_indices);
 }  // namespace builder
 }  // namespace ngraph
diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/select.hpp b/inference-engine/tests/ngraph_functions/include/ngraph_functions/select.hpp
new file mode 100644 (file)
index 0000000..36c1bac
--- /dev/null
@@ -0,0 +1,51 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <memory>
+#include <vector>
+#include <functional>
+#include <numeric>
+
+#include <ngraph/opsets/opset1.hpp>
+#include <ngraph/runtime/reference/select.hpp>
+
+namespace NGraphFunctions {
+
+class Select {
+public:
+    enum { CONDITION, THEN, ELSE, numOfInputs };
+    std::shared_ptr<ngraph::Function> fnPtr;
+
+    Select() = default;
+
+    explicit Select(ngraph::element::Type inType, const std::vector<std::vector<size_t>> &inputShapes, ngraph::op::AutoBroadcastSpec broadcast);
+
+    template<typename outType>
+    std::vector<outType> RefImpl(const std::vector<const outType *> &inData, const std::vector<std::vector<size_t>> &inDataShapes,
+                                 const std::vector<size_t> &outputShapes) {
+        size_t outElementsCount = std::accumulate(begin(outputShapes), end(outputShapes), size_t{1}, std::multiplies<size_t>());
+
+        std::vector<ngraph::Shape> shapes;
+        for (auto shape : inDataShapes)
+            shapes.push_back(ngraph::Shape(shape));
+
+        size_t maskElementsCount = std::accumulate(begin(inDataShapes[CONDITION]), end(inDataShapes[CONDITION]), size_t{1}, std::multiplies<size_t>());
+        std::vector<char> mask(maskElementsCount);
+        for (size_t i = 0; i < maskElementsCount; i++)
+            mask[i] = static_cast<char>(inData[CONDITION][i]);
+
+        std::vector<outType> dstData(outElementsCount);
+        ngraph::runtime::reference::select<outType>(mask.data(), inData[THEN], inData[ELSE], dstData.data(),
+                                                    shapes[CONDITION], shapes[THEN], shapes[ELSE], broadcastType);
+
+        return dstData;
+    }
+
+private:
+    ngraph::op::AutoBroadcastSpec broadcastType;
+};
+
+}  // namespace NGraphFunctions
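
The Select helper pairs a real ngraph function (built in the constructor, see select.cpp below) with a reference path driven by ngraph::runtime::reference::select. A rough usage sketch, with made-up shapes and values:

// Illustrative only: shapes, values and the broadcast spec are arbitrary.
std::vector<std::vector<size_t>> shapes = {{2}, {2}, {2}};  // CONDITION, THEN, ELSE
NGraphFunctions::Select select(ngraph::element::f32, shapes, ngraph::op::AutoBroadcastSpec());

std::vector<float> cond = {1.f, 0.f}, thenVals = {10.f, 20.f}, elseVals = {30.f, 40.f};
std::vector<const float *> inData = {cond.data(), thenVals.data(), elseVals.data()};

auto ref = select.RefImpl<float>(inData, shapes, {2});  // expected result: {10.f, 40.f}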
index 6fbdc07..a543b38 100644 (file)
@@ -91,7 +91,8 @@ enum ActivationTypes {
     Exp,
     Log,
     Sign,
-    Abs
+    Abs,
+    Gelu
 };
 
 ngraph::OutputVector convert2OutputVector(const std::vector<std::shared_ptr<ngraph::Node>> &nodes);
@@ -142,7 +143,24 @@ inferFnWithInterp(const std::shared_ptr<ngraph::Function> &fn,
     return outData;
 }
 
-std::vector<std::vector<std::uint8_t>> interpreterFunction(const std::shared_ptr<Function>& function, const std::vector<std::vector<std::uint8_t>>& inputs);
+std::vector<std::vector<std::uint8_t>> interpreterFunction(const std::shared_ptr<Function> &function,
+                                                           const std::vector<std::vector<std::uint8_t>> &inputs);
+
+//
+// Compares two nGraph functions; both are required to have exactly one output.
+// For each pair of visited nodes it checks:
+//  - the node types
+//  - the number of inputs and the shape of each input
+//
+void CompareFunctions(const Function& actual, const Function& expected);
+
+
+std::shared_ptr<Function> foldFunction(const std::shared_ptr<Function> &function,
+                                       const std::vector<std::vector<std::uint8_t>> &inputs);
+
+std::vector<std::vector<std::uint8_t>> getConstData(const std::shared_ptr<Function> &function);
+
+std::shared_ptr<ngraph::Node> getNodeSharedPtr(const ngraph::NodeTypeInfo &type_info, const ngraph::OutputVector &outputVector);
 
 }  // namespace helpers
 }  // namespace ngraph
index 9745164..03d9a53 100644 (file)
@@ -37,6 +37,8 @@ std::shared_ptr<ngraph::Node> makeActivation(const ngraph::Output<Node> &in,
             return std::make_shared<ngraph::op::Sign>(in);
         case ngraph::helpers::ActivationTypes::Abs:
             return std::make_shared<ngraph::op::Abs>(in);
+        case ngraph::helpers::ActivationTypes::Gelu:
+            return std::make_shared<ngraph::op::Gelu>(in);
         default:
             throw std::runtime_error("Can't create layer for this activation type");
     }
diff --git a/inference-engine/tests/ngraph_functions/src/group_convolution.cpp b/inference-engine/tests/ngraph_functions/src/group_convolution.cpp
new file mode 100644 (file)
index 0000000..495bb20
--- /dev/null
@@ -0,0 +1,50 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+//
+
+#include <vector>
+#include <memory>
+
+#include "ngraph_functions/builders.hpp"
+
+namespace ngraph {
+namespace builder {
+
+std::shared_ptr<Node> makeGroupConvolution(const ngraph::Output<Node> &in,
+                                           const element::Type &type,
+                                           const std::vector<size_t> &filterSize,
+                                           const std::vector<size_t> &strides,
+                                           const std::vector<ptrdiff_t> &padsBegin,
+                                           const std::vector<ptrdiff_t> &padsEnd,
+                                           const std::vector<size_t> &dilations,
+                                           const op::PadType &autoPad,
+                                           size_t numOutChannels,
+                                           size_t numGroups,
+                                           bool addBiases,
+                                           const std::vector<float> &filterWeights,
+                                           const std::vector<float> &biasesWeights) {
+    bool randomFilterWeights = filterWeights.empty();
+    auto shape = in.get_shape();
+    std::vector<size_t> filterWeightsShape = {numOutChannels, shape[1]};
+    if (filterWeightsShape[0] % numGroups || filterWeightsShape[1] % numGroups)
+        throw std::runtime_error("incorrect shape for GroupConvolution");
+    filterWeightsShape[0] /= numGroups;
+    filterWeightsShape[1] /= numGroups;
+    filterWeightsShape.insert(filterWeightsShape.begin(), numGroups);
+    filterWeightsShape.insert(filterWeightsShape.end(), filterSize.begin(), filterSize.end());
+    auto filterWeightsNode = makeConstant(type, filterWeightsShape, filterWeights, randomFilterWeights);
+    auto conv = std::make_shared<opset1::GroupConvolution>(in, filterWeightsNode, strides, padsBegin, padsEnd,
+                                                           dilations, autoPad);
+    if (addBiases) {
+        bool randomBiases = biasesWeights.empty();
+        auto biasesWeightsNode = makeConstant(type, {}, biasesWeights, randomBiases);
+        auto add = std::make_shared<ngraph::opset1::Add>(conv, biasesWeightsNode);
+        return add;
+    } else {
+        return conv;
+    }
+}
+
+}  // namespace builder
+}  // namespace ngraph
\ No newline at end of file
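
The weight-shape bookkeeping in makeGroupConvolution is easiest to follow with concrete numbers; for example, with an input of shape {1, 8, 32, 32}, numOutChannels = 16, numGroups = 4 and filterSize = {3, 3}:

// in.get_shape()              -> {1, 8, 32, 32}   (shape[1] == 8 input channels)
// filterWeightsShape starts as {numOutChannels, inChannels} = {16, 8}
// both entries divided by numGroups (4)                     -> {4, 2}
// numGroups prepended                                       -> {4, 4, 2}
// filterSize appended                                       -> {4, 4, 2, 3, 3}
// i.e. the GroupConvolution weight layout {G, O/G, I/G, kH, kW}.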
diff --git a/inference-engine/tests/ngraph_functions/src/mvn.cpp b/inference-engine/tests/ngraph_functions/src/mvn.cpp
new file mode 100644 (file)
index 0000000..6a26eb5
--- /dev/null
@@ -0,0 +1,28 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "ngraph_functions/builders.hpp"
+
+namespace ngraph {
+namespace builder {
+
+std::shared_ptr<ngraph::Node> makeMVN(const ngraph::Output<Node> &in,
+                                      bool acrossChannels,
+                                      bool normalizeVariance,
+                                      double eps) {
+    auto mvnNode = std::make_shared<ngraph::op::MVN>(in, acrossChannels, normalizeVariance, eps);
+
+    // The nGraph MVN op implicitly adds the 0th (batch) dimension to the reduction axes set, which is not valid, so the axes are set explicitly below
+    ngraph::AxisSet axes;
+    const size_t startAxis = acrossChannels ? 1 : 2;
+    const size_t numOfDims = in.get_shape().size();
+    for (size_t i = startAxis; i < numOfDims; i++)
+        axes.insert(i);
+    mvnNode->set_reduction_axes(axes);
+
+    return mvnNode;
+}
+
+}  // namespace builder
+}  // namespace ngraph
\ No newline at end of file
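
For a 4D input, the explicit axes computed above work out as follows (a worked example of the existing logic, not new behavior):

// numOfDims == 4 (e.g. NCHW layout):
//   acrossChannels == true  -> startAxis = 1 -> axes = {1, 2, 3}  (C, H, W)
//   acrossChannels == false -> startAxis = 2 -> axes = {2, 3}     (H, W)
// Axis 0 (batch) is never reduced, overriding the implicit 0th-dimension
// behavior of ngraph::op::MVN that the comment above describes.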
diff --git a/inference-engine/tests/ngraph_functions/src/select.cpp b/inference-engine/tests/ngraph_functions/src/select.cpp
new file mode 100644 (file)
index 0000000..fc2127f
--- /dev/null
@@ -0,0 +1,32 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <memory>
+#include <vector>
+
+#include "ngraph_functions/select.hpp"
+#include "ngraph_functions/utils/ngraph_helpers.hpp"
+
+namespace NGraphFunctions {
+
+    Select::Select(ngraph::element::Type inType, const std::vector<std::vector<size_t>> &inputShapes, ngraph::op::AutoBroadcastSpec broadcast) {
+        ngraph::ParameterVector paramNodesVector;
+
+        auto paramNode = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::Type_t::boolean, ngraph::Shape(inputShapes[CONDITION]));
+        paramNodesVector.push_back(paramNode);
+        for (size_t i = 1; i < inputShapes.size(); i++) {
+            paramNode = std::make_shared<ngraph::opset1::Parameter>(inType, ngraph::Shape(inputShapes[i]));
+            paramNodesVector.push_back(paramNode);
+        }
+        auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(paramNodesVector));
+        broadcastType = broadcast;
+
+        auto SelectNode = std::make_shared<ngraph::opset1::Select>(paramOuts[CONDITION], paramOuts[THEN], paramOuts[ELSE], broadcastType);
+
+        auto result = std::make_shared<ngraph::opset1::Result>(SelectNode);
+
+        fnPtr = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, paramNodesVector, "select");
+    }
+
+}  // namespace NGraphFunctions
\ No newline at end of file
diff --git a/inference-engine/tests/ngraph_functions/src/squeeze.cpp b/inference-engine/tests/ngraph_functions/src/squeeze.cpp
new file mode 100644 (file)
index 0000000..1d8c7ad
--- /dev/null
@@ -0,0 +1,21 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#include <vector>
+#include <memory>
+
+#include "ngraph_functions/builders.hpp"
+
+namespace ngraph {
+namespace builder {
+std::shared_ptr<ngraph::Node> makeSqueeze(const ngraph::Output<Node> &in,
+                                          const element::Type &type,
+                                          const std::vector<size_t> &squeeze_indices) {
+    auto squeeze_node = std::make_shared<ngraph::opset1::Constant>(type,
+            ngraph::Shape{squeeze_indices.size()},
+            std::vector<size_t>{squeeze_indices});
+    auto squeeze = std::make_shared<ngraph::opset1::Squeeze>(in, squeeze_node);
+    return squeeze;
+}
+} // namespace builder
+} // namespace ngraph
diff --git a/inference-engine/tests/ngraph_functions/src/unsqueeze.cpp b/inference-engine/tests/ngraph_functions/src/unsqueeze.cpp
new file mode 100644 (file)
index 0000000..a86aae1
--- /dev/null
@@ -0,0 +1,21 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#include <vector>
+#include <memory>
+
+#include "ngraph_functions/builders.hpp"
+
+namespace ngraph {
+namespace builder {
+std::shared_ptr<ngraph::Node> makeUnsqueeze(const ngraph::Output<Node> &in,
+                                            const element::Type &type,
+                                            const std::vector<size_t> &squeeze_indices) {
+    auto squeeze_node = std::make_shared<ngraph::opset1::Constant>(type,
+                                                                   ngraph::Shape{squeeze_indices.size()},
+                                                                   std::vector<size_t>{squeeze_indices});
+    auto unsqueeze = std::make_shared<ngraph::opset1::Unsqueeze>(in, squeeze_node);
+    return unsqueeze;
+}
+} // namespace builder
+} // namespace ngraph
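
Both builders simply wrap the axis indices into an opset1::Constant feeding the respective op; a minimal sketch of their use (shapes are illustrative):

// Illustrative usage of the new squeeze/unsqueeze builders.
auto param = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32,
                                                         ngraph::Shape{1, 3, 1, 2});
auto squeezed = ngraph::builder::makeSqueeze(param, ngraph::element::i64, {0, 2});
// squeezed->get_shape() == {3, 2}
auto restored = ngraph::builder::makeUnsqueeze(squeezed, ngraph::element::i64, {0, 2});
// restored->get_shape() == {1, 3, 1, 2}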
index fe91c4a..be02b12 100644 (file)
@@ -4,17 +4,21 @@
 
 #include <vector>
 #include <memory>
+#include <queue>
 
 #include <ngraph/opsets/opset1.hpp>
+#include <ngraph/opsets/opset3.hpp>
+#include <ngraph/specialize_function.hpp>
 
 #include <ngraph_functions/utils/ngraph_helpers.hpp>
+#include <ngraph/opsets/opset.hpp>
 
 namespace ngraph {
 namespace helpers {
 
-ngraph::OutputVector convert2OutputVector(const std::vector<std::shared_ptr<ngraph::Node>> &nodes) {
-    ngraph::OutputVector outs;
-    std::for_each(nodes.begin(), nodes.end(), [&outs](const std::shared_ptr<ngraph::Node> &n) {
+OutputVector convert2OutputVector(const std::vector<std::shared_ptr<Node>> &nodes) {
+    OutputVector outs;
+    std::for_each(nodes.begin(), nodes.end(), [&outs](const std::shared_ptr<Node> &n) {
         for (const auto &out_p : n->outputs()) {
             outs.push_back(out_p);
         }
@@ -22,29 +26,32 @@ ngraph::OutputVector convert2OutputVector(const std::vector<std::shared_ptr<ngra
     return outs;
 }
 
-std::vector<std::vector<std::uint8_t>> interpreterFunction(const std::shared_ptr<Function>& function, const std::vector<std::vector<std::uint8_t>>& inputs) {
-    ngraph::runtime::Backend::set_backend_shared_library_search_directory("");
+std::vector<std::vector<std::uint8_t>> interpreterFunction(const std::shared_ptr<Function> &function,
+                                                           const std::vector<std::vector<std::uint8_t>> &inputs) {
+    runtime::Backend::set_backend_shared_library_search_directory("");
     ngraph_register_interpreter_backend();
-    auto backend = ngraph::runtime::Backend::create("INTERPRETER");
+    auto backend = runtime::Backend::create("INTERPRETER");
 
-    const autoparameters = function->get_parameters();
-    const autoparametersNumber = parameters.size();
-    const autoinputsNumber = inputs.size();
+    const auto &parameters = function->get_parameters();
+    const auto &parametersNumber = parameters.size();
+    const auto &inputsNumber = inputs.size();
     NGRAPH_CHECK(parametersNumber == inputsNumber,
-        "Got function (", function->get_friendly_name(), ") with ", parametersNumber, " parameters, but ", inputsNumber, " input blobs");
+                 "Got function (", function->get_friendly_name(), ") with ", parametersNumber, " parameters, but ",
+                 inputsNumber, " input blobs");
 
     auto inputTensors = std::vector<std::shared_ptr<runtime::Tensor>>{};
-    for (const autoparameter : parameters) {
-        const autoparameterIndex = function->get_parameter_index(parameter);
-        const autoparameterShape = parameter->get_shape();
-        const auto& parameterType  = parameter->get_element_type();
-        const auto& parameterSize  = ngraph::shape_size(parameterShape) * parameterType.size();
-
-        const autoinput = inputs[parameterIndex];
-        const autoinputSize = input.size();
+    for (const auto &parameter : parameters) {
+        const auto &parameterIndex = function->get_parameter_index(parameter);
+        const auto &parameterShape = parameter->get_shape();
+        const auto &parameterType = parameter->get_element_type();
+        const auto &parameterSize = shape_size(parameterShape) * parameterType.size();
+
+        const auto &input = inputs[parameterIndex];
+        const auto &inputSize = input.size();
         NGRAPH_CHECK(parameterSize == inputSize,
-            "Got parameter (", parameter->get_friendly_name(), ") of size ", parameterSize, " bytes, but corresponding input with index ", parameterIndex,
-            " has ", inputSize, " bytes");
+                     "Got parameter (", parameter->get_friendly_name(), ") of size ", parameterSize,
+                     " bytes, but corresponding input with index ", parameterIndex,
+                     " has ", inputSize, " bytes");
 
         auto tensor = backend->create_tensor(parameterType, parameterShape);
         tensor->write(input.data(), parameterSize);
@@ -52,22 +59,132 @@ std::vector<std::vector<std::uint8_t>> interpreterFunction(const std::shared_ptr
     }
 
     auto outputTensors = std::vector<std::shared_ptr<runtime::Tensor>>{};
-    const auto& results = function->get_results();
-    std::transform(results.cbegin(), results.cend(), std::back_inserter(outputTensors), [&backend](const std::shared_ptr<op::Result>& result) {
-        return backend->create_tensor(result->get_element_type(), result->get_shape()); });
+    const auto &results = function->get_results();
+    std::transform(results.cbegin(), results.cend(), std::back_inserter(outputTensors),
+                   [&backend](const std::shared_ptr<op::Result> &result) {
+                       return backend->create_tensor(result->get_element_type(), result->get_shape());
+                   });
 
     auto handle = backend->compile(function);
     handle->call_with_validate(outputTensors, inputTensors);
     auto outputs = std::vector<std::vector<std::uint8_t>>(results.size());
-    for (const autoresult : results) {
-        const autoresultIndex = function->get_result_index(result);
-        autooutput = outputs[resultIndex];
-        output.resize(ngraph::shape_size(result->get_shape()) * result->get_element_type().size());
+    for (const auto &result : results) {
+        const auto &resultIndex = function->get_result_index(result);
+        auto &output = outputs[resultIndex];
+        output.resize(shape_size(result->get_shape()) * result->get_element_type().size());
         outputTensors[resultIndex]->read(output.data(), output.size());
     }
 
     return outputs;
 }
 
+std::shared_ptr<Function> foldFunction(const std::shared_ptr<Function> &function,
+                                       const std::vector<std::vector<std::uint8_t>> &inputs) {
+    std::vector<element::Type> paramElementTypes;
+    std::vector<PartialShape> paramShapes;
+    for (const auto &param : function->get_parameters()) {
+        paramElementTypes.emplace_back(param->get_element_type());
+        paramShapes.emplace_back(param->get_shape());
+    }
+
+    auto inBuffers = std::vector<void *>(inputs.size());
+    std::transform(inputs.cbegin(), inputs.cend(), inBuffers.begin(),
+                   [](const std::vector<std::uint8_t> &input) {
+                       // const_cast added to satisfy specialize_function interface
+                       // which requires inputs as std::vector<void *>
+                       return const_cast<std::uint8_t *>(input.data());
+                   });
+
+    const auto &foldedFunc = specialize_function(function, paramElementTypes, paramShapes, inBuffers, true, true);
+    for (const auto &op : foldedFunc->get_ops()) {
+        NGRAPH_CHECK(op->is_constant() || op->is_output() || op->is_parameter(),
+                     "Function was not fully folded to constant state!\n",
+                     "At least one non constant node with type ", op->get_type_name(),
+                     " present in function.");
+    }
+    return foldedFunc;
+}
+
+std::vector<std::vector<std::uint8_t>> getConstData(const std::shared_ptr<Function> &function) {
+    size_t numOutputs = function->get_output_size();
+    auto outputs = std::vector<std::vector<std::uint8_t>>(numOutputs);
+    for (size_t i = 0; i < numOutputs; i++) {
+        const auto &output = function->output(i).get_node_shared_ptr();
+        NGRAPH_CHECK(output->inputs().size() == 1);
+        auto parentNode = output->input_value(0).get_node_shared_ptr();
+        NGRAPH_CHECK(parentNode->is_constant(), "Function was not fully folded to constant state!\n",
+                     "Parent node of one of results is not constant and has type ", parentNode->get_type_name());
+        const auto data = std::dynamic_pointer_cast<opset1::Constant>(parentNode)->get_data_ptr<std::uint8_t>();
+        const auto dataSize = shape_size(parentNode->get_shape()) * parentNode->get_element_type().size();
+        outputs[i].resize(dataSize);
+        std::copy(data, data + dataSize, outputs[i].data());
+    }
+    return outputs;
+}
+
+namespace {
+
+using ComparingNodesPair = std::pair<std::shared_ptr<ngraph::Node>, std::shared_ptr<ngraph::Node>>;
+
+std::string toString(const NodeTypeInfo& typeInfo) {
+    return std::string(typeInfo.name) + " ver. " + std::to_string(typeInfo.version);
+}
+
+void CompareShapes(const PartialShape& actual, const PartialShape& expected) {
+    NGRAPH_CHECK(actual.relaxes(expected) && actual.refines(expected), "Functions compare: Different shape detected ", actual, " and ", expected);
+}
+
+void CompareNodes(const Node& actual, const Node& expected) {
+    const auto& actualType   = actual.get_type_info();
+    const auto& expectedType = expected.get_type_info();
+    NGRAPH_CHECK(actualType == expectedType, "Functions compare: data types must be equal ", toString(actualType), " != ", toString(expectedType));
+
+    const auto& numActualInputs = actual.inputs().size();
+    const auto& numExpectedInputs = expected.inputs().size();
+    NGRAPH_CHECK(numActualInputs == numExpectedInputs, "Functions compare: numbers of inputs are different: ", numActualInputs, " and ", numExpectedInputs);
+}
+
+}  // namespace
+
+void CompareFunctions(const Function& actual, const Function& expected) {
+    const auto& actualResults = actual.get_results();
+    NGRAPH_CHECK(actualResults.size() == 1, "Got ", actualResults.size(), " outputs for function, but only single output functions are supported");
+    const auto& actualResult = actualResults.front();
+
+    const auto& expectedResults = expected.get_results();
+    NGRAPH_CHECK(expectedResults.size() == 1, "Got ", expectedResults.size(), " outputs for function, but only single output functions are supported");
+    const auto& expectedResult = expectedResults.front();
+
+    std::queue<ComparingNodesPair> nodes;
+    nodes.emplace(actualResult, expectedResult);
+    while (!nodes.empty()) {
+        const auto& checkingNodes = nodes.front();
+        const auto& actualNode    = checkingNodes.first;
+        const auto& expectedNode  = checkingNodes.second;
+        nodes.pop();
+
+        CompareNodes(*actualNode, *expectedNode);
+
+        for (std::size_t i = 0; i < actualNode->inputs().size(); ++i) {
+            const auto& actualShape = actualNode->input(i).get_partial_shape();
+            const auto& expectedShape = expectedNode->input(i).get_partial_shape();
+            CompareShapes(actualShape, expectedShape);
+
+            nodes.emplace(actualNode->input_value(i).get_node_shared_ptr(), expectedNode->input_value(i).get_node_shared_ptr());
+        }
+    }
+}
+
+std::shared_ptr<ngraph::Node> getNodeSharedPtr(const ngraph::NodeTypeInfo &type_info, const ngraph::OutputVector &outputVector) {
+    for (const auto& opset : {ngraph::get_opset3(), ngraph::get_opset2(), ngraph::get_opset1()})
+        if (opset.contains_type(type_info)) {
+            const auto ngraphNode = std::shared_ptr<ngraph::Node>(opset.create(type_info.name));
+            ngraphNode->set_arguments(outputVector);
+            ngraphNode->validate_and_infer_types();
+            return ngraphNode;
+        }
+    NGRAPH_UNREACHABLE("supported opsets does not contain op with name: ", type_info.name, " version: ", type_info.version);
+}
+
 }  // namespace helpers
 }  // namespace ngraph
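
Taken together, foldFunction and getConstData implement the CONSTANT_FOLDING reference path introduced for LayerTestsCommon: fold the function down to Constants, then read each output's bytes back. A minimal sketch, under the assumption that the function and its serialized inputs already exist (the helper name is hypothetical):

// Sketch of a constant-folding reference path built from the new helpers.
std::vector<std::vector<std::uint8_t>> calculateFoldedRefs(
        const std::shared_ptr<ngraph::Function> &function,
        const std::vector<std::vector<std::uint8_t>> &inputs) {
    // specialize_function substitutes the inputs and folds; foldFunction
    // checks that only Constant/Parameter/Result nodes remain.
    const auto folded = ngraph::helpers::foldFunction(function, inputs);
    // Read the raw bytes of each (now constant) output.
    return ngraph::helpers::getConstData(folded);
}

CompareFunctions then gives transformation tests a structural equality check over single-output functions, and getNodeSharedPtr lets them instantiate an op by NodeTypeInfo from opset3/2/1 without naming the concrete class.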
index 7769b67..853e53a 100644 (file)
@@ -22,4 +22,3 @@ endif ()
 if(NGRAPH_ONNX_IMPORT_ENABLE)
     add_subdirectory(frontends/onnx_import)
 endif()
-
index 39fb1d5..0721032 100644 (file)
@@ -2,6 +2,8 @@
 # SPDX-License-Identifier: Apache-2.0
 #
 
+disable_deprecated_warnings()
+
 set(TARGET_NAME vpuUnitTests)
 
 include(${XLINK_DIR}/XLink.cmake)
@@ -15,10 +17,11 @@ addIeTargetTest(
             ${IE_MAIN_SOURCE_DIR}/thirdparty/movidius/mvnc/include/watchdog
             ${XLINK_INCLUDE}
             ${XLINK_PLATFORM_INCLUDE}
+            ${CMAKE_CURRENT_SOURCE_DIR}/base/
         LINK_LIBRARIES
+            vpu_graph_transformer_test_static
             unitTestUtils
             mvnc
-            vpu_graph_transformer_test_static
         ADD_CPPLINT
         LABELS
             VPU
diff --git a/inference-engine/tests/unit/vpu/base/graph_transformer_tests.cpp b/inference-engine/tests/unit/vpu/base/graph_transformer_tests.cpp
new file mode 100644 (file)
index 0000000..80a3f6f
--- /dev/null
@@ -0,0 +1,319 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "graph_transformer_tests.hpp"
+
+#include <vpu/utils/io.hpp>
+
+#include <atomic>
+#include <iomanip>
+
+namespace vpu {
+
+StagePtr TestStage::cloneImpl() const {
+    return std::make_shared<TestStage>(*this);
+}
+
+namespace {
+
+template <typename Value>
+void setInOutPortInfo(
+        const Stage& stage,
+        const std::string& attrBaseName,
+        StageDataInfo<Value>& info) {
+    auto inAttrName = formatString("test_input_%s_info", attrBaseName);
+    auto outAttrName = formatString("test_output_%s_info", attrBaseName);
+
+    if (stage->attrs().has(inAttrName)) {
+        const auto& inputInfo = stage->attrs().get<InOutPortMap<Value>>(inAttrName);
+
+        for (const auto& p : inputInfo) {
+            info.setInput(stage->inputEdge(p.first), p.second);
+        }
+    }
+
+    if (stage->attrs().has(outAttrName)) {
+        const auto& outputInfo = stage->attrs().get<InOutPortMap<Value>>(outAttrName);
+
+        for (const auto& p : outputInfo) {
+            info.setOutput(stage->outputEdge(p.first), p.second);
+        }
+    }
+}
+
+} // namespace
+
+InputInfo InputInfo::fromNetwork(int ind) {
+    InputInfo info;
+    info.type = InputType::Original;
+    info.originalInputInd = ind;
+    return info;
+}
+
+InputInfo InputInfo::fromPrevStage(int ind) {
+    InputInfo info;
+    info.type = InputType::PrevStageOutput;
+    info.prevStageInd = ind;
+    info.prevStageOutputInd = 0;
+    return info;
+}
+
+InputInfo& InputInfo::output(int ind) {
+    assert(type == InputType::PrevStageOutput);
+    prevStageOutputInd = ind;
+    return *this;
+}
+
+OutputInfo OutputInfo::fromNetwork(int ind) {
+    OutputInfo info;
+    info.type = OutputType::Original;
+    info.originalOutputInd = ind;
+    return info;
+}
+
+OutputInfo OutputInfo::intermediate(const DataDesc& desc) {
+    OutputInfo info;
+    info.type = OutputType::Intermediate;
+    info.desc = desc;
+    return info;
+}
+
+void TestStage::propagateDataOrderImpl(StageDataInfo<DimsOrder>& orderInfo) {
+    setInOutPortInfo(this, "DataOrder", orderInfo);
+}
+
+void TestStage::getDataStridesRequirementsImpl(StageDataInfo<StridesRequirement>& stridesInfo) {
+    setInOutPortInfo(this, "Strides", stridesInfo);
+}
+
+void TestStage::finalizeDataLayoutImpl() {
+}
+
+void TestStage::getBatchSupportInfoImpl(StageDataInfo<BatchSupport>& batchInfo) {
+    setInOutPortInfo(this, "Batch", batchInfo);
+
+    if (attrs().has("test_input_Batch_info")) {
+        for (const auto& outEdge : outputEdges()) {
+            batchInfo.setOutput(outEdge, BatchSupport::Split);
+        }
+    }
+}
+
+void TestStage::serializeParamsImpl(BlobSerializer&) const {
+}
+
+void TestStage::serializeDataImpl(BlobSerializer&) const {
+}
+
+TestModel::TestModel(const Model& model) : _model(model) {}
+
+const Model& TestModel::getBaseModel() const {
+    return _model;
+}
+
+const DataVector& TestModel::getInputs() const {
+    return _inputs;
+}
+
+const DataVector& TestModel::getOutputs() const {
+    return _outputs;
+}
+
+const StageVector& TestModel::getStages() const {
+    return _stages;
+}
+
+void TestModel::createInputs(std::vector<DataDesc> inputDescs) {
+    const auto numInputs = inputDescs.size();
+
+    _model->attrs().set<int>("numInputs", numInputs);
+    _inputs.resize(numInputs);
+
+    for (int i = 0; i < numInputs; ++i) {
+        _inputs[i] = _model->addInputData(formatString("Input %d", i), inputDescs[i]);
+    }
+}
+
+void TestModel::createOutputs(std::vector<DataDesc> outputDescs) {
+    const auto numOutputs = outputDescs.size();
+
+    _model->attrs().set<int>("numOutputs", numOutputs);
+    _outputs.resize(numOutputs);
+
+    for (int i = 0; i < numOutputs; ++i) {
+        _outputs[i] = _model->addOutputData(formatString("Output %d", i), outputDescs[i]);
+    }
+}
+
+Stage TestModel::addStage(
+        std::initializer_list<InputInfo> curInputInfos,
+        std::initializer_list<OutputInfo> curOutputInfos) {
+    DataVector curInputs;
+    for (const auto& info : curInputInfos) {
+        if (info.type == InputType::Original) {
+            curInputs.push_back(_inputs.at(info.originalInputInd));
+        } else {
+            curInputs.push_back(_stages.at(info.prevStageInd)->output(info.prevStageOutputInd));
+        }
+    }
+
+    DataVector curOutputs;
+    for (const auto& info : curOutputInfos) {
+        if (info.type == OutputType::Original) {
+            curOutputs.push_back(_outputs.at(info.originalOutputInd));
+        } else {
+            curOutputs.push_back(_model->addNewData(formatString("Data %d / %d", _stages.size(), curOutputs.size()), info.desc));
+        }
+    }
+
+    auto stage = _model->addNewStage<TestStage>(
+            formatString("Stage %m%m%d", std::setw(2), std::setfill('0'), _stages.size()),
+            StageType::None,
+            nullptr,
+            curInputs,
+            curOutputs);
+    stage->attrs().set<int>("test_ind", _stages.size());
+
+    _stages.push_back(stage);
+
+    return stage;
+}
+void TestModel::setStageDataOrderInfo(
+        int stageInd,
+        const InOutPortMap<DimsOrder>& inputInfo,
+        const InOutPortMap<DimsOrder>& outputInfo) {
+    if (!inputInfo.empty()) {
+        _stages.at(stageInd)->attrs().set("test_input_DataOrder_info", inputInfo);
+    }
+    if (!outputInfo.empty()) {
+        _stages.at(stageInd)->attrs().set("test_input_DataOrder_info", outputInfo);
+    }
+}
+
+void TestModel::setStageStridesInfo(
+        int stageInd,
+        const InOutPortMap<StridesRequirement>& inputInfo,
+        const InOutPortMap<StridesRequirement>& outputInfo) {
+    if (!inputInfo.empty()) {
+        _stages.at(stageInd)->attrs().set("test_input_Strides_info", inputInfo);
+    }
+    if (!outputInfo.empty()) {
+        _stages.at(stageInd)->attrs().set("test_input_Strides_info", outputInfo);
+    }
+}
+
+void TestModel::setStageBatchInfo(
+        int stageInd,
+        const InOutPortMap<BatchSupport>& inputInfo) {
+    if (!inputInfo.empty()) {
+        _stages.at(stageInd)->attrs().set("test_input_Batch_info", inputInfo);
+    }
+}
+
+template <class StageRange>
+void checkStageTestInds(const StageRange& stageRange, std::initializer_list<int> expectedInds) {
+    auto stageVector = toVector(stageRange);
+
+    ASSERT_EQ(expectedInds.size(), stageVector.size());
+
+    size_t stageInd = 0;
+    for (auto expectedInd : expectedInds) {
+        ASSERT_EQ(expectedInd, stageVector[stageInd]->attrs().template get<int>("test_ind"));
+        ++stageInd;
+    }
+}
+
+template <class StageRange>
+void checkStageTestInds(const StageRange& stageRange, std::initializer_list<int> expectedInds, const std::function<void(const Stage&)>& extraCheck) {
+    auto stageVector = toVector(stageRange);
+
+    ASSERT_EQ(expectedInds.size(), stageVector.size());
+
+    size_t stageInd = 0;
+    for (auto expectedInd : expectedInds) {
+        ASSERT_EQ(expectedInd, stageVector[stageInd]->attrs().template get<int>("test_ind"));
+        ASSERT_NO_FATAL_FAILURE(extraCheck(stageVector[stageInd]));
+
+        ++stageInd;
+    }
+}
+
+bool checkExecutionOrder(const Model& model, const std::vector<int>& execOrder) {
+    auto it = execOrder.begin();
+
+    for (const auto& stage : model->getStages()) {
+        if (stage->id() == *it) {
+            ++it;
+        }
+    }
+
+    return it == execOrder.end();
+}
+
+void GraphTransformerTest::SetUp() {
+    _log = std::make_shared<Logger>(
+            "Test",
+            LogLevel::Debug,
+            consoleOutput());
+
+    stageBuilder = std::make_shared<StageBuilder>();
+    frontEnd = std::make_shared<FrontEnd>(stageBuilder);
+    backEnd = std::make_shared<BackEnd>();
+    passManager = std::make_shared<PassManager>(stageBuilder, backEnd);
+}
+
+void GraphTransformerTest::TearDown() {
+    for (const auto& model : _models) {
+        backEnd->dumpModel(model);
+    }
+
+    if (compileEnvInitialized) {
+        CompileEnv::free();
+    }
+}
+
+void GraphTransformerTest::InitCompileEnv() {
+    if (const auto envVar = std::getenv("IE_VPU_DUMP_INTERNAL_GRAPH_FILE_NAME")) {
+        config.dumpInternalGraphFileName = envVar;
+    }
+    if (const auto envVar = std::getenv("IE_VPU_DUMP_INTERNAL_GRAPH_DIRECTORY")) {
+        config.dumpInternalGraphDirectory = envVar;
+    }
+    if (const auto envVar = std::getenv("IE_VPU_DUMP_ALL_PASSES")) {
+        config.dumpAllPasses = std::stoi(envVar) != 0;
+    }
+
+    CompileEnv::init(platform, config, _log);
+    compileEnvInitialized = true;
+}
+
+namespace {
+
+std::atomic<int> g_counter(0);
+
+}  // namespace
+
+Model GraphTransformerTest::CreateModel() {
+    const auto& env = CompileEnv::get();
+
+    auto unitTest = testing::UnitTest::GetInstance();
+    IE_ASSERT(unitTest != nullptr);
+    auto curTestInfo = unitTest->current_test_info();
+    IE_ASSERT(curTestInfo != nullptr);
+
+    auto model = std::make_shared<ModelObj>(
+            formatString("%s/%s", curTestInfo->test_case_name(), curTestInfo->name()));
+    model->attrs().set<int>("index", g_counter.fetch_add(1));
+    model->attrs().set<Resources>("resources", env.resources);
+
+    _models.push_back(model);
+
+    return model;
+}
+
+TestModel GraphTransformerTest::CreateTestModel() {
+    return TestModel(CreateModel());
+}
+
+} // namespace vpu
diff --git a/inference-engine/tests/unit/vpu/base/graph_transformer_tests.hpp b/inference-engine/tests/unit/vpu/base/graph_transformer_tests.hpp
new file mode 100644 (file)
index 0000000..a25215f
--- /dev/null
@@ -0,0 +1,151 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <vpu/compile_env.hpp>
+#include <vpu/model/stage.hpp>
+#include <vpu/model/model.hpp>
+#include <vpu/frontend/frontend.hpp>
+#include <vpu/middleend/pass_manager.hpp>
+#include <vpu/backend/backend.hpp>
+#include <vpu/utils/ie_helpers.hpp>
+
+#include <gtest/gtest.h>
+
+#include <list>
+
+namespace vpu {
+
+template <typename Value>
+using InOutPortMap = std::unordered_map<int, Value>;
+
+class TestStage final : public StageNode {
+public:
+    using StageNode::StageNode;
+
+private:
+    StagePtr cloneImpl() const override;
+
+    void propagateDataOrderImpl(StageDataInfo<DimsOrder>& orderInfo) override;
+
+    void getDataStridesRequirementsImpl(StageDataInfo<StridesRequirement>& stridesInfo) override;
+
+    void finalizeDataLayoutImpl() override;
+
+    void getBatchSupportInfoImpl(StageDataInfo<BatchSupport>& batchInfo) override;
+
+    void serializeParamsImpl(BlobSerializer&) const override;
+
+    void serializeDataImpl(BlobSerializer&) const override;
+};
+
+enum class InputType {
+    Original,
+    PrevStageOutput,
+    Intermediate
+};
+
+struct InputInfo final {
+    InputType type = InputType::Original;
+    int originalInputInd = -1;
+    int prevStageInd = -1;
+    int prevStageOutputInd = -1;
+
+    static InputInfo fromNetwork(int ind = 0);
+
+    static InputInfo fromPrevStage(int ind);
+
+    InputInfo& output(int ind);
+};
+
+enum class OutputType {
+    Original,
+    Intermediate
+};
+
+struct OutputInfo final {
+    OutputType type = OutputType::Original;
+    int originalOutputInd = -1;
+    DataDesc desc = DataDesc();
+
+    static OutputInfo fromNetwork(int ind = 0);
+
+    static OutputInfo intermediate(const DataDesc& desc = DataDesc());
+};
+
+class TestModel final {
+public:
+    TestModel() = default;
+    TestModel(const Model& model);
+
+    const Model& getBaseModel() const;
+    const DataVector& getInputs() const;
+    const DataVector& getOutputs() const;
+    const StageVector& getStages() const;
+
+    void createInputs(std::vector<DataDesc> inputDescs);
+    void createOutputs(std::vector<DataDesc> outputDescs);
+
+    Stage addStage(
+            std::initializer_list<InputInfo> curInputInfos,
+            std::initializer_list<OutputInfo> curOutputInfos);
+
+    void setStageDataOrderInfo(
+            int stageInd,
+            const InOutPortMap<DimsOrder>& inputInfo,
+            const InOutPortMap<DimsOrder>& outputInfo);
+
+    void setStageStridesInfo(
+            int stageInd,
+            const InOutPortMap<StridesRequirement>& inputInfo,
+            const InOutPortMap<StridesRequirement>& outputInfo);
+
+    void setStageBatchInfo(
+            int stageInd,
+            const InOutPortMap<BatchSupport>& inputInfo);
+
+private:
+    Model _model;
+
+    DataVector _inputs;
+    DataVector _outputs;
+    StageVector _stages;
+};
+
+template <class StageRange>
+void checkStageTestInds(const StageRange& stageRange, std::initializer_list<int> expectedInds);
+
+template <class StageRange>
+void checkStageTestInds(const StageRange& stageRange, std::initializer_list<int> expectedInds, const std::function<void(const Stage&)>& extraCheck);
+
+bool checkExecutionOrder(const Model& model, const std::vector<int>& execOrder);
+
+class GraphTransformerTest : public ::testing::Test {
+public:
+    Platform platform = Platform::MYRIAD_X;
+    CompilationConfig config;
+
+    StageBuilder::Ptr stageBuilder;
+    FrontEnd::Ptr frontEnd;
+    PassManager::Ptr passManager;
+    BackEnd::Ptr backEnd;
+
+    bool compileEnvInitialized = false;
+
+    void SetUp() override;
+    void TearDown() override;
+
+    void InitCompileEnv();
+
+    Model CreateModel();
+
+    TestModel CreateTestModel();
+
+private:
+    Logger::Ptr _log;
+    std::list<ModelPtr> _models;
+};
+
+} // namespace vpu
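
A minimal sketch of how these fixtures compose in a test (descriptors, stage wiring and the expected order are illustrative; stage ids are assumed to follow creation order):

TEST_F(GraphTransformerTest, IllustrativeModelConstruction) {
    InitCompileEnv();  // required before creating models

    auto testModel = CreateTestModel();
    testModel.createInputs({DataDesc({800})});
    testModel.createOutputs({DataDesc({800})});

    // Stage 0 consumes the network input, stage 1 produces the network output.
    testModel.addStage({InputInfo::fromNetwork()}, {OutputInfo::intermediate(DataDesc({800}))});
    testModel.addStage({InputInfo::fromPrevStage(0)}, {OutputInfo::fromNetwork()});

    checkStageTestInds(testModel.getStages(), {0, 1});
    ASSERT_TRUE(checkExecutionOrder(testModel.getBaseModel(), {0, 1}));
}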
diff --git a/inference-engine/tests/unit/vpu/frontend_tests/dsr_parsing_tests.cpp b/inference-engine/tests/unit/vpu/frontend_tests/dsr_parsing_tests.cpp
new file mode 100644 (file)
index 0000000..8c9de8b
--- /dev/null
@@ -0,0 +1,213 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "graph_transformer_tests.hpp"
+
+#include <vpu/ngraph/operations/static_shape_nonzero.hpp>
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+
+#include "ngraph/ngraph.hpp"
+#include "ngraph/opsets/opset3.hpp"
+
+namespace vpu {
+
+namespace ie = InferenceEngine;
+
+class DSRParsingTests : public GraphTransformerTest {
+protected:
+    void SetUp() override {
+        ASSERT_NO_FATAL_FAILURE(GraphTransformerTest::SetUp());
+
+        ASSERT_NO_FATAL_FAILURE(InitCompileEnv());
+
+        _testModel = CreateTestModel();
+    }
+
+    ie::CNNLayerPtr createDSRLayer() {
+        return std::make_shared<ie::CNNLayer>(ie::LayerParams{"DSR", "DynamicShapeResolver", ie::Precision::I32});
+    }
+
+protected:
+    TestModel _testModel;
+    DataDesc _dataDesc = {800};
+    DataDesc _correctShapeDesc = {1};
+    DataDesc _incorrectShapeDesc = {2};
+};
+
+TEST_F(DSRParsingTests, DSRParserAssertsOnInputDSR) {
+    _testModel.createInputs({_dataDesc, _correctShapeDesc});
+    _testModel.createOutputs({_dataDesc});
+
+    const auto& dsrLayer = createDSRLayer();
+
+    ASSERT_ANY_THROW(frontEnd->parseDSR(_testModel.getBaseModel(), dsrLayer,
+                                        {_testModel.getInputs()[0], _testModel.getInputs()[1]}, _testModel.getOutputs()));
+}
+
+TEST_F(DSRParsingTests, DSRParserAssertsOnIncorrectDimensions) {
+    _testModel.createInputs({_dataDesc});
+    _testModel.createOutputs({_dataDesc});
+
+    const auto& inputStage = _testModel.addStage({InputInfo::fromNetwork(0)},
+            {OutputInfo::intermediate(_dataDesc), OutputInfo::intermediate(_incorrectShapeDesc)});
+
+    const auto& dsrLayer = createDSRLayer();
+
+    ASSERT_ANY_THROW(frontEnd->parseDSR(_testModel.getBaseModel(), dsrLayer,
+                                        {inputStage->output(0), inputStage->output(1)}, _testModel.getOutputs()));
+}
+
+TEST_F(DSRParsingTests, DSRParserAssertsOnIncorrectNumInputs) {
+    _testModel.createInputs({_dataDesc});
+    _testModel.createOutputs({_dataDesc});
+
+    const auto& inputStage = _testModel.addStage({InputInfo::fromNetwork(0)},
+                                                {OutputInfo::intermediate(_dataDesc)});
+
+    const auto& dsrLayer = createDSRLayer();
+
+    ASSERT_ANY_THROW(frontEnd->parseDSR(_testModel.getBaseModel(), dsrLayer,
+                                        {inputStage->output(0)}, _testModel.getOutputs()));
+}
+
+TEST_F(DSRParsingTests, DSRParserAssertsOnIncorrectNumOutputs) {
+    _testModel.createInputs({_dataDesc});
+    _testModel.createOutputs({_dataDesc, _dataDesc});
+
+    const auto& inputStage = _testModel.addStage({InputInfo::fromNetwork(0)},
+            {OutputInfo::intermediate(_dataDesc), OutputInfo::intermediate(_correctShapeDesc)});
+
+    const auto& dsrLayer = createDSRLayer();
+
+    ASSERT_ANY_THROW(frontEnd->parseDSR(_testModel.getBaseModel(), dsrLayer,
+                                        {inputStage->output(0), inputStage->output(1)}, _testModel.getOutputs()));
+}
+
+TEST_F(DSRParsingTests, DSRParserDoesntAssertOnCorrectIO) {
+    _testModel.createInputs({_dataDesc});
+    _testModel.createOutputs({_dataDesc});
+
+    const auto& inputStage = _testModel.addStage({InputInfo::fromNetwork(0)},
+                                                {OutputInfo::intermediate(_dataDesc), OutputInfo::intermediate(_correctShapeDesc)});
+
+    const auto& dsrLayer = createDSRLayer();
+
+    ASSERT_NO_THROW(frontEnd->parseDSR(_testModel.getBaseModel(), dsrLayer,
+                                       {inputStage->output(0), inputStage->output(1)}, _testModel.getOutputs()));
+}
+
+class DSRParsingFromNgraphTests : public DSRParsingTests {
+protected:
+    void checkShapeConnection(const Data& parent, const Data& child) {
+        ASSERT_NE(child->parentDataToShapeEdge(), nullptr);
+        ASSERT_EQ(child->childDataToShapeEdges().size(), 0);
+        const auto& parentDataToShapeEdge = child->parentDataToShapeEdge();
+        ASSERT_EQ(parentDataToShapeEdge->parent(), parent);
+        ASSERT_EQ(parentDataToShapeEdge->child(), child);
+
+        ASSERT_EQ(parent->parentDataToShapeEdge(), nullptr);
+
+        const auto& childDataToShapeEdges = parent->childDataToShapeEdges();
+        ASSERT_EQ(childDataToShapeEdges.size(), 1);
+
+        ASSERT_EQ(childDataToShapeEdges.front(), parentDataToShapeEdge);
+    }
+};
+
+TEST_F(DSRParsingFromNgraphTests, DSRParserCreatesTwoOutputsOnOutputDSR) {
+    const auto& inPrecision = ::ngraph::element::Type(::ngraph::element::Type_t::i32);
+
+    const auto& tensor = std::make_shared<ngraph::opset3::Parameter>(inPrecision, ngraph::Shape{1, 800});
+    const auto& staticShapeNonZero = std::make_shared<ngraph::vpu::op::StaticShapeNonZero>(tensor);
+    const auto& dynamicShapeResolver = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(
+            staticShapeNonZero->output(0), staticShapeNonZero->output(1));
+
+    const auto& fnPtr = std::make_shared<ngraph::Function>(ngraph::NodeVector{dynamicShapeResolver}, ngraph::ParameterVector{tensor});
+
+    InferenceEngine::CNNNetwork cnnNet(fnPtr);
+    for (const auto& outputInfo : cnnNet.getOutputsInfo()) {
+        outputInfo.second->setPrecision(ie::Precision::I32);
+    }
+
+    ModelPtr model;
+    ASSERT_NO_THROW(model = frontEnd->buildInitialModel(cnnNet));
+    int numOutputs = 0;
+    for (const auto& data : model->datas()) {
+        if (data->usage() == DataUsage::Output) {
+            numOutputs++;
+        }
+    }
+    ASSERT_EQ(numOutputs, 2);
+}
+
+TEST_F(DSRParsingFromNgraphTests, DSRWithSingleProducerCreatesConnectionBetweenDataAndShape) {
+    const auto& inPrecision = ::ngraph::element::Type(::ngraph::element::Type_t::i32);
+
+    const auto& tensor = std::make_shared<ngraph::opset3::Parameter>(inPrecision, ngraph::Shape{800});
+    const auto& staticShapeNonZero = std::make_shared<ngraph::vpu::op::StaticShapeNonZero>(tensor);
+    const auto& dynamicShapeResolver = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(
+            staticShapeNonZero->output(0), staticShapeNonZero->output(1));
+    const auto& gatherIndices = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64, ngraph::Shape{1}, std::vector<int64_t>{0});
+    const auto& gatherAxis = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64, ngraph::Shape{1}, std::vector<int64_t>{1});
+    const auto& gather = std::make_shared<ngraph::opset3::Gather>(dynamicShapeResolver->output(0), gatherIndices, gatherAxis);
+
+    const auto& fnPtr = std::make_shared<ngraph::Function>(ngraph::NodeVector{gather}, ngraph::ParameterVector{tensor});
+
+    InferenceEngine::CNNNetwork cnnNet(fnPtr);
+
+    ModelPtr model;
+    ASSERT_NO_THROW(model = frontEnd->buildInitialModel(cnnNet));
+
+    Stage nonZeroStage = nullptr;
+
+    for (const auto& stage : model->getStages()) {
+        if (stage->type() != StageType::NonZero) {
+            continue;
+        }
+        nonZeroStage = stage;
+    }
+
+    ASSERT_NE(nonZeroStage, nullptr);
+
+    checkShapeConnection(nonZeroStage->output(1), nonZeroStage->output(0));
+}
+
+TEST_F(DSRParsingFromNgraphTests, DSRWithTwoProducersCreatesConnectionBetweenDataAndShape) {
+    const auto& inPrecision = ::ngraph::element::Type(::ngraph::element::Type_t::i32);
+
+    const auto& tensor = std::make_shared<ngraph::opset3::Parameter>(inPrecision, ngraph::Shape{800});
+    const auto& staticShapeNonZero = std::make_shared<ngraph::vpu::op::StaticShapeNonZero>(tensor);
+    const auto& reluData = std::make_shared<ngraph::opset3::Relu>(staticShapeNonZero->output(0));
+    const auto& reluShape = std::make_shared<ngraph::opset3::Relu>(staticShapeNonZero->output(1));
+    const auto& dynamicShapeResolver = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(
+        reluData->output(0), reluShape->output(0));
+    const auto& gatherIndices = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64, ngraph::Shape{1}, std::vector<int64_t>{0});
+    const auto& gatherAxis = std::make_shared<ngraph::opset3::Constant>(ngraph::element::i64, ngraph::Shape{1}, std::vector<int64_t>{1});
+    const auto& gather = std::make_shared<ngraph::opset3::Gather>(dynamicShapeResolver->output(0), gatherIndices, gatherAxis);
+
+    const auto& fnPtr = std::make_shared<ngraph::Function>(ngraph::NodeVector{gather}, ngraph::ParameterVector{tensor});
+
+    InferenceEngine::CNNNetwork cnnNet(fnPtr);
+
+    ModelPtr model;
+    ASSERT_NO_THROW(model = frontEnd->buildInitialModel(cnnNet));
+
+    Stage nonZeroStage = nullptr;
+
+    for (const auto& stage : model->getStages()) {
+        if (stage->type() != StageType::NonZero) {
+            continue;
+        }
+        nonZeroStage = stage;
+    }
+
+    ASSERT_NE(nonZeroStage, nullptr);
+
+    const auto& stageReluData = nonZeroStage->output(0)->singleConsumer();
+    const auto& stageReluShape = nonZeroStage->output(1)->singleConsumer();
+
+    checkShapeConnection(stageReluShape->output(0), stageReluData->output(0));
+}
+
+} // namespace vpu
diff --git a/inference-engine/tests/unit/vpu/middleend_tests/edges_tests/data_to_shape_edge.cpp b/inference-engine/tests/unit/vpu/middleend_tests/edges_tests/data_to_shape_edge.cpp
new file mode 100644 (file)
index 0000000..19e5a38
--- /dev/null
@@ -0,0 +1,281 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "graph_transformer_tests.hpp"
+
+namespace vpu {
+
+namespace ie = InferenceEngine;
+
+class DataToShapeEdgeProcessingTests : public GraphTransformerTest {
+protected:
+    void SetUp() override {
+        ASSERT_NO_FATAL_FAILURE(GraphTransformerTest::SetUp());
+
+        ASSERT_NO_FATAL_FAILURE(InitCompileEnv());
+
+        _middleEnd = passManager->buildMiddleEnd();
+        _testModel = CreateTestModel();
+    }
+
+    void setupNetWithNonProcessingShape() {
+        //
+        //                       -> [Shape]
+        // [Input] -> (ShapeProd)      |
+        //                       -> [Data] -> (Stage) -> [Output]
+        //
+
+        const auto& dataDesc = DataDesc({800});
+        const auto& shapeDesc = DataDesc({1});
+
+        _testModel.createInputs({dataDesc});
+        _testModel.createOutputs({dataDesc});
+
+        auto shapeParent = _testModel.addStage({InputInfo::fromNetwork()}, {OutputInfo::intermediate(dataDesc),
+                                                                            OutputInfo::intermediate(shapeDesc)});
+        _testModel.addStage({InputInfo::fromPrevStage(0)}, {OutputInfo::fromNetwork()});
+
+        auto model = _testModel.getBaseModel();
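+        // connectDataWithShape(shape, data) creates a data-to-shape edge: output(1)
+        // describes the dynamic shape of output(0).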
+        model->connectDataWithShape(shapeParent->output(1), shapeParent->output(0));
+    }
+
+    void setupNetWithShapeBeingProcessedOnce() {
+        //
+        //                       -> [Shape] -> (ShapeProc) -> [Shape]
+        // [Input] -> (ShapeProd)      |                         |
+        //                       -> [Data]  -> (DataProc)  -> [Data] -> (Stage) -> [Output]
+        //
+
+        const auto& dataDesc = DataDesc({800});
+        const auto& shapeDesc = DataDesc({1});
+
+        _testModel.createInputs({dataDesc});
+        _testModel.createOutputs({dataDesc});
+
+        auto model = _testModel.getBaseModel();
+
+        auto dataAndShapeParent = _testModel.addStage({InputInfo::fromNetwork()}, {OutputInfo::intermediate(dataDesc),
+                                                                                   OutputInfo::intermediate(shapeDesc)});
+        model->connectDataWithShape(dataAndShapeParent->output(1), dataAndShapeParent->output(0));
+
+        auto dataChild = _testModel.addStage({InputInfo::fromPrevStage(0).output(0)}, {OutputInfo::intermediate(dataDesc)});
+        auto shapeChild = _testModel.addStage({InputInfo::fromPrevStage(0).output(1)}, {OutputInfo::intermediate(shapeDesc)});
+        _testModel.addStage({InputInfo::fromPrevStage(1)}, {OutputInfo::fromNetwork()});
+
+        model->connectDataWithShape(shapeChild->output(0), dataChild->output(0));
+    }
+
+    void setupNetWithShapeBeingProcessedTwice() {
+        //
+        //                       -> [Shape] -> (ShapeProc) -> [Shape] -> (ShapeProc) -> [Shape]
+        // [Input] -> (ShapeProd)      |                         |                         |
+        //                       -> [Data]  -> (DataProc)  -> [Data]  -> (DataProc)  -> [Data] -> (Stage) -> [Output]
+        //
+
+        const auto& dataDesc = DataDesc({800});
+        const auto& shapeDesc = DataDesc({1});
+
+        _testModel.createInputs({dataDesc});
+        _testModel.createOutputs({dataDesc});
+
+        auto model = _testModel.getBaseModel();
+
+        auto dataAndShapeParent = _testModel.addStage({InputInfo::fromNetwork()}, {OutputInfo::intermediate(dataDesc),
+                                                                                   OutputInfo::intermediate(shapeDesc)});
+        model->connectDataWithShape(dataAndShapeParent->output(1), dataAndShapeParent->output(0));
+
+        auto dataChild = _testModel.addStage({InputInfo::fromPrevStage(0).output(0)}, {OutputInfo::intermediate(dataDesc)});
+        auto shapeChild = _testModel.addStage({InputInfo::fromPrevStage(0).output(1)}, {OutputInfo::intermediate(shapeDesc)});
+        model->connectDataWithShape(shapeChild->output(0), dataChild->output(0));
+
+        dataChild = _testModel.addStage({InputInfo::fromPrevStage(1).output(0)}, {OutputInfo::intermediate(dataDesc)});
+        shapeChild = _testModel.addStage({InputInfo::fromPrevStage(2).output(0)}, {OutputInfo::intermediate(shapeDesc)});
+        model->connectDataWithShape(shapeChild->output(0), dataChild->output(0));
+
+        _testModel.addStage({InputInfo::fromPrevStage(3)}, {OutputInfo::fromNetwork()});
+    }
+
+protected:
+    TestModel _testModel;
+    PassSet::Ptr _middleEnd = nullptr;
+};
+
+TEST_F(DataToShapeEdgeProcessingTests, ShapeDataWithoutConsumerDoesntThrow) {
+    setupNetWithNonProcessingShape();
+
+    ASSERT_NO_THROW(_middleEnd->run(_testModel.getBaseModel()));
+}
+
+TEST_F(DataToShapeEdgeProcessingTests, DataToShapeEdgeSharesMemory) {
+    setupNetWithNonProcessingShape();
+
+    const auto& model = _testModel.getBaseModel();
+
+    ASSERT_NO_THROW(_middleEnd->run(model));
+
+    Stage shapeProducer = nullptr;
+    for (const auto& stage : model->getStages()) {
+        // Find the shape-producing stage (the only stage with two outputs)
+        if (stage->numOutputs() == 2) {
+            shapeProducer = stage;
+        }
+    }
+
+    ASSERT_NE(shapeProducer, nullptr);
+
+    const auto& data = shapeProducer->output(0);
+    const auto& shape = shapeProducer->output(1);
+
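+    // The shape tensor's data location must coincide with the data tensor's
+    // dims location, i.e. the two share the same memory after allocation.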
+    const auto& shapeDataLocation = shape->dataLocation();
+    const auto& dataShapeLocation = data->shapeLocation();
+
+    ASSERT_EQ(shapeDataLocation.location, dataShapeLocation.dimsLocation);
+    ASSERT_EQ(shapeDataLocation.offset, dataShapeLocation.dimsOffset);
+}
+
+TEST_F(DataToShapeEdgeProcessingTests, ShapeProcessingOnceDoesntThrow) {
+    setupNetWithShapeBeingProcessedOnce();
+
+    ASSERT_NO_THROW(_middleEnd->run(_testModel.getBaseModel()));
+}
+
+TEST_F(DataToShapeEdgeProcessingTests, ShapeProcessingOnceSharesMemory) {
+    setupNetWithShapeBeingProcessedOnce();
+
+    const auto& model = _testModel.getBaseModel();
+
+    ASSERT_NO_THROW(_middleEnd->run(model));
+
+    Stage shapeProducer = nullptr;
+    for (const auto& stage : model->getStages()) {
+        // Find the shape-producing stage (the only stage with two outputs)
+        if (stage->numOutputs() == 2) {
+            shapeProducer = stage;
+        }
+    }
+
+    ASSERT_NE(shapeProducer, nullptr);
+
+    const auto& data = shapeProducer->output(0);
+    const auto& shape = shapeProducer->output(1);
+
+    const auto& shapeDataLocation = shape->dataLocation();
+    const auto& dataShapeLocation = data->shapeLocation();
+
+    ASSERT_EQ(shapeDataLocation.location, dataShapeLocation.dimsLocation);
+    ASSERT_EQ(shapeDataLocation.offset, dataShapeLocation.dimsOffset);
+
+    const auto& processedData = data->singleConsumer()->output(0);
+    const auto& processedShape = shape->singleConsumer()->output(0);
+
+    const auto& processedShapeDataLocation = processedShape->dataLocation();
+    const auto& processedDataShapeLocation = processedData->shapeLocation();
+
+    ASSERT_EQ(processedShapeDataLocation.location, processedDataShapeLocation.dimsLocation);
+    ASSERT_EQ(processedShapeDataLocation.offset, processedDataShapeLocation.dimsOffset);
+}
+
+TEST_F(DataToShapeEdgeProcessingTests, ShapeProcessingOnceHasCorrectExecutionOrder) {
+    setupNetWithShapeBeingProcessedOnce();
+
+    const auto& model = _testModel.getBaseModel();
+
+    ASSERT_NO_THROW(_middleEnd->run(model));
+
+    Stage shapeProducer = nullptr;
+    for (const auto& stage : model->getStages()) {
+        // Find the shape-producing stage (the only stage with two outputs)
+        if (stage->numOutputs() == 2) {
+            shapeProducer = stage;
+        }
+    }
+
+    ASSERT_NE(shapeProducer, nullptr);
+
+    const auto dataProcessor = shapeProducer->output(0)->singleConsumer();
+    const auto shapeProcessor = shapeProducer->output(1)->singleConsumer();
+
+    ASSERT_TRUE(checkExecutionOrder(model, {shapeProcessor->id(), dataProcessor->id()}));
+}
+
+TEST_F(DataToShapeEdgeProcessingTests, ShapeProcessingTwiceDoesntThrow) {
+    setupNetWithShapeBeingProcessedTwice();
+
+    ASSERT_NO_THROW(_middleEnd->run(_testModel.getBaseModel()));
+}
+
+TEST_F(DataToShapeEdgeProcessingTests, ShapeProcessingTwiceSharesMemory) {
+    setupNetWithShapeBeingProcessedTwice();
+
+    const auto& model = _testModel.getBaseModel();
+
+    ASSERT_NO_THROW(_middleEnd->run(model));
+
+    Stage shapeProducer = nullptr;
+    for (const auto& stage : model->getStages()) {
+        // Find shape produced stage
+        if (stage->numOutputs() == 2) {
+            shapeProducer = stage;
+        }
+    }
+
+    ASSERT_NE(shapeProducer, nullptr);
+
+    const auto& data = shapeProducer->output(0);
+    const auto& shape = shapeProducer->output(1);
+
+    const auto& shapeDataLocation = shape->dataLocation();
+    const auto& dataShapeLocation = data->shapeLocation();
+
+    ASSERT_EQ(shapeDataLocation.location, dataShapeLocation.dimsLocation);
+    ASSERT_EQ(shapeDataLocation.offset, dataShapeLocation.dimsOffset);
+
+    const auto& dataProcessedOnce = data->singleConsumer()->output(0);
+    const auto& shapeProcessedOnce = shape->singleConsumer()->output(0);
+
+    auto processedShapeDataLocation = shapeProcessedOnce->dataLocation();
+    auto processedDataShapeLocation = dataProcessedOnce->shapeLocation();
+
+    ASSERT_EQ(processedShapeDataLocation.location, processedDataShapeLocation.dimsLocation);
+    ASSERT_EQ(processedShapeDataLocation.offset, processedDataShapeLocation.dimsOffset);
+
+    const auto dataProcessedTwice = dataProcessedOnce->singleConsumer()->output(0);
+    const auto shapeProcessedTwice = shapeProcessedOnce->singleConsumer()->output(0);
+
+    processedShapeDataLocation = shapeProcessedTwice->dataLocation();
+    processedDataShapeLocation = dataProcessedTwice->shapeLocation();
+
+    ASSERT_EQ(processedShapeDataLocation.location, processedDataShapeLocation.dimsLocation);
+    ASSERT_EQ(processedShapeDataLocation.offset, processedDataShapeLocation.dimsOffset);
+}
+
+TEST_F(DataToShapeEdgeProcessingTests, ShapeProcessingTwiceHasCorrectExecutionOrder) {
+    setupNetWithShapeBeingProcessedTwice();
+
+    const auto& model = _testModel.getBaseModel();
+
+    ASSERT_NO_THROW(_middleEnd->run(model));
+
+    Stage shapeProducer = nullptr;
+    for (const auto& stage : model->getStages()) {
+        // Find the shape-producing stage (the only stage with two outputs)
+        if (stage->numOutputs() == 2) {
+            shapeProducer = stage;
+        }
+    }
+
+    ASSERT_NE(shapeProducer, nullptr);
+
+    const auto dataFirstProcessor = shapeProducer->output(0)->singleConsumer();
+    const auto shapeFirstProcessor = shapeProducer->output(1)->singleConsumer();
+
+    ASSERT_TRUE(checkExecutionOrder(model, {shapeFirstProcessor->id(), dataFirstProcessor->id()}));
+
+    const auto dataSecondProcessor = dataFirstProcessor->output(0)->singleConsumer();
+    const auto shapeSecondProcessor = shapeFirstProcessor->output(0)->singleConsumer();
+
+    ASSERT_TRUE(checkExecutionOrder(model, {shapeSecondProcessor->id(), dataSecondProcessor->id()}));
+}
+
+} // namespace vpu
diff --git a/inference-engine/tests/unit/vpu/middleend_tests/edges_tests/stage_dependency_edge.cpp b/inference-engine/tests/unit/vpu/middleend_tests/edges_tests/stage_dependency_edge.cpp
new file mode 100644 (file)
index 0000000..e41b52f
--- /dev/null
@@ -0,0 +1,104 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "graph_transformer_tests.hpp"
+
+namespace vpu {
+
+namespace ie = InferenceEngine;
+
+class StageDependencyEdgeProcessingTests : public GraphTransformerTest {
+protected:
+    void SetUp() override {
+        ASSERT_NO_FATAL_FAILURE(GraphTransformerTest::SetUp());
+
+        ASSERT_NO_FATAL_FAILURE(InitCompileEnv());
+
+        _middleEnd = passManager->buildMiddleEnd();
+        _testModel = CreateTestModel();
+    }
+
+protected:
+    TestModel _testModel;
+    PassSet::Ptr _middleEnd = nullptr;
+};
+
+TEST_F(StageDependencyEdgeProcessingTests, AddStageDependencyAssertsOnOutputData) {
+    //
+    //                    -> [Data] -> (Stage) -> [Output]
+    // [Input] -> (Stage)                            |
+    //                    -> [Data] ------------> (Stage) -> [Output]
+    //
+
+    const DataDesc desc{1};
+
+    _testModel.createInputs({desc});
+    _testModel.createOutputs({desc, desc});
+
+    _testModel.addStage({InputInfo::fromNetwork()}, {OutputInfo::intermediate(desc),
+                                                     OutputInfo::intermediate(desc)});
+    auto dependentStage = _testModel.addStage({InputInfo::fromPrevStage(0).output(0)}, {OutputInfo::fromNetwork(0)});
+    auto dependencyProducer = _testModel.addStage({InputInfo::fromPrevStage(0).output(1)}, {OutputInfo::fromNetwork(1)});
+
+    auto model = _testModel.getBaseModel();
+
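+    // addStageDependency() is expected to reject a network output as the dependency.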
+    ASSERT_ANY_THROW(model->addStageDependency(dependentStage, dependencyProducer->output(0)));
+}
+
+TEST_F(StageDependencyEdgeProcessingTests, NetWithTwoStagesHasCorrectExecOrder) {
+    //
+    //                    -> [Data] -> (Stage) -> [Data] -> (Stage) -> [Output]
+    // [Input] -> (Stage)                            |
+    //                    -> [Data] ------------> (Stage) -> [Output]
+    //
+
+    const DataDesc desc{1};
+
+    _testModel.createInputs({desc});
+    _testModel.createOutputs({desc, desc});
+
+    _testModel.addStage({InputInfo::fromNetwork()}, {OutputInfo::intermediate(desc),
+                                                     OutputInfo::intermediate(desc)});
+    auto dependentStage = _testModel.addStage({InputInfo::fromPrevStage(0).output(0)}, {OutputInfo::fromNetwork(0)});
+    auto dependencyProducer = _testModel.addStage({InputInfo::fromPrevStage(0).output(1)}, {OutputInfo::intermediate(desc)});
+    _testModel.addStage({InputInfo::fromPrevStage(2)}, {OutputInfo::fromNetwork(1)});
+
+    auto model = _testModel.getBaseModel();
+
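+    // Initially the dependent stage is scheduled before the dependency producer;
+    // addStageDependency() must reverse that order.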
+    ASSERT_TRUE(checkExecutionOrder(model, {dependentStage->id(), dependencyProducer->id()}));
+
+    ASSERT_NO_THROW(model->addStageDependency(dependentStage, dependencyProducer->output(0)));
+
+    ASSERT_TRUE(checkExecutionOrder(model, {dependencyProducer->id(), dependentStage->id()}));
+}
+
+TEST_F(StageDependencyEdgeProcessingTests, NetWithThreeStagesHasCorrectExecOrder) {
+    //
+    //                    -> [Data] -> (Stage) -> [Data] -> (Stage) -> [Data] -> (Stage) -> [Output]
+    // [Input] -> (Stage)                                                 |
+    //                    -> [Data] ---------------------------------> (Stage) -> [Output]
+    //
+
+    const DataDesc desc{1};
+
+    _testModel.createInputs({desc});
+    _testModel.createOutputs({desc, desc});
+
+    _testModel.addStage({InputInfo::fromNetwork()}, {OutputInfo::intermediate(desc),
+                                                     OutputInfo::intermediate(desc)});
+    auto dependentStage = _testModel.addStage({InputInfo::fromPrevStage(0).output(0)}, {OutputInfo::fromNetwork(0)});
+    _testModel.addStage({InputInfo::fromPrevStage(0).output(1)}, {OutputInfo::intermediate(desc)});
+    auto dependencyProducer = _testModel.addStage({InputInfo::fromPrevStage(2)}, {OutputInfo::intermediate(desc)});
+    _testModel.addStage({InputInfo::fromPrevStage(3)}, {OutputInfo::fromNetwork(1)});
+
+    auto model = _testModel.getBaseModel();
+
+    ASSERT_TRUE(checkExecutionOrder(model, {dependentStage->id(), dependencyProducer->id()}));
+
+    ASSERT_NO_THROW(model->addStageDependency(dependentStage, dependencyProducer->output(0)));
+
+    ASSERT_TRUE(checkExecutionOrder(model, {dependencyProducer->id(), dependentStage->id()}));
+}
+
+} // namespace vpu
\ No newline at end of file
index 397297c..5b3563e 100644 (file)
@@ -12,6 +12,18 @@ add_subdirectory(mock_engine)
 
 add_subdirectory(helpers)
 
+if (ENABLE_GAPI_TESTS)
+    add_subdirectory(fluid_preproc)
+endif()
+
+if (ENABLE_FUNCTIONAL_TESTS)
+    add_subdirectory(functional)
+endif()
+
+if (ENABLE_BEH_TESTS)
+    add_subdirectory(behavior)
+endif()
+
 disable_deprecated_warnings()
 
 if(ENABLE_TESTS)
diff --git a/inference-engine/tests_deprecated/behavior/CMakeLists.txt b/inference-engine/tests_deprecated/behavior/CMakeLists.txt
new file mode 100644 (file)
index 0000000..581e056
--- /dev/null
@@ -0,0 +1,23 @@
+# Copyright (C) 2018-2020 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+add_subdirectory(shared_tests)
+
+disable_deprecated_warnings()
+
+if (ENABLE_MKL_DNN)
+    add_subdirectory(mkldnn)
+endif()
+
+if (ENABLE_CLDNN)
+    add_subdirectory(cldnn)
+endif()
+
+if (ENABLE_GNA)
+    add_subdirectory(gna)
+endif()
+
+if (ENABLE_HDDL OR ENABLE_MYRIAD)
+    add_subdirectory(vpu)
+endif()
diff --git a/inference-engine/tests_deprecated/behavior/cldnn/CMakeLists.txt b/inference-engine/tests_deprecated/behavior/cldnn/CMakeLists.txt
new file mode 100644 (file)
index 0000000..fb98a55
--- /dev/null
@@ -0,0 +1,33 @@
+# Copyright (C) 2018-2020 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+set(TARGET_NAME ClDnnBehaviorTests)
+
+file(GLOB_RECURSE TEST_INCLUDE
+        ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)
+
+file(GLOB_RECURSE TEST_SRC
+        ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp
+        )
+
+list(APPEND DEPENDENCIES
+        clDNNPlugin)
+
+source_group("src" FILES ${TEST_SRC})
+source_group("include" FILES ${TEST_INCLUDE})
+
+add_executable(${TARGET_NAME}
+        ${TEST_SRC}
+        ${TEST_INCLUDE})
+
+target_compile_definitions(${TARGET_NAME} PRIVATE
+        INSTANTIATE_TESTS=1)
+
+target_link_libraries(${TARGET_NAME} PRIVATE IEBehaviorSharedTests)
+
+add_test(NAME ${TARGET_NAME}
+        COMMAND ${TARGET_NAME})
+
+add_dependencies(${TARGET_NAME} ${DEPENDENCIES})
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/cpp_wrappers/holders_tests.cpp b/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/cpp_wrappers/holders_tests.cpp
new file mode 100644 (file)
index 0000000..8a8876d
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "holders_tests.hpp"
+
+INSTANTIATE_TEST_CASE_P(ReleaseOrderTests, CPP_HoldersTests, testing::Combine(testing::ValuesIn(std::vector<std::vector<int>> {
+    // 0 - plugin
+    // 1 - executable_network
+    // 2 - infer_request
+    {0,1,2},
+    {0,2,1},
+    {1,0,2},
+    {1,2,0},
+    {2,0,1},
+    {2,1,0},
+}), testing::Values(std::string("GPU"))));
diff --git a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin.cpp b/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin.cpp
new file mode 100644 (file)
index 0000000..d72defc
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin.h"
+#include "behavior_test_plugins.hpp"
+#include "cldnn_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTest, ValuesIn(supportedValues),
+                        getTestCaseName);
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInput, ValuesIn(allInputSupportedValues),
+                        getTestCaseName);
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestOutput, ValuesIn(allOutputSupportedValues),
+                        getOutputTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_config.cpp b/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_config.cpp
new file mode 100644 (file)
index 0000000..01cde8f
--- /dev/null
@@ -0,0 +1,18 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_config.hpp"
+#include "cldnn_test_data.hpp"
+
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginIncorrectConfigTest, ValuesIn(withIncorrectConfValues),
+                        getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginIncorrectConfigTestInferRequestAPI,
+                        ValuesIn(supportedValues),
+                        getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginCorrectConfigTestInferRequestAPI,
+                        ValuesIn(supportedValues),
+                        getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_exec_graph_info.cpp b/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_exec_graph_info.cpp
new file mode 100644 (file)
index 0000000..f41607d
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_exec_graph_info.hpp"
+#include "cldnn_test_data.hpp"
+
+// Disabled due to a bug on CentOS that leads to a segmentation fault of the
+// application on exit when perf counters are enabled
+//INSTANTIATE_TEST_CASE_P(
+//        BehaviorTest,
+//        BehaviorPluginTestExecGraphInfo,
+//        ValuesIn(supportedValues),
+//        getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp b/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp
new file mode 100644 (file)
index 0000000..220d66d
--- /dev/null
@@ -0,0 +1,8 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_infer_request.hpp"
+#include "cldnn_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequest, ValuesIn(requestsSupportedValues), getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_callback.cpp b/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_callback.cpp
new file mode 100644 (file)
index 0000000..919fccd
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_infer_request_callback.hpp"
+#include "cldnn_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestCallback, ValuesIn(requestsSupportedValues),
+                        getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp b/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp
new file mode 100644 (file)
index 0000000..9e5800d
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_infer_request_config.hpp"
+#include "cldnn_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestConfigExclusiveAsync, ValuesIn(supportedValues),
+                        getConfigTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp b/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp
new file mode 100644 (file)
index 0000000..0197166
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_infer_request_input.hpp"
+#include "cldnn_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestInput, ValuesIn(allInputSupportedValues),
+                        getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp b/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp
new file mode 100644 (file)
index 0000000..9719941
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_infer_request_output.hpp"
+#include "cldnn_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestOutput, ValuesIn(allOutputSupportedValues),
+                        getOutputTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_layers.cpp b/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_layers.cpp
new file mode 100644 (file)
index 0000000..d35e5b8
--- /dev/null
@@ -0,0 +1,16 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_layers.hpp"
+
+memory_test_params memory_test_cases[] = {
+        memory_test_params("GPU", "FP32", memory_case),
+};
+
+// FIXME
+//#if (defined INSTANTIATE_TESTS)
+//INSTANTIATE_TEST_CASE_P(BehaviorTest, MemoryLayerTest,
+//    ::testing::ValuesIn(memory_test_cases),
+//    getTestName<memory_test_params>);
+//#endif
diff --git a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_layout.cpp b/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_layout.cpp
new file mode 100644 (file)
index 0000000..9336739
--- /dev/null
@@ -0,0 +1,21 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_layout.hpp"
+
+layout_test_params power_test_cases[] = {
+    layout_test_params("GPU", "FP32", Layout::NC, power_params({ { 1, 3 } }, 1, 2, 2)),
+    layout_test_params("GPU", "FP32", Layout::NCHW, power_params({ { 1, 3, 16, 16 } }, 1, 2, 2)),
+};
+
+layout_test_params conv_test_cases[] = {
+    layout_test_params("GPU", "FP32", Layout::NC, power_params({ { 1, 3 } }, 1, 2, 2)),
+    layout_test_params("GPU", "FP32", Layout::NCHW, power_params({ { 1, 3, 16, 16 } }, 1, 2, 2)),
+};
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, LayoutTestCanLoadPower,
+    ::testing::ValuesIn(power_test_cases), getTestName);
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, LayoutTestCanLoadConv,
+    ::testing::ValuesIn(conv_test_cases), getTestName);
diff --git a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_perf_counters.cpp b/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_perf_counters.cpp
new file mode 100644 (file)
index 0000000..dc309f4
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_perf_counters.hpp"
+#include "cldnn_test_data.hpp"
+
+// Disabled due to a bug on CentOS that leads to a segmentation fault of the
+// application on exit when perf counters are enabled
+//INSTANTIATE_TEST_CASE_P(
+//        BehaviorTest,
+//        BehaviorPluginTestPerfCounters,
+//        ValuesIn(supportedValues),
+//        getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_set_preprocess.cpp b/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_set_preprocess.cpp
new file mode 100644 (file)
index 0000000..75009b9
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_set_preprocess.hpp"
+#include "cldnn_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest,
+                        BehaviorPluginTestPreProcess,
+                        ValuesIn(supportedValues),
+                        getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_unsupported.cpp b/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_unsupported.cpp
new file mode 100644 (file)
index 0000000..3aa7335
--- /dev/null
@@ -0,0 +1,12 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_unsupported.hpp"
+#include "cldnn_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestAllUnsupported, ValuesIn(allUnSupportedValues),
+    getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestTypeUnsupported, ValuesIn(typeUnSupportedValues),
+    getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_version.cpp b/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_version.cpp
new file mode 100644 (file)
index 0000000..1915219
--- /dev/null
@@ -0,0 +1,8 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_version.hpp"
+#include "cldnn_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestVersion, ValuesIn(add_element_into_array(supportedValues, BEH_HETERO)), getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/cldnn_test_data.hpp b/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/plugin_tests/cldnn_test_data.hpp
new file mode 100644 (file)
index 0000000..1b1ec48
--- /dev/null
@@ -0,0 +1,86 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin.h"
+#include <cldnn/cldnn_config.hpp>
+
+// correct params
+#define BEH_CLDNN BehTestParams("GPU", \
+                                FuncTestUtils::TestModel::convReluNormPoolFcModelFP32.model_xml_str, \
+                                FuncTestUtils::TestModel::convReluNormPoolFcModelFP32.weights_blob, \
+                                Precision::FP32)
+#define BEH_HETERO BehTestParams("HETERO", \
+                                 FuncTestUtils::TestModel::convReluNormPoolFcModelFP32.model_xml_str, \
+                                 FuncTestUtils::TestModel::convReluNormPoolFcModelFP32.weights_blob, \
+                                 Precision::FP32)
+
+// all parameters are unsupported (the reverse of the correct params above)
+#define BEH_US_ALL_CLDNN   BehTestParams("GPU", \
+                                         FuncTestUtils::TestModel::convReluNormPoolFcModelQ78.model_xml_str, \
+                                         FuncTestUtils::TestModel::convReluNormPoolFcModelQ78.weights_blob, \
+                                         Precision::Q78)
+
+const BehTestParams supportedValues[] = {
+        BEH_CLDNN,
+};
+
+const BehTestParams requestsSupportedValues[] = {
+        BEH_CLDNN,
+};
+
+const BehTestParams allInputSupportedValues[] = {
+        BEH_CLDNN, BEH_CLDNN.withIn(Precision::FP16), BEH_CLDNN.withIn(Precision::U8), BEH_CLDNN.withIn(Precision::I16),
+        BEH_CLDNN.withIn(Precision::I32),
+        BEH_CLDNN.withIn(Precision::U8).withConfig({{KEY_GPU_THROUGHPUT_STREAMS, GPU_THROUGHPUT_AUTO}}),
+        BEH_CLDNN.withIn(Precision::FP16).withConfig({{KEY_GPU_THROUGHPUT_STREAMS, GPU_THROUGHPUT_AUTO}}),
+        BEH_CLDNN.withIn(Precision::I16).withConfig({{KEY_GPU_THROUGHPUT_STREAMS, GPU_THROUGHPUT_AUTO}}),
+        BEH_CLDNN.withIn(Precision::I32).withConfig({{KEY_GPU_THROUGHPUT_STREAMS, GPU_THROUGHPUT_AUTO}}),
+};
+
+const BehTestParams allOutputSupportedValues[] = {
+        BEH_CLDNN, BEH_CLDNN.withOut(Precision::FP16),
+        BEH_CLDNN.withIn(Precision::FP16).withConfig({{KEY_GPU_THROUGHPUT_STREAMS, GPU_THROUGHPUT_AUTO}}),
+};
+
+const BehTestParams typeUnSupportedValues[] = {
+        BEH_CLDNN.withIn(Precision::Q78), BEH_CLDNN.withIn(Precision::I8),
+};
+
+const BehTestParams allUnSupportedValues[] = {
+        BEH_US_ALL_CLDNN,
+};
+
+const std::vector<BehTestParams> withCorrectConfValues = {
+        BEH_CLDNN.withConfig({{KEY_GPU_THROUGHPUT_STREAMS, GPU_THROUGHPUT_AUTO}}),
+        BEH_CLDNN.withConfig({{KEY_GPU_THROUGHPUT_STREAMS, "2"}}),
+        BEH_CLDNN.withConfig({{KEY_PERF_COUNT, NO}}),
+        /*BEH_CLDNN.withConfig({ { KEY_PERF_COUNT, YES } }),*/
+        BEH_CLDNN.withConfig({{KEY_DUMP_KERNELS, NO}}),
+        BEH_CLDNN.withConfig({{KEY_DUMP_KERNELS, YES}}),
+        BEH_CLDNN.withConfig({{KEY_TUNING_MODE, TUNING_DISABLED}}),
+        // Too long inference of AlexNet (980 secs)
+        BEH_CLDNN.withConfig({{KEY_TUNING_MODE, TUNING_CREATE},
+                              {KEY_TUNING_FILE, "tfile"}}),
+        BEH_CLDNN.withConfig({{KEY_DEVICE_ID, "0"}}),
+};
+
+const BehTestParams withIncorrectConfValues[] = {
+        BEH_CLDNN.withConfig({{KEY_GPU_THROUGHPUT_STREAMS, "OFF"}}),
+        BEH_CLDNN.withConfig({{KEY_PERF_COUNT, "ON"}}),
+        BEH_CLDNN.withConfig({{KEY_CONFIG_FILE, "unknown_file"}}),
+        BEH_CLDNN.withConfig({{KEY_DUMP_KERNELS, "ON"}}),
+        BEH_CLDNN.withConfig({{KEY_TUNING_MODE, "TUNING_UNKNOWN_MODE"}}),
+        BEH_CLDNN.withConfig({{KEY_DEVICE_ID, "DEVICE_UNKNOWN"}}),
+        // FIXME: [IE clDNN] The plugin doesn't throw GENERAL_ERROR if a non-existent tuning file is used. CVS-8593
+        //BEH_CLDNN.withConfig({ { KEY_TUNING_MODE, TUNING_USE_EXISTING },
+        //                       { KEY_TUNING_FILE, "unknown_file" } }),
+};
+
+const std::vector<BehTestParams> withCorrectConfValuesNetworkOnly = {
+        BEH_CLDNN.withConfig({{}}),
+};
+
+const BehTestParams withIncorrectConfKeys[] = {
+        BEH_CLDNN.withIncorrectConfigItem(),
+};
diff --git a/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests_deprecated/behavior/cldnn/shared_tests_instances/skip_tests_config.cpp
new file mode 100644 (file)
index 0000000..8db78c1
--- /dev/null
@@ -0,0 +1,13 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include <string>
+
+#include "functional_test_utils/skip_tests_config.hpp"
+
+std::vector<std::string> disabledTestPatterns() {
+    return {
+    };
+}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/behavior/gna/CMakeLists.txt b/inference-engine/tests_deprecated/behavior/gna/CMakeLists.txt
new file mode 100644 (file)
index 0000000..38f5d2f
--- /dev/null
@@ -0,0 +1,33 @@
+# Copyright (C) 2018-2020 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+set(TARGET_NAME GnaBehaviorTests)
+
+file(GLOB_RECURSE TEST_INCLUDE
+        ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)
+
+file(GLOB_RECURSE TEST_SRC
+        ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp
+        )
+
+list(APPEND DEPENDENCIES
+        GNAPlugin)
+
+source_group("src" FILES ${TEST_SRC})
+source_group("include" FILES ${TEST_INCLUDE})
+
+add_executable(${TARGET_NAME}
+        ${TEST_SRC}
+        ${TEST_INCLUDE})
+
+target_compile_definitions(${TARGET_NAME} PRIVATE
+        INSTANTIATE_TESTS=1)
+
+target_link_libraries(${TARGET_NAME} PRIVATE IEBehaviorSharedTests)
+
+add_test(NAME ${TARGET_NAME}
+        COMMAND ${TARGET_NAME})
+
+add_dependencies(${TARGET_NAME} ${DEPENDENCIES})
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/cpp_wrappers/holders_tests.cpp b/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/cpp_wrappers/holders_tests.cpp
new file mode 100644 (file)
index 0000000..7b7ed39
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "holders_tests.hpp"
+
+INSTANTIATE_TEST_CASE_P(ReleaseOrderTests, CPP_HoldersTests, testing::Combine(testing::ValuesIn(std::vector<std::vector<int>> {
+    // 0 - plugin
+    // 1 - executable_network
+    // 2 - infer_request
+    {0,1,2},
+    {0,2,1},
+    {1,0,2},
+    {1,2,0},
+    {2,0,1},
+    {2,1,0},
+}), testing::Values(std::string("GNA"))));
diff --git a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin.cpp b/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin.cpp
new file mode 100644 (file)
index 0000000..ce4219d
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin.h"
+#include "behavior_test_plugins.hpp"
+#include "gna_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTest, ValuesIn(supportedValues),
+                        getTestCaseName);
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInput, ValuesIn(allInputSupportedValues),
+                        getTestCaseName);
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestOutput, ValuesIn(allOutputSupportedValues),
+                        getOutputTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_config.cpp b/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_config.cpp
new file mode 100644 (file)
index 0000000..fe72b8e
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_config.hpp"
+#include "gna_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginIncorrectConfigTest, ValuesIn(withIncorrectConfValues),
+                        getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginIncorrectConfigTestInferRequestAPI,
+                        ValuesIn(withIncorrectConfKeys),
+                        getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginCorrectConfigTestInferRequestAPI,
+                        ValuesIn(supportedValues),
+                        getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_exec_graph_info.cpp b/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_exec_graph_info.cpp
new file mode 100644 (file)
index 0000000..9341140
--- /dev/null
@@ -0,0 +1,12 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_exec_graph_info.hpp"
+#include "gna_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        BehaviorTest,
+        BehaviorPluginTestExecGraphInfo,
+        ValuesIn(supportedValues),
+        getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp b/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp
new file mode 100644 (file)
index 0000000..3a2a25c
--- /dev/null
@@ -0,0 +1,8 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_infer_request.hpp"
+#include "gna_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequest, ValuesIn(requestsSupportedValues), getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_callback.cpp b/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_callback.cpp
new file mode 100644 (file)
index 0000000..906805e
--- /dev/null
@@ -0,0 +1,10 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_infer_request_callback.hpp"
+#include "gna_test_data.hpp"
+
+// TODO: support InferRequestCallback in GNAPlugin
+//INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestCallback, ValuesIn(requestsSupportedValues),
+//                        getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp b/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp
new file mode 100644 (file)
index 0000000..6eb64e9
--- /dev/null
@@ -0,0 +1,43 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_infer_request_config.hpp"
+#include "gna_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestConfig,
+                        ValuesIn(withCorrectConfValues),
+                        getConfigTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestConfigExclusiveAsync, ValuesIn(supportedValues),
+                        getConfigTestCaseName);
+
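+// The IE_GNA_HW environment variable acts as a switch for these tests:
+// a non-zero value indicates that real GNA hardware is available.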
+bool CheckGnaHw() {
+    if (auto envVar = std::getenv("IE_GNA_HW")) {
+        return std::stoi(envVar) != 0;
+    }
+    return false;
+}
+
+class BehaviorPluginTestInferRequestWithGnaHw : public BehaviorPluginTestInferRequest {
+};
+
+TEST_P(BehaviorPluginTestInferRequestWithGnaHw, CanInferOrFailWithGnaHw) {
+    TestEnv::Ptr testEnv;
+    std::map<std::string, std::string> config = GetParam().config;
+
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv, config));
+    sts = testEnv->inferRequest->Infer(&response);
+
+    if (CheckGnaHw()) {
+        ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    } else {
+        ASSERT_EQ(StatusCode::GENERAL_ERROR, sts);
+        ASSERT_TRUE(strContains(response.msg, "Bad GNA status") ||         // GNA1 message
+                    strContains(response.msg, "Unsuccessful Gna2Status")); // GNA2 message
+    }
+}
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestWithGnaHw,
+                        ValuesIn(withGnaHwConfValue),
+                        getConfigTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp b/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp
new file mode 100644 (file)
index 0000000..235c647
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_infer_request_input.hpp"
+#include "gna_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestInput, ValuesIn(allInputSupportedValues),
+                        getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp b/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp
new file mode 100644 (file)
index 0000000..bb12332
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_infer_request_output.hpp"
+#include "gna_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestOutput, ValuesIn(allOutputSupportedValues),
+                        getOutputTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_layers.cpp b/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_layers.cpp
new file mode 100644 (file)
index 0000000..bbda316
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_layers.hpp"
+#include "gna_test_data.hpp"
+
+
+conv_test_params deconv_test_cases[] = {
+        conv_test_params(CommonTestUtils::DEVICE_GNA, conv_case)
+};
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, DeconvolutionLayerTest,
+                        ::testing::ValuesIn(deconv_test_cases),
+                        getTestName<conv_test_params>);
+
+pool_test_params roi_pool_test_cases[] = {
+        pool_test_params(CommonTestUtils::DEVICE_GNA, "FP32", pool_case),
+};
+
+// TODO: fix this
+//INSTANTIATE_TEST_CASE_P(BehaviorTest, PoolingLayerTest,
+//                        ::testing::Values(pool_test_params("GNAPlugin", "FP32", pool_case)),
+//                        getTestName<pool_test_params>);
+//
+//INSTANTIATE_TEST_CASE_P(BehaviorTest, ReLULayerTest,
+//                        ::testing::Values(activ_test_params("GNAPlugin", "FP32", activation_case)),
+//                        getTestName<activ_test_params>);
+
+// FIXME
+//#if (defined INSTANTIATE_TESTS)
+//INSTANTIATE_TEST_CASE_P(BehaviorTest, MemoryLayerTest,
+//    ::testing::ValuesIn(memory_test_cases),
+//    getTestName<memory_test_params>);
+//#endif
diff --git a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_layout.cpp b/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_layout.cpp
new file mode 100644 (file)
index 0000000..385fc84
--- /dev/null
@@ -0,0 +1,15 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_layout.hpp"
+
+layout_test_params activ_test_cases[] = {
+//    layout_test_params(CommonTestUtils::DEVICE_GNA, "FP32", Layout::C, power_params({ { 3 } }, 1, 2, 2)),
+    layout_test_params(CommonTestUtils::DEVICE_GNA, "FP32", Layout::NC, power_params({ { 1, 3 } }, 1, 2, 2)),
+    layout_test_params(CommonTestUtils::DEVICE_GNA, "FP32", Layout::CHW, power_params({ { 3, 32, 16 } }, 1, 2, 2)),
+    layout_test_params(CommonTestUtils::DEVICE_GNA, "FP32", Layout::NCHW, power_params({ { 1, 3, 16, 16 } }, 2, 2, 2)),
+};
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, LayoutTestCanLoadActiv,
+    ::testing::ValuesIn(activ_test_cases), getTestName);
diff --git a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_unsupported.cpp b/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_unsupported.cpp
new file mode 100644 (file)
index 0000000..6220d69
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_unsupported.hpp"
+#include "gna_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestAllUnsupported, ValuesIn(allUnSupportedValues),
+    getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestTypeUnsupported, ValuesIn(typeUnSupportedValues),
+    getTestCaseName);
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestBatchUnsupported, ValuesIn(batchUnSupportedValues),
+    getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_version.cpp b/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/behavior_test_plugin_version.cpp
new file mode 100644 (file)
index 0000000..7b3c76d
--- /dev/null
@@ -0,0 +1,8 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_version.hpp"
+#include "gna_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestVersion, ValuesIn(add_element_into_array(supportedValues, BEH_HETERO)), getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/gna_test_data.cpp b/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/gna_test_data.cpp
new file mode 100644 (file)
index 0000000..341247b
--- /dev/null
@@ -0,0 +1,305 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gna_test_data.hpp"
+#include "single_layer_common.hpp"
+#include "common_test_utils/common_layers_params.hpp"
+#include "functional_test_utils/test_model/test_model.hpp"
+
+using TestModel = FuncTestUtils::TestModel::TestModel;
+
+std::string getRawCnnModel() {
+    return (R"V0G0N(
+<net name="_NAME_" version="_VER_" batch="1">
+       <layers>
+               <layer name="input_1" type="input" id="1" precision="_PRC_">
+                       <output>
+                               <port id="1">
+                                       <!--connected to , Reshape_2-->
+                                       <dim>1</dim>
+                                       <dim>1056</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer name="Reshape_2" type="Reshape" id="2" precision="_PRC_">
+                       <input>
+                               <port id="2">
+                                       <!--connected to input_1-->
+                                       <dim>1</dim>
+                                       <dim>1056</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="3">
+                                       <!--connected to , Convolution_3-->
+                                       <dim>1</dim>
+                                       <dim>33</dim>
+                                       <dim>1</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer name="Convolution_3" type="Convolution" id="3" precision="_PRC_">
+                       <convolution_data kernel-x="9" kernel-y="1" output="128" pad-x="0" pad-y="0" stride-x="1" stride-y="1" />
+                       <input>
+                               <port id="4">
+                                       <!--connected to Reshape_2-->
+                                       <dim>1</dim>
+                                       <dim>33</dim>
+                                       <dim>1</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="5">
+                                       <!--connected to , Pooling_5-->
+                                       <dim>1</dim>
+                                       <dim>128</dim>
+                                       <dim>1</dim>
+                                       <dim>24</dim>
+                               </port>
+                       </output>
+                       <weights offset="0" size="_CONV_WS_" precision="_PRC_" />
+                       <biases offset="_CONV_WS_" size="_CONV_BS_" precision="_PRC_" />
+               </layer>
+               <layer name="Pooling_5" type="Pooling" id="5" precision="_PRC_">
+                       <data kernel-x="3" kernel-y="1" pad-x="0" pad-y="0" pool-method="max" stride-x="3" stride-y="1" />
+                       <input>
+                               <port id="8">
+                                       <!--connected to Convolution_3-->
+                                       <dim>1</dim>
+                                       <dim>128</dim>
+                                       <dim>1</dim>
+                                       <dim>24</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="9">
+                                       <!--connected to , Reshape_6-->
+                                       <dim>1</dim>
+                                       <dim>128</dim>
+                                       <dim>1</dim>
+                                       <dim>8</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer name="Reshape_6" type="Reshape" id="6" precision="_PRC_">
+                       <input>
+                               <port id="10">
+                                       <!--connected to Pooling_5-->
+                                       <dim>1</dim>
+                                       <dim>128</dim>
+                                       <dim>1</dim>
+                                       <dim>8</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="11">
+                                       <!--connected to , ScaleShift_7-->
+                                       <dim>1</dim>
+                                       <dim>1024</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer name="ScaleShift_7" type="ScaleShift" id="7" precision="_PRC_">
+                       <input>
+                               <port id="13">
+                                       <!--connected to Reshape_6-->
+                                       <dim>1</dim>
+                                       <dim>1024</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="12">
+                                       <!--connected to , Activation_8-->
+                                       <dim>1</dim>
+                                       <dim>1024</dim>
+                               </port>
+                       </output>
+                       <weights offset="_SS_W_OFFS_" size="_SS_WS_" precision="_PRC_" />
+                       <biases offset="_SS_B_OFFS_" size="_SS_BS_" precision="_PRC_" />
+               </layer>
+               <layer name="Activation_8" type="Activation" id="8" precision="_PRC_">
+                       <data type="sigmoid" />
+                       <input>
+                               <port id="14">
+                                       <!--connected to ScaleShift_7-->
+                                       <dim>1</dim>
+                                       <dim>1024</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="15">
+                                       <dim>1</dim>
+                                       <dim>1024</dim>
+                               </port>
+                       </output>
+               </layer>
+    </layers>
+    <edges>
+               <edge from-layer="1" from-port="1" to-layer="2" to-port="2" />
+               <edge from-layer="2" from-port="3" to-layer="3" to-port="4" />
+               <edge from-layer="5" from-port="9" to-layer="6" to-port="10" />
+               <edge from-layer="6" from-port="11" to-layer="7" to-port="13" />
+               <edge from-layer="7" from-port="12" to-layer="8" to-port="14" />
+    </edges>
+</net>
+)V0G0N");
+}
+
+TestModel getGnaCnnModel(InferenceEngine::Precision netPrc) {
+    std::string model_str = getRawCnnModel();
+    /* Default values for FP32 are used */
+    size_t convWeigthsLen = 38016;  // kernel_x * kernel_y * in_channels * out_channels
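+    // (sanity check: 9 * 1 * 33 * 128 = 38016, matching the Convolution_3 layer above)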
+    size_t convWeigthsSize = convWeigthsLen * 4;  // 4 (bytes in FP32)
+    size_t convBiasesLen = 128;  // out_channels
+    size_t convBiasesSize = convBiasesLen * 4;  // 4 (bytes in FP32)
+    size_t scaleShiftWeigthsLen = 1024;  // out_channels
+    size_t scaleShiftWeigthsSize = scaleShiftWeigthsLen * 4;  // 4 (bytes in FP32)
+    size_t scaleShiftBiasesLen = 1024;  // out_channels
+    size_t scaleShiftBiasesSize = scaleShiftBiasesLen * 4;  // 4 (bytes in FP32)
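+    // Weights blob layout (as implied by the offsets below):
+    // [conv weights | conv biases | scaleshift weights | scaleshift biases]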
+    switch (netPrc) {
+        case InferenceEngine::Precision::FP32:
+            break;
+        default:
+            std::string err = "GnaCnnModel cannot be constructed with precision ";
+            err += netPrc.name();
+            throw std::runtime_error(err);
+    }
+    std::string irName = std::string("GnaCnnModel") + netPrc.name();
+    REPLACE_WITH_STR(model_str, "_NAME_", irName);
+    REPLACE_WITH_NUM(model_str, "_VER_", 2);
+    REPLACE_WITH_STR(model_str, "_PRC_", netPrc.name());
+    REPLACE_WITH_NUM(model_str, "_CONV_WS_", convWeigthsSize);
+    REPLACE_WITH_NUM(model_str, "_CONV_BS_", convBiasesSize);
+    REPLACE_WITH_NUM(model_str, "_SS_W_OFFS_", convWeigthsSize + convBiasesSize);
+    REPLACE_WITH_NUM(model_str, "_SS_WS_", scaleShiftWeigthsSize);
+    REPLACE_WITH_NUM(model_str, "_SS_B_OFFS_", convWeigthsSize + convBiasesSize + scaleShiftWeigthsSize);
+    REPLACE_WITH_NUM(model_str, "_SS_BS_", scaleShiftBiasesSize);
+    return TestModel(model_str, CommonTestUtils::getWeightsBlob(
+            convWeigthsSize + convBiasesSize + scaleShiftWeigthsSize + scaleShiftBiasesSize));
+}
+
+std::string getRawMemoryModel() {
+    return (R"V0G0N(
+<net Name="activationAfterSplit" version="_VER_" precision="_PRC_" batch="1">
+    <layers>
+        <layer name="input_1" type="input" id="0" precision="_PRC_">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>10</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="Memory_27" type="Memory" id="27" precision="_PRC_">
+            <data id="r_27-28" index="0" size="2" />
+            <input>
+                <port id="60">
+                    <!--connected to Activation_38-->
+                    <dim>1</dim>
+                    <dim>10</dim>
+                </port>
+            </input>
+        </layer>
+        <layer name="Memory_28" type="Memory" id="28" precision="_PRC_">
+            <data id="r_27-28" index="1" size="2" />
+            <output>
+                <port id="59">
+                    <!--connected to Eltwise_8-->
+                    <dim>1</dim>
+                    <dim>10</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="FullyConnected" id="2" type="InnerProduct" precision="_PRC_">
+            <fc out-size="10" />
+            <input>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>10</dim>
+                </port>
+            </input>
+            <output>
+                <port id="1">
+                    <dim>1</dim>
+                    <dim>10</dim>
+                </port>
+            </output>
+            <biases offset="0" size="_FC_BS_" />
+            <weights offset="_FC_BS_" size="_FC_WS_" />
+        </layer>
+        <layer name="Activation_38" type="Activation" id="38" precision="_PRC_">
+            <data type="tanh" />
+            <input>
+                <port id="82">
+                    <!--connected to FullyConnected-->
+                    <dim>1</dim>
+                    <dim>10</dim>
+                </port>
+            </input>
+            <output>
+                <port id="83">
+                    <!--connected to Eltwise_8, Memory_27-->
+                    <dim>1</dim>
+                    <dim>10</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="Eltwise_8" type="Eltwise" id="11" precision="_PRC_">
+            <data operation="sum" />
+            <input>
+                <port id="0">
+                    <!--connected to Activation_38-->
+                    <dim>1</dim>
+                    <dim>10</dim>
+                </port>
+                <port id="1">
+                    <!--connected to Memory_28-->
+                    <dim>1</dim>
+                    <dim>10</dim>
+                </port>
+            </input>
+            <output>
+                <port id="2">
+                    <dim>1</dim>
+                    <dim>10</dim>
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="2" to-port="0" />
+        <edge from-layer="2" from-port="1" to-layer="38" to-port="82" />
+        <edge from-layer="38" from-port="83" to-layer="11" to-port="0" />
+        <edge from-layer="28" from-port="59" to-layer="11" to-port="1" />
+        <edge from-layer="38" from-port="83" to-layer="27" to-port="60" />
+    </edges>
+</net>
+)V0G0N");
+}
+
+TestModel getGnaMemoryModel(InferenceEngine::Precision netPrc) {
+    std::string model_str = getRawMemoryModel();
+    /* Default values for FP32 are used */
+    size_t fcBiasesLen = 10;  // num of fc_out_channels
+    size_t fcWeightsLen = 100;  // fc_in_channels * fc_out_channels
+    size_t fcBiasesSize = fcBiasesLen * 4;  // 4 bytes for FP32
+    size_t fcWeightsSize = fcWeightsLen * 4;  // 4 bytes for FP32
+    switch (netPrc) {
+        case InferenceEngine::Precision::FP32:
+            break;
+        default:
+            std::string err = "getGnaMemoryModel can not be constructed with precision ";
+            err += netPrc.name();
+            throw std::runtime_error(err);
+    }
+    std::string irName = std::string("GnaMemoryModel") + netPrc.name();
+    REPLACE_WITH_STR(model_str, "_NAME_", irName);
+    REPLACE_WITH_NUM(model_str, "_VER_", 2);
+    REPLACE_WITH_STR(model_str, "_PRC_", netPrc.name());
+    REPLACE_WITH_NUM(model_str, "_FC_BS_", fcBiasesSize);
+    REPLACE_WITH_NUM(model_str, "_FC_WS_", fcWeigthsSize);
+    return TestModel(model_str, CommonTestUtils::getWeightsBlob(fcBiasesSize + fcWeigthsSize));
+}
diff --git a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/gna_test_data.hpp b/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/plugin_tests/gna_test_data.hpp
new file mode 100644 (file)
index 0000000..b622330
--- /dev/null
@@ -0,0 +1,99 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin.h"
+
+using TestModel = FuncTestUtils::TestModel::TestModel;
+
+TestModel getGnaCnnModel(InferenceEngine::Precision netPrc);
+
+TestModel getGnaMemoryModel(InferenceEngine::Precision netPrc);
+
+const TestModel gnaCnnModelFP32 = getGnaCnnModel(InferenceEngine::Precision::FP32);
+const TestModel gnaMemoryModelFP32 = getGnaMemoryModel(InferenceEngine::Precision::FP32);
+
+// correct params
+#define BEH_HETERO BehTestParams("HETERO", \
+                                 FuncTestUtils::TestModel::convReluNormPoolFcModelFP32.model_xml_str, \
+                                 FuncTestUtils::TestModel::convReluNormPoolFcModelFP32.weights_blob, \
+                                 Precision::FP32)
+#define BEH_CNN_GNA BehTestParams("GNA", \
+                                  gnaCnnModelFP32.model_xml_str, \
+                                  gnaCnnModelFP32.weights_blob, \
+                                  Precision::FP32)
+#define BEH_RNN_GNA BehTestParams("GNA", \
+                                  gnaMemoryModelFP32.model_xml_str, \
+                                  gnaMemoryModelFP32.weights_blob, \
+                                  Precision::FP32)
+#define BEH_GNA BEH_RNN_GNA
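+// Note: each BEH_* macro expands to a fresh BehTestParams temporary, so the
+// chained modifiers below (withIn, withConfig, ...) never mutate a shared object.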
+
+// all parameters are unsupported (the reverse of the correct params above)
+#define BEH_US_ALL_GNA     BehTestParams("GNA", \
+                                         FuncTestUtils::TestModel::convReluNormPoolFcModelFP16.model_xml_str, \
+                                         FuncTestUtils::TestModel::convReluNormPoolFcModelFP16.weights_blob, \
+                                         Precision::FP16)
+
+const BehTestParams supportedValues[] = {
+        BEH_GNA,
+};
+
+const BehTestParams requestsSupportedValues[] = {
+        BEH_GNA,
+};
+
+const BehTestParams allInputSupportedValues[] = {
+        BEH_GNA, BEH_GNA.withIn(Precision::U8), BEH_GNA.withIn(Precision::I16),
+};
+
+const BehTestParams allOutputSupportedValues[] = {
+        BEH_GNA,
+};
+
+const BehTestParams typeUnSupportedValues[] = {
+        BEH_RNN_GNA.withIn(Precision::FP16), BEH_RNN_GNA.withIn(Precision::Q78), BEH_RNN_GNA.withIn(Precision::I8),
+        BEH_RNN_GNA.withIn(Precision::I32),
+        BEH_CNN_GNA.withIn(Precision::FP16), BEH_CNN_GNA.withIn(Precision::Q78), BEH_CNN_GNA.withIn(Precision::I8),
+        BEH_CNN_GNA.withIn(Precision::I32),
+};
+
+const BehTestParams batchUnSupportedValues[] = {
+        BEH_RNN_GNA.withBatchSize(2),
+        BEH_CNN_GNA.withBatchSize(2),
+};
+
+const BehTestParams allUnSupportedValues[] = {
+        BEH_US_ALL_GNA,
+};
+
+const std::vector<BehTestParams> withCorrectConfValues = {
+        BEH_GNA.withConfig({{KEY_GNA_SCALE_FACTOR, "1.0"}}),
+        BEH_GNA.withConfig({{KEY_GNA_PRECISION, "I8"}}),
+        BEH_GNA.withConfig({{KEY_GNA_FIRMWARE_MODEL_IMAGE, "gfile"}}),
+        BEH_GNA.withConfig({{KEY_GNA_DEVICE_MODE, GNA_AUTO}}),
+        BEH_GNA.withConfig({{KEY_GNA_DEVICE_MODE, GNA_SW_FP32}}),
+        BEH_GNA.withConfig({{KEY_GNA_DEVICE_MODE, GNA_SW}}),
+        BEH_GNA.withConfig({{KEY_GNA_DEVICE_MODE, GNA_SW_EXACT}}),
+        BEH_GNA.withConfig({{KEY_GNA_COMPACT_MODE, NO}}),
+};
+
+const std::vector<BehTestParams> withGnaHwConfValue = {
+        BEH_GNA.withConfig({{KEY_GNA_DEVICE_MODE, GNA_HW}}),
+};
+
+const BehTestParams withIncorrectConfValues[] = {
+        BEH_GNA.withConfig({{KEY_GNA_DEVICE_MODE,   GNA_SW_FP32},
+                            {KEY_GNA_LIB_N_THREADS, "2"}}),
+        BEH_GNA.withConfig({{KEY_GNA_SCALE_FACTOR, "NAN"}}),
+        BEH_GNA.withConfig({{KEY_GNA_PRECISION, "FP8"}}),
+        BEH_GNA.withConfig({{KEY_GNA_DEVICE_MODE, "AUTO"}}),
+        BEH_GNA.withConfig({{KEY_GNA_COMPACT_MODE, "ON"}}),
+};
+
+const BehTestParams withIncorrectConfKeys[] = {
+        BEH_GNA.withIncorrectConfigItem(),
+        BEH_GNA.withConfigItem({"KEY_KEY_GNA_DEVICE_MODE", GNA_SW}),
+        BEH_GNA.withConfigItem({"GNA_DEVICE_MODE_XYZ", GNA_SW}),
+        BEH_GNA.withConfigItem({"KEY_GNA_DEVICE_MODE_XYZ", GNA_SW}),
+        BEH_GNA.withConfigItem({"KEY_GNA_SCALE_FACTOR_1", GNA_SW}),
+};
diff --git a/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests_deprecated/behavior/gna/shared_tests_instances/skip_tests_config.cpp
new file mode 100644 (file)
index 0000000..adf6284
--- /dev/null
@@ -0,0 +1,34 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include <string>
+
+#include "functional_test_utils/skip_tests_config.hpp"
+
+std::vector<std::string> disabledTestPatterns() {
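+    // Each entry is a regular expression matched against the full gtest name.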
+    return {
+            ".*BehaviorPluginTestInferRequest\\.canRun3AsyncRequestsConsistentlyFromThreadsWithoutWait.*",
+
+            // TODO: FIX BUG 23740
+            ".*BehaviorPluginTestInferRequest\\.CanCreateTwoExeNetworks.*",
+
+            // TODO: FIX BUG 26702
+            ".*BehaviorPluginTestInferRequest\\.FailedAsyncInferWithNegativeTimeForWait.*",
+
+            // TODO: FIX BUG 23741
+            ".*BehaviorPluginTestInferRequest\\.canRun3SyncRequestsConsistentlyFromThreads.*",
+
+            // TODO: FIX BUG 23742
+            ".*BehaviorPluginTestInferRequest\\.canWaitWithotStartAsync.*",
+
+            // TODO: FIX BUG 23743
+            ".*BehaviorPluginTestInferRequest\\.returnDeviceBusyOnSetBlobAfterAsyncInfer.*",
+            ".*BehaviorPluginTestInferRequest\\.returnDeviceBusyOnGetBlobAfterAsyncInfer.*",
+            ".*BehaviorPluginTestInferRequest\\.returnDeviceBusyOnGetPerformanceCountAfterAsyncInfer.*",
+            ".*BehaviorPluginTestInferRequest\\.returnDeviceBusyOnStartInferAfterAsyncInfer.*",
+            ".*BehaviorPluginTestInferRequest\\.returnDeviceBusyOnGetUserDataAfterAsyncInfer.*",
+            ".*BehaviorPluginTestInferRequest\\.returnDeviceBusyOnSetUserDataAfterAsyncInfer.*",
+    };
+}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/behavior/mkldnn/CMakeLists.txt b/inference-engine/tests_deprecated/behavior/mkldnn/CMakeLists.txt
new file mode 100644 (file)
index 0000000..6b81443
--- /dev/null
@@ -0,0 +1,35 @@
+# Copyright (C) 2018-2020 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+set(TARGET_NAME MklDnnBehaviorTests)
+
+file(GLOB_RECURSE TEST_INCLUDE
+        ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)
+
+file(GLOB_RECURSE TEST_SRC
+        ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp
+        )
+
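+# MKLDNNPlugin is loaded at runtime rather than linked against, so it is kept
+# as a build-order dependency (consumed by add_dependencies below) rather than
+# a link library.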
+list(APPEND DEPENDENCIES
+        MKLDNNPlugin)
+
+source_group("src" FILES ${TEST_SRC})
+source_group("include" FILES ${TEST_INCLUDE})
+
+add_executable(${TARGET_NAME}
+        ${TEST_SRC}
+        ${TEST_INCLUDE})
+
+target_compile_definitions(${TARGET_NAME} PRIVATE
+        INSTANTIATE_TESTS=1)
+
+target_link_libraries(${TARGET_NAME} PRIVATE IEBehaviorSharedTests)
+
+target_include_directories(${TARGET_NAME} PRIVATE $<TARGET_PROPERTY:inference_engine_preproc,INTERFACE_INCLUDE_DIRECTORIES> ${IE_MAIN_SOURCE_DIR}/samples)
+
+add_test(NAME ${TARGET_NAME}
+        COMMAND ${TARGET_NAME})
+
+add_dependencies(${TARGET_NAME} ${DEPENDENCIES})
diff --git a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/cpp_wrappers/holders_tests.cpp b/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/cpp_wrappers/holders_tests.cpp
new file mode 100644 (file)
index 0000000..4a48d06
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "holders_tests.hpp"
+
+INSTANTIATE_TEST_CASE_P(ReleaseOrderTests, CPP_HoldersTests, testing::Combine(testing::ValuesIn(std::vector<std::vector<int>> {
+    // 0 - plugin
+    // 1 - executable_network
+    // 2 - infer_request
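+    // all 3! = 6 release orders are exercised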
+    {0,1,2},
+    {0,2,1},
+    {1,0,2},
+    {1,2,0},
+    {2,0,1},
+    {2,1,0},
+}), testing::Values(std::string("CPU"))));
diff --git a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin.cpp b/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin.cpp
new file mode 100644 (file)
index 0000000..3ee37d6
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin.h"
+#include "behavior_test_plugins.hpp"
+#include "mkldnn_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTest, ValuesIn(supportedValues),
+                        getTestCaseName);
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInput, ValuesIn(allInputSupportedValues),
+                        getTestCaseName);
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestOutput, ValuesIn(allOutputSupportedValues),
+                        getOutputTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_config.cpp b/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_config.cpp
new file mode 100644 (file)
index 0000000..487dc47
--- /dev/null
@@ -0,0 +1,21 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_config.hpp"
+#include "mkldnn_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginCorrectConfigTest,
+                        ValuesIn(BehTestParams::concat(withCorrectConfValues, withCorrectConfValuesPluginOnly)),
+                        getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginIncorrectConfigTest, ValuesIn(withIncorrectConfValues),
+                        getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginIncorrectConfigTestInferRequestAPI,
+                        ValuesIn(withIncorrectConfKeys),
+                        getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginCorrectConfigTestInferRequestAPI,
+                        ValuesIn(supportedValues),
+                        getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_exec_graph_info.cpp b/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_exec_graph_info.cpp
new file mode 100644 (file)
index 0000000..ec20b0c
--- /dev/null
@@ -0,0 +1,12 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_exec_graph_info.hpp"
+#include "mkldnn_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        BehaviorTest,
+        BehaviorPluginTestExecGraphInfo,
+        ValuesIn(supportedValues),
+        getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp b/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp
new file mode 100644 (file)
index 0000000..41bd3e3
--- /dev/null
@@ -0,0 +1,8 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_infer_request.hpp"
+#include "mkldnn_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequest, ValuesIn(requestsSupportedValues), getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_callback.cpp b/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_callback.cpp
new file mode 100644 (file)
index 0000000..ee71646
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_infer_request_callback.hpp"
+#include "mkldnn_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestCallback, ValuesIn(requestsSupportedValues),
+                        getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp b/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp
new file mode 100644 (file)
index 0000000..d1d7c86
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_infer_request_config.hpp"
+#include "mkldnn_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestConfig,
+                        ValuesIn(BehTestParams::concat(withCorrectConfValues, withCorrectConfValuesNetworkOnly)),
+                        getConfigTestCaseName);
+
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestConfigExclusiveAsync, ValuesIn(supportedValues),
+                        getConfigTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp b/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp
new file mode 100644 (file)
index 0000000..088844f
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_infer_request_input.hpp"
+#include "mkldnn_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestInput, ValuesIn(allInputSupportedValues),
+                        getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp b/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp
new file mode 100644 (file)
index 0000000..7bc7022
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_infer_request_output.hpp"
+#include "mkldnn_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestOutput, ValuesIn(allOutputSupportedValues),
+                        getOutputTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_layers.cpp b/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_layers.cpp
new file mode 100644 (file)
index 0000000..5e47e3a
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_layers.hpp"
+
+pool_test_params roi_pool_test_cases[] = {
+        pool_test_params(CommonTestUtils::DEVICE_CPU, "FP32", pool_case),
+};
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, ROIPoolingLayerTest,
+                        ::testing::ValuesIn(roi_pool_test_cases),
+                        getTestName<pool_test_params>);
+
+activ_test_params activ_test_cases[] = {
+        activ_test_params(CommonTestUtils::DEVICE_CPU, "FP32", activation_case),
+};
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, ActivationLayerTest,
+                        ::testing::ValuesIn(activ_test_cases),
+                        getTestName<activ_test_params>);
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, ReLULayerTest,
+                        ::testing::Values(activ_test_params("CPU", "FP32", activation_case)),
+                        getTestName<activ_test_params>);
+
+norm_test_params norm_test_cases[] = {
+        norm_test_params(CommonTestUtils::DEVICE_CPU, "FP32", norm_case),
+};
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, NormalizeLayerTest,
+                        ::testing::ValuesIn(norm_test_cases),
+                        getTestName<norm_test_params>);
diff --git a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_layout.cpp b/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_layout.cpp
new file mode 100644 (file)
index 0000000..49750d1
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_layout.hpp"
+
+layout_test_params power_test_cases[] = {
+    layout_test_params(CommonTestUtils::DEVICE_CPU, "FP32", Layout::C, power_params({ { 3 } }, 2, 2, 2)),
+    layout_test_params(CommonTestUtils::DEVICE_CPU, "FP32", Layout::NC, power_params({ { 1, 3 } }, 2, 2, 2)),
+    layout_test_params(CommonTestUtils::DEVICE_CPU, "FP32", Layout::CHW, power_params({ { 3, 32, 16 } }, 2, 2, 2)),
+    layout_test_params(CommonTestUtils::DEVICE_CPU, "FP32", Layout::NCHW, power_params({ { 1, 3, 16, 16 } }, 2, 2, 2)),
+};
+
+layout_test_params conv_test_cases[] = {
+    layout_test_params(CommonTestUtils::DEVICE_CPU, "FP32", Layout::NCHW, power_params({ { 1, 3, 16, 16 } }, 2, 2, 2)),
+};
+
+layout_test_params conv_neg_test_cases[] = {
+    layout_test_params(CommonTestUtils::DEVICE_CPU, "FP32", Layout::C, power_params({ { 3 } }, 2, 2, 2)),
+    layout_test_params(CommonTestUtils::DEVICE_CPU, "FP32", Layout::NC, power_params({ { 1, 3 } }, 2, 2, 2)),
+    layout_test_params(CommonTestUtils::DEVICE_CPU, "FP32", Layout::CHW, power_params({ { 3, 32, 16 } }, 2, 2, 2)),
+};
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, LayoutTestCanLoadPower,
+    ::testing::ValuesIn(power_test_cases), getTestName);
+INSTANTIATE_TEST_CASE_P(BehaviorTest, LayoutTestCanLoadConv,
+    ::testing::ValuesIn(conv_test_cases), getTestName);
+INSTANTIATE_TEST_CASE_P(BehaviorTest, LayoutTestCanNotLoadConv,
+    ::testing::ValuesIn(conv_neg_test_cases), getTestName);
diff --git a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_set_preprocess.cpp b/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_set_preprocess.cpp
new file mode 100644 (file)
index 0000000..266f1b0
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_set_preprocess.hpp"
+#include "mkldnn_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest,
+                        BehaviorPluginTestPreProcess,
+                        ValuesIn(requestsSupportedValues),
+                        getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_unsupported.cpp b/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_unsupported.cpp
new file mode 100644 (file)
index 0000000..aadb3dc
--- /dev/null
@@ -0,0 +1,12 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_unsupported.hpp"
+#include "mkldnn_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestAllUnsupported, ValuesIn(allUnSupportedValues),
+    getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestTypeUnsupported, ValuesIn(typeUnSupportedValues),
+    getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_version.cpp b/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/behavior_test_plugin_version.cpp
new file mode 100644 (file)
index 0000000..8d05905
--- /dev/null
@@ -0,0 +1,8 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_version.hpp"
+#include "mkldnn_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestVersion, ValuesIn(add_element_into_array(supportedValues, BEH_HETERO)), getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/mkldnn_test_data.hpp b/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/plugin_tests/mkldnn_test_data.hpp
new file mode 100644 (file)
index 0000000..7f7f36e
--- /dev/null
@@ -0,0 +1,93 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin.h"
+
+// correct params
+#define BEH_MKLDNN BehTestParams("CPU", \
+                                 FuncTestUtils::TestModel::convReluNormPoolFcModelFP32.model_xml_str, \
+                                 FuncTestUtils::TestModel::convReluNormPoolFcModelFP32.weights_blob, \
+                                 Precision::FP32)
+#define BEH_MKLDNN_FP16 BehTestParams("CPU", \
+                                 FuncTestUtils::TestModel::convReluNormPoolFcModelFP16.model_xml_str, \
+                                 FuncTestUtils::TestModel::convReluNormPoolFcModelFP16.weights_blob, \
+                                 Precision::FP16)
+#define BEH_HETERO BehTestParams("HETERO", \
+                                 FuncTestUtils::TestModel::convReluNormPoolFcModelFP32.model_xml_str, \
+                                 FuncTestUtils::TestModel::convReluNormPoolFcModelFP32.weights_blob, \
+                                 Precision::FP32)
+
+// all parameters are unsupported (the reverse of the correct params above)
+#define BEH_US_ALL_MKLDNN  BehTestParams("CPU", \
+                                         FuncTestUtils::TestModel::convReluNormPoolFcModelQ78.model_xml_str, \
+                                         FuncTestUtils::TestModel::convReluNormPoolFcModelQ78.weights_blob, \
+                                         Precision::Q78)
+
+const BehTestParams supportedValues[] = {
+        BEH_MKLDNN,
+};
+
+const BehTestParams requestsSupportedValues[] = {
+        BEH_MKLDNN,
+        // the following adds an additional test for the MKLDNNGraphlessInferRequest (explicitly created for streams)
+        BEH_MKLDNN.withConfig({{KEY_CPU_THROUGHPUT_STREAMS, CPU_THROUGHPUT_AUTO}}),
+};
+
+const BehTestParams allInputSupportedValues[] = {
+        BEH_MKLDNN, BEH_MKLDNN.withIn(Precision::U8), BEH_MKLDNN.withIn(Precision::U16),
+        BEH_MKLDNN.withIn(Precision::I16),
+        // the following entries with withConfig test the MKLDNNGraphlessInferRequest (explicitly created for streams)
+        BEH_MKLDNN.withIn(Precision::U8).withConfig({{KEY_CPU_THROUGHPUT_STREAMS, CPU_THROUGHPUT_AUTO}}),
+        BEH_MKLDNN.withIn(Precision::U16).withConfig({{KEY_CPU_THROUGHPUT_STREAMS, CPU_THROUGHPUT_AUTO}}),
+        BEH_MKLDNN.withIn(Precision::I16).withConfig({{KEY_CPU_THROUGHPUT_STREAMS, CPU_THROUGHPUT_AUTO}}),
+        BEH_MKLDNN_FP16.withIn(Precision::FP32),
+        BEH_MKLDNN_FP16.withIn(Precision::U8),
+        BEH_MKLDNN_FP16.withIn(Precision::U16),
+        BEH_MKLDNN_FP16.withIn(Precision::I16),
+        // the following entries with withConfig test the MKLDNNGraphlessInferRequest (explicitly created for streams)
+        BEH_MKLDNN_FP16.withIn(Precision::FP32).withConfig({{KEY_CPU_THROUGHPUT_STREAMS, CPU_THROUGHPUT_AUTO}}),
+        BEH_MKLDNN_FP16.withIn(Precision::U8).withConfig({{KEY_CPU_THROUGHPUT_STREAMS, CPU_THROUGHPUT_AUTO}}),
+        BEH_MKLDNN_FP16.withIn(Precision::U16).withConfig({{KEY_CPU_THROUGHPUT_STREAMS, CPU_THROUGHPUT_AUTO}}),
+        BEH_MKLDNN_FP16.withIn(Precision::I16).withConfig({{KEY_CPU_THROUGHPUT_STREAMS, CPU_THROUGHPUT_AUTO}}),
+};
+
+const BehTestParams allOutputSupportedValues[] = {
+        BEH_MKLDNN,
+        // the following withConfig entry checks the MKLDNNGraphlessInferRequest (explicitly created for streams)
+        BEH_MKLDNN.withConfig({{KEY_CPU_THROUGHPUT_STREAMS, CPU_THROUGHPUT_AUTO}}),
+};
+
+const BehTestParams typeUnSupportedValues[] = {
+        BEH_MKLDNN.withIn(Precision::Q78),
+        BEH_MKLDNN_FP16,
+};
+
+const BehTestParams allUnSupportedValues[] = {
+        BEH_US_ALL_MKLDNN,
+};
+
+const std::vector<BehTestParams> withCorrectConfValues = {
+        BEH_MKLDNN.withConfig({{KEY_CPU_THROUGHPUT_STREAMS, CPU_THROUGHPUT_NUMA}}),
+        BEH_MKLDNN.withConfig({{KEY_CPU_THROUGHPUT_STREAMS, CPU_THROUGHPUT_AUTO}}),
+        BEH_MKLDNN.withConfig({{KEY_CPU_THROUGHPUT_STREAMS, "8"}}),
+        BEH_MKLDNN.withConfig({{KEY_CPU_BIND_THREAD, NO}}),
+        BEH_MKLDNN.withConfig({{KEY_CPU_BIND_THREAD, YES}}),
+        BEH_MKLDNN.withConfig({{KEY_DYN_BATCH_LIMIT, "10"}}),
+};
+
+const BehTestParams withIncorrectConfValues[] = {
+        BEH_MKLDNN.withConfig({{KEY_CPU_THROUGHPUT_STREAMS, "OFF"}}),
+        BEH_MKLDNN.withConfig({{KEY_CPU_BIND_THREAD, "OFF"}}),
+        BEH_MKLDNN.withConfig({{KEY_DYN_BATCH_LIMIT, "NAN"}}),
+};
+
+const std::vector<BehTestParams> withCorrectConfValuesPluginOnly;
+
+const std::vector<BehTestParams> withCorrectConfValuesNetworkOnly = {
+        BEH_MKLDNN.withConfig({}),
+};
+
+const BehTestParams withIncorrectConfKeys[] = {
+        BEH_MKLDNN.withIncorrectConfigItem(),
+};
diff --git a/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests_deprecated/behavior/mkldnn/shared_tests_instances/skip_tests_config.cpp
new file mode 100644 (file)
index 0000000..8db78c1
--- /dev/null
@@ -0,0 +1,13 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include <string>
+
+#include "functional_test_utils/skip_tests_config.hpp"
+
+std::vector<std::string> disabledTestPatterns() {
+    return {
+    };
+}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/behavior/shared_tests/CMakeLists.txt b/inference-engine/tests_deprecated/behavior/shared_tests/CMakeLists.txt
new file mode 100644 (file)
index 0000000..c570cb9
--- /dev/null
@@ -0,0 +1,37 @@
+# Copyright (C) 2018-2020 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+set(TARGET_NAME IEBehaviorSharedTests)
+
+disable_deprecated_warnings()
+
+file(GLOB_RECURSE SHARED_TESTS_SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp
+                                   ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)
+
+add_library(${TARGET_NAME} STATIC ${SHARED_TESTS_SRC})
+add_dependencies(${TARGET_NAME} inference_engine_preproc)
+
+target_include_directories(${TARGET_NAME} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/plugin_tests")
+
+target_link_libraries(${TARGET_NAME} PUBLIC
+        funcTestUtils
+        ieTestHelpers
+        )
+
+if(UNIX)
+    find_package(Threads REQUIRED)
+    target_link_libraries(${TARGET_NAME} PRIVATE Threads::Threads)
+endif()
+
+target_link_libraries(${TARGET_NAME} PUBLIC gmock)
+
+target_include_directories(${TARGET_NAME} PUBLIC
+        ${IE_MAIN_SOURCE_DIR}/src/vpu/graph_transformer/include
+        $<TARGET_PROPERTY:inference_engine_plugin_api,INTERFACE_INCLUDE_DIRECTORIES>
+        ${CMAKE_CURRENT_SOURCE_DIR}/plugin_tests
+        ${CMAKE_CURRENT_SOURCE_DIR}/cpp_wrappers)
+
+# developer package
+ie_developer_export_targets(${TARGET_NAME})
diff --git a/inference-engine/tests_deprecated/behavior/shared_tests/cpp_wrappers/holders_tests.hpp b/inference-engine/tests_deprecated/behavior/shared_tests/cpp_wrappers/holders_tests.hpp
new file mode 100644 (file)
index 0000000..fd47628
--- /dev/null
@@ -0,0 +1,81 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <cpp/ie_plugin_cpp.hpp>
+#include <cpp/ie_cnn_net_reader.h>
+
+#include "common_test_utils/xml_net_builder/xml_net_builder.hpp"
+
+using namespace InferenceEngine;
+
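+// Runs the statement in a gtest death test: the forked child executes it and
+// then exits with code 0, so any crash or abnormal exit fails the expectation.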
+#define EXPECT_NO_CRASH(_statement) \
+EXPECT_EXIT(_statement; exit(0), testing::ExitedWithCode(0), "")
+
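+// Builds a trivial one-layer network, then releases the Core, ExecutableNetwork
+// and InferRequest in the order given by `order`; every permutation must be safe.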
+void release_order_test(std::vector<int> order, const std::string & deviceName) {
+    SizeVector dims {1,3,3,3};
+    std::map<std::string, std::string> attr {
+            {"power", "1"},
+            {"scale", "-1"},
+            {"shift", "0"}
+    };
+
+    auto model = CommonTestUtils::V2NetBuilder::buildNetworkWithOneInput("RNN_Net", dims, "FP32")
+            .addLayer("Power", "FP32", &attr, {{dims}, {dims}})
+            .finish();
+
+    CNNNetwork net;
+
+    {
+        Core reader;
+        net = reader.ReadNetwork(model, Blob::CPtr());
+    }
+
+    Core core;
+    auto exe_net = core.LoadNetwork(net, deviceName);
+    auto request = exe_net.CreateInferRequest();
+
+    auto release = [&] (int i) {
+        switch (i) {
+            case 0: core = Core{}; break;
+            case 1: exe_net = {}; break;
+            case 2: request = {}; break;
+            default: break;
+        }
+    };
+
+    for (auto i : order)
+        release(i);
+}
+
+class CPP_HoldersTests : public ::testing::TestWithParam<std::tuple<std::vector<int>, std::string>> {
+public:
+    void SetUp() override {
+        order      = std::get<0>(GetParam());
+        deviceName = std::get<1>(GetParam());
+
+        deathTestStyle = ::testing::GTEST_FLAG(death_test_style);
+        if (deathTestStyle == "fast" && deviceName == "MYRIAD") {
+            // Default death test mode "fast" must be used in single-threaded context only.
+            // "MyriadBehaviorTests" links "XLink" library that statically initializes "libusb".
+            // Which in turn creates a thread.
+            ::testing::GTEST_FLAG(death_test_style) = "threadsafe";
+        }
+    }
+
+    void TearDown() override {
+        ::testing::GTEST_FLAG(death_test_style) = deathTestStyle;
+    }
+
+    std::string deviceName;
+    std::vector<int> order;
+
+private:
+    std::string deathTestStyle;
+};
+
+TEST_P(CPP_HoldersTests, Orders) {
+    // The test fails if a crash happens
+    EXPECT_NO_CRASH(release_order_test(order, deviceName));
+}
diff --git a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin.h b/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin.h
new file mode 100644 (file)
index 0000000..c08d9ee
--- /dev/null
@@ -0,0 +1,228 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#ifndef BEHAVIOR_TEST_PLUGIN_H_
+#define BEHAVIOR_TEST_PLUGIN_H_
+
+#include <gtest/gtest.h>
+#include <tests_common.hpp>
+#include <inference_engine.hpp>
+#include <ie_plugin_config.hpp>
+#include <vpu/vpu_plugin_config.hpp>
+#include <vpu/private_plugin_config.hpp>
+#include <gna/gna_config.hpp>
+#include <multi-device/multi_device_config.hpp>
+#include <cpp_interfaces/exception2status.hpp>
+#include <tests_utils.hpp>
+#include <memory>
+#include <fstream>
+
+#include "functional_test_utils/test_model/test_model.hpp"
+#include "functional_test_utils/skip_tests_config.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+using namespace InferenceEngine::PluginConfigParams;
+using namespace InferenceEngine::VPUConfigParams;
+using namespace InferenceEngine::GNAConfigParams;
+
+namespace {
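+// Extracts the value of the name attribute from the model's <net name="..."> tag.
+// Note the lowercase "name": IRs that spell the attribute differently are not matched.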
+inline std::string getModelName(std::string strXML) {
+    auto itBegin = strXML.find("<net name=\"");
+    auto itEnd = strXML.find(">", itBegin + 1);
+    auto substr = strXML.substr(itBegin + 1, itEnd - itBegin - 1);
+
+    itBegin = substr.find("\"");
+    itEnd = substr.find("\"", itBegin + 1);
+    substr = substr.substr(itBegin + 1, itEnd - itBegin - 1);
+
+    return substr;
+}
+}
+
+class BehTestParams {
+public:
+    std::string device;
+
+    std::string model_xml_str;
+    Blob::Ptr weights_blob;
+
+    Precision input_blob_precision;
+    Precision output_blob_precision;
+
+    std::map<std::string, std::string> config;
+    uint8_t batch_size = 1;
+
+    BehTestParams() = default;
+
+    BehTestParams(
+            const std::string &_device,
+            const std::string &_model_xml_str,
+            const Blob::Ptr &_weights_blob,
+            Precision _input_blob_precision,
+            const std::map<std::string, std::string> &_config = {},
+            Precision _output_blob_precision = Precision::FP32) : device(_device),
+                                                                  model_xml_str(_model_xml_str),
+                                                                  weights_blob(_weights_blob),
+                                                                  input_blob_precision(_input_blob_precision),
+                                                                  output_blob_precision(_output_blob_precision),
+                                                                  config(_config) {}
+
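+    // The with*() helpers below mutate this object and return *this, so parameter
+    // variations can be composed fluently, e.g. params.withIn(Precision::U8).withBatchSize(2).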
+    BehTestParams &withIn(Precision _input_blob_precision) {
+        input_blob_precision = _input_blob_precision;
+        return *this;
+    }
+
+    BehTestParams &withOut(Precision _output_blob_precision) {
+        output_blob_precision = _output_blob_precision;
+        return *this;
+    }
+
+    BehTestParams &withConfig(std::map<std::string, std::string> _config) {
+        config = _config;
+        return *this;
+    }
+
+    BehTestParams &withConfigItem(std::pair<std::string, std::string> _config_item) {
+        config.insert(_config_item);
+        return *this;
+    }
+
+    BehTestParams &withIncorrectConfigItem() {
+        config.insert({"some_nonexistent_key", "some_unknown_value"});
+        return *this;
+    }
+
+    BehTestParams &withBatchSize(uint8_t _batch_size) {
+        batch_size = _batch_size;
+        return *this;
+    }
+
+    static std::vector<BehTestParams>
+    concat(std::vector<BehTestParams> const &v1, std::vector<BehTestParams> const &v2) {
+        std::vector<BehTestParams> retval;
+        std::copy(v1.begin(), v1.end(), std::back_inserter(retval));
+        std::copy(v2.begin(), v2.end(), std::back_inserter(retval));
+        return retval;
+    }
+};
+
+class BehaviorPluginTest : public TestsCommon, public WithParamInterface<BehTestParams> {
+protected:
+
+    StatusCode sts;
+    InferenceEngine::ResponseDesc response;
+
+    static Blob::Ptr makeNotAllocatedBlob(Precision eb, Layout l, const SizeVector &dims);
+
+    void setInputNetworkPrecision(CNNNetwork &network, InputsDataMap &inputs_info,
+                                  Precision input_precision);
+
+    void setOutputNetworkPrecision(CNNNetwork &network, OutputsDataMap &outputs_info,
+                                   Precision output_precision);
+};
+
+class BehaviorPluginTestAllUnsupported : public BehaviorPluginTest {
+};
+
+class BehaviorPluginTestTypeUnsupported : public BehaviorPluginTest {
+};
+
+class BehaviorPluginTestBatchUnsupported : public BehaviorPluginTest {
+};
+
+class BehaviorPluginCorrectConfigTest : public BehaviorPluginTest {
+};
+
+class BehaviorPluginIncorrectConfigTest : public BehaviorPluginTest {
+};
+
+class BehaviorPluginIncorrectConfigTestInferRequestAPI : public BehaviorPluginTest {
+};
+
+class BehaviorPluginCorrectConfigTestInferRequestAPI : public BehaviorPluginTest {
+};
+
+class BehaviorPluginTestVersion : public BehaviorPluginTest {
+};
+
+class BehaviorPluginTestInferRequest : public BehaviorPluginTest {
+public:
+    struct TestEnv {
+        // The Core is intentionally declared before IInferRequest: otherwise the plugin
+        // would be freed (its DLL unloaded) before the IInferRequest::Ptr, causing memory corruption.
+        // TODO: the same applies to IExecutableNetwork and IInferRequest; a shared syncEnv object
+        // may cause a segfault if the IExecutableNetwork is freed before the IInferRequest.
+        InferenceEngine::Core core;
+        InferenceEngine::ExecutableNetwork exeNetwork;
+        IInferRequest::Ptr inferRequest;
+        InferenceEngine::InferRequest actualInferRequest;
+        CNNNetwork network;
+        InputInfo::Ptr networkInput;
+        DataPtr networkOutput;
+        SizeVector inputDims;
+        SizeVector outputDims;
+        std::string inputName;
+        std::string outputName;
+        typedef std::shared_ptr<TestEnv> Ptr;
+    };
+
+    static Blob::Ptr prepareInputBlob(Precision blobPrecision, SizeVector inputDims);
+
+protected:
+    Blob::Ptr _prepareOutputBlob(Precision blobPrecision, SizeVector outputDims);
+
+    void _setInputPrecision(
+            const BehTestParams &param,
+            CNNNetwork &cnnNetwork,
+            TestEnv::Ptr &testEnv,
+            const size_t expectedNetworkInputs = 0);
+
+    void _setOutputPrecision(
+            const BehTestParams &param,
+            CNNNetwork &cnnNetwork,
+            TestEnv::Ptr &testEnv,
+            const size_t expectedNetworkOutputs = 0);
+
+    void _createAndCheckInferRequest(
+            const BehTestParams &param,
+            TestEnv::Ptr &testEnv,
+            const std::map<std::string, std::string> &config = {},
+            const size_t expectedNetworkInputs = 1,
+            const size_t expectedNetworkOutputs = 1,
+            InferenceEngine::IExtensionPtr extension = nullptr);
+
+    bool _wasDeviceBusy(ResponseDesc response);
+
+};
+
+class FPGAHangingTest : public BehaviorPluginTest {
+};
+
+class BehaviorPluginTestInferRequestInput : public BehaviorPluginTestInferRequest {
+};
+
+class BehaviorPluginTestInferRequestOutput : public BehaviorPluginTestInferRequest {
+};
+
+class BehaviorPluginTestInferRequestConfig : public BehaviorPluginTestInferRequest {
+};
+
+class BehaviorPluginTestInferRequestConfigExclusiveAsync : public BehaviorPluginTestInferRequest {
+};
+
+class BehaviorPluginTestInferRequestCallback : public BehaviorPluginTestInferRequest {
+};
+
+class BehaviorPluginTestExecGraphInfo : public BehaviorPluginTestInferRequest {
+};
+
+class BehaviorPluginTestPerfCounters : public BehaviorPluginTestInferRequest {
+};
+
+class BehaviorPluginTestPreProcess : public BehaviorPluginTestInferRequest {
+};
+
+#endif  // BEHAVIOR_TEST_PLUGIN_H_
diff --git a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_config.hpp b/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_config.hpp
new file mode 100644 (file)
index 0000000..4edc6b5
--- /dev/null
@@ -0,0 +1,205 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin.h"
+#include <threading/ie_executor_manager.hpp>
+#include <ie_core.hpp>
+
+using namespace std;
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
+namespace {
+    std::ostream &operator<<(std::ostream &os, const BehTestParams &p) {
+        return os << "#";
+    }
+
+    std::string getTestCaseName(testing::TestParamInfo<BehTestParams> obj) {
+        std::string config_str = "";
+        for (auto it = obj.param.config.cbegin(); it != obj.param.config.cend(); it++) {
+            std::string v = it->second;
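+            // gtest case names may not contain '.', so sanitize config values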
+            std::replace(v.begin(), v.end(), '.', '_');
+            config_str += it->first + "_" + v + "_";
+        }
+        return obj.param.device + "_" + config_str;
+    }
+}
+
+// Setting empty config doesn't throw
+TEST_P(BehaviorPluginCorrectConfigTest, SetEmptyConfig) {
+    InferenceEngine::Core core;
+    std::map<std::string, std::string> config;
+    const std::string device = GetParam().device;
+    ASSERT_NO_THROW(core.GetMetric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
+    ASSERT_NO_THROW(core.SetConfig(config, GetParam().device));
+}
+
+// Setting correct config doesn't throw
+TEST_P(BehaviorPluginCorrectConfigTest, SetCorrectConfig) {
+    InferenceEngine::Core core;
+    std::map<std::string, std::string> config = GetParam().config;
+    const std::string device = GetParam().device;
+    ASSERT_NO_THROW(core.GetMetric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
+    ASSERT_NO_THROW(core.SetConfig(config, GetParam().device));
+}
+
+TEST_P(BehaviorPluginIncorrectConfigTest, SetConfigWithIncorrectKey) {
+    InferenceEngine::Core core;
+    std::map<std::string, std::string> config = GetParam().config;
+    const std::string device = GetParam().device;
+    if (device.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos &&
+        device.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) {
+        ASSERT_NO_THROW(core.GetMetric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
+        ASSERT_THROW(core.SetConfig(config, GetParam().device), InferenceEngineException);
+    } else {
+        ASSERT_NO_THROW(core.GetMetric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
+        ASSERT_NO_THROW(core.SetConfig(config, GetParam().device));
+    }
+}
+
+TEST_P(BehaviorPluginIncorrectConfigTest, canNotLoadNetworkWithIncorrectConfig) {
+    auto param = GetParam();
+    std::map<std::string, std::string> config = param.config;
+    InferenceEngine::Core core;
+    IExecutableNetwork::Ptr exeNetwork;
+    CNNNetwork cnnNetwork = core.ReadNetwork(GetParam().model_xml_str, GetParam().weights_blob);
+
+    ASSERT_THROW(exeNetwork = core.LoadNetwork(cnnNetwork, param.device, config), InferenceEngineException);
+}
+
+TEST_P(BehaviorPluginIncorrectConfigTestInferRequestAPI, SetConfigWithNoExistingKey) {
+    std::string refError = NOT_FOUND_str;
+    InferenceEngine::Core core;
+    std::map<std::string, std::string> config = GetParam().config;
+    const std::string device = GetParam().device;
+    ASSERT_NO_THROW(core.GetMetric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
+    if (device.find(CommonTestUtils::DEVICE_GNA) != std::string::npos) {
+        ASSERT_THROW(core.SetConfig(config, GetParam().device), NotFound);
+    } else {
+        try {
+            core.SetConfig(config, GetParam().device);
+        } catch (const InferenceEngineException &ex) {
+            ASSERT_STR_CONTAINS(ex.what(), refError);
+        }
+    }
+}
+
+IE_SUPPRESS_DEPRECATED_START
+
+TEST_P(BehaviorPluginCorrectConfigTestInferRequestAPI, canSetExclusiveAsyncRequests) {
+    ASSERT_EQ(0u, ExecutorManager::getInstance()->getExecutorsNumber());
+    auto param = GetParam();
+    InferenceEngine::Core core;
+    std::map<std::string, std::string> config = {{KEY_EXCLUSIVE_ASYNC_REQUESTS, YES}};
+    config.insert(param.config.begin(), param.config.end());
+
+    const std::string device = GetParam().device;
+    if (device.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos &&
+        device.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) {
+        ASSERT_NO_THROW(core.SetConfig(config, GetParam().device));
+    }
+
+    CNNNetwork cnnNetwork = core.ReadNetwork(GetParam().model_xml_str, GetParam().weights_blob);
+
+    ExecutableNetwork exeNetwork = core.LoadNetwork(cnnNetwork, GetParam().device, config);
+    exeNetwork.CreateInferRequest();
+
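+    // The expected executor count is device-specific: exclusive async requests
+    // are served through shared per-device task executors.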
+    // TODO: there are no executors to sync. Should this be supported natively in the HDDL API?
+    if (GetParam().device == CommonTestUtils::DEVICE_HDDL) {
+        ASSERT_EQ(0u, ExecutorManager::getInstance()->getExecutorsNumber());
+    } else if (GetParam().device == CommonTestUtils::DEVICE_FPGA) {
+        ASSERT_EQ(2u, ExecutorManager::getInstance()->getExecutorsNumber());
+    } else if (GetParam().device == CommonTestUtils::DEVICE_MYRIAD) {
+        ASSERT_EQ(2u, ExecutorManager::getInstance()->getExecutorsNumber());
+    } else if (GetParam().device == CommonTestUtils::DEVICE_KEEMBAY) {
+        ASSERT_EQ(2u, ExecutorManager::getInstance()->getExecutorsNumber());
+    } else if (GetParam().device == CommonTestUtils::DEVICE_GNA) {
+        ASSERT_EQ(0u, ExecutorManager::getInstance()->getExecutorsNumber());
+    } else if (GetParam().device == CommonTestUtils::DEVICE_MULTI) {
+        // for multi-device the number of Executors is not known (defined by the devices configuration)
+    } else {
+        ASSERT_EQ(1u, ExecutorManager::getInstance()->getExecutorsNumber());
+    }
+}
+
+TEST_P(BehaviorPluginCorrectConfigTestInferRequestAPI, withoutExclusiveAsyncRequests) {
+    ASSERT_EQ(0u, ExecutorManager::getInstance()->getExecutorsNumber());
+
+    auto param = GetParam();
+    InferenceEngine::Core core;
+
+    std::map<std::string, std::string> config = {{KEY_EXCLUSIVE_ASYNC_REQUESTS, NO}};
+    config.insert(param.config.begin(), param.config.end());
+
+    const std::string device = GetParam().device;
+    if (device.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos &&
+        device.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) {
+        ASSERT_NO_THROW(core.SetConfig(config, param.device));
+    }
+
+    CNNNetwork cnnNetwork = core.ReadNetwork(param.model_xml_str, param.weights_blob);
+
+    ExecutableNetwork exeNetwork = core.LoadNetwork(cnnNetwork, param.device, config);
+    exeNetwork.CreateInferRequest();
+
+
+    if (GetParam().device == CommonTestUtils::DEVICE_FPGA) {
+        ASSERT_EQ(1u, ExecutorManager::getInstance()->getExecutorsNumber());
+    } else if (GetParam().device == CommonTestUtils::DEVICE_MYRIAD) {
+        ASSERT_EQ(1u, ExecutorManager::getInstance()->getExecutorsNumber());
+    } else if (GetParam().device == CommonTestUtils::DEVICE_KEEMBAY) {
+        ASSERT_EQ(1u, ExecutorManager::getInstance()->getExecutorsNumber());
+    } else if (GetParam().device == CommonTestUtils::DEVICE_MULTI) {
+        // for multi-device the number of Executors is not known (defined by the devices configuration)
+    } else {
+        ASSERT_EQ(0u, ExecutorManager::getInstance()->getExecutorsNumber());
+    }
+}
+
+TEST_P(BehaviorPluginCorrectConfigTestInferRequestAPI, reusableCPUStreamsExecutor) {
+    ASSERT_EQ(0u, ExecutorManager::getInstance()->getExecutorsNumber());
+    ASSERT_EQ(0u, ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber());
+
+    auto param = GetParam();
+    InferenceEngine::Core core;
+    {
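+        // Everything network-related is scoped so that, once destroyed, any CPU
+        // streams executor should return to the idle pool checked below.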
+        std::map<std::string, std::string> config = {{KEY_EXCLUSIVE_ASYNC_REQUESTS, NO}};
+        config.insert(param.config.begin(), param.config.end());
+
+        const std::string device = GetParam().device;
+        if (device.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos &&
+            device.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) {
+            ASSERT_NO_THROW(core.SetConfig(config, param.device));
+        }
+
+        CNNNetwork cnnNetwork = core.ReadNetwork(param.model_xml_str, param.weights_blob);
+
+        ExecutableNetwork exeNetwork = core.LoadNetwork(cnnNetwork, param.device, config);
+        exeNetwork.CreateInferRequest();
+
+
+        if (GetParam().device == CommonTestUtils::DEVICE_FPGA) {
+            ASSERT_EQ(1u, ExecutorManager::getInstance()->getExecutorsNumber());
+            ASSERT_EQ(0u, ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber());
+        } else if (GetParam().device == CommonTestUtils::DEVICE_MYRIAD) {
+            ASSERT_EQ(1u, ExecutorManager::getInstance()->getExecutorsNumber());
+            ASSERT_EQ(0u, ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber());
+        } else if (GetParam().device == CommonTestUtils::DEVICE_KEEMBAY) {
+            ASSERT_EQ(1u, ExecutorManager::getInstance()->getExecutorsNumber());
+            ASSERT_EQ(0u, ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber());
+        } else if (GetParam().device == CommonTestUtils::DEVICE_MULTI) {
+            // for multi-device the number of Executors is not known (defined by the devices configuration)
+        } else {
+            ASSERT_EQ(0u, ExecutorManager::getInstance()->getExecutorsNumber());
+            ASSERT_GE(1u, ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber());
+        }
+    }
+    if (GetParam().device == CommonTestUtils::DEVICE_CPU) {
+        ASSERT_NE(0u, ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber());
+        ASSERT_NO_THROW(core.UnregisterPlugin("CPU"));
+        ASSERT_EQ(0u, ExecutorManager::getInstance()->getExecutorsNumber());
+        ASSERT_EQ(0u, ExecutorManager::getInstance()->getIdleCPUStreamsExecutorsNumber());
+    }
+}
diff --git a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_exec_graph_info.hpp b/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_exec_graph_info.hpp
new file mode 100644 (file)
index 0000000..6f91483
--- /dev/null
@@ -0,0 +1,155 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include "behavior_test_plugin.h"
+#include "details/ie_cnn_network_tools.h"
+#include "exec_graph_info.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+
+namespace {
+std::string getTestCaseName(testing::TestParamInfo<BehTestParams> obj) {
+    return obj.param.device + "_" + obj.param.input_blob_precision.name()
+           + (obj.param.config.size() ? "_" + obj.param.config.begin()->second : "");
+}
+}
+
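+// Splits `str` on `sep`; used below to parse the comma-separated ORIGINAL_NAMES
+// attribute of executable-graph layers.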
+inline std::vector<std::string> separateStrToVec(std::string str, const char sep) {
+    std::vector<std::string> result;
+
+    std::istringstream stream(str);
+    std::string strVal;
+
+    while (getline(stream, strVal, sep)) {
+        result.push_back(strVal);
+    }
+    return result;
+}
+
+
+TEST_P(BehaviorPluginTestExecGraphInfo, CheckExecGraphInfoBeforeExecution) {
+    auto param = GetParam();
+
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+
+    auto cnnNetwork = testEnv->network;
+    auto exeNetwork = testEnv->exeNetwork;
+
+    if (param.device == CommonTestUtils::DEVICE_CPU || param.device == CommonTestUtils::DEVICE_GPU) {
+        CNNNetwork execGraph;
+        ASSERT_NO_THROW(execGraph = exeNetwork.GetExecGraphInfo());
+
+        // Store all the original layers from the network
+        const std::vector<CNNLayerPtr> originalLayers = CNNNetSortTopologically(cnnNetwork);
+        std::map<std::string, int> originalLayersMap;
+        for (const auto &layer : originalLayers) {
+            originalLayersMap[layer->name] = 0;
+        }
+
+        // Store all the layers from the executable graph information represented as CNNNetwork
+        const std::vector<CNNLayerPtr> execGraphLayers = CNNNetSortTopologically(execGraph);
+        for (const auto &execLayer : execGraphLayers) {
+            // Each layer from the execGraphInfo network must have PM data option set
+            ASSERT_EQ("not_executed", execLayer->params[ExecGraphInfoSerialization::PERF_COUNTER]);
+
+            // Parse origin layer names (fused/merged layers) from the executable graph
+            // and compare with layers from the original model
+            auto origFromExecLayer = execLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES];
+            std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
+
+            std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
+                auto origLayer = originalLayersMap.find(layer);
+                ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
+                origLayer->second++;
+            } );
+        }
+        // All layers from the original IR must be present within ExecGraphInfo
+        for (auto& layer : originalLayersMap) {
+            ASSERT_GT(layer.second, 0);
+        }
+    } else {
+        // Not implemented for other plugins
+        ASSERT_THROW(exeNetwork.GetExecGraphInfo(), InferenceEngineException);
+    }
+}
+
+TEST_P(BehaviorPluginTestExecGraphInfo, CheckExecGraphInfoAfterExecution) {
+    auto param = GetParam();
+
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv,
+            {{ PluginConfigParams::KEY_PERF_COUNT, PluginConfigParams::YES }}));
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response));
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+
+    auto cnnNetwork = testEnv->network;
+    auto exeNetwork = testEnv->exeNetwork;
+
+    if (param.device == CommonTestUtils::DEVICE_CPU || param.device == CommonTestUtils::DEVICE_GPU) {
+        CNNNetwork execGraph;
+        ASSERT_NO_THROW(execGraph = exeNetwork.GetExecGraphInfo());
+
+        // Store all the original layers from the network
+        const std::vector<CNNLayerPtr> originalLayers = CNNNetSortTopologically(cnnNetwork);
+        std::map<std::string, int> originalLayersMap;
+        for (const auto &layer : originalLayers) {
+            originalLayersMap[layer->name] = 0;
+        }
+
+        // Store all the layers from the executable graph information represented as CNNNetwork
+        const std::vector<CNNLayerPtr> execGraphLayers = CNNNetSortTopologically(execGraph);
+        bool has_layer_with_valid_time = false;
+        for (const auto &execLayer : execGraphLayers) {
+            // At least one layer in the topology should be executed and have a valid perf counter value.
+            // std::stof throws for non-numeric values such as "not_executed", so only layers with a
+            // measured time set the flag below (std::atof would silently return 0.0 for them)
+            try {
+                float x = std::stof(execLayer->params[ExecGraphInfoSerialization::PERF_COUNTER]);
+                ASSERT_GE(x, 0.0f);
+                has_layer_with_valid_time = true;
+            } catch (std::exception&) { }
+
+            // Parse origin layer names (fused/merged layers) from the executable graph
+            // and compare with layers from the original model
+            auto origFromExecLayer = execLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES];
+            std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
+
+            std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
+                auto origLayer = originalLayersMap.find(layer);
+                ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
+                origLayer->second++;
+            } );
+        }
+
+        ASSERT_TRUE(has_layer_with_valid_time);
+
+        // All layers from the original IR must be present within ExecGraphInfo
+        for (auto& layer : originalLayersMap) {
+            ASSERT_GT(layer.second, 0);
+        }
+    } else {
+        // Not implemented for other plugins
+        ASSERT_THROW(exeNetwork.GetExecGraphInfo(), InferenceEngineException);
+    }
+}
+
+TEST_P(BehaviorPluginTestExecGraphInfo, CheckExecGraphInfoSerialization) {
+    auto param = GetParam();
+
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+
+    auto cnnNetwork = testEnv->network;
+    auto exeNetwork = testEnv->exeNetwork;
+
+    if (param.device == CommonTestUtils::DEVICE_CPU || param.device == CommonTestUtils::DEVICE_GPU) {
+        CNNNetwork execGraph;
+        ASSERT_NO_THROW(execGraph = exeNetwork.GetExecGraphInfo());
+        execGraph.serialize("exeNetwork.xml", "exeNetwork.bin");
+    } else {
+        // Not implemented for other plugins
+        ASSERT_THROW(exeNetwork.GetExecGraphInfo(), InferenceEngineException);
+    }
+}
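
The exec-graph checks above revolve around splitting the comma-separated ORIGINAL_NAMES value and counting how often each original layer is referenced. A minimal self-contained sketch of that parsing and bookkeeping, with hypothetical layer names standing in for a real model:

#include <iostream>
#include <map>
#include <sstream>
#include <string>
#include <vector>

// Split a string on 'sep', mirroring separateStrToVec() above.
std::vector<std::string> split(const std::string &str, char sep) {
    std::vector<std::string> result;
    std::istringstream stream(str);
    std::string token;
    while (std::getline(stream, token, sep)) {
        result.push_back(token);
    }
    return result;
}

int main() {
    // Hypothetical original layers and a fused exec-graph layer's ORIGINAL_NAMES value.
    std::map<std::string, int> originalLayers = {{"conv1", 0}, {"relu1", 0}};
    const std::string originalNames = "conv1,relu1";

    for (const auto &name : split(originalNames, ',')) {
        auto it = originalLayers.find(name);
        if (it != originalLayers.end()) {
            it->second++;  // mark the original layer as covered
        }
    }

    // Every original layer must be referenced at least once.
    for (const auto &kv : originalLayers) {
        std::cout << kv.first << " covered " << kv.second << " time(s)\n";
    }
    return 0;
}
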
diff --git a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request.hpp b/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request.hpp
new file mode 100644 (file)
index 0000000..38d520e
--- /dev/null
@@ -0,0 +1,615 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gmock/gmock.h>
+#include "behavior_test_plugin.h"
+#include <thread>
+
+using namespace std;
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
+namespace {
+std::string getTestCaseName(testing::TestParamInfo<BehTestParams> obj) {
+    return obj.param.device + "_" + obj.param.input_blob_precision.name()
+           + (obj.param.config.size() ? "_" + obj.param.config.begin()->second : "");
+}
+}
+
+// Setting an empty config for LoadNetwork must not throw
+TEST_P(BehaviorPluginTestInferRequest, SetEmptyConfig) {
+    InferenceEngine::Core core;
+
+    const std::string device = GetParam().device;
+    ASSERT_NO_THROW(core.SetConfig(GetParam().config, GetParam().device));
+
+    InferenceEngine::CNNNetwork cnnNetwork = core.ReadNetwork(GetParam().model_xml_str, GetParam().weights_blob);
+    InferenceEngine::IExecutableNetwork::Ptr exeNetwork;
+    std::map<std::string, std::string> config;
+    if (device.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos &&
+        device.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) {
+        ASSERT_NO_THROW(exeNetwork = core.LoadNetwork(cnnNetwork, GetParam().device, config));
+    } else {
+        ASSERT_NO_THROW(exeNetwork = core.LoadNetwork(cnnNetwork, GetParam().device, GetParam().config));
+    }
+}
+
+// Loading an incorrect network (without weights) to the plugin must fail
+TEST_P(BehaviorPluginTestInferRequest, canNotLoadNetworkToGetExeNetworkWithoutWeights) {
+    InferenceEngine::Core core;
+    CNNNetwork network = core.ReadNetwork(GetParam().model_xml_str, Blob::CPtr());
+
+    ASSERT_THROW(core.LoadNetwork(network, GetParam().device, GetParam().config),
+                 InferenceEngineException);
+}
+
+// Load a correct network to the plugin to get an executable network
+TEST_P(BehaviorPluginTestInferRequest, canLoadCorrectNetworkToGetExecutable) {
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork cnnNetwork = core.ReadNetwork(GetParam().model_xml_str, GetParam().weights_blob);
+    ASSERT_NO_THROW(core.LoadNetwork(cnnNetwork, GetParam().device, GetParam().config));
+}
+
+TEST_P(BehaviorPluginTestInferRequest, CanCreateTwoExeNetworks) {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork cnnNetwork = core.ReadNetwork(GetParam().model_xml_str, GetParam().weights_blob);
+
+    for (auto i = 0; i < 2; i++) {
+        ASSERT_NO_THROW(core.LoadNetwork(cnnNetwork, GetParam().device, GetParam().config));
+    }
+}
+
+TEST_P(BehaviorPluginTestInferRequest, CanCreateInferRequest) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+}
+
+TEST_P(BehaviorPluginTestInferRequest, failToSetNullptrForInput) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr inputBlob = nullptr;
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), inputBlob, &response));
+    ASSERT_EQ(StatusCode::GENERAL_ERROR, sts);
+    std::string refError = NOT_ALLOCATED_str + "Failed to set empty blob with name: \'" + testEnv->inputName + "\'";
+    response.msg[refError.length()] = '\0';
+    ASSERT_EQ(refError, response.msg);
+}
+
+TEST_P(BehaviorPluginTestInferRequest, failToSetEmptyInputBlob) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr blob;
+    sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), blob, &response);
+    ASSERT_EQ(StatusCode::GENERAL_ERROR, sts);
+    std::string refError = NOT_ALLOCATED_str + "Failed to set empty blob with name: \'" + testEnv->inputName + "\'";
+    response.msg[refError.length()] = '\0';
+    ASSERT_EQ(refError, response.msg);
+}
+
+TEST_P(BehaviorPluginTestInferRequest, failToSetEmptyOutputBlob) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr blob;
+    sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), blob, &response);
+    ASSERT_EQ(StatusCode::GENERAL_ERROR, sts);
+    std::string refError = NOT_ALLOCATED_str + "Failed to set empty blob with name: \'" + testEnv->outputName + "\'";
+    response.msg[refError.length()] = '\0';
+    ASSERT_EQ(refError, response.msg);
+}
+
+TEST_P(BehaviorPluginTestInferRequest, failToSetNotAllocatedInput) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr input = makeNotAllocatedBlob(GetParam().input_blob_precision,
+                                           TensorDesc::getLayoutByDims(testEnv->inputDims), testEnv->inputDims);
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), input, &response));
+    std::string refError = "Input data was not allocated. Input name: \'" + testEnv->inputName + "\'";
+    ASSERT_EQ(StatusCode::GENERAL_ERROR, sts);
+    response.msg[refError.length()] = '\0';
+    ASSERT_EQ(refError, response.msg);
+}
+
+TEST_P(BehaviorPluginTestInferRequest, failToSetNotAllocatedOutput) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr output = makeNotAllocatedBlob(GetParam().input_blob_precision,
+                                            TensorDesc::getLayoutByDims(testEnv->outputDims), testEnv->outputDims);
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), output, &response));
+    std::string refError = "Input data was not allocated. Input name: \'" + testEnv->outputName + "\'";
+    ASSERT_EQ(StatusCode::GENERAL_ERROR, sts);
+    response.msg[refError.length()] = '\0';
+    ASSERT_EQ(refError, response.msg);
+}
+
+TEST_P(BehaviorPluginTestInferRequest, failToSetBlobWithIncorrectName) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    auto input = makeNotAllocatedBlob(GetParam().input_blob_precision, TensorDesc::getLayoutByDims(testEnv->inputDims),
+                                      testEnv->inputDims);
+    input->allocate();
+    sts = testEnv->inferRequest->SetBlob(FuncTestUtils::TestModel::incorrect_input_name, input, &response);
+    ASSERT_EQ(StatusCode::GENERAL_ERROR, sts);
+    std::string refError =
+            NOT_FOUND_str + "Failed to find input or output with name: \'" +
+            FuncTestUtils::TestModel::incorrect_input_name + "\'";
+    response.msg[refError.length()] = '\0';
+    ASSERT_EQ(refError, response.msg);
+}
+
+TEST_P(BehaviorPluginTestInferRequest, failToSetInputWithIncorrectSizes) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    SizeVector incorrectSizes = testEnv->inputDims;
+    /* Use 2x the size of the first dim to simulate an input blob of a different size */
+    incorrectSizes[0] *= 2;
+    auto input = makeNotAllocatedBlob(GetParam().input_blob_precision, TensorDesc::getLayoutByDims(incorrectSizes),
+                                      incorrectSizes);
+    input->allocate();
+    int in_size = std::accumulate(testEnv->inputDims.begin(), testEnv->inputDims.end(), 1, std::multiplies<int>());
+    std::string refError = "Input blob size is not equal network input size (" + std::to_string(input->size()) + "!=" +
+                           std::to_string(in_size) + ").";
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), input, &response));
+    ASSERT_EQ(StatusCode::GENERAL_ERROR, sts);
+    response.msg[refError.length()] = '\0';
+    ASSERT_EQ(refError, response.msg);
+}
+
+TEST_P(BehaviorPluginTestInferRequest, failToSetOutputWithIncorrectSizes) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    SizeVector incorrectSizes = testEnv->outputDims;
+    /* Use 2x the size of the first dim to simulate an output blob of a different size */
+    incorrectSizes[0] *= 2;
+    Blob::Ptr output = _prepareOutputBlob(GetParam().input_blob_precision, incorrectSizes);
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), output, &response));
+    int out_size = std::accumulate(testEnv->outputDims.begin(), testEnv->outputDims.end(), 1, std::multiplies<int>());
+    std::string refError =
+            "Output blob size is not equal network output size (" + std::to_string(output->size()) + "!=" +
+            std::to_string(out_size) + ").";
+    ASSERT_EQ(StatusCode::GENERAL_ERROR, sts);
+    response.msg[refError.length()] = '\0';
+    ASSERT_EQ(refError, response.msg);
+}
+
+TEST_P(BehaviorPluginTestInferRequest, failToSetInputBlobWithPrecisionNotMatchInputPrecision) {
+    std::string refError;
+    if (GetParam().device != CommonTestUtils::DEVICE_CPU) {
+        // Non-CPU plugins reject blobs whose precision does not correspond to the user input precision
+        refError =
+                PARAMETER_MISMATCH_str + "Failed to set Blob with precision not corresponding to user input precision";
+    } else {
+        // MKLDNNPlugin now supports input blobs with a format other than the network format, so that
+        // error is gone, but it still rejects Precision::UNSPECIFIED blobs; only the prefix is checked
+        refError = PARAMETER_MISMATCH_str + "Failed to set Blob with precision";
+    }
+
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    auto inputBlob = prepareInputBlob(Precision::UNSPECIFIED, testEnv->inputDims);
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), inputBlob, &response));
+    ASSERT_EQ(StatusCode::GENERAL_ERROR, sts);
+    response.msg[refError.length()] = '\0';
+
+    if (GetParam().device != CommonTestUtils::DEVICE_CPU) {
+        ASSERT_EQ(refError, response.msg);
+    } else {
+        ASSERT_STR_CONTAINS(response.msg, refError);
+    }
+}
+
+TEST_P(BehaviorPluginTestInferRequest, failToSetOutputBlobWithPrecisionNotMatchOutputPrecision) {
+    std::string refError =
+            PARAMETER_MISMATCH_str + "Failed to set Blob with precision not corresponding to user output precision";
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    auto outputBlob = _prepareOutputBlob(Precision::UNSPECIFIED, testEnv->outputDims);
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), outputBlob, &response));
+    ASSERT_EQ(StatusCode::GENERAL_ERROR, sts);
+    response.msg[refError.length()] = '\0';
+    ASSERT_EQ(refError, response.msg);
+}
+
+TEST_P(BehaviorPluginTestInferRequest, canInferWithoutSetAndGetInOut) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response));
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+}
+
+TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedInputBlobAfterGetBlob) {
+    std::string refError = "Input data was not allocated";
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr blob;
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), blob, &response));
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    blob->deallocate();
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response));
+    ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg;
+    EXPECT_THAT(std::string(response.msg), HasSubstr(refError));
+}
+
+TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedInputBlobAfterGetBlobForAsync) {
+    std::string refError = "Input data was not allocated";
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr blob;
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), blob, &response));
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    blob->deallocate();
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->StartAsync(&response));
+    ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg;
+    EXPECT_THAT(std::string(response.msg), HasSubstr(refError));
+}
+
+TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedInputBlobAfterGetAndSetBlob) {
+    std::string refError = "Input data was not allocated";
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr blob;
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), blob, &response));
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), blob, &response));
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    blob->deallocate();
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response));
+    ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg;
+    EXPECT_THAT(std::string(response.msg), HasSubstr(refError));
+}
+
+TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedInputBlobAfterSetBlob) {
+    std::string refError = "Input data was not allocated";
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    auto blob = makeNotAllocatedBlob(GetParam().input_blob_precision, TensorDesc::getLayoutByDims(testEnv->inputDims),
+                                     testEnv->inputDims);
+    blob->allocate();
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), blob, &response));
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    blob->deallocate();
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response));
+    ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg;
+    EXPECT_THAT(std::string(response.msg), HasSubstr(refError));
+}
+
+TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedOutputBlobAfterGetBlob) {
+    std::string refError = "Output data was not allocated";
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr blob;
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), blob, &response));
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    blob->deallocate();
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response));
+    ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg;
+    EXPECT_THAT(std::string(response.msg), HasSubstr(refError));
+}
+
+TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedOutputBlobAfterGetBlobForAsync) {
+    std::string refError = "Output data was not allocated";
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr blob;
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), blob, &response));
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    blob->deallocate();
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->StartAsync(&response));
+    ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg;
+    EXPECT_THAT(std::string(response.msg), HasSubstr(refError));
+}
+
+TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedOutputBlobAfterGetAndSetBlob) {
+    std::string refError = "Output data was not allocated";
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr blob;
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), blob, &response));
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), blob, &response));
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    blob->deallocate();
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response));
+    ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg;
+    EXPECT_THAT(std::string(response.msg), HasSubstr(refError));
+}
+
+TEST_P(BehaviorPluginTestInferRequest, canProcessDeallocatedOutputBlobAfterSetBlob) {
+    std::string refError = "Output data was not allocated";
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    auto blob = makeNotAllocatedBlob(GetParam().output_blob_precision, TensorDesc::getLayoutByDims(testEnv->outputDims),
+                                     testEnv->outputDims);
+    blob->allocate();
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), blob, &response));
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    blob->deallocate();
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response));
+    ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg;
+    EXPECT_THAT(std::string(response.msg), HasSubstr(refError));
+}
+
+TEST_P(BehaviorPluginTestInferRequest, DISABLED_secondCallGetOutputDoNotReAllocateData) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr getBlob1;
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), getBlob1, &response));
+    Blob::Ptr getBlob2;
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), getBlob2, &response));
+    ASSERT_EQ(getBlob1.get(), getBlob2.get());
+}
+
+TEST_P(BehaviorPluginTestInferRequest, CorrectOneAsyncInferWithGetInOutWithInfWait) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr input;
+    Blob::Ptr result;
+    testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
+
+    sts = testEnv->inferRequest->StartAsync(&response);
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+
+    sts = testEnv->inferRequest->Wait(IInferRequest::WaitMode::RESULT_READY, &response);
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+
+    testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), result, &response);
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+}
+
+// The plugin should run an infer request correctly when input and result BlobMaps are allocated inside the plugin
+TEST_P(BehaviorPluginTestInferRequest, canStartAsyncInferWithGetInOutWithStatusOnlyWait) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr input;
+    Blob::Ptr result;
+    testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
+
+    sts = testEnv->inferRequest->StartAsync(&response);
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+
+    sts = testEnv->inferRequest->Wait(IInferRequest::WaitMode::STATUS_ONLY, &response);
+    ASSERT_TRUE(sts == StatusCode::OK || sts == StatusCode::RESULT_NOT_READY) << response.msg;
+}
+
+// Wait() with an arbitrary negative timeout (not a predefined WaitMode value) must fail
+TEST_P(BehaviorPluginTestInferRequest, FailedAsyncInferWithNegativeTimeForWait) {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    std::string refError = PARAMETER_MISMATCH_str;
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr input;
+    Blob::Ptr result;
+    testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
+
+    sts = testEnv->inferRequest->StartAsync(&response);
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->Wait(-2, &response));
+    ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << response.msg;
+    response.msg[refError.length()] = '\0';
+    ASSERT_EQ(refError, response.msg);
+}
+
+TEST_P(BehaviorPluginTestInferRequest, canRun3SyncRequestsConsistentlyFromThreads) {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    IInferRequest::Ptr inferRequest2;
+    static_cast<IExecutableNetwork::Ptr &>(testEnv->exeNetwork)->CreateInferRequest(inferRequest2, &response);
+    ASSERT_NE(inferRequest2, nullptr) << response.msg;
+    IInferRequest::Ptr inferRequest3;
+    static_cast<IExecutableNetwork::Ptr &>(testEnv->exeNetwork)->CreateInferRequest(inferRequest3, &response);
+    ASSERT_NE(inferRequest3, nullptr) << response.msg;
+
+    Blob::Ptr input1;
+    testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input1, &response);
+    inferRequest2->SetBlob(testEnv->inputName.c_str(), input1, &response);
+    inferRequest3->SetBlob(testEnv->inputName.c_str(), input1, &response);
+
+    InferenceEngine::ResponseDesc response1, response2, response3;
+    InferenceEngine::StatusCode sts1, sts2, sts3;
+    std::thread t1([&] { sts1 = testEnv->inferRequest->Infer(&response1); });
+    std::thread t2([&] { sts2 = inferRequest2->Infer(&response2); });
+    std::thread t3([&] { sts3 = inferRequest3->Infer(&response3); });
+
+    t1.join();
+    t2.join();
+    t3.join();
+
+    ASSERT_EQ((int) StatusCode::OK, sts1) << response1.msg;
+    ASSERT_EQ((int) StatusCode::OK, sts2) << response2.msg;
+    ASSERT_EQ((int) StatusCode::OK, sts3) << response3.msg;
+}
+
+TEST_P(BehaviorPluginTestInferRequest, canRun3AsyncRequestsConsistentlyWithWait) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    IInferRequest::Ptr inferRequest2;
+    static_cast<IExecutableNetwork::Ptr &>(testEnv->exeNetwork)->CreateInferRequest(inferRequest2, &response);
+    ASSERT_NE(inferRequest2, nullptr) << response.msg;
+    IInferRequest::Ptr inferRequest3;
+    static_cast<IExecutableNetwork::Ptr &>(testEnv->exeNetwork)->CreateInferRequest(inferRequest3, &response);
+    ASSERT_NE(inferRequest3, nullptr) << response.msg;
+    Blob::Ptr input1;
+    testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input1, &response);
+    inferRequest2->SetBlob(testEnv->inputName.c_str(), input1, &response);
+    inferRequest3->SetBlob(testEnv->inputName.c_str(), input1, &response);
+
+    sts = testEnv->inferRequest->StartAsync(&response);
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    sts = inferRequest2->StartAsync(&response);
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    sts = inferRequest3->StartAsync(&response);
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    sts = testEnv->inferRequest->Wait(IInferRequest::WaitMode::RESULT_READY, &response);
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    sts = inferRequest2->Wait(IInferRequest::WaitMode::RESULT_READY, &response);
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    sts = inferRequest3->Wait(IInferRequest::WaitMode::RESULT_READY, &response);
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+}
+
+TEST_P(BehaviorPluginTestInferRequest, canRun3AsyncRequestsConsistentlyFromThreadsWithoutWait) {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    IInferRequest::Ptr inferRequest2;
+    static_cast<IExecutableNetwork::Ptr &>(testEnv->exeNetwork)->CreateInferRequest(inferRequest2, &response);
+    ASSERT_NE(inferRequest2, nullptr) << response.msg;
+    IInferRequest::Ptr inferRequest3;
+    static_cast<IExecutableNetwork::Ptr &>(testEnv->exeNetwork)->CreateInferRequest(inferRequest3, &response);
+    ASSERT_NE(inferRequest3, nullptr) << response.msg;
+    Blob::Ptr input1;
+    testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input1, &response);
+    inferRequest2->SetBlob(testEnv->inputName.c_str(), input1, &response);
+    inferRequest3->SetBlob(testEnv->inputName.c_str(), input1, &response);
+
+    InferenceEngine::ResponseDesc response1, response2, response3;
+    InferenceEngine::StatusCode sts1, sts2, sts3;
+    std::thread t1([&] { sts1 = testEnv->inferRequest->StartAsync(&response1); });
+    std::thread t2([&] { sts2 = inferRequest2->StartAsync(&response2); });
+    std::thread t3([&] { sts3 = inferRequest3->StartAsync(&response3); });
+
+    t1.join();
+    t2.join();
+    t3.join();
+
+    ASSERT_EQ((int) StatusCode::OK, sts1) << response1.msg;
+    ASSERT_EQ((int) StatusCode::OK, sts2) << response2.msg;
+    ASSERT_EQ((int) StatusCode::OK, sts3) << response3.msg;
+}
+
+TEST_P(BehaviorPluginTestInferRequest, canWaitWithoutStartAsync) {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    sts = testEnv->inferRequest->Wait(IInferRequest::WaitMode::RESULT_READY, &response);
+    ASSERT_EQ(StatusCode::INFER_NOT_STARTED, sts);
+    sts = testEnv->inferRequest->Wait(IInferRequest::WaitMode::STATUS_ONLY, &response);
+    ASSERT_EQ(StatusCode::INFER_NOT_STARTED, sts);
+    sts = testEnv->inferRequest->Wait(1, &response);
+    ASSERT_EQ(StatusCode::INFER_NOT_STARTED, sts);
+}
+
+TEST_P(BehaviorPluginTestInferRequest, returnDeviceBusyOnSetBlobAfterAsyncInfer) {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr input;
+    sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
+    ASSERT_EQ((int) StatusCode::OK, sts) << response.msg;
+
+    sts = testEnv->inferRequest->Wait(IInferRequest::WaitMode::STATUS_ONLY, &response);
+    ASSERT_EQ(StatusCode::INFER_NOT_STARTED, sts) << response.msg;
+
+    std::map<std::string, InferenceEngineProfileInfo> perfMap;
+
+    sts = testEnv->inferRequest->StartAsync(&response);
+    ASSERT_EQ((int) StatusCode::OK, sts) << response.msg;
+
+    sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), input, &response);
+    if (sts == StatusCode::REQUEST_BUSY) {
+        ASSERT_TRUE(_wasDeviceBusy(response));
+    } else {
+        ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    }
+    response.msg[0] = 0;
+
+    sts = testEnv->inferRequest->Wait(IInferRequest::WaitMode::STATUS_ONLY, &response);
+    ASSERT_TRUE(sts == StatusCode::OK || sts == StatusCode::RESULT_NOT_READY) << response.msg;
+}
+
+TEST_P(BehaviorPluginTestInferRequest, returnDeviceBusyOnGetBlobAfterAsyncInfer) {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr input;
+    testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
+    ResponseDesc response2;
+
+    sts = testEnv->inferRequest->StartAsync(&response);
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response2);
+    if (sts == StatusCode::REQUEST_BUSY)
+        ASSERT_TRUE(_wasDeviceBusy(response2));
+    else
+        ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+}
+
+TEST_P(BehaviorPluginTestInferRequest, returnDeviceBusyOnGetPerformanceCountAfterAsyncInfer) {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr input;
+    testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
+    std::map<std::string, InferenceEngineProfileInfo> perfMap;
+    ResponseDesc response2;
+
+    sts = testEnv->inferRequest->StartAsync(&response);
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    sts = testEnv->inferRequest->GetPerformanceCounts(perfMap, &response2);
+    if (sts == StatusCode::REQUEST_BUSY)
+        ASSERT_TRUE(_wasDeviceBusy(response2));
+    else
+        ASSERT_EQ(StatusCode::OK, sts);
+}
+
+TEST_P(BehaviorPluginTestInferRequest, returnDeviceBusyOnStartInferAfterAsyncInfer) {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr input;
+    testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
+    ResponseDesc response2;
+
+    sts = testEnv->inferRequest->StartAsync(&response);
+    ASSERT_EQ(StatusCode::OK, sts);
+    sts = testEnv->inferRequest->StartAsync(&response2);
+    if (sts == StatusCode::REQUEST_BUSY)
+        ASSERT_TRUE(_wasDeviceBusy(response2));
+    else
+        ASSERT_EQ(StatusCode::OK, sts);
+}
+
+TEST_P(BehaviorPluginTestInferRequest, returnDeviceBusyOnGetUserDataAfterAsyncInfer) {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr input;
+    testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
+    ResponseDesc response2;
+
+    sts = testEnv->inferRequest->StartAsync(&response);
+    ASSERT_EQ(StatusCode::OK, sts);
+    testEnv->inferRequest->GetUserData(nullptr, &response2);
+    auto waitStatus = testEnv->inferRequest->Wait(IInferRequest::WaitMode::STATUS_ONLY, &response);
+    if (waitStatus == StatusCode::RESULT_NOT_READY)
+        ASSERT_TRUE(_wasDeviceBusy(response2));
+    else
+        ASSERT_TRUE(waitStatus == StatusCode::OK);
+}
+
+TEST_P(BehaviorPluginTestInferRequest, returnDeviceBusyOnSetUserDataAfterAsyncInfer) {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr input;
+    testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
+    ResponseDesc response2;
+
+    sts = testEnv->inferRequest->StartAsync(&response);
+    ASSERT_EQ(StatusCode::OK, sts);
+    testEnv->inferRequest->SetUserData(nullptr, &response2);
+    auto waitStatus = testEnv->inferRequest->Wait(IInferRequest::WaitMode::STATUS_ONLY, &response);
+    if (waitStatus == StatusCode::RESULT_NOT_READY)
+        ASSERT_TRUE(_wasDeviceBusy(response2));
+    else
+        ASSERT_TRUE(waitStatus == StatusCode::OK);
+}
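
All of the async tests above exercise the same contract of the deprecated IInferRequest API: StartAsync() queues the request, Wait(STATUS_ONLY) polls without blocking, and Wait(RESULT_READY) blocks until completion. A condensed sketch of that round trip, assuming an already-created request and the 2020-era ie_iinfer_request.hpp header, with error details elided:

#include <ie_iinfer_request.hpp>

using namespace InferenceEngine;

// One async round trip over the C-style API used in these tests.
StatusCode runAsyncOnce(IInferRequest::Ptr request) {
    ResponseDesc response;
    StatusCode sts = request->StartAsync(&response);
    if (sts != StatusCode::OK) return sts;

    // Non-blocking poll: OK and RESULT_NOT_READY are both acceptable here.
    sts = request->Wait(IInferRequest::WaitMode::STATUS_ONLY, &response);
    if (sts != StatusCode::OK && sts != StatusCode::RESULT_NOT_READY) return sts;

    // Blocking wait for the final result.
    return request->Wait(IInferRequest::WaitMode::RESULT_READY, &response);
}
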
diff --git a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_callback.hpp b/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_callback.hpp
new file mode 100644 (file)
index 0000000..08c98f7
--- /dev/null
@@ -0,0 +1,413 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin.h"
+#include <mutex>
+#include <condition_variable>
+
+using namespace std;
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
+namespace {
+std::string getTestCaseName(testing::TestParamInfo<BehTestParams> obj) {
+    return obj.param.device + "_" + obj.param.input_blob_precision.name()
+                + (obj.param.config.size() ? "_" + obj.param.config.begin()->second : "");
+}
+}
+
+TEST_P(BehaviorPluginTestInferRequestCallback, canGetWithNullptr) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    ASSERT_NO_FATAL_FAILURE(testEnv->inferRequest->GetUserData(nullptr, nullptr));
+}
+
+TEST_P(BehaviorPluginTestInferRequestCallback, canSetAndGetUserData) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    bool setBool = true;
+    bool *getBool = nullptr;
+
+    auto set_sts = testEnv->inferRequest->SetUserData(&setBool, nullptr);
+    auto get_sts = testEnv->inferRequest->GetUserData((void **) &getBool, nullptr);
+    ASSERT_NE(getBool, nullptr);
+    ASSERT_TRUE(*getBool);
+    ASSERT_EQ((int) StatusCode::OK, get_sts);
+    ASSERT_EQ((int) StatusCode::OK, set_sts);
+}
+
+TEST_P(BehaviorPluginTestInferRequestCallback, canCallSyncAndAsyncWithCompletionCallback) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+
+    sts = testEnv->inferRequest->Infer(&response);
+    ASSERT_EQ((int) StatusCode::OK, sts) << response.msg;
+
+    bool isCalled = false;
+    InferRequest cppRequest(testEnv->inferRequest);
+    cppRequest.SetCompletionCallback<std::function<void(InferRequest, StatusCode)>>([&](InferRequest request, StatusCode status) {
+        // HSD_1805940120: Wait on a starting callback returns HDDL_ERROR_INVAL_TASK_HANDLE
+        if (GetParam().device != CommonTestUtils::DEVICE_HDDL) {
+            ASSERT_EQ((int) StatusCode::OK, status);
+        }
+        isCalled = true;
+    });
+
+    sts = testEnv->inferRequest->StartAsync(nullptr);
+    StatusCode waitStatus = testEnv->inferRequest->Wait(IInferRequest::WaitMode::RESULT_READY, nullptr);
+
+    ASSERT_EQ((int) StatusCode::OK, sts);
+    ASSERT_EQ((int) StatusCode::OK, waitStatus);
+    ASSERT_TRUE(isCalled);
+}
+
+// Test that the destructor can wait for all callbacks.
+// TODO: check that it is able to wait without invoking the callback tasks; currently it is not!
+TEST_P(BehaviorPluginTestInferRequestCallback, canStartAsyncInsideCompletionCallback) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    struct TestUserData {
+        bool startAsyncOK = false;
+        bool getUserDataOK = false;
+        int numIsCalled = 0;
+        string device;
+    };
+    TestUserData data;
+    data.device = GetParam().device;
+    testEnv->inferRequest->SetUserData(&data, nullptr);
+    testEnv->inferRequest->SetCompletionCallback(
+            [](InferenceEngine::IInferRequest::Ptr request, StatusCode status) {
+                TestUserData *userData = nullptr;
+                ResponseDesc desc;
+                StatusCode sts = request->GetUserData((void **) &userData, &desc);
+                ASSERT_EQ((int) StatusCode::OK, sts) << desc.msg;
+                if (sts == StatusCode::OK) {
+                    userData->getUserDataOK = true;
+                }
+                // HSD_1805940120: Wait on a starting callback returns HDDL_ERROR_INVAL_TASK_HANDLE
+                if (userData->device != CommonTestUtils::DEVICE_HDDL) {
+                    ASSERT_EQ((int) StatusCode::OK, status);
+                }
+                userData->numIsCalled++;
+                // WA for deadlock
+                request->SetCompletionCallback(nullptr);
+                sts = request->StartAsync(nullptr);
+                if (sts == StatusCode::OK) {
+                    userData->startAsyncOK = true;
+                }
+            });
+
+    sts = testEnv->inferRequest->StartAsync(&response);
+    ResponseDesc responseWait;
+    StatusCode waitStatus = testEnv->inferRequest->Wait(IInferRequest::WaitMode::RESULT_READY, &responseWait);
+
+    ASSERT_EQ((int) StatusCode::OK, sts) << response.msg;
+    ASSERT_EQ((int) StatusCode::OK, waitStatus) << responseWait.msg;
+    ASSERT_EQ(1, data.numIsCalled);
+    ASSERT_TRUE(data.startAsyncOK);
+    ASSERT_TRUE(data.getUserDataOK);
+}
+
+// TODO: test that callback tasks are not destroyed while someone is waiting on them
+
+// Test that the destructor can wait for all callbacks.
+TEST_P(BehaviorPluginTestInferRequestCallback, canStartSeveralAsyncInsideCompletionCallbackWithSafeDtor) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    const int NUM_ITER = 10;
+    struct TestUserData {
+        int numIter = NUM_ITER;
+        bool startAsyncOK = true;
+        bool getDataOK = true;
+        int numIsCalled = 0;
+        std::mutex mutex_block_emulation;
+        std::condition_variable cv_block_emulation;
+        bool isBlocked = true;
+        string device;
+    };
+    TestUserData data;
+    data.device = GetParam().device;
+    testEnv->inferRequest->SetUserData(&data, nullptr);
+    testEnv->inferRequest->SetCompletionCallback(
+            [](InferenceEngine::IInferRequest::Ptr request, StatusCode status) {
+                TestUserData *userData = nullptr;
+                StatusCode sts = request->GetUserData((void **) &userData, nullptr);
+                if (sts != StatusCode::OK) {
+                    userData->getDataOK = false;
+                }
+                // HSD_1805940120: Wait on a starting callback returns HDDL_ERROR_INVAL_TASK_HANDLE
+                if (userData->device != CommonTestUtils::DEVICE_HDDL) {
+                    ASSERT_EQ((int) StatusCode::OK, status);
+                }
+                if (--userData->numIter) {
+                    sts = request->StartAsync(nullptr);
+                    if (sts != StatusCode::OK) {
+                        userData->startAsyncOK = false;
+                    }
+                }
+                userData->numIsCalled++;
+                if (!userData->numIter) {
+                    userData->isBlocked = false;
+                    userData->cv_block_emulation.notify_all();
+                }
+            });
+
+    sts = testEnv->inferRequest->StartAsync(nullptr);
+    StatusCode waitStatus = testEnv->inferRequest->Wait(IInferRequest::WaitMode::RESULT_READY, nullptr);
+    // intentionally block until notification from callback
+    std::unique_lock<std::mutex> lock(data.mutex_block_emulation);
+    data.cv_block_emulation.wait(lock, [&]() { return !data.isBlocked; });
+
+    ASSERT_EQ((int) StatusCode::OK, sts);
+    ASSERT_EQ((int) StatusCode::OK, waitStatus);
+
+    ASSERT_EQ(NUM_ITER, data.numIsCalled);
+    ASSERT_TRUE(data.startAsyncOK);
+    ASSERT_TRUE(data.getDataOK);
+}
+
+// Test that the destructor can wait for all callbacks.
+// FIXME: CVS-8956, the DLL is unloaded before the infer request finishes
+TEST_P(BehaviorPluginTestInferRequestCallback, DISABLED_canStartSeveralAsyncInsideCompletionCallbackNoSafeDtor) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    const int NUM_ITER = 10;
+    struct TestUserData {
+        int numIter = NUM_ITER;
+        bool startAsyncOK = true;
+        bool getDataOK = true;
+        int numIsCalled = 0;
+        std::mutex mutex_block_emulation;
+        std::condition_variable cv_block_emulation;
+        bool isBlocked = true;
+    };
+    TestUserData data;
+    testEnv->inferRequest->SetUserData(&data, nullptr);
+    testEnv->inferRequest->SetCompletionCallback(
+            [](InferenceEngine::IInferRequest::Ptr request, StatusCode status) {
+                TestUserData *userData = nullptr;
+                StatusCode sts = request->GetUserData((void **) &userData, nullptr);
+                if (sts != StatusCode::OK) {
+                    userData->getDataOK = false;
+                }
+                // WA for deadlock
+                if (!--userData->numIter) {
+                    request->SetCompletionCallback(nullptr);
+                }
+                sts = request->StartAsync(nullptr);
+                if (sts != StatusCode::OK) {
+                    userData->startAsyncOK = false;
+                }
+                userData->numIsCalled++;
+                if (!userData->numIter) {
+                    userData->isBlocked = false;
+                    userData->cv_block_emulation.notify_all();
+                }
+            });
+
+    sts = testEnv->inferRequest->StartAsync(nullptr);
+    StatusCode waitStatus = testEnv->inferRequest->Wait(IInferRequest::WaitMode::RESULT_READY, nullptr);
+    testEnv->inferRequest = nullptr;
+
+    // intentionally block until notification from callback
+    std::unique_lock<std::mutex> lock(data.mutex_block_emulation);
+    data.cv_block_emulation.wait(lock, [&]() { return !data.isBlocked; });
+
+    ASSERT_EQ((int) StatusCode::OK, sts);
+    ASSERT_EQ((int) StatusCode::OK, waitStatus);
+
+    ASSERT_EQ(NUM_ITER, data.numIsCalled);
+    ASSERT_TRUE(data.startAsyncOK);
+    ASSERT_TRUE(data.getDataOK);
+}
+
+// Test that the destructor can wait for all callbacks.
+// FIXME: CVS-8956, the DLL is unloaded before the infer request finishes
+TEST_P(BehaviorPluginTestInferRequest, DISABLED_canStartSeveralAsyncInsideCompletionCallbackNoSafeDtorWithoutWait) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    const int NUM_ITER = 1;
+    struct TestUserData {
+        int numIter = NUM_ITER;
+        bool startAsyncOK = true;
+        bool getDataOK = true;
+        int numIsCalled = 0;
+        std::mutex mutex_block_emulation;
+        std::condition_variable cv_block_emulation;
+        bool isBlocked = true;
+    };
+    TestUserData data;
+    testEnv->inferRequest->SetUserData(&data, nullptr);
+    testEnv->inferRequest->SetCompletionCallback(
+            [](InferenceEngine::IInferRequest::Ptr request, StatusCode status) {
+                TestUserData *userData = nullptr;
+                StatusCode sts = request->GetUserData((void **) &userData, nullptr);
+                if (sts != StatusCode::OK) {
+                    userData->getDataOK = false;
+                }
+                // WA for deadlock
+                if (!--userData->numIter) {
+                    request->SetCompletionCallback(nullptr);
+                }
+                sts = request->StartAsync(nullptr);
+                if (sts != StatusCode::OK) {
+                    userData->startAsyncOK = false;
+                }
+                userData->numIsCalled++;
+                if (!userData->numIter) {
+                    userData->isBlocked = false;
+                    userData->cv_block_emulation.notify_all();
+                }
+            });
+
+    sts = testEnv->inferRequest->StartAsync(nullptr);
+    testEnv->inferRequest = nullptr;
+    testEnv = nullptr;
+
+    // intentionally block until notification from callback
+    std::unique_lock<std::mutex> lock(data.mutex_block_emulation);
+    data.cv_block_emulation.wait(lock, [&]() { return !data.isBlocked; });
+
+    ASSERT_EQ((int) StatusCode::OK, sts);
+
+    ASSERT_EQ(NUM_ITER, data.numIsCalled);
+    ASSERT_TRUE(data.startAsyncOK);
+    ASSERT_TRUE(data.getDataOK);
+}
+
+// DEAD LOCK with Wait
+TEST_P(BehaviorPluginTestInferRequestCallback, DISABLED_canStartSeveralAsyncInsideCompletionCallbackWithWaitInside) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    const int NUM_ITER = 10;
+    struct TestUserData {
+        int numIter = NUM_ITER;
+        bool startAsyncOK = true;
+        bool waitOK = true;
+        int numIsCalled = 0;
+        std::mutex mutex_block_emulation;
+        std::condition_variable cv_block_emulation;
+        bool isBlocked = true;
+    };
+    TestUserData data;
+    testEnv->inferRequest->SetUserData(&data, nullptr);
+    testEnv->inferRequest->SetCompletionCallback(
+            [](InferenceEngine::IInferRequest::Ptr request, StatusCode status) {
+                TestUserData *userData = nullptr;
+                StatusCode sts = request->GetUserData((void **) &userData, nullptr);
+                if (sts == StatusCode::OK) {
+                    userData->numIsCalled++;
+                }
+                // WA for deadlock
+                if (!--userData->numIter) {
+                    request->SetCompletionCallback(nullptr);
+                    userData->isBlocked = false;
+                    userData->cv_block_emulation.notify_all();
+                }
+                sts = request->StartAsync(nullptr);
+                if (sts != StatusCode::OK) {
+                    userData->startAsyncOK = false;
+                }
+                if (userData->numIter % 2) {
+                    sts = request->Wait(IInferRequest::WaitMode::RESULT_READY, nullptr);
+                    if (sts != StatusCode::OK) {
+                        userData->waitOK = false;
+                    }
+                }
+            });
+
+    sts = testEnv->inferRequest->StartAsync(nullptr);
+    testEnv->inferRequest = nullptr;
+
+    // intentionally block until notification from callback
+    std::unique_lock<std::mutex> lock(data.mutex_block_emulation);
+    data.cv_block_emulation.wait(lock, [&]() { return !data.isBlocked; });
+
+    ASSERT_EQ((int) StatusCode::OK, sts);
+
+    ASSERT_EQ(NUM_ITER, data.numIsCalled);
+    ASSERT_TRUE(data.startAsyncOK);
+    ASSERT_TRUE(data.waitOK);
+}
+
+// TODO: this is not a correct test: if the callback throws an exception, should the plugin really fail? The user has to handle this themselves.
+TEST_P(BehaviorPluginTestInferRequestCallback, DISABLED_returnGeneralErrorIfCallbackThrowException) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    testEnv->inferRequest->SetCompletionCallback(
+            [](InferenceEngine::IInferRequest::Ptr, StatusCode status) {
+                THROW_IE_EXCEPTION << "returnGeneralErrorIfCallbackThrowException";
+            });
+
+    sts = testEnv->inferRequest->StartAsync(nullptr);
+    StatusCode waitStatus = INFER_NOT_STARTED;
+    while (StatusCode::RESULT_NOT_READY == waitStatus || StatusCode::INFER_NOT_STARTED == waitStatus) {
+        waitStatus = testEnv->inferRequest->Wait(IInferRequest::WaitMode::STATUS_ONLY, &response);
+    }
+
+    ASSERT_EQ((int) StatusCode::OK, sts);
+    ASSERT_EQ(StatusCode::GENERAL_ERROR, waitStatus);
+    string refError = "returnGeneralErrorIfCallbackThrowException";
+    response.msg[refError.length()] = '\0';
+    ASSERT_EQ(refError, response.msg);
+}
+
+TEST_P(BehaviorPluginTestInferRequestCallback, inferDoesNotCallCompletionCallback) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    struct TestUserData {
+        bool isCalled = false;
+    };
+    TestUserData data;
+    testEnv->inferRequest->SetUserData(&data, nullptr);
+    testEnv->inferRequest->SetCompletionCallback(
+            [](InferenceEngine::IInferRequest::Ptr request, StatusCode status) {
+                TestUserData *userData = nullptr;
+                request->GetUserData((void **) &userData, nullptr);
+                userData->isCalled = true;
+            });
+    sts = testEnv->inferRequest->Infer(&response);
+    ASSERT_EQ((int) StatusCode::OK, sts);
+    ASSERT_FALSE(data.isCalled);
+}
+
+// TODO: develop a test that the request is not released until it has finished (to check the wait in the dtor?)
+TEST_P(BehaviorPluginTestInferRequestCallback, DISABLED_requestNotReleasedUntilCallbackAreDone) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+
+    struct SyncEnv {
+        std::mutex mutex_block_emulation;
+        std::condition_variable cv_block_emulation;
+        bool isBlocked = true;
+        bool isCalled = false;
+        typedef std::shared_ptr<SyncEnv> Ptr;
+    };
+    SyncEnv::Ptr syncEnv = std::make_shared<SyncEnv>();
+    testEnv->inferRequest->SetUserData(static_cast<void *>(syncEnv.get()), &response);
+    testEnv->inferRequest->SetCompletionCallback(
+            [](InferenceEngine::IInferRequest::Ptr request, StatusCode status) {
+                SyncEnv *userData = nullptr;
+                StatusCode sts = request->GetUserData((void **) &userData, nullptr);
+                if (sts == StatusCode::OK) {
+                    userData->isCalled = true;
+                }
+                // intentionally block task for launching tasks after calling dtor for TaskExecutor
+                std::unique_lock<std::mutex> lock(userData->mutex_block_emulation);
+                userData->cv_block_emulation.wait(lock, [&]() { return userData->isBlocked; });
+
+                // TODO: notify that everything is called
+            });
+
+    sts = testEnv->inferRequest->StartAsync(nullptr);
+    testEnv->inferRequest = nullptr; //Release();
+    syncEnv->isBlocked = false;
+    syncEnv->cv_block_emulation.notify_all();
+
+    // TODO: wait until notification from callback
+    ASSERT_EQ((int) StatusCode::OK, sts);
+    ASSERT_TRUE(syncEnv->isCalled);
+}
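
The callback tests above all rely on the same synchronization idiom: the completion callback sets a flag under a mutex and notifies a condition_variable, while the test body blocks until the flag flips. A self-contained sketch of that idiom, with a worker thread standing in for the completion callback:

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

int main() {
    std::mutex m;
    std::condition_variable cv;
    bool done = false;  // set by the "callback", read by the waiter

    // Stand-in for the completion callback fired from another thread.
    std::thread worker([&] {
        {
            std::lock_guard<std::mutex> lock(m);
            done = true;
        }
        cv.notify_all();
    });

    // Mirrors data.cv_block_emulation.wait(lock, ...) in the tests above.
    std::unique_lock<std::mutex> lock(m);
    cv.wait(lock, [&] { return done; });
    std::cout << "callback observed\n";

    worker.join();
    return 0;
}
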
diff --git a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_config.hpp b/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_config.hpp
new file mode 100644 (file)
index 0000000..ed20ec4
--- /dev/null
@@ -0,0 +1,80 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin.h"
+#include <threading/ie_executor_manager.hpp>
+
+using namespace std;
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
+namespace {
+std::string getConfigTestCaseName(testing::TestParamInfo<BehTestParams> obj) {
+    std::string config_str = "";
+    for (auto it = obj.param.config.cbegin(); it != obj.param.config.cend(); it++) {
+        std::string v = it->second;
+        std::replace(v.begin(), v.end(), '.', '_');
+        config_str += it->first + "_" + v + "_";
+    }
+    return obj.param.device + "_" + config_str;
+}
+}
+
+TEST_P(BehaviorPluginTestInferRequestConfig, CanInferWithConfig) {
+    TestEnv::Ptr testEnv;
+    std::map<std::string, std::string> config = GetParam().config;
+
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv, config));
+    sts = testEnv->inferRequest->Infer(&response);
+
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+}
+
+TEST_P(BehaviorPluginTestInferRequestConfigExclusiveAsync, canSetExclusiveAsyncRequests) {
+    ASSERT_EQ(0ul, ExecutorManager::getInstance()->getExecutorsNumber());
+    TestEnv::Ptr testEnv;
+    std::map<std::string, std::string> config;
+    config[PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS] = PluginConfigParams::YES;
+
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv, config));
+
+    // TODO: there are no executors to sync; should this be supported natively in the HDDL API?
+    if (GetParam().device == CommonTestUtils::DEVICE_HDDL) {
+        ASSERT_EQ(0u, ExecutorManager::getInstance()->getExecutorsNumber());
+    } else if (GetParam().device == CommonTestUtils::DEVICE_FPGA) {
+        ASSERT_EQ(2u, ExecutorManager::getInstance()->getExecutorsNumber());
+    } else if (GetParam().device == CommonTestUtils::DEVICE_MYRIAD) {
+        ASSERT_EQ(2u, ExecutorManager::getInstance()->getExecutorsNumber());
+    } else if (GetParam().device == CommonTestUtils::DEVICE_KEEMBAY) {
+        ASSERT_EQ(2u, ExecutorManager::getInstance()->getExecutorsNumber());
+    } else if (GetParam().device == CommonTestUtils::DEVICE_GNA) {
+        ASSERT_EQ(0u, ExecutorManager::getInstance()->getExecutorsNumber());
+    } else if (GetParam().device == CommonTestUtils::DEVICE_MULTI) {
+        // for multi-device the number of Executors is not known (defined by the devices configuration)
+    } else {
+        ASSERT_EQ(1u, ExecutorManager::getInstance()->getExecutorsNumber());
+    }
+}
+
+TEST_P(BehaviorPluginTestInferRequestConfigExclusiveAsync, withoutExclusiveAsyncRequests) {
+    ASSERT_EQ(0u, ExecutorManager::getInstance()->getExecutorsNumber());
+    TestEnv::Ptr testEnv;
+    std::map<std::string, std::string> config;
+    config[PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS] = PluginConfigParams::NO;
+
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv, config));
+
+    if (GetParam().device == CommonTestUtils::DEVICE_FPGA) {
+        ASSERT_EQ(1u, ExecutorManager::getInstance()->getExecutorsNumber());
+    } else if (GetParam().device == CommonTestUtils::DEVICE_MYRIAD) {
+        ASSERT_EQ(1u, ExecutorManager::getInstance()->getExecutorsNumber());
+    } else if (GetParam().device == CommonTestUtils::DEVICE_KEEMBAY) {
+        ASSERT_EQ(1u, ExecutorManager::getInstance()->getExecutorsNumber());
+    } else if (GetParam().device == CommonTestUtils::DEVICE_MULTI) {
+        // for multi-device the number of Executors is not known (defined by the devices configuration)
+    } else {
+        ASSERT_EQ(0u, ExecutorManager::getInstance()->getExecutorsNumber());
+    }
+}
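
The exclusive-async knob exercised above is an ordinary string key/value pair supplied at LoadNetwork time. A hedged sketch of enabling it through the public Core API; the model path and device name are placeholder values:

#include <map>
#include <string>

#include <ie_core.hpp>
#include <ie_plugin_config.hpp>

using namespace InferenceEngine;

// Load a network with exclusive async requests enabled.
// "model.xml" and "CPU" are placeholders, not values from this test suite.
ExecutableNetwork loadExclusive(Core &core) {
    CNNNetwork network = core.ReadNetwork("model.xml");
    std::map<std::string, std::string> config = {
        {PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, PluginConfigParams::YES}};
    return core.LoadNetwork(network, "CPU", config);
}
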
diff --git a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_fixture.cpp b/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_fixture.cpp
new file mode 100644 (file)
index 0000000..da82f76
--- /dev/null
@@ -0,0 +1,108 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin.h"
+#include "details/ie_cnn_network_tools.h"
+
+using namespace std;
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
+Blob::Ptr BehaviorPluginTestInferRequest::prepareInputBlob(Precision blobPrecision, SizeVector inputDims) {
+    auto input = makeNotAllocatedBlob(blobPrecision, TensorDesc::getLayoutByDims(inputDims), inputDims);
+    input->allocate();
+    return input;
+}
+
+Blob::Ptr BehaviorPluginTestInferRequest::_prepareOutputBlob(Precision blobPrecision, SizeVector outputDims) {
+    auto output = makeNotAllocatedBlob(blobPrecision, TensorDesc::getLayoutByDims(outputDims), outputDims);
+    output->allocate();
+    return output;
+}
+
+void BehaviorPluginTestInferRequest::_setInputPrecision(
+    const BehTestParams &param,
+    CNNNetwork &cnnNetwork,
+    TestEnv::Ptr &testEnv,
+    const size_t expectedNetworkInputs) {
+
+    InputsDataMap networkInputs = cnnNetwork.getInputsInfo();
+    if (expectedNetworkInputs != 0) {
+        ASSERT_EQ(networkInputs.size(), expectedNetworkInputs);
+    }
+    testEnv->networkInput = networkInputs.begin()->second;
+    testEnv->networkInput->setPrecision(param.input_blob_precision);
+    testEnv->inputDims = testEnv->networkInput->getTensorDesc().getDims();
+    testEnv->inputName = networkInputs.begin()->first;
+}
+
+void BehaviorPluginTestInferRequest::_setOutputPrecision(
+    const BehTestParams &param,
+    CNNNetwork &cnnNetwork,
+    TestEnv::Ptr &testEnv,
+    const size_t expectedNetworkOutputs) {
+
+    OutputsDataMap networkOutputs = cnnNetwork.getOutputsInfo();
+    if (expectedNetworkOutputs != 0) {
+        ASSERT_EQ(networkOutputs.size(), expectedNetworkOutputs);
+    }
+    testEnv->networkOutput = networkOutputs.begin()->second;
+    testEnv->networkOutput->setPrecision(param.output_blob_precision);
+    testEnv->outputDims = testEnv->networkOutput->getTensorDesc().getDims();
+    testEnv->outputName = networkOutputs.begin()->first;
+}
+
+void BehaviorPluginTestInferRequest::_createAndCheckInferRequest(
+    const BehTestParams &param,
+    TestEnv::Ptr &testEnv,
+    const std::map<std::string, std::string> &config,
+    const size_t expectedNetworkInputs,
+    const size_t expectedNetworkOutputs,
+    InferenceEngine::IExtensionPtr extension) {
+
+    testEnv = make_shared<TestEnv>();
+    if (extension) {
+        ASSERT_NO_THROW(testEnv->core.AddExtension(extension));
+    }
+
+    Core ie;
+    testEnv->network = ie.ReadNetwork(param.model_xml_str, param.weights_blob);
+    /* Call conversion from CNNNetwork NgraphImpl to CNNNetwork */
+    testEnv->network.begin();
+
+    _setInputPrecision(param, testEnv->network, testEnv, expectedNetworkInputs);
+    _setOutputPrecision(param, testEnv->network, testEnv, expectedNetworkOutputs);
+
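+    // Note: std::map::insert keeps existing keys, so the test-supplied config wins over duplicates from param.config.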
+    std::map<std::string, std::string> full_config = config;
+    full_config.insert(param.config.begin(), param.config.end());
+
+#ifdef DUMP_EXECUTION_GRAPH
+    full_config[PluginConfigParams::KEY_DUMP_EXEC_GRAPH_AS_DOT] = "behavior_tests_execution_graph_dump";
+#endif
+
+    try {
+        testEnv->exeNetwork = testEnv->core.LoadNetwork(testEnv->network, param.device, full_config);
+    } catch (const InferenceEngineException& ex) {
+        std::cout << "LoadNetwork failed. Status: " << ex.getStatus() << ", Response: " << ex.what() << std::endl;
+        throw;
+    } catch (const std::exception& ex) {
+        std::cout << "LoadNetwork failed. Exception: " << typeid(ex).name() << ", what(): " << ex.what() << std::endl;
+        throw;
+    } catch (...) {
+        std::cout << "LoadNetwork failed with unknown reason." << std::endl;
+        throw;
+    }
+    testEnv->actualInferRequest = testEnv->exeNetwork.CreateInferRequest();
+    testEnv->inferRequest = static_cast<IInferRequest::Ptr &>(testEnv->actualInferRequest);
+}
+
+bool BehaviorPluginTestInferRequest::_wasDeviceBusy(ResponseDesc response) {
+    std::cout << response.msg << "\n";
+    std::string refError = REQUEST_BUSY_str;
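+    // Compare only the leading REQUEST_BUSY_str portion of the response message.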
+    response.msg[refError.length()] = '\0';
+    return !refError.compare(response.msg);
+}
diff --git a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_input.hpp b/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_input.hpp
new file mode 100644 (file)
index 0000000..96cb02b
--- /dev/null
@@ -0,0 +1,127 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin.h"
+
+using namespace std;
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
+namespace {
+std::string getTestCaseName(testing::TestParamInfo<BehTestParams> obj) {
+    return obj.param.device + "_" + obj.param.input_blob_precision.name() + "_" + getModelName(obj.param.model_xml_str)
+                + (obj.param.config.size() ? "_" + obj.param.config.begin()->second : "");
+}
+}
+
+TEST_P(BehaviorPluginTestInferRequestInput, canSetInputBlobForSyncRequest) {
+    TestEnv::Ptr testEnv;
+    Blob::Ptr actualBlob;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    auto inputBlob = prepareInputBlob(GetParam().input_blob_precision, testEnv->inputDims);
+
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), inputBlob, &response));
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    ASSERT_NO_THROW(testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), actualBlob, &response));
+
+    ASSERT_EQ(inputBlob, actualBlob);
+}
+
+TEST_P(BehaviorPluginTestInferRequestInput, canSetInputBlobForAsyncRequest) {
+    TestEnv::Ptr testEnv;
+    Blob::Ptr actualBlob;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    auto inputBlob = prepareInputBlob(GetParam().input_blob_precision, testEnv->inputDims);
+
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), inputBlob, &response));
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    ASSERT_NO_THROW(testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), actualBlob, &response));
+
+    ASSERT_EQ(inputBlob, actualBlob);
+}
+
+TEST_P(BehaviorPluginTestInferRequestInput, canInferWithSetInOut) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    auto input = prepareInputBlob(GetParam().input_blob_precision, testEnv->inputDims);
+    testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), input, &response);
+    auto output = _prepareOutputBlob(GetParam().output_blob_precision, testEnv->outputDims);
+    testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), output, &response);
+    sts = testEnv->inferRequest->Infer(&response);
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+}
+
+TEST_P(BehaviorPluginTestInferRequestInput, canGetInputBlob_deprecatedAPI) {
+    TestEnv::Ptr testEnv;
+    Blob::Ptr input;
+    auto param = GetParam();
+
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(param, testEnv));
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response));
+
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    ASSERT_TRUE(input) << "Plugin didn't allocate input blobs";
+    ASSERT_FALSE(input->buffer() == nullptr) << "Plugin didn't allocate input blobs";
+    auto dims = input->getTensorDesc().getDims();
+    ASSERT_TRUE(testEnv->inputDims == dims) << "Input blob dimensions don't match network input";
+
+    ASSERT_EQ(param.input_blob_precision, input->getTensorDesc().getPrecision()) << "Input blob precision doesn't match network input";
+}
+
+TEST_P(BehaviorPluginTestInferRequestInput, canGetInputBlob) {
+    TestEnv::Ptr testEnv;
+    Blob::Ptr input;
+    auto param = GetParam();
+
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(param, testEnv));
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response));
+
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    ASSERT_TRUE(input) << "Plugin didn't allocate input blobs";
+    ASSERT_FALSE(input->buffer() == nullptr) << "Plugin didn't allocate input blobs";
+
+    auto tensorDescription = input->getTensorDesc();
+    auto dims = tensorDescription.getDims();
+    ASSERT_TRUE(testEnv->inputDims == dims) << "Input blob dimensions don't match network input";
+
+    ASSERT_EQ(param.input_blob_precision, tensorDescription.getPrecision()) << "Input blob precision doesn't match network input";
+}
+
+TEST_P(BehaviorPluginTestInferRequestInput, getInputAfterSetInputDoNotChangeInput) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr inputSetBlob = prepareInputBlob(GetParam().input_blob_precision, testEnv->inputDims);
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), inputSetBlob, &response));
+    Blob::Ptr inputGetBlob;
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), inputGetBlob, &response));
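+    // GetBlob must return the very blob previously set, not a copy.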
+    ASSERT_EQ(inputGetBlob.get(), inputSetBlob.get());
+}
+
+TEST_P(BehaviorPluginTestInferRequestInput, canInferWithGetInOut) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr input;
+    Blob::Ptr result;
+    testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
+    testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), result, &response);
+    sts = testEnv->inferRequest->Infer(&response);
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+}
+
+TEST_P(BehaviorPluginTestInferRequestInput, canStartAsyncInferWithGetInOut) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr input;
+    Blob::Ptr result;
+    testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
+    sts = testEnv->inferRequest->StartAsync(&response);
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
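+    // Wait up to 500 ms for the asynchronous request to finish.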
+    sts = testEnv->inferRequest->Wait(500, &response);
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    sts = testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), result, &response);
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+}
diff --git a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_output.hpp b/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_output.hpp
new file mode 100644 (file)
index 0000000..ced2b09
--- /dev/null
@@ -0,0 +1,165 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin.h"
+
+using namespace std;
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
+namespace {
+std::string getOutputTestCaseName(testing::TestParamInfo<BehTestParams> obj) {
+    return obj.param.device + "_" + obj.param.output_blob_precision.name()
+           + (obj.param.config.size() ? "_" + obj.param.config.begin()->second : "");
+}
+
+}
+
+TEST_P(BehaviorPluginTestInferRequestOutput, canSetOutputBlobForAsyncRequest) {
+    TestEnv::Ptr testEnv;
+    Blob::Ptr actualBlob;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    auto outputBlob = _prepareOutputBlob(GetParam().output_blob_precision, testEnv->outputDims);
+
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), outputBlob, &response));
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    ASSERT_NO_THROW(testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), actualBlob, &response));
+
+    ASSERT_EQ(outputBlob, actualBlob);
+}
+
+TEST_P(BehaviorPluginTestInferRequestOutput, canSetOutputBlobForSyncRequest) {
+    TestEnv::Ptr testEnv;
+    Blob::Ptr actualBlob;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    auto outputBlob = _prepareOutputBlob(GetParam().output_blob_precision, testEnv->outputDims);
+
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), outputBlob, &response));
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    ASSERT_NO_THROW(testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), actualBlob, &response));
+
+    ASSERT_EQ(outputBlob, actualBlob);
+}
+
+TEST_P(BehaviorPluginTestInferRequestOutput, canInferWithSetInOut) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    auto input = prepareInputBlob(GetParam().input_blob_precision, testEnv->inputDims);
+    testEnv->inferRequest->SetBlob(testEnv->inputName.c_str(), input, &response);
+    auto output = _prepareOutputBlob(GetParam().output_blob_precision, testEnv->outputDims);
+    testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), output, &response);
+
+    sts = testEnv->inferRequest->Infer(&response);
+
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+}
+
+TEST_P(BehaviorPluginTestInferRequestOutput, canGetOutputBlob_deprecatedAPI) {
+    TestEnv::Ptr testEnv;
+    Blob::Ptr output;
+    auto param = GetParam();
+
+    StatusCode sts = StatusCode::OK;
+    ResponseDesc response;
+
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(param, testEnv));
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), output, &response));
+
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    ASSERT_TRUE(output) << "Plugin didn't allocate output blobs";
+    ASSERT_FALSE(output->buffer() == nullptr) << "Plugin didn't allocate output blobs";
+    auto dims = output->getTensorDesc().getDims();
+    ASSERT_TRUE(testEnv->outputDims == dims) << "Output blob dimensions don't match network output";
+    // [IE FPGA] The plugin ignores custom output precision: CVS-8122
+    if (param.device != CommonTestUtils::DEVICE_FPGA && param.output_blob_precision != Precision::FP32) {
+        ASSERT_EQ(param.output_blob_precision, output->getTensorDesc().getPrecision())
+                                    << "Output blob precision doesn't match network output";
+    } else if (param.device == CommonTestUtils::DEVICE_FPGA) {
+        set<Precision> supportedOutputs = {Precision::FP16, Precision::FP32};
+        ASSERT_TRUE(supportedOutputs.find(output->getTensorDesc().getPrecision()) != supportedOutputs.end()) << "Output blob precision doesn't match network output";
+    } else {
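+        // Remaining case: a non-FPGA device with FP32 requested must report FP32 output precision.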
+        ASSERT_EQ(Precision::FP32, output->getTensorDesc().getPrecision()) << "Output blob precision doesn't match network output";
+    }
+}
+
+TEST_P(BehaviorPluginTestInferRequestOutput, canGetOutputBlob) {
+    TestEnv::Ptr testEnv;
+    Blob::Ptr output;
+    auto param = GetParam();
+
+    StatusCode sts = StatusCode::OK;
+    ResponseDesc response;
+
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(param, testEnv));
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), output, &response));
+
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+    ASSERT_TRUE(output) << "Plugin didn't allocate output blobs";
+    ASSERT_FALSE(output->buffer() == nullptr) << "Plugin didn't allocate output blobs";
+
+    auto tensorDescription = output->getTensorDesc();
+    auto dims = tensorDescription.getDims();
+    ASSERT_TRUE(testEnv->outputDims == dims) << "Output blob dimensions don't match network output";
+    // [IE FPGA] The plugin ignores custom output precision: CVS-8122
+    std::cout << "Device: " << param.device << std::endl;
+    if (param.device != CommonTestUtils::DEVICE_FPGA && param.output_blob_precision != Precision::FP32) {
+        ASSERT_EQ(param.output_blob_precision, tensorDescription.getPrecision())
+                                    << "Output blob precision doesn't match network output";
+    } else if (param.device == CommonTestUtils::DEVICE_FPGA) {
+        set<Precision> supportedOutputs = {Precision::FP16, Precision::FP32};
+        ASSERT_TRUE(supportedOutputs.find(tensorDescription.getPrecision()) != supportedOutputs.end()) << "Output blob precision doesn't match network output";
+    } else {
+        ASSERT_EQ(Precision::FP32, tensorDescription.getPrecision()) << "Output blob precision doesn't match network output";
+    }
+}
+
+TEST_P(BehaviorPluginTestInferRequestOutput, getOutputAfterSetOutputDoNotChangeOutput) {
+    TestEnv::Ptr testEnv;
+    ResponseDesc response;
+
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr outputSetBlob = _prepareOutputBlob(GetParam().output_blob_precision, testEnv->outputDims);
+    ASSERT_EQ(StatusCode::OK, testEnv->inferRequest->SetBlob(testEnv->outputName.c_str(), outputSetBlob, &response));
+    Blob::Ptr outputGetBlob;
+    ASSERT_EQ(StatusCode::OK, testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), outputGetBlob, &response));
+    ASSERT_EQ(outputGetBlob.get(), outputSetBlob.get());
+}
+
+TEST_P(BehaviorPluginTestInferRequestOutput, canInferWithGetInOut) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr input;
+    Blob::Ptr result;
+
+    StatusCode sts = StatusCode::OK;
+    ResponseDesc response;
+
+    testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
+    testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), result, &response);
+    sts = testEnv->inferRequest->Infer(&response);
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+}
+
+TEST_P(BehaviorPluginTestInferRequestOutput, canStartAsyncInferWithGetInOut) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    Blob::Ptr input;
+    Blob::Ptr result;
+
+    StatusCode sts = StatusCode::OK;
+    ResponseDesc response;
+
+    testEnv->inferRequest->GetBlob(testEnv->inputName.c_str(), input, &response);
+
+    sts = testEnv->inferRequest->StartAsync(&response);
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+
+    sts = testEnv->inferRequest->Wait(500, &response);
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+
+    sts = testEnv->inferRequest->GetBlob(testEnv->outputName.c_str(), result, &response);
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+}
diff --git a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_layers.hpp b/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_layers.hpp
new file mode 100644 (file)
index 0000000..ab39e44
--- /dev/null
@@ -0,0 +1,936 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin.h"
+#include "common_test_utils/xml_net_builder/xml_net_builder.hpp"
+
+using namespace std;
+using namespace CommonTestUtils;
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
+struct in_params_c {
+    struct {
+        size_t c;
+    } in;
+};
+
+struct in_params_nc {
+    struct {
+        size_t n;
+        size_t c;
+    } in;
+};
+
+struct in_params_chw {
+    struct {
+        size_t c;
+        size_t h;
+        size_t w;
+    } in;
+};
+
+struct in_params_nchw {
+    struct {
+        size_t n;
+        size_t c;
+        size_t h;
+        size_t w;
+    } in;
+};
+
+struct str_params {
+    struct {
+        size_t w;
+        size_t h;
+    } str;
+};
+
+struct krn_params {
+    struct {
+        size_t w;
+        size_t h;
+    } krn;
+};
+
+struct pad_params {
+    struct {
+        size_t w;
+        size_t h;
+    } pad;
+};
+
+struct base_test_params {
+    std::string device;
+    std::string precision;
+
+    base_test_params(std::string name, std::string _precision = "FP32") {
+        device = name;
+        precision = _precision;
+    }
+};
+
+struct conv_params : in_params_chw, krn_params, str_params, pad_params {
+    size_t out_c;
+    size_t grp_c;
+
+    struct out_struct {
+        size_t w;
+        size_t h;
+    } out;
+
+    conv_params(in_params_chw in,
+                krn_params krn,
+                str_params str,
+                pad_params pad,
+                size_t _out_c,
+                size_t _grp_c,
+                out_struct _out = {}) :
+            in_params_chw(in), krn_params(krn), str_params(str), pad_params(pad) {
+        out_c = _out_c;
+        grp_c = _grp_c;
+        out = _out;
+    }
+};
+
+struct conv_test_params : conv_params, base_test_params {
+    conv_test_params(std::string name, conv_params params) :
+            conv_params(params), base_test_params(name) {}
+};
+
+template<class T>
+class LayerTestsCommon : public TestsCommon,
+                         public testing::WithParamInterface<T> {
+protected:
+};
+
+template<class T>
+std::string getTestName(testing::TestParamInfo<T> obj) {
+    return obj.param.device;
+}
+
+IE_SUPPRESS_DEPRECATED_START
+class ConvolutionLayerTest : public LayerTestsCommon<conv_test_params> {
+protected:
+    std::string getModel(const conv_test_params& p) {
+        std::map<std::string, std::string> params = {
+                {"stride-x", std::to_string(p.str.w)},
+                {"stride-y", std::to_string(p.str.h)},
+                {"pad-x",    std::to_string(p.pad.w)},
+                {"pad-y",    std::to_string(p.pad.h)},
+                {"kernel-x", std::to_string(p.krn.w)},
+                {"kernel-y", std::to_string(p.krn.h)},
+                {"output",   std::to_string(p.out_c)},
+                {"group",    std::to_string(p.grp_c)}
+        };
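+        // Output size defaults to (in + 2*pad - kernel) / stride + 1 unless overridden via p.out.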
+        size_t out_h = p.out.h == 0 ?
+                    (p.in.h + 2 * p.pad.h - p.krn.h) / p.str.h + 1 : p.out.h;
+        size_t out_w = p.out.w == 0 ?
+                    (p.in.w + 2 * p.pad.w - p.krn.w) / p.str.w + 1 : p.out.w;
+        InOutShapes inout = {{{p.in.c,  p.in.h, p.in.w}},
+                           {{p.out_c, out_h,  out_w}}};
+
+        size_t weights = (p.krn.w * p.krn.h * p.out_c * p.in.c / p.grp_c) *
+                         sizeof(float);
+        size_t biases = p.out_c * sizeof(float);
+
+        V2NetBuilder model = V2NetBuilder::buildNetworkWithOneInput(
+                "Convolution_Only", inout.inDims[0], p.precision)
+                .addLayer("Convolution", p.precision, &params, inout, weights, biases);
+        return model.finish();
+    }
+
+    InferenceEngine::TBlob<uint8_t>::Ptr GetNetworkWeights(const conv_test_params& p) {
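+        // Single U8 blob holding both weights and biases as raw float data.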
+        TBlob<uint8_t>* weights = new TBlob<uint8_t>(
+                { Precision::U8, {
+                        (p.krn.w * p.krn.h * p.out_c * p.in.c / p.grp_c + p.out_c)
+                        * sizeof(float)}, Layout::C} );
+        weights->allocate();
+        fill_data(weights->buffer().as<float*>(),
+                  weights->size() / sizeof(float));
+        TBlob<uint8_t>::Ptr weights_ptr = TBlob<uint8_t>::Ptr(weights);
+
+        return weights_ptr;
+    }
+};
+
+class DeconvolutionLayerTest : public ConvolutionLayerTest {
+protected:
+    std::string getModel(const conv_test_params& p) {
+        std::map<std::string, std::string> params = {
+                {"stride-x", std::to_string(p.str.w)},
+                {"stride-y", std::to_string(p.str.h)},
+                {"pad-x",    std::to_string(p.pad.w)},
+                {"pad-y",    std::to_string(p.pad.h)},
+                {"kernel-x", std::to_string(p.krn.w)},
+                {"kernel-y", std::to_string(p.krn.h)},
+                {"output",   std::to_string(p.out_c)},
+                {"group",    std::to_string(p.grp_c)}
+        };
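+        // Note: reuses the convolution output formula; exact output shapes are irrelevant for these negative load tests.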
+        size_t out_h = p.out.h == 0 ?
+                    (p.in.h + 2 * p.pad.h - p.krn.h) / p.str.h + 1 : p.out.h;
+        size_t out_w = p.out.w == 0 ?
+                    (p.in.w + 2 * p.pad.w - p.krn.w) / p.str.w + 1 : p.out.w;
+        InOutShapes inout = {{{p.in.c,  p.in.h, p.in.w}},
+                           {{p.out_c, out_h,  out_w}}};
+
+        size_t weights = (p.krn.w * p.krn.h * p.out_c * p.in.c / p.grp_c) *
+                         sizeof(float);
+
+        V2NetBuilder model = V2NetBuilder::buildNetworkWithOneInput(
+                "Deconvolution_Only", inout.inDims[0], p.precision)
+                .addLayer("Deconvolution", p.precision, &params, inout, weights);
+        return model.finish();
+    }
+};
+
+TEST_P(ConvolutionLayerTest, CanNotLoadConvLayer) {
+    auto param = GetParam();
+    string ref_error = (param.device == CommonTestUtils::DEVICE_FPGA) ?
+                       "Graph is not supported on FPGA" : "Unsupported layer: Convolution1:Convolution";
+    
+    InferenceEngine::Core core;
+    auto network = core.ReadNetwork(getModel(param), GetNetworkWeights(param));
+
+    try {
+        ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device, {});
+    } catch (const InferenceEngineException& ex) {
+        ASSERT_EQ(ex.getStatus(), StatusCode::GENERAL_ERROR);
+        ASSERT_STR_CONTAINS(ex.what(), ref_error);
+    }
+}
+
+TEST_P(DeconvolutionLayerTest, CanNotLoadDeconvLayer) {
+    auto param = GetParam();
+    string ref_error = (param.device == CommonTestUtils::DEVICE_FPGA)
+                       ? "Graph is not supported on FPGA"
+                       : (param.device == CommonTestUtils::DEVICE_GNA)
+                         ? "[GNAPlugin] in function LoadNetwork: The plugin does not support layer: Deconvolution1:Deconvolution\n"
+                         : "Unsupported layer: Deconvolution1:Deconvolution";
+
+    InferenceEngine::Core core;
+    auto network = core.ReadNetwork(getModel(param), GetNetworkWeights(param));
+
+    try {
+        ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device, {});
+    } catch (const InferenceEngineException& ex) {
+        ASSERT_EQ(ex.getStatus(), StatusCode::GENERAL_ERROR);
+        ASSERT_STR_CONTAINS(ex.what(), ref_error);
+    }
+
+}
+
+#define conv_case conv_params({{32, 16, 9}, {1, 1}, {1, 1}, {0, 0}, 17, 1})
+#define conv_dw_case conv_params({{32, 16, 9}, {1, 1}, {2, 2}, {0, 0}, 512, 512})
+
+struct pool_params : in_params_nchw, krn_params, str_params, pad_params {
+    pool_params(in_params_nchw in, krn_params krn, str_params str, pad_params pad) :
+            in_params_nchw(in), krn_params(krn), str_params(str), pad_params(pad) {}
+};
+
+struct pool_test_params : pool_params, base_test_params {
+    pool_test_params(std::string name, std::string pr, pool_params params) :
+            pool_params(params), base_test_params(name, pr) {}
+};
+
+class PoolingLayerTest : public LayerTestsCommon<pool_test_params> {
+protected:
+    std::string getModel(const pool_test_params& p) {
+        std::map<std::string, std::string> params = {
+                {"stride-x", std::to_string(p.str.w)},
+                {"stride-y", std::to_string(p.str.h)},
+                {"pad-x",    std::to_string(p.pad.w)},
+                {"pad-y",    std::to_string(p.pad.h)},
+                {"kernel-x", std::to_string(p.krn.w)},
+                {"kernel-y", std::to_string(p.krn.h)},
+                {"method",   "MAX"},
+                {"round",    "Ceil"}
+        };
+
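+        // Pooling output size follows the same (in + 2*pad - kernel) / stride + 1 rule.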
+        size_t out_h = (p.in.h + 2 * p.pad.h - p.krn.h) / p.str.h + 1;
+        size_t out_w = (p.in.w + 2 * p.pad.w - p.krn.w) / p.str.w + 1;
+        InOutShapes inout = {
+                {{p.in.n, p.in.c, p.in.h, p.in.w}},
+                {{p.in.n, p.in.c, out_h,  out_w}}
+        };
+
+        V2NetBuilder model = V2NetBuilder::buildNetworkWithOneInput(
+                "Pooling_Only", inout.inDims[0], p.precision)
+                .addLayer("Pooling", p.precision, &params, inout);
+        return model.finish();
+    }
+};
+
+class ROIPoolingLayerTest : public LayerTestsCommon<pool_test_params> {
+protected:
+    std::string getROIPoolingModel(const pool_test_params& p) {
+
+        size_t out_h = (p.in.h + 2 * p.pad.h - p.krn.h) / p.str.h + 1;
+        size_t out_w = (p.in.w + 2 * p.pad.w - p.krn.w) / p.str.w + 1;
+        InOutShapes inout = {
+                {{p.in.n, p.in.c, p.in.h, p.in.w}, {p.in.n, p.in.c}},
+                {{p.in.n, p.in.c, out_h,  out_w}}
+        };
+        std::map<std::string, std::string> params = {
+                {"pooled_h",      std::to_string(out_h)},
+                {"pooled_w",      std::to_string(out_w)},
+                {"spatial_scale", "0.062500"}
+        };
+        return V2NetBuilder::buildNetworkWithOneInput("ROIPooling_Only", inout.inDims[0], p.precision)
+                .addInputLayer(p.precision, inout.inDims[1])
+                .addLayer("ROIPooling", p.precision, &params, inout)
+                .havingEdges().connect(0, 2).connect(1, 2).finish();
+    }
+};
+
+TEST_P(PoolingLayerTest, CanNotLoadPoolLayer) {
+    auto param = GetParam();
+    string ref_error = (param.device == CommonTestUtils::DEVICE_FPGA) ?
+                       "Graph is not supported on FPGA" : "Unsupported layer: Pooling1:Pooling";
+    
+    InferenceEngine::Core core;
+    std::string model = getModel(param);
+    CNNNetwork network = core.ReadNetwork(model, Blob::CPtr());
+
+    try {
+        ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device, {});
+    } catch (const InferenceEngineException& ex) {
+        ASSERT_EQ(ex.getStatus(), StatusCode::GENERAL_ERROR);
+        ASSERT_STR_CONTAINS(ex.what(), ref_error);
+    }
+}
+
+TEST_P(ROIPoolingLayerTest, CanNotLoadROIPoolLayer) {
+    auto param = GetParam();
+    string ref_error =
+            (param.device == CommonTestUtils::DEVICE_FPGA) ?
+                "Graph is not supported on FPGA" :
+                "Unsupported layer: ROIPooling2:ROIPooling";
+                
+    InferenceEngine::Core core;
+    std::string model = getROIPoolingModel(param);
+    CNNNetwork network = core.ReadNetwork(model, Blob::CPtr());
+
+    if (param.device == CommonTestUtils::DEVICE_CPU ||
+        param.device == CommonTestUtils::DEVICE_MYRIAD ||
+        param.device == CommonTestUtils::DEVICE_HDDL ||
+        param.device == CommonTestUtils::DEVICE_KEEMBAY) {
+        ASSERT_NO_THROW(ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device, {}));
+    } else {
+        try {
+            ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device, {});
+        } catch (const InferenceEngineException& ex) {
+            if (param.device != CommonTestUtils::DEVICE_HDDL) {
+                ASSERT_EQ(ex.getStatus(), StatusCode::GENERAL_ERROR);
+            }
+            ASSERT_STR_CONTAINS(ex.what(), ref_error);
+        }
+    }
+}
+
+#define pool_case pool_params({{1, 1, 16, 16}, {2, 2}, {2, 2}, {0, 0}})
+
+struct activ_test_params : in_params_nchw, base_test_params {
+    activ_test_params(std::string name, std::string pr, in_params_nchw params) :
+            in_params_nchw(params), base_test_params(name, pr) {}
+};
+
+class ActivationLayerTest : public LayerTestsCommon<activ_test_params> {
+protected:
+    std::string getModel(const activ_test_params& p) {
+        std::map<std::string, std::string> params = {
+                {"type", "sigmoid"}
+        };
+
+        InOutShapes inout = {{{p.in.n, p.in.c}},
+                           {{p.in.n, p.in.c}}};
+
+        V2NetBuilder model = V2NetBuilder::buildNetworkWithOneInput(
+                "Activation_Only", inout.inDims[0], p.precision)
+                .addLayer("Activation", p.precision, &params, inout);
+        return model.finish();
+    }
+};
+
+class ReLULayerTest : public ActivationLayerTest {
+protected:
+    std::string getModel(const activ_test_params& p) {
+        InOutShapes inout = {
+                {{p.in.c, p.in.h, p.in.w}},
+                {{p.in.c, p.in.h, p.in.w}}
+        };
+
+        V2NetBuilder model = V2NetBuilder::buildNetworkWithOneInput(
+                "ReLU_Only", inout.inDims[0], p.precision)
+                .addLayer("ReLU", p.precision, nullptr, inout);
+        return model.finish();
+    }
+};
+
+class ClampLayerTest : public ActivationLayerTest {
+protected:
+    std::string getModel(const activ_test_params& p) {
+        std::map<std::string, std::string> params = {
+                {"min", "-50"},
+                {"max", "50"}
+        };
+
+        InOutShapes inout = {
+                {{p.in.n, p.in.c, p.in.h, p.in.w}},
+                {{p.in.n, p.in.c, p.in.h, p.in.w}}
+        };
+
+        V2NetBuilder model = V2NetBuilder::buildNetworkWithOneInput(
+                "Clamp_Only", inout.inDims[0], p.precision)
+                .addLayer("Clamp", p.precision, &params, inout);
+        return model.finish();
+    }
+};
+
+TEST_P(ActivationLayerTest, CanNotLoadActivationLayer) {
+    auto param = GetParam();
+    string ref_error = (param.device == CommonTestUtils::DEVICE_FPGA) ?
+                       "Graph is not supported on FPGA" : "Unsupported primitive of type: Activation name: Activation1";
+    
+    InferenceEngine::Core core;
+    std::string model = getModel(param);
+    CNNNetwork network = core.ReadNetwork(model, Blob::CPtr());
+
+    if (param.device == CommonTestUtils::DEVICE_CPU) {
+        ASSERT_NO_THROW(ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device));
+    } else {
+        try {
+            ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device, {});
+        } catch (const InferenceEngineException& ex) {
+            ASSERT_EQ(ex.getStatus(), StatusCode::GENERAL_ERROR);
+            ASSERT_STR_CONTAINS(ex.what(), ref_error);
+        }
+    }
+}
+
+TEST_P(ReLULayerTest, CanNotLoadReLULayer) {
+    auto param = GetParam();
+    string ref_error = (param.device == CommonTestUtils::DEVICE_FPGA) ? "Graph is not supported on FPGA" :
+                       (param.device == CommonTestUtils::DEVICE_CPU)  ? "channels mismatch between mea" :
+                       "Unsupported layer: ReLU1:ReLU";
+    
+    InferenceEngine::Core core;
+    std::string model = getModel(param);
+    CNNNetwork network = core.ReadNetwork(model, Blob::CPtr());
+
+    try {
+        ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device, {});
+    } catch (const InferenceEngineException& ex) {
+        if (param.device != CommonTestUtils::DEVICE_CPU) {
+            ASSERT_EQ(ex.getStatus(), StatusCode::GENERAL_ERROR);
+        }
+        ASSERT_STR_CONTAINS(ex.what(), ref_error);
+    }
+
+}
+
+TEST_P(ClampLayerTest, CanNotLoadClampLayer) {
+    auto param = GetParam();
+    string ref_error = (param.device == CommonTestUtils::DEVICE_FPGA) ?
+                       "Graph is not supported on FPGA" : "Unsupported layer: Clamp1:Clamp";
+    
+    InferenceEngine::Core core;
+    std::string model = getModel(param);
+    CNNNetwork network = core.ReadNetwork(model, Blob::CPtr());
+
+    try {
+        ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device, {});
+    } catch (const InferenceEngineException& ex) {
+        ASSERT_EQ(ex.getStatus(), StatusCode::GENERAL_ERROR);
+        ASSERT_STR_CONTAINS(ex.what(), ref_error);
+    }
+
+}
+
+#define activation_case in_params_nchw({1, 96, 55, 55})
+#define clamp_case in_params_nchw({1, 1, 512, 1})
+
+struct norm_test_params : in_params_nchw, base_test_params {
+    norm_test_params(std::string name, std::string pr, in_params_nchw params) :
+            in_params_nchw(params), base_test_params(name, pr) {}
+};
+
+class NormalizeLayerTest : public LayerTestsCommon<norm_test_params> {
+protected:
+    std::string getModel(const norm_test_params& p) {
+        std::map<std::string, std::string> params = {
+                {"across_spatial", "0"},
+                {"type",           "constant"},
+                {"value",          "20.000000"},
+                {"min",            "0.000000"},
+                {"max",            "1.000000"},
+                {"mean",           "0.000000"},
+                {"std",            "1.000000"},
+                {"sparse",         "-1"},
+                {"variance_norm",  "caffe.FillerParameter.FAN_IN"},
+                {"channel_shared", "0"},
+                {"eps",            "0.000000"}
+        };
+
+        InOutShapes inout = {
+                {{p.in.n, p.in.c, p.in.h, p.in.w}},
+                {{p.in.n, p.in.c, p.in.h, p.in.w}}
+        };
+        size_t weights = 2048;
+
+        V2NetBuilder model = V2NetBuilder::buildNetworkWithOneInput(
+                "Normalize_Only", inout.inDims[0], p.precision)
+                .addLayer("Normalize", p.precision, &params, inout, weights);
+        return model.finish();
+    }
+
+    TBlob<uint8_t>::Ptr GetNetworkWeights(const norm_test_params& p) {
+        TBlob<uint8_t>* weights = new TBlob<uint8_t>(
+                { Precision::U8, {p.in.c * sizeof(float)}, Layout::C });
+        weights->allocate();
+        fill_data(weights->buffer().as<float*>(),
+                  weights->size() / sizeof(float));
+        TBlob<uint8_t>::Ptr weights_ptr = TBlob<uint8_t>::Ptr(weights);
+
+        return weights_ptr;
+    }
+};
+
+TEST_P(NormalizeLayerTest, CanNotLoadNormalizeLayer) {
+    auto param = GetParam();
+    string ref_error = (param.device == CommonTestUtils::DEVICE_FPGA) ?
+                       "Graph is not supported on FPGA" : "Unsupported layer: Normalize1:Normalize";
+    
+    InferenceEngine::Core core;
+    auto network = core.ReadNetwork(getModel(param), GetNetworkWeights(param));
+
+    if (param.device == CommonTestUtils::DEVICE_CPU) {
+        ASSERT_NO_THROW(ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device));
+    } else {
+        try {
+            ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device);
+        } catch (const InferenceEngineException& ex) {
+            ASSERT_EQ(ex.getStatus(), StatusCode::GENERAL_ERROR);
+            ASSERT_STR_CONTAINS(ex.what(), ref_error);
+        }
+    }
+}
+
+#define norm_case in_params_nchw({1, 512, 38, 38})
+
+struct scale_params : in_params_nchw {
+    size_t axis;
+
+    scale_params(in_params_nchw in, size_t ax) :
+            in_params_nchw(in) {
+        axis = ax;
+    }
+};
+
+struct scale_test_params : scale_params, base_test_params {
+    scale_test_params(std::string name, std::string pr, scale_params params) :
+            scale_params(params), base_test_params(name, pr) {}
+};
+
+class ScalingLayerTest : public LayerTestsCommon<scale_test_params> {
+protected:
+    std::string getScaleShiftModel(const scale_test_params& p) {
+        InOutShapes inout = {
+                {{p.in.w, p.in.h}},
+                {{p.in.w, p.in.h}}
+        };
+        size_t weights = 2048;
+
+        V2NetBuilder model = V2NetBuilder::buildNetworkWithOneInput(
+                "ScaleShift_Only", inout.inDims[0], p.precision)
+                .addLayer("ScaleShift", p.precision, nullptr, inout, weights);
+        return model.finish();
+    }
+
+    std::string getSoftMaxModel(const scale_test_params& p) {
+        std::map<std::string, std::string> params = {
+                {"axis", std::to_string(p.axis)}
+        };
+
+        InOutShapes inout = {
+                {{p.in.w, p.in.h}},
+                {{p.in.w, p.in.h}}
+        };
+        size_t weights = 2048;
+
+        V2NetBuilder model = V2NetBuilder::buildNetworkWithOneInput(
+                "SoftMax_Only", inout.inDims[0], p.precision)
+                .addLayer("SoftMax", p.precision, &params, inout, weights);
+        return model.finish();
+    }
+
+    std::string getBatchNormalizationModel(const scale_test_params& p) {
+        std::map<std::string, std::string> params = {
+                {"epsilon", "2e-05"}
+        };
+
+        InOutShapes inout = {
+                {{p.in.n, p.in.c, p.in.w, p.in.h}},
+                {{p.in.n, p.in.c, p.in.w, p.in.h}}
+        };
+        size_t weights = 12;
+        size_t biases = 12;
+
+        V2NetBuilder model = V2NetBuilder::buildNetworkWithOneInput(
+                "BatchNormalization_Only", inout.inDims[0], p.precision)
+                .addLayer("BatchNormalization", p.precision, &params, inout, weights, biases);
+        return model.finish();
+    }
+};
+
+TEST_P(ScalingLayerTest, CanNotLoadScaleShiftLayer) {
+    auto param = GetParam();
+    string ref_error = "Unsupported layer: ScaleShift1:ScaleShift";
+    std::map<std::string, std::string> config;
+    if (param.device == CommonTestUtils::DEVICE_FPGA) {
+        ref_error = "Graph is not supported on FPGA";
+    } else if (param.device == CommonTestUtils::DEVICE_GNA) {
+        config.insert({GNA_CONFIG_KEY(SCALE_FACTOR), std::to_string(1)});
+        ref_error = "[GNAPlugin] in function operator(): "
+                "Incorrect weight value for ScaleShift1:ScaleShift";
+    }
+    
+    InferenceEngine::Core core;
+    std::string model = getScaleShiftModel(param);
+    CNNNetwork network = core.ReadNetwork(model, Blob::CPtr());
+
+    try {
+        ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device, config);
+    } catch (const InferenceEngineException& ex) {
+        ASSERT_EQ(ex.getStatus(), StatusCode::GENERAL_ERROR);
+        ASSERT_STR_CONTAINS(ex.what(), ref_error);
+    }
+}
+
+TEST_P(ScalingLayerTest, CanNotLoadSoftMaxLayer) {
+    auto param = GetParam();
+    string ref_error = (param.device == CommonTestUtils::DEVICE_FPGA) ? "Graph is not supported on FPGA" :
+                       (param.device == CommonTestUtils::DEVICE_CPU) ? "Incorrect axis!" : "Unsupported layer: SoftMax1:SoftMax";
+    
+    InferenceEngine::Core core;
+    std::string model = getSoftMaxModel(param);
+    CNNNetwork network = core.ReadNetwork(model, Blob::CPtr());
+
+    try {
+        ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device);
+    } catch (const InferenceEngineException& ex) {
+        ASSERT_EQ(ex.getStatus(), StatusCode::GENERAL_ERROR);
+        ASSERT_STR_CONTAINS(ex.what(), ref_error);
+    }
+}
+
+TEST_P(ScalingLayerTest, CanNotLoadBatchNormalizationLayer) {
+    auto param = GetParam();
+    string ref_error = "Unsupported layer: BatchNormalization1:BatchNormalization";
+
+    if (param.device == CommonTestUtils::DEVICE_FPGA) {
+        ref_error = "Graph is not supported on FPGA";
+    } else if (param.device == CommonTestUtils::DEVICE_CPU) {
+        ref_error = "Weights/biases are empty for layer: BatchNormalization1 ";
+    } else if (param.device == CommonTestUtils::DEVICE_GNA) {
+        ref_error = "[GNAPlugin] in function LoadNetwork: "
+                "The plugin does not support layer: "
+                "BatchNormalization1:BatchNormalization";
+    }
+
+    InferenceEngine::Core core;
+    std::string model = getBatchNormalizationModel(param);
+    CNNNetwork network = core.ReadNetwork(model, Blob::CPtr());
+
+    try {
+        ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device, {});
+    } catch (const InferenceEngineException& ex) {
+        ASSERT_EQ(ex.getStatus(), StatusCode::GENERAL_ERROR);
+        ASSERT_STR_CONTAINS(ex.what(), ref_error);
+    }
+}
+
+struct shaping_test_params : in_params_nchw, base_test_params {
+    shaping_test_params(std::string name, std::string pr, in_params_nchw params) :
+            in_params_nchw(params), base_test_params(name, pr) {}
+};
+
+class ShapingLayerTest : public LayerTestsCommon<shaping_test_params> {
+protected:
+    std::string getFlattenModel(const shaping_test_params& p) {
+        std::map<std::string, std::string> params = {
+                {"axis",     "1"},
+                {"end_axis", "-1"}
+        };
+        InOutShapes inout = {
+                {{p.in.n, p.in.c, p.in.w, p.in.h}},
+                {{p.in.n, p.in.c}}
+        };
+
+        V2NetBuilder model = V2NetBuilder::buildNetworkWithOneInput(
+                "Flatten_Only", inout.inDims[0], p.precision)
+                .addLayer("Flatten", p.precision, &params, inout);
+        return model.finish();
+    }
+
+    std::string getReshapeModel(const shaping_test_params& p) {
+        std::map<std::string, std::string> params = {
+                {"dim",      std::to_string(p.in.n) + "," + std::to_string(p.in.c)},
+                {"axis",     "0"},
+                {"num_axes", "-1"}
+        };
+        InOutShapes inout = {
+                {{p.in.n, p.in.c, p.in.w, p.in.h}},
+                {{p.in.n, p.in.c}}
+        };
+
+        V2NetBuilder model = V2NetBuilder::buildNetworkWithOneInput(
+                "Reshape_Only", inout.inDims[0], p.precision)
+                .addLayer("Reshape", p.precision, &params, inout);
+        return model.finish();
+    }
+
+    std::string getCropModel(const shaping_test_params& p) {
+        std::map<std::string, std::string> params = {
+                {"dim",    "12,12"},
+                {"axis",   "2,3"},
+                {"offset", "0,0"}
+        };
+        InOutShapes inout = {
+                {{p.in.n, p.in.c, p.in.w, p.in.h}},
+                {{p.in.n, p.in.c, 12,     12}}
+        };
+
+        V2NetBuilder model = V2NetBuilder::buildNetworkWithOneInput(
+                "Crop_Only", inout.inDims[0], p.precision)
+                .addLayer("Crop", p.precision, &params, inout);
+        return model.finish();
+    }
+};
+
+TEST_P(ShapingLayerTest, CanNotLoadFlattenLayer) {
+    auto param = GetParam();
+    string ref_error = (param.device == CommonTestUtils::DEVICE_FPGA) ?
+                       "Graph is not supported on FPGA" : "Unsupported layer: Flatten1:Flatten";
+    
+    InferenceEngine::Core core;
+    std::string model = getFlattenModel(param);
+    CNNNetwork network = core.ReadNetwork(model, Blob::CPtr());
+
+    try {
+        ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device, {});
+    } catch (const InferenceEngineException& ex) {
+        ASSERT_EQ(ex.getStatus(), StatusCode::GENERAL_ERROR);
+        ASSERT_STR_CONTAINS(ex.what(), ref_error);
+    }
+}
+
+TEST_P(ShapingLayerTest, CanNotLoadReshapeLayer) {
+    auto param = GetParam();
+    string ref_error = (param.device == CommonTestUtils::DEVICE_FPGA) ?
+                       "Graph is not supported on FPGA" : "Unsupported layer: Reshape1:Reshape";
+    
+    InferenceEngine::Core core;
+    std::string model = getReshapeModel(param);
+    CNNNetwork network = core.ReadNetwork(model, Blob::CPtr());
+
+    try {
+        ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device, {});
+    } catch (const InferenceEngineException& ex) {
+        ASSERT_EQ(ex.getStatus(), StatusCode::GENERAL_ERROR);
+        ASSERT_STR_CONTAINS(ex.what(), ref_error);
+    }
+}
+
+TEST_P(ShapingLayerTest, CanNotLoadCropLayer) {
+    auto param = GetParam();
+    string ref_error = (param.device == CommonTestUtils::DEVICE_FPGA) ?
+                       "Graph is not supported on FPGA" : "Unsupported layer: Crop1:Crop";
+    
+    InferenceEngine::Core core;
+    std::string model = getCropModel(param);
+    CNNNetwork network = core.ReadNetwork(model, Blob::CPtr());
+
+    try {
+        ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device, {});
+    } catch (const InferenceEngineException& ex) {
+        ASSERT_EQ(ex.getStatus(), StatusCode::GENERAL_ERROR);
+        ASSERT_STR_CONTAINS(ex.what(), ref_error);
+    }
+}
+
+#define shape_case in_params_nchw({1, 512, 1, 1})
+
+struct element_test_params : in_params_nchw, base_test_params {
+    element_test_params(std::string name, std::string pr, in_params_nchw params) :
+            in_params_nchw(params), base_test_params(name, pr) {}
+};
+
+class ElementWiseLayerTest : public LayerTestsCommon<element_test_params> {
+protected:
+    std::string getEltwiseModel(const element_test_params& p) {
+        std::vector<size_t> dims{p.in.n, p.in.c};
+        InOutShapes inout = {{dims, dims},
+                           {dims}};
+
+        std::map<std::string, std::string> params = {
+                {"operation", "prod"}
+        };
+
+        return V2NetBuilder::buildNetworkWithOneInput(
+                "Eltwise_Only", dims, p.precision)
+                .addInputLayer(p.precision, dims)
+                .addLayer("Eltwise", p.precision, &params, inout)
+                .havingEdges().connect(0, 2).connect(1, 2).finish();
+    }
+};
+
+TEST_P(ElementWiseLayerTest, CanNotLoadEltwiseLayer) {
+    auto param = GetParam();
+    string ref_error = (param.device == CommonTestUtils::DEVICE_FPGA) ?
+                       "Graph is not supported on FPGA" : "Unsupported layer: Eltwise1:Eltwise";
+
+    std::string model = getEltwiseModel(param);
+    InferenceEngine::Core core;
+    auto network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr());
+
+    try {
+        ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device, {});
+    } catch (const InferenceEngineException& ex) {
+        ASSERT_EQ(ex.getStatus(), StatusCode::GENERAL_ERROR);
+        ASSERT_STR_CONTAINS(ex.what(), ref_error);
+    }
+}
+
+struct object_test_params : in_params_nchw, base_test_params {
+    object_test_params(std::string name, std::string pr, in_params_nchw params) :
+            in_params_nchw(params), base_test_params(name, pr) {}
+};
+
+class ObjectDetectionLayerTest : public LayerTestsCommon<object_test_params> {
+protected:
+    std::string getPermuteModel(const object_test_params& p) {
+        std::map<std::string, std::string> params = {
+                {"order", "0,2,3,1"}
+        };
+        InOutShapes inout = {
+                {{p.in.n, p.in.c, p.in.w, p.in.h}},
+                {{p.in.n, p.in.w, p.in.h, p.in.c}},
+        };
+
+        V2NetBuilder model = V2NetBuilder::buildNetworkWithOneInput(
+                "Permute_Only", inout.inDims[0], p.precision)
+                .addLayer("Permute", p.precision, &params, inout);
+        return model.finish();
+    }
+
+    std::string getPriorBoxModel(const object_test_params& p) {
+        std::map<std::string, std::string> params = {
+                {"min_size",     "162.000000"},
+                {"max_size",     "213.000000"},
+                {"aspect_ratio", "2.000000,3.000000"},
+                {"flip",         "1"},
+                {"clip",         "0"},
+                {"variance",     "0.100000,0.100000,0.200000,0.200000"},
+                {"img_size",     "0"},
+                {"img_h",        "0"},
+                {"img_w",        "0"},
+                {"step",         "64.000000"},
+                {"step_h",       "0.000000"},
+                {"step_w",       "0.000000"},
+                {"offset",       "0.500000"}
+        };
+        InOutShapes inout = {
+                {{p.in.n, p.in.c, p.in.w, p.in.h}},
+                {{p.in.n, p.in.w, p.in.h, p.in.c}},
+        };
+
+        V2NetBuilder model = V2NetBuilder::buildNetworkWithOneInput(
+                "PriorBox_Only", inout.inDims[0], p.precision)
+                .addLayer("PriorBox", p.precision, &params, inout);
+        return model.finish();
+    }
+
+    // TODO: add DetectionOutput and Tile layers
+};
+
+TEST_P(ObjectDetectionLayerTest, CanNotLoadPermuteLayer) {
+    auto param = GetParam();
+    string ref_error = (param.device == CommonTestUtils::DEVICE_FPGA) ?
+                       "Graph is not supported on FPGA" : "Unsupported layer: Permute1:Permute";
+    
+    InferenceEngine::Core core;
+    CNNNetwork network = core.ReadNetwork(getPermuteModel(param), Blob::CPtr());
+
+    try {
+        ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device);
+    } catch (const InferenceEngineException& ex) {
+        ASSERT_EQ(ex.getStatus(), StatusCode::GENERAL_ERROR);
+        ASSERT_STR_CONTAINS(ex.what(), ref_error);
+    }
+}
+
+#define scale_case scale_params({1, 512, 38, 38}, 2)
+
+#define object_case in_params_nchw({1, 804, 38, 38})
+
+struct memory_test_params : in_params_nchw, base_test_params {
+    memory_test_params(std::string name, std::string pr, in_params_nchw params) :
+            in_params_nchw(params), base_test_params(name, pr) {}
+};
+
+class MemoryLayerTest : public LayerTestsCommon<memory_test_params> {
+protected:
+    std::string getMemoryModel(const memory_test_params& p) {
+        std::map<std::string, std::string> params = {
+                {"id",    "r_2-3"},
+                {"index", "1"},
+                {"size",  "2"}
+        };
+        std::map<std::string, std::string> paramsFC = {
+                {"out-size", "2048"}
+        };
+        InOutShapes inout = {
+                {{p.in.n, p.in.c}},
+                {{p.in.n, 2048}}
+        };
+        InOutShapes inoutMemory = {
+                {{p.in.n, 2048}},
+                {}
+        };
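+        // Edges: input(0) -> FullyConnected(2), input(1) -> FullyConnected(3), FullyConnected(2) -> Memory(4).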
+        return V2NetBuilder::buildNetworkWithOneInput(
+                "FC_with_Memory", inout.inDims[0], p.precision)
+                .addInputLayer(p.precision, inout.inDims[0])
+                .addLayer("FullyConnected", p.precision, &paramsFC, inout, 1638400)
+                .addLayer("FullyConnected", p.precision, &paramsFC, inout, 1638400)
+                .addLayer("Memory", p.precision, &paramsFC, inoutMemory)
+                .havingEdges().connect(0, 2).connect(1, 3).connect(2, 4).finish();
+    }
+};
+
+TEST_P(MemoryLayerTest, CanNotLoadMemoryLayer) {
+    auto param = GetParam();
+    string ref_error = (param.device == CommonTestUtils::DEVICE_FPGA) ?
+                       "Graph is not supported on FPGA" : "Unsupported layer: Memory1:Memory";
+
+    InferenceEngine::Core core;
+    std::string model = getMemoryModel(param);
+    auto network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr());
+
+    try {
+        ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device, {});
+    } catch (const InferenceEngineException& ex) {
+        ASSERT_EQ(ex.getStatus(), StatusCode::GENERAL_ERROR);
+        ASSERT_STR_CONTAINS(ex.what(), ref_error);
+    }
+}
+IE_SUPPRESS_DEPRECATED_END
+
+#define memory_case in_params_nchw({1, 512, 38, 38})
+
diff --git a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_layout.hpp b/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_layout.hpp
new file mode 100644 (file)
index 0000000..bc0ef24
--- /dev/null
@@ -0,0 +1,273 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin.h"
+
+#include "precision_utils.h"
+#include "common_test_utils/xml_net_builder/xml_net_builder.hpp"
+
+using namespace std;
+using namespace CommonTestUtils;
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
+static constexpr int testDimW = 17;
+static constexpr int testDimH = 3;
+
+struct in_params {
+    std::vector<size_t> in;
+};
+
+struct base_layout_test_params {
+    std::string device;
+    Layout layout;
+    std::string precision;
+    base_layout_test_params(std::string name, std::string _precision = "FP32", Layout _layout = Layout::C) {
+        device = name;
+        precision = _precision;
+        layout = _layout;
+    }
+};
+
+struct power_params : in_params {
+    int power;
+    int scale;
+    int shift;
+
+    power_params(in_params in,
+        int _power,
+        int _scale,
+        int _shift ) :
+        in_params(in) {
+        power = _power;
+        scale = _scale;
+        shift = _shift;
+    }
+};
+
+struct layout_test_params : power_params, base_layout_test_params {
+    layout_test_params(std::string name, std::string precision, Layout layout, power_params params) :
+        power_params(params), base_layout_test_params(name, precision, layout) {}
+};
+
+std::ostream &operator<<(std::ostream &os, const layout_test_params &p) {
+    return os << "device: " << p.device;
+}
+
+std::string layoutToString(Layout l) {
+    std::string str_layout;
+    switch (l)
+    {
+    case InferenceEngine::NCHW:
+        str_layout = "NCHW";
+        break;
+    case InferenceEngine::NHWC:
+        str_layout = "NHWC";
+        break;
+    case InferenceEngine::C:
+        str_layout = "C";
+        break;
+    case InferenceEngine::CHW:
+        str_layout = "CHW";
+        break;
+    case InferenceEngine::HW:
+        str_layout = "HW";
+        break;
+    case InferenceEngine::NC:
+        str_layout = "NC";
+        break;
+    case InferenceEngine::CN:
+        str_layout = "CN";
+        break;
+    default:
+        break;
+    }
+    return str_layout;
+}
+
+std::string getTestName(testing::TestParamInfo<layout_test_params> obj) {
+    return  "layout_" + layoutToString(obj.param.layout) + "_" + obj.param.device;
+}
+
+class LayoutTestCanLoad : public TestsCommon,
+    public testing::WithParamInterface<layout_test_params>{
+protected:
+    std::string getPowerModel(const layout_test_params &p) {
+        std::map<std::string, std::string> params = {
+            { "power", std::to_string(p.power) },
+            { "scale", std::to_string(p.scale) },
+            { "shift", std::to_string(p.shift) }
+        };
+
+        InOutShapes inout = {{p.in},
+                           {p.in}};
+
+        V2NetBuilder model = V2NetBuilder::buildNetworkWithOneInput(
+                "Power_Only", inout.inDims[0], p.precision)
+            .addLayer("Power", p.precision, &params, inout);
+        return model.finish(false);
+    }
+
+    std::string getConvModel(const layout_test_params &p) {
+        std::map<std::string, std::string> params = {
+            { "stride-x", "1" },
+            { "stride-y", "1" },
+            { "pad-x",    "0" },
+            { "pad-y",    "0" },
+            { "kernel-x", "1" },
+            { "kernel-y", "1" },
+            { "output",   std::to_string(testDimW)},
+            { "group",    "1" }
+        };
+
+        std::vector<size_t> out = p.in;
+        if (out.size() == 1 || out.size() == 3) {
+            out[0] = testDimW;
+        } else {
+            out[1] = testDimW;
+        }
+
+        InOutShapes inout = {{p.in},
+                           {out}};
+
+        const auto elemSize = p.precision == "FP16" ? sizeof(ie_fp16) : sizeof(float);
+
+        size_t weights = testDimW * testDimH * elemSize;
+        size_t biases = testDimW * elemSize;
+
+        V2NetBuilder model = V2NetBuilder::buildNetworkWithOneInput(
+                "Convolution_Only", inout.inDims[0], p.precision)
+            .addLayer("Convolution", p.precision, &params, inout, weights, biases);
+        return model.finish(false);
+    }
+
+    std::string getActivModel(const layout_test_params &p) {
+        std::map<std::string, std::string> params = {
+            { "type", "sigmoid" }
+        };
+
+        InOutShapes inout = {{p.in},
+                           {p.in}};
+
+        V2NetBuilder model = V2NetBuilder::buildNetworkWithOneInput(
+                "Activation_Only", inout.inDims[0], p.precision)
+            .addLayer("Activation", p.precision, &params, inout);
+        return model.finish(false);
+    }
+
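+    // Allocates a U8 blob large enough for the convolution weights followed by the biases, filled with test data.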
+    Blob::Ptr getNetworkWeights(const layout_test_params &p) {
+        const auto elemSize = p.precision == "FP16" ? sizeof(ie_fp16) : sizeof(float);
+
+    TensorDesc tdesc(Precision::U8, { (testDimW * testDimH + testDimW) * elemSize }, C);
+        TBlob<uint8_t> *weights = new TBlob<uint8_t>(tdesc);
+        weights->allocate();
+        fill_data(weights->buffer().as<float*>(),
+            weights->size() / sizeof(float));
+        TBlob<uint8_t>::Ptr weights_ptr = TBlob<uint8_t>::Ptr(weights);
+        return weights_ptr;
+    }
+};
+
+class LayoutTestCanLoadPower : public LayoutTestCanLoad {};
+class LayoutTestCanLoadConv : public LayoutTestCanLoad {};
+class LayoutTestCanLoadActiv : public LayoutTestCanLoad {};
+
+class LayoutTestCanNotLoadPower : public LayoutTestCanLoad {};
+class LayoutTestCanNotLoadConv : public LayoutTestCanLoad {};
+
+TEST_P(LayoutTestCanLoadPower, NetWithLayout) {
+    auto param = GetParam();
+    InferenceEngine::Core core;
+    std::string model = getPowerModel(param);
+    Blob::CPtr weights;
+    auto network = core.ReadNetwork(model, weights);
+
+    ASSERT_NO_THROW(ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device, {}));
+}
+
+TEST_P(LayoutTestCanLoadConv, NetWithLayout) {
+    auto param = GetParam();
+    InferenceEngine::Core core;
+    std::string model = getConvModel(param);
+    Blob::Ptr weights = getNetworkWeights(param);
+    auto network = core.ReadNetwork(model, weights);
+    try {
+        ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device, {});
+    } catch (const InferenceEngineException &ex) {
+        std::cout << "Device " << param.device << " threw exception \"" << ex.what() << "\" with status code " << ex.getStatus() << std::endl;
+        GTEST_FAIL() << ex.what();
+    } catch (const std::exception &ex) {
+        std::cout << "Caught " << ex.what() << std::endl;
+        GTEST_FAIL() << ex.what();
+    } catch (...) {
+        GTEST_FAIL();
+    }
+}
+
+
+TEST_P(LayoutTestCanLoadActiv, NetWithLayout) {
+    auto param = GetParam();
+    InferenceEngine::Core core;
+    std::string model = getActivModel(param);
+    CNNNetwork network;
+    Blob::CPtr weights;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights));
+    ASSERT_NO_THROW(ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device, {}));
+}
+
+
+TEST_P(LayoutTestCanNotLoadPower, NetWithLayout) {
+    auto param = GetParam();
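+    // The misspelled "existance" below is intentional: it must match the FPGA plugin's error message verbatim.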
+    string ref_error = (param.device == CommonTestUtils::DEVICE_MYRIAD) ? "Unsupported 1D dimensions" :
+                       (param.device == CommonTestUtils::DEVICE_FPGA) ? "Graph is not supported on FPGA plugin due to existance of layer (Name: Input0, Type: Input)\n"\
+                            "in topology. Most likely you need to use heterogeneous plugin instead of FPGA plugin directly." : "Invalid data dimensions";
+    InferenceEngine::Core core;
+    std::string model = getPowerModel(param);
+    CNNNetwork network;
+    Blob::CPtr weights;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights));
+
+    try {
+        ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device, {});
+    } catch (const InferenceEngineException &ex) {
+        std::cout << "Device " << param.device << " threw exception \"" << ex.what() << "\" with status code " << ex.getStatus() << std::endl;
+        //ASSERT_EQ(ex.getStatus(), StatusCode::GENERAL_ERROR);
+        ASSERT_STR_CONTAINS(ex.what(), ref_error);
+    } catch (const std::exception &ex) {
+        std::cout << "Caught " << ex.what() << std::endl;
+        GTEST_FAIL() << ex.what();
+    } catch (...) {
+        GTEST_FAIL();
+    }
+}
+
+TEST_P(LayoutTestCanNotLoadConv, NetWithLayout) {
+    auto param = GetParam();
+    string ref_error =
+        (param.device == CommonTestUtils::DEVICE_MYRIAD) ? "Convolution supports only 3D or 4D or 5D input" :
+        (param.device == CommonTestUtils::DEVICE_FPGA) ? "Graph is not supported on FPGA" :
+        (param.device == CommonTestUtils::DEVICE_CPU) ? "Convolution layer. Unsupported mode. Only 4D and 5D blobs are supported as input." :
+        "Invalid data dimensions";
+    InferenceEngine::Core core;
+    std::string model = getConvModel(param);
+    CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, getNetworkWeights(param)));
+    try {
+        ExecutableNetwork exeNetwork = core.LoadNetwork(network, param.device, {});
+    } catch (const InferenceEngineException &ex) {
+        std::cout << "Device " << param.device << " threw exception \"" << ex.what() << "\" with status code " << ex.getStatus() << std::endl;
+        /*if (param.device != CommonTestUtils::DEVICE_CPU) {
+            ASSERT_EQ(ex.getStatus(), StatusCode::GENERAL_ERROR);
+        }*/
+        ASSERT_STR_CONTAINS(ex.what(), ref_error);
+    } catch (const std::exception &ex) {
+        std::cout << "Caught " << ex.what() << std::endl;
+        GTEST_FAIL() << ex.what();
+    } catch (...) {
+        GTEST_FAIL();
+    }
+}
diff --git a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_perf_counters.hpp b/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_perf_counters.hpp
new file mode 100644 (file)
index 0000000..70782a0
--- /dev/null
@@ -0,0 +1,42 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin.h"
+#include "details/ie_cnn_network_tools.h"
+#include "exec_graph_info.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+
+namespace {
+std::string getTestCaseName(testing::TestParamInfo<BehTestParams> obj) {
+    return obj.param.device + "_" + obj.param.input_blob_precision.name()
+           + (obj.param.config.size() ? "_" + obj.param.config.begin()->second : "");
+}
+}
+
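+// Performance counts must not be available before the first inference: the query fails and the map stays empty.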
+TEST_P(BehaviorPluginTestPerfCounters, EmptyWhenNotExecuted) {
+    auto param = GetParam();
+
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+
+    std::map<std::string, InferenceEngineProfileInfo> perfMap;
+    ASSERT_EQ(StatusCode::GENERAL_ERROR, testEnv->inferRequest->GetPerformanceCounts(perfMap, &response)) << response.msg;
+    ASSERT_EQ(perfMap.size(), 0);
+}
+
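+// With KEY_PERF_COUNT enabled, performance counts must be populated after a successful Infer().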
+TEST_P(BehaviorPluginTestPerfCounters, NotEmptyWhenExecuted) {
+    auto param = GetParam();
+
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv,
+            {{ PluginConfigParams::KEY_PERF_COUNT, PluginConfigParams::YES }}));
+    ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response));
+    ASSERT_EQ(StatusCode::OK, sts) << response.msg;
+
+    std::map<std::string, InferenceEngineProfileInfo> perfMap;
+    ASSERT_EQ(StatusCode::OK, testEnv->inferRequest->GetPerformanceCounts(perfMap, &response)) << response.msg;
+    ASSERT_NE(perfMap.size(), 0);
+}
diff --git a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_set_preprocess.hpp b/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_set_preprocess.hpp
new file mode 100644 (file)
index 0000000..8b17573
--- /dev/null
@@ -0,0 +1,75 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin.h"
+#include <test_assertions.hpp>
+
+using namespace std;
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
+namespace {
+    std::string getTestCaseName(testing::TestParamInfo<BehTestParams> obj) {
+        return obj.param.device + "_" + obj.param.input_blob_precision.name()
+               + (obj.param.config.size() ? "_" + obj.param.config.begin()->second : "");
+    }
+}
+
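+// A resize algorithm set on the network's input info must be reported back by the infer request's PreProcessInfo.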
+TEST_P(BehaviorPluginTestPreProcess, SetPreProcessToInputInfo) {
+    InferenceEngine::Core core;
+
+    CNNNetwork cnnNetwork = core.ReadNetwork(GetParam().model_xml_str, GetParam().weights_blob);
+
+    auto &preProcess = cnnNetwork.getInputsInfo().begin()->second->getPreProcess();
+    preProcess.setResizeAlgorithm(ResizeAlgorithm::RESIZE_BILINEAR);
+
+    InferenceEngine::IExecutableNetwork::Ptr exeNetwork;
+    ASSERT_NO_THROW(exeNetwork = core.LoadNetwork(cnnNetwork, GetParam().device, GetParam().config));
+
+    IInferRequest::Ptr inferRequest;
+    ASSERT_EQ(StatusCode::OK, exeNetwork->CreateInferRequest(inferRequest, &response));
+
+    {
+        ConstInputsDataMap inputsMap;
+        ASSERT_EQ(StatusCode::OK, exeNetwork->GetInputsInfo(inputsMap, &response));
+        const auto& name = inputsMap.begin()->second->name();
+        const PreProcessInfo *info;
+        ASSERT_EQ(StatusCode::OK, inferRequest->GetPreProcess(name.c_str(), &info, &response));
+
+        ASSERT_EQ(info->getResizeAlgorithm(), ResizeAlgorithm::RESIZE_BILINEAR);
+        ASSERT_PREPROCESS_INFO_EQ(preProcess, *info);
+    }
+}
+
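+// PreProcessInfo passed via SetBlob() must affect only that request; other requests keep the network's defaults.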
+TEST_P(BehaviorPluginTestPreProcess, SetPreProcessToInferRequest) {
+    TestEnv::Ptr testEnv;
+    ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
+    ResponseDesc response;
+
+    auto& request = testEnv->inferRequest;
+    PreProcessInfo preProcessInfo;
+    preProcessInfo.setResizeAlgorithm(ResizeAlgorithm::RESIZE_BILINEAR);
+
+    IInferRequest::Ptr untouched_request = testEnv->exeNetwork.CreateInferRequest();
+
+    ConstInputsDataMap inputs = testEnv->exeNetwork.GetInputsInfo();
+    auto input_name = inputs.begin()->second->name();
+    auto inputBlob = prepareInputBlob(GetParam().input_blob_precision, testEnv->inputDims);
+
+    ASSERT_EQ(StatusCode::OK, request->SetBlob(input_name.c_str(), inputBlob, preProcessInfo, &response));
+
+    {
+        const PreProcessInfo *info = nullptr;
+        ASSERT_EQ(StatusCode::OK, request->GetPreProcess(input_name.c_str(), &info, &response));
+        ASSERT_EQ(info->getResizeAlgorithm(), ResizeAlgorithm::RESIZE_BILINEAR);
+        ASSERT_PREPROCESS_INFO_EQ(preProcessInfo, *info);
+    }
+
+    {
+        const PreProcessInfo *info = nullptr;
+        ASSERT_EQ(StatusCode::OK, untouched_request->GetPreProcess(input_name.c_str(), &info, &response));
+        ASSERT_EQ(testEnv->network.getInputsInfo()[input_name]->getPreProcess().getResizeAlgorithm(), info->getResizeAlgorithm());
+    }
+}
diff --git a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_unsupported.hpp b/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_unsupported.hpp
new file mode 100644 (file)
index 0000000..a5d6bc3
--- /dev/null
@@ -0,0 +1,42 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin.h"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
+namespace {
+    std::string getTestCaseName(testing::TestParamInfo<BehTestParams> obj) {
+        return obj.param.device + (obj.param.config.size() ? "_" + obj.param.config.begin()->second : "") +
+               "_" + getModelName(obj.param.model_xml_str) + "_" + obj.param.input_blob_precision.name();
+    }
+}
+
+// Load unsupported network type to the Plugin
+TEST_P(BehaviorPluginTestAllUnsupported, cannotLoadUnsupportedNetwork) {
+    auto param = GetParam();
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network = core.ReadNetwork(GetParam().model_xml_str, GetParam().weights_blob);
+    ASSERT_THROW(core.LoadNetwork(network, param.device, param.config), InferenceEngineException);
+}
+
+// Load incorrect input type for Plugin
+TEST_P(BehaviorPluginTestTypeUnsupported, LoadIncorrectInputType) {
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network = core.ReadNetwork(GetParam().model_xml_str, GetParam().weights_blob);
+    InferenceEngine::InputsDataMap inputs = network.getInputsInfo();
+    inputs.begin()->second->setPrecision(GetParam().input_blob_precision);
+    ASSERT_THROW(core.LoadNetwork(network, GetParam().device, GetParam().config), InferenceEngineException);
+}
+
+TEST_P(BehaviorPluginTestBatchUnsupported, DISABLED_LoadIncorrectBatchSize) {
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network = core.ReadNetwork(GetParam().model_xml_str, GetParam().weights_blob);
+    network.setBatchSize(GetParam().batch_size);
+    ASSERT_THROW(core.LoadNetwork(network, GetParam().device, GetParam().config), InferenceEngineException);
+}
diff --git a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_version.hpp b/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_version.hpp
new file mode 100644 (file)
index 0000000..867f03c
--- /dev/null
@@ -0,0 +1,43 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin.h"
+#include <array>
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
+namespace {
+std::string getTestCaseName(testing::TestParamInfo<BehTestParams> obj) {
+    return obj.param.device + (obj.param.config.size() ? "_" + obj.param.config.begin()->second : "")
+           + "_" + obj.param.input_blob_precision.name();
+}
+}
+
+// Check that the plugin reports the expected Inference Engine API version
+TEST_P(BehaviorPluginTestVersion, pluginCurrentVersionIsCorrect) {
+    InferenceEngine::Core core;
+    const std::string device = GetParam().device;
+    if (device.find(CommonTestUtils::DEVICE_MULTI) == std::string::npos &&
+        device.find(CommonTestUtils::DEVICE_HETERO) == std::string::npos) {
+        std::map<std::string, InferenceEngine::Version> versions = core.GetVersions(GetParam().device);
+        ASSERT_EQ(versions.size(), 1);
+        auto version = versions.begin()->second;
+        ASSERT_EQ(version.apiVersion.major, 2);
+        ASSERT_EQ(version.apiVersion.minor, 1);
+    }
+}
+
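+// Returns a copy of the given C array extended by one trailing element.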
+template <typename T, size_t N>
+std::array<T, N+1> add_element_into_array(const T (&arr)[N], const T & element) {
+    std::array<T, N+1> ar;
+    for (size_t i = 0; i != N; i++) {
+        ar[i] = arr[i];
+    }
+    ar[N] = element;
+    return ar;
+}
diff --git a/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugins.hpp b/inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugins.hpp
new file mode 100644 (file)
index 0000000..99c225e
--- /dev/null
@@ -0,0 +1,223 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin.h"
+#include <thread>
+
+using namespace std;
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
+namespace {
+    std::string getTestCaseName(testing::TestParamInfo<BehTestParams> obj) {
+        return obj.param.device + "_" + obj.param.input_blob_precision.name() + "_" + getModelName(obj.param.model_xml_str)
+               + (obj.param.config.size() ? "_" + obj.param.config.begin()->second : "");
+    }
+
+    std::string getOutputTestCaseName(testing::TestParamInfo<BehTestParams> obj) {
+        return obj.param.device + "_" + obj.param.output_blob_precision.name()
+               + (obj.param.config.size() ? "_" + obj.param.config.begin()->second : "");
+    }
+}
+
+Blob::Ptr BehaviorPluginTest::makeNotAllocatedBlob(Precision eb, Layout l, const SizeVector &dims) {
+    TensorDesc tdesc (eb, dims, l);
+    switch (eb) {
+        case Precision::I8:
+            return make_shared<TBlob<int8_t>>(tdesc);
+        case Precision::I16:
+            return make_shared<TBlob<int16_t>>(tdesc);
+        case Precision::I32:
+            return make_shared<TBlob<int32_t>>(tdesc);
+        case Precision::U8:
+            return make_shared<TBlob<uint8_t>>(tdesc);
+        case Precision::U16:
+            return make_shared<TBlob<uint16_t>>(tdesc);
+        case Precision::FP16:
+            return make_shared<TBlob<uint16_t>>(tdesc);
+        case Precision::FP32:
+            return make_shared<TBlob<float>>(tdesc);
+        case Precision::Q78:
+            return make_shared<TBlob<uint16_t>>(tdesc);
+        case Precision::UNSPECIFIED:
+            return make_shared<TBlob<float>>(tdesc);
+        default:
+            break;
+    }
+    throw std::runtime_error("unexpected precision");
+}
+
+IE_SUPPRESS_DEPRECATED_START
+void BehaviorPluginTest::setInputNetworkPrecision(CNNNetwork &network, InputsDataMap &inputs_info,
+    Precision input_precision) {
+    inputs_info = network.getInputsInfo();
+    ASSERT_TRUE(inputs_info.size() == 1u);
+    inputs_info.begin()->second->setPrecision(input_precision);
+}
+
+void BehaviorPluginTest::setOutputNetworkPrecision(CNNNetwork &network, OutputsDataMap &outputs_info,
+    Precision output_precision) {
+    outputs_info = network.getOutputsInfo();
+    ASSERT_EQ(outputs_info.size(), 1u);
+    outputs_info.begin()->second->setPrecision(output_precision);
+}
+IE_SUPPRESS_DEPRECATED_END
+
+class BehaviorPluginTestInput : public BehaviorPluginTest { };
+class BehaviorPluginTestOutput : public BehaviorPluginTest { };
+
+TEST_F(BehaviorPluginTest, AllocateNullBlob) {
+    TensorDesc tdesc(Precision::FP32, NCHW);
+    InferenceEngine::TBlob<float> blob(tdesc);
+    ASSERT_NO_THROW(blob.allocate());
+}
+
+// Create Plugin
+// TEST_P(BehaviorPluginTest, canCreatePlugin) {
+//     ASSERT_NO_THROW(InferenceEnginePluginPtr plugin(make_plugin_name(GetParam().pluginName)));
+// }
+
+// Load correct network to Plugin
+// TODO
+// TEST_P(BehaviorPluginTest, canLoadCorrectNetwork) {
+//     InferenceEnginePluginPtr plugin(make_plugin_name(GetParam().pluginName));
+//     ASSERT_NO_THROW(pluginLoadCorrectNetwork(GetParam(), plugin));
+// }
+
+// // TODO
+// // Load correct network to Plugin
+// TEST_P(BehaviorPluginTest, canLoadTwoNetworks) {
+//     auto param = GetParam();
+//     InferenceEnginePluginPtr plugin(make_plugin_name(param.pluginName));
+//     pluginLoadCorrectNetwork(param, plugin);
+//     ASSERT_NO_THROW(pluginLoadCorrectNetwork(param, plugin));
+// }
+
+// Load incorrect network to Plugin
+TEST_P(BehaviorPluginTest, canNotLoadNetworkWithoutWeights) {
+    InferenceEngine::Core core;
+    CNNNetwork network = core.ReadNetwork(GetParam().model_xml_str, Blob::CPtr());
+
+    IExecutableNetwork::Ptr exeNetwork;
+    ASSERT_THROW(core.LoadNetwork(network, GetParam().device, {}), InferenceEngineException);
+}
+
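+// Compares two text files line by line and throws std::logic_error on the first mismatch or length difference.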
+static bool compare_two_files_lexicographically(const std::string& name_a, const std::string& name_b) {
+    std::ifstream a(name_a), b(name_b);
+
+    std::string line_a, line_b;
+    while (std::getline(a, line_a)) {
+        if (!std::getline(b, line_b))
+            throw std::logic_error("Second file is shorter than first");
+
+        if (line_a != line_b) {
+            std::cout << "Line A: " << line_a << std::endl;
+            std::cout << "Line B: " << line_b << std::endl;
+            throw std::logic_error("Files are different");
+        }
+    }
+
+    if (std::getline(b, line_b))
+        throw std::logic_error("First file is shorter than second");
+    else
+        return true;
+}
+
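+// LoadNetwork() must not modify the original network: its serialized form must be identical before and after.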
+TEST_P(BehaviorPluginTest, pluginDoesNotChangeOriginalNetwork) {
+    const std::string name_a = "a.xml";
+    const std::string name_b = "b.xml";
+    IE_SUPPRESS_DEPRECATED_START
+    auto param = GetParam();
+
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network = core.ReadNetwork(param.model_xml_str, param.weights_blob);
+    network.serialize(name_a);
+    IE_SUPPRESS_DEPRECATED_END
+
+    ASSERT_NO_THROW(core.LoadNetwork(network, param.device, param.config));
+    network.serialize(name_b);
+
+    ASSERT_NO_THROW(compare_two_files_lexicographically(name_a, name_b));
+}
+
+TEST_P(BehaviorPluginTestInput, canSetInputPrecisionForNetwork) {
+    auto param = GetParam();
+    InputsDataMap inputs_info;
+
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network = core.ReadNetwork(param.model_xml_str, param.weights_blob);
+    setInputNetworkPrecision(network, inputs_info, param.input_blob_precision);
+
+    // Input image format I16 is not supported yet.
+    // Disable verification for myriad plugin: CVS-7979, CVS-8144
+    if ((param.device == CommonTestUtils::DEVICE_MYRIAD
+            || param.device == CommonTestUtils::DEVICE_HDDL
+            || param.device == CommonTestUtils::DEVICE_KEEMBAY)
+         && param.input_blob_precision == Precision::I16) {
+        std::string msg;
+        StatusCode sts = StatusCode::OK;
+        try {
+            core.LoadNetwork(network, GetParam().device, param.config);
+        } catch (const InferenceEngineException &ex) {
+            msg = ex.what();
+            sts = ex.getStatus();
+        }
+        ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << msg;
+        std::string refError = "Input image format I16 is not supported yet.";
+        ASSERT_EQ(refError, msg);
+    } else {
+        ASSERT_NO_THROW(core.LoadNetwork(network, GetParam().device, param.config));
+    }
+}
+
+TEST_P(BehaviorPluginTestOutput, canSetOutputPrecisionForNetwork) {
+    auto param = GetParam();
+    IE_SUPPRESS_DEPRECATED_START
+    OutputsDataMap outputs_info;
+
+    InferenceEngine::Core ie;
+    InferenceEngine::CNNNetwork network = ie.ReadNetwork(param.model_xml_str, param.weights_blob);
+
+    setOutputNetworkPrecision(network, outputs_info, param.output_blob_precision);
+
+    StatusCode sts = StatusCode::OK;
+    std::string msg;
+
+    try {
+        ExecutableNetwork exeNetwork = ie.LoadNetwork(network, param.device, GetParam().config);
+    } catch (const InferenceEngineException &ex) {
+        sts = ex.getStatus();
+        msg = ex.what();
+        std::cout << "LoadNetwork() threw InferenceEngineException. Status: " << sts << ", message: " << msg << std::endl;
+    }
+
+    if (param.output_blob_precision == Precision::I16 || param.output_blob_precision == Precision::U8) {
+        if (param.device == "CPU") {
+            ASSERT_EQ(StatusCode::OK, sts);
+        } else if (param.device == "GPU") {
+            // Supported precisions: FP32, FP16
+            ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << msg;
+            std::string refError = "The plugin does not support output";
+            ASSERT_STR_CONTAINS(msg, refError);
+        } else {
+            // Supported precisions: FP32, FP16
+            ASSERT_EQ(StatusCode::GENERAL_ERROR, sts) << msg;
+            std::string refError = "Unsupported output precision!";
+            ASSERT_STR_CONTAINS(msg, refError);
+        }
+    } else {
+        ASSERT_EQ(StatusCode::OK, sts);
+    }
+}
diff --git a/inference-engine/tests_deprecated/behavior/vpu/CMakeLists.txt b/inference-engine/tests_deprecated/behavior/vpu/CMakeLists.txt
new file mode 100644 (file)
index 0000000..99c6d89
--- /dev/null
@@ -0,0 +1,78 @@
+# Copyright (C) 2018-2020 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+function(enable_vpu TARGET_NAME FLAG_NAME PLUGIN_NAME)
+
+    # Common tests shared by the HDDL, MYRIAD, and KMB plugins
+    file(GLOB_RECURSE TEST_INCLUDE
+            ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instances/*.hpp)
+
+    file(GLOB_RECURSE TEST_SRC
+            ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instances/*.cpp
+            )
+
+    # Tests for Myriad plugin
+    if (ENABLE_MYRIAD)
+        file(GLOB_RECURSE TEST_INCLUDE myriad_tests/*.hpp)
+        file(GLOB_RECURSE VPU_TESTS myriad_tests/*.cpp)
+        if (NOT ENABLE_MYRIAD_NO_BOOT)
+            list(REMOVE_ITEM VPU_TESTS ${CMAKE_CURRENT_SOURCE_DIR}/myriad_tests/vpu_boot_tests.cpp)
+        endif()
+        list(APPEND TEST_SRC ${VPU_TESTS})
+    endif()
+
+    # Tests for HDDL plugin
+    if (ENABLE_HDDL)
+        file(GLOB HDDL_TESTS hddl_tests/*.cpp)
+        list(APPEND TEST_SRC ${HDDL_TESTS})
+    endif()
+
+    list(APPEND DEPENDENCIES
+            ${PLUGIN_NAME}
+            vpu_copy_firmware)
+
+    source_group("src" FILES ${TEST_SRC})
+    source_group("include" FILES ${TEST_INCLUDE})
+
+    add_executable(${TARGET_NAME}
+            ${TEST_SRC}
+            ${TEST_INCLUDE})
+
+    target_compile_definitions(${TARGET_NAME} PRIVATE
+            INSTANTIATE_TESTS=1
+            ${FLAG_NAME}=1)
+
+    target_link_libraries(${TARGET_NAME} PRIVATE
+            IEBehaviorSharedTests)
+
+    target_include_directories(${TARGET_NAME} PRIVATE
+            ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instances/plugin_tests)
+
+    if (ENABLE_MYRIAD)
+        include(${XLINK_DIR}/XLink.cmake)
+
+        target_compile_definitions(${TARGET_NAME} PRIVATE __PC__)
+
+        target_include_directories(${TARGET_NAME} PRIVATE
+                ${IE_MAIN_SOURCE_DIR}/src/vpu/common/include
+                ${CMAKE_CURRENT_SOURCE_DIR}/myriad_tests
+                ${IE_MAIN_SOURCE_DIR}/thirdparty/movidius
+                ${IE_MAIN_SOURCE_DIR}/thirdparty/movidius/mvnc/include
+                ${IE_MAIN_SOURCE_DIR}/thirdparty/movidius/mvnc/include/watchdog
+                ${XLINK_INCLUDE}
+                ${XLINK_PLATFORM_INCLUDE})
+        target_link_libraries(${TARGET_NAME} PRIVATE mvnc)
+    endif()
+
+    add_test(NAME ${TARGET_NAME}
+            COMMAND ${TARGET_NAME})
+
+    add_dependencies(${TARGET_NAME} ${DEPENDENCIES})
+endfunction(enable_vpu)
+
+if (ENABLE_MYRIAD)
+    set(MYRIAD_TARGET_NAME MyriadBehaviorTests)
+    enable_vpu(${MYRIAD_TARGET_NAME} USE_MYRIAD myriadPlugin)
+endif()
diff --git a/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/aot_behavior_tests.cpp b/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/aot_behavior_tests.cpp
new file mode 100644 (file)
index 0000000..66f7972
--- /dev/null
@@ -0,0 +1,209 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+/// @rationale AOT tests verify the network export/import functionality.
+
+#if defined(ENABLE_MYRIAD)
+
+#include <behavior_test_plugin.h>
+#include <mvnc.h>
+#include <vpu/backend/blob_format.hpp>
+#include <vpu/graph_transformer.hpp>
+#include <file_utils.h>
+
+#include "vpu_test_data.hpp"
+
+using namespace std;
+using namespace vpu;
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
+namespace {
+std::string getTestCaseName(testing::TestParamInfo<BehTestParams> obj) {
+    return obj.param.device + "_" + obj.param.input_blob_precision.name()
+        + (obj.param.config.size() ? "_" + obj.param.config.begin()->second : "");
+}
+
+}
+
+#if (defined(_WIN32) || defined(_WIN64) )
+extern "C" void initialize_usb_boot();
+#else
+#define initialize_usb_boot()
+#endif
+
+
+class AOTBehaviorTests : public BehaviorPluginTest {
+ public:
+    typedef std::chrono::high_resolution_clock Time;
+    typedef std::chrono::milliseconds ms;
+
+
+    static std::string exported_file_name() {
+        return "local_tmp.fw";
+    }
+
+    void SetUp() override {
+        initialize_usb_boot();
+    }
+
+    void dumpBlob() {
+        InferenceEngine::Core core;
+
+        CNNNetwork network = core.ReadNetwork(GetParam().model_xml_str, GetParam().weights_blob);
+
+        ExecutableNetwork ret;
+        ASSERT_NO_THROW(ret = core.LoadNetwork(network, GetParam().device, {}));
+
+        ret.Export(exported_file_name());
+    }
+
+    void canImportBlob() {
+        ASSERT_EQ(StatusCode::OK, importBlob()) << response.msg;
+    }
+
+    void canNotImportBlob() {
+        ASSERT_NE(StatusCode::OK, importBlob()) << response.msg;
+    }
+
+    StatusCode importBlob() {
+        InferenceEngine::Core core;
+        ExecutableNetwork ret;
+
+        try {
+            ret = core.ImportNetwork(exported_file_name(), GetParam().device, { {KEY_LOG_LEVEL, LOG_DEBUG} });
+        } catch (const InferenceEngine::details::InferenceEngineException &ex) {
+            return ex.getStatus();
+        }
+
+        return StatusCode::OK;
+    }
+
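+    // Rewrites the blob version fields of the mv_blob_header that immediately follows the ELF header in the exported file.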
+    void setHeaderVersion(int major, int minor) {
+        FILE *f = fopen(exported_file_name().c_str(), "r+b");
+        ASSERT_NE(f, nullptr);
+
+        ASSERT_EQ(0, fseek(f, sizeof(ElfN_Ehdr), SEEK_SET));
+        mv_blob_header blobHeader;
+
+        ASSERT_EQ(sizeof(mv_blob_header), fread(&blobHeader, 1, sizeof(mv_blob_header), f));
+
+        ASSERT_EQ(0, fseek(f, sizeof(ElfN_Ehdr), SEEK_SET));
+
+        blobHeader.blob_ver_major = major;
+        blobHeader.blob_ver_minor = minor;
+
+        ASSERT_EQ(sizeof(mv_blob_header), fwrite(&blobHeader, 1, sizeof(mv_blob_header), f));
+
+        fclose(f);
+    }
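+    // Reads the entire exported blob into memory so it can be handed to ncGraphAllocate() directly.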
+    std::vector<char> getBlobFileContent() {
+        std::ifstream file(exported_file_name(), std::ios_base::binary);
+        std::vector<char> vec;
+
+        if (!file.eof() && !file.fail())
+        {
+            file.seekg(0, std::ios_base::end);
+            std::streampos fileSize = file.tellg();
+            vec.resize(fileSize);
+
+            file.seekg(0, std::ios_base::beg);
+            file.read(&vec[0], fileSize);
+        }
+
+        return vec;
+    }
+
+    ncDeviceHandle_t *device = nullptr;
+
+    bool bootDevice() {
+        ncStatus_t statusOpen = NC_ERROR;
+        std::cout << "Opening device" << std::endl;
+
+#ifdef  _WIN32
+        const char* pathToFw = nullptr;
+#else
+        std::string absPathToFw = getIELibraryPath();
+        const char* pathToFw = absPathToFw.c_str();
+#endif //  _WIN32
+        ncDeviceDescr_t deviceDesc = {};
+        deviceDesc.protocol = NC_ANY_PROTOCOL;
+        deviceDesc.platform = NC_ANY_PLATFORM;
+
+        statusOpen = ncDeviceOpen(&device, deviceDesc, 1000, pathToFw);
+
+        if (statusOpen != NC_OK) {
+            ncDeviceClose(&device);
+            return false;
+        }
+
+        return true;
+    }
+};
+
+TEST_P(AOTBehaviorTests, canImportNonModified) {
+    ASSERT_NO_FATAL_FAILURE(dumpBlob());
+    ASSERT_NO_FATAL_FAILURE(canImportBlob());
+}
+
+TEST_P(AOTBehaviorTests, hostSideErrorImportingIfVersionIncorrect) {
+
+    ASSERT_NO_FATAL_FAILURE(dumpBlob());
+    ASSERT_NO_FATAL_FAILURE(setHeaderVersion(vpu::BLOB_VERSION_MAJOR+1, 0));
+    ASSERT_NO_FATAL_FAILURE(canNotImportBlob());
+}
+
+TEST_P(AOTBehaviorTests, canLoadGraphWithoutPlugin) {
+
+    ASSERT_NO_FATAL_FAILURE(dumpBlob());
+
+    auto graph = getBlobFileContent();
+
+    ASSERT_TRUE(bootDevice());
+    ncGraphHandle_t *graphHandle = nullptr;
+    ASSERT_EQ(NC_OK, ncGraphCreate("aot_graph_test", &graphHandle));
+
+    auto res = ncGraphAllocate(device, graphHandle,
+                               (void*)graph.data(), graph.size(), (void*)graph.data(),
+                               sizeof(ElfN_Ehdr) + sizeof(mv_blob_header));
+
+    ncGraphDestroy(&graphHandle);
+    ncDeviceClose(&device);
+
+    ASSERT_EQ(NC_OK, res);
+}
+
+TEST_P(AOTBehaviorTests, deviceSideErrorImportingIfVersionIncorrect) {
+
+    ASSERT_NO_FATAL_FAILURE(dumpBlob());
+    ASSERT_NO_FATAL_FAILURE(setHeaderVersion(vpu::BLOB_VERSION_MAJOR+1, 0));
+
+    auto graph = getBlobFileContent();
+
+    ASSERT_TRUE(bootDevice());
+    ncGraphHandle_t *graphHandle = nullptr;
+    ASSERT_EQ(NC_OK, ncGraphCreate("aot_graph_test_negative", &graphHandle));
+
+    auto res = ncGraphAllocate(device, graphHandle,
+                               (void*)graph.data(), graph.size(), (void*)graph.data(),
+                               sizeof(ElfN_Ehdr) + sizeof(mv_blob_header));
+
+    ncGraphDestroy(&graphHandle);
+    ncDeviceClose(&device);
+
+    ASSERT_NE(NC_OK, res);
+}
+
+const BehTestParams vpuValues[] = {
+    BEH_MYRIAD,
+};
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, AOTBehaviorTests, ValuesIn(vpuValues), getTestCaseName);
+
+#endif
diff --git a/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/helpers/myriad_devices.cpp b/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/helpers/myriad_devices.cpp
new file mode 100644 (file)
index 0000000..a33a583
--- /dev/null
@@ -0,0 +1,64 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_devices.hpp"
+#include <usb_boot.h>
+#include <mvnc_ext.h>
+#include "mvnc_data.h"
+
+//------------------------------------------------------------------------------
+// Implementation of methods of class MyriadDevicesInfo
+//------------------------------------------------------------------------------
+
+constexpr char MyriadDevicesInfo::kMyriadXName[];
+constexpr char MyriadDevicesInfo::kMyriad2Name[];
+
+MyriadDevicesInfo::MyriadDevicesInfo() {
+#if (defined(_WIN32) || defined(_WIN64))
+    initialize_usb_boot();
+#else
+    firmware_dir_ = "./lib/";
+#endif
+}
+
+std::vector<std::string> MyriadDevicesInfo::getDevicesList(
+                    const ncDeviceProtocol_t deviceProtocol,
+                    const ncDevicePlatform_t devicePlatform,
+                    const XLinkDeviceState_t state) {
+    deviceDesc_t req_deviceDesc = {};
+    req_deviceDesc.protocol = convertProtocolToXlink(deviceProtocol);
+    req_deviceDesc.platform = convertPlatformToXlink(devicePlatform);
+
+    deviceDesc_t deviceDescArray[NC_MAX_DEVICES] = {};
+    unsigned int foundDevices = 0;
+    XLinkFindAllSuitableDevices(
+            state, req_deviceDesc, deviceDescArray, NC_MAX_DEVICES, &foundDevices);
+
+    std::vector<std::string> devNames;
+    for (unsigned int i = 0; i < foundDevices; ++i) {
+        devNames.emplace_back(deviceDescArray[i].name);
+    }
+
+    return devNames;
+}
+
+int MyriadDevicesInfo::getAmountOfDevices(
+                            const ncDeviceProtocol_t deviceProtocol,
+                            const ncDevicePlatform_t devicePlatform,
+                            const XLinkDeviceState_t state) {
+    deviceDesc_t req_deviceDesc = {};
+    req_deviceDesc.protocol = convertProtocolToXlink(deviceProtocol);
+    req_deviceDesc.platform = convertPlatformToXlink(devicePlatform);
+
+    deviceDesc_t deviceDescArray[NC_MAX_DEVICES] = {};
+    unsigned int foundDevices = 0;
+    XLinkFindAllSuitableDevices(
+            state, req_deviceDesc, deviceDescArray, NC_MAX_DEVICES, &foundDevices);
+
+    return foundDevices;
+}
diff --git a/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/helpers/myriad_devices.hpp b/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/helpers/myriad_devices.hpp
new file mode 100644 (file)
index 0000000..00d2738
--- /dev/null
@@ -0,0 +1,78 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "string"
+#include "vector"
+#include <algorithm>
+#include <mvnc.h>
+#include <XLink.h>
+
+//------------------------------------------------------------------------------
+// class MyriadDevicesInfo
+//------------------------------------------------------------------------------
+
+class MyriadDevicesInfo {
+public:
+    // Constants
+    static constexpr char kMyriadXName[] = "ma2480";
+    static constexpr char kMyriad2Name[] = "ma2450";
+
+    //Constructor
+    MyriadDevicesInfo();
+
+    //Accessors
+    inline const std::string& firmwareDir();
+
+    std::vector<std::string> getDevicesList(
+            const ncDeviceProtocol_t deviceProtocol = NC_ANY_PROTOCOL,
+            const ncDevicePlatform_t devicePlatform = NC_ANY_PLATFORM,
+            const XLinkDeviceState_t state = X_LINK_ANY_STATE
+            );
+
+    inline bool isMyriadXDevice(const std::string &device_name);
+    inline bool isMyriad2Device(const std::string &device_name);
+
+    inline bool isMyriadBootedDevice(const std::string &device_name);
+    inline bool isMyriadUnbootedDevice(const std::string &device_name);
+
+    int getAmountOfDevices(const ncDeviceProtocol_t deviceProtocol = NC_ANY_PROTOCOL,
+                           const ncDevicePlatform_t devicePlatform = NC_ANY_PLATFORM,
+                           const XLinkDeviceState_t state = X_LINK_ANY_STATE);
+
+    inline long getAmountOfBootedDevices(const ncDeviceProtocol_t deviceProtocol = NC_ANY_PROTOCOL);
+    inline long getAmountOfUnbootedDevices(const ncDeviceProtocol_t deviceProtocol = NC_ANY_PROTOCOL);
+
+private:
+    std::string firmware_dir_;
+};
+
+const std::string& MyriadDevicesInfo::firmwareDir() {
+    return firmware_dir_;
+}
+
+bool MyriadDevicesInfo::isMyriadXDevice(const std::string &device_name) {
+    return (device_name.find(kMyriadXName) != std::string::npos);
+}
+
+bool MyriadDevicesInfo::isMyriad2Device(const std::string &device_name) {
+    return (device_name.find(kMyriad2Name) != std::string::npos);
+}
+
+bool MyriadDevicesInfo::isMyriadBootedDevice(const std::string &device_name) {
+    return (!isMyriad2Device(device_name) && !isMyriadXDevice(device_name));
+}
+
+bool MyriadDevicesInfo::isMyriadUnbootedDevice(const std::string &device_name) {
+    return (isMyriad2Device(device_name) || isMyriadXDevice(device_name));
+}
+
+long MyriadDevicesInfo::getAmountOfUnbootedDevices(const ncDeviceProtocol_t deviceProtocol) {
+    return getAmountOfDevices(deviceProtocol, NC_ANY_PLATFORM, X_LINK_UNBOOTED);
+}
+
+long MyriadDevicesInfo::getAmountOfBootedDevices(const ncDeviceProtocol_t deviceProtocol) {
+    return getAmountOfDevices(deviceProtocol, NC_ANY_PLATFORM, X_LINK_BOOTED);
+}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/helpers/myriad_load_network_case.cpp b/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/helpers/myriad_load_network_case.cpp
new file mode 100644 (file)
index 0000000..7bf1598
--- /dev/null
@@ -0,0 +1,31 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_load_network_case.hpp"
+
+//------------------------------------------------------------------------------
+// Implementation of methods of class MyriadLoadNetworkTestCase
+//------------------------------------------------------------------------------
+
+void MyriadLoadNetworkTestCase::SetUp() {
+    try {
+        ie = std::make_shared<InferenceEngine::Core>();
+    } catch (...) {
+        std::cerr << "Create core error" << std::endl;
+    }
+
+    cnnNetwork = ie->ReadNetwork(FuncTestUtils::TestModel::convReluNormPoolFcModelFP16.model_xml_str,
+                                 FuncTestUtils::TestModel::convReluNormPoolFcModelFP16.weights_blob);
+}
+
+void MyriadLoadNetworkTestCase::LoadNetwork() {
+    ASSERT_NO_THROW(InferenceEngine::IExecutableNetwork::Ptr exe_network =
+                            ie->LoadNetwork(cnnNetwork, "MYRIAD"));
+}
+
+bool MyriadLoadNetworkTestCase::IsDeviceAvailable(const std::string &device_name) {
+    auto act_devices = getDevicesList();
+    return std::find(act_devices.begin(), act_devices.end(), device_name) != act_devices.end();
+}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/helpers/myriad_load_network_case.hpp b/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/helpers/myriad_load_network_case.hpp
new file mode 100644 (file)
index 0000000..8fa5da0
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "myriad_devices.hpp"
+#include <behavior_test_plugin.h>
+#include <mvnc.h>
+
+using namespace InferenceEngine;
+using ExeNetworkPtr = InferenceEngine::IExecutableNetwork::Ptr;
+
+//------------------------------------------------------------------------------
+// class MyriadLoadNetworkTestCase
+//------------------------------------------------------------------------------
+
+class MyriadLoadNetworkTestCase : public testing::Test,
+                                        public MyriadDevicesInfo
+{
+protected:
+    // Operations
+    void SetUp() override;
+    void LoadNetwork();
+
+    bool IsDeviceAvailable(const std::string &device_name);
+
+    // Data section
+    InferenceEngine::CNNNetwork cnnNetwork;
+    std::shared_ptr<InferenceEngine::Core> ie;
+};
diff --git a/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/helpers/myriad_protocol_case.cpp b/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/helpers/myriad_protocol_case.cpp
new file mode 100644 (file)
index 0000000..d564850
--- /dev/null
@@ -0,0 +1,45 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_protocol_case.hpp"
+#include "mvnc_ext.h"
+
+void MyriadProtocolTests::SetUp() {
+    protocol = GetParam();
+}
+
+void MyriadProtocolTests::SetUpTestCase() {
+    try {
+        ie = std::make_shared<InferenceEngine::Core>();
+    } catch (...) {
+        std::cerr << "Create core error" << std::endl;
+    }
+}
+
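+// Maps an MVNC protocol to the corresponding MYRIAD plugin configuration.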
+std::map<std::string, std::string> MyriadProtocolTests::getConfigForProtocol(const ncDeviceProtocol_t protocol) {
+    switch (protocol) {
+        case NC_ANY_PROTOCOL :
+            return {{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_INFO)},
+                    {VPU_MYRIAD_CONFIG_KEY(PROTOCOL), ""}};
+        case NC_USB:
+            return {{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_INFO)},
+                    {VPU_MYRIAD_CONFIG_KEY(PROTOCOL), VPU_MYRIAD_CONFIG_VALUE(USB)}};
+        case NC_PCIE:
+            return {{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_INFO)},
+                    {VPU_MYRIAD_CONFIG_KEY(PROTOCOL), VPU_MYRIAD_CONFIG_VALUE(PCIE)}};
+        default:
+            return {};
+    }
+}
+
+std::string MyriadProtocolTests::getTestCaseName(
+    const ::testing::TestParamInfo<ncDeviceProtocol_t> param) {
+    return std::string(ncProtocolToStr(param.param));
+}
+
+void MyriadProtocolTests::TearDownTestCase() {
+    ie.reset();
+}
diff --git a/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/helpers/myriad_protocol_case.hpp b/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/helpers/myriad_protocol_case.hpp
new file mode 100644 (file)
index 0000000..df05410
--- /dev/null
@@ -0,0 +1,39 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+#include <gtest/gtest.h>
+
+#include <ie_core.hpp>
+#include "myriad_devices.hpp"
+#include <behavior_test_plugin.h>
+#include <mvnc.h>
+
+static const std::vector<ncDeviceProtocol_t> myriadProtocols = {
+    NC_ANY_PROTOCOL,
+    NC_USB,
+    NC_PCIE
+};
+
+class MyriadProtocolTests : public testing::Test,
+                            public testing::WithParamInterface<ncDeviceProtocol_t>,
+                            public MyriadDevicesInfo {
+public:
+    // IE variables
+    InferenceEngine::IInferRequest::Ptr request;
+    InferenceEngine::ResponseDesc resp;
+    StatusCode statusCode = StatusCode::GENERAL_ERROR;
+    static std::shared_ptr<InferenceEngine::Core> ie;
+
+    // MVNC variables
+    ncDeviceProtocol_t protocol;
+
+    void SetUp() override;
+    static void SetUpTestCase();
+    static void TearDownTestCase();
+
+    static std::map<std::string, std::string> getConfigForProtocol(ncDeviceProtocol_t protocol);
+    static std::string getTestCaseName(
+        const ::testing::TestParamInfo<ncDeviceProtocol_t> param);
+};
diff --git a/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/vpu_boot_tests.cpp b/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/vpu_boot_tests.cpp
new file mode 100644 (file)
index 0000000..8525def
--- /dev/null
@@ -0,0 +1,115 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <behavior_test_plugin.h>
+#include <XLink.h>
+#include <mvnc.h>
+#include <mvnc_ext.h>
+#include "vpu_test_data.hpp"
+#include "helpers/myriad_devices.hpp"
+
+namespace {
+    #define ASSERT_NO_ERROR(call)   ASSERT_EQ(call, 0)
+    #define ASSERT_ERROR            ASSERT_TRUE
+
+    const int MAX_DEVICES   = 32;
+    const int MAX_DEV_NAME  = 255;
+
+    std::string getTestCaseName(testing::TestParamInfo<BehTestParams> obj) {
+        return obj.param.pluginName + "_" + obj.param.input_blob_precision.name()
+               + (obj.param.config.size() ? "_" + obj.param.config.begin()->second : "");
+    }
+}
+
+#if (defined(_WIN32) || defined(_WIN64) )
+extern "C" void initialize_usb_boot();
+#else
+#define initialize_usb_boot()
+#endif
+
+class MYRIADBoot : public MyriadDevicesInfo,
+                   public BehaviorPluginTest {
+ public:
+#if !(defined(_WIN32) || defined(_WIN64))
+    const char  firmwareDir[255] = "./lib/";
+#else
+    const char* firmwareDir = nullptr;
+#endif
+
+    void SetUp() override {
+        initialize_usb_boot();
+    }
+
+    /*
+     * @brief Boot any free device
+     */
+    void bootOneDevice() {
+        ASSERT_NO_ERROR(ncDeviceLoadFirmware(NC_ANY_PLATFORM, firmwareDir));
+    }
+
+};
+
+/*
+ * @brief Boot myriad device through XLink, and then try to connect to it with plugin
+ */
+#if !(defined(_WIN32) || defined(_WIN64))   // TODO CVS-15574
+TEST_P(MYRIADBoot, ConnectToAlreadyBootedDevice) {
+#else
+TEST_P(MYRIADBoot, DISABLED_ConnectToAlreadyBootedDevice) {
+#endif
+    bootOneDevice();
+    ASSERT_EQ(getAmountOfBootedDevices(), 1);
+    {
+        InferenceEnginePluginPtr plugin(make_plugin_name(GetParam().pluginName));
+        CNNNetReader reader;
+        reader.ReadNetwork(GetParam().model_xml_str.data(), GetParam().model_xml_str.length());
+
+        CNNNetwork network = reader.getNetwork();
+        ExecutableNetwork ret;
+
+        sts = plugin->LoadNetwork(ret, network, {
+                {KEY_LOG_LEVEL, LOG_DEBUG},
+                {KEY_VPU_MYRIAD_WATCHDOG, NO},
+        }, &response);
+
+        ASSERT_NE(StatusCode::OK, sts) << response.msg;
+
+        ASSERT_EQ(getAmountOfBootedDevices(), 1);
+    }
+    ncDeviceResetAll();
+}
+
+/*
+ * @brief Check that with the KEY_VPU_MYRIAD_WATCHDOG NO option the plugin boots a new device
+ * @warning Test requires two or more Myriad devices
+ */
+TEST_P(MYRIADBoot, DISABLED_OpenNotBootedDevice) {
+    ASSERT_GE(getAmountOfUnbootedDevices(), 2);
+    bootOneDevice();
+    ASSERT_EQ(getAmountOfBootedDevices(), 1);
+    {
+        InferenceEnginePluginPtr plugin(make_plugin_name(GetParam().pluginName));
+        CNNNetReader reader;
+        reader.ReadNetwork(GetParam().model_xml_str.data(), GetParam().model_xml_str.length());
+
+        CNNNetwork network = reader.getNetwork();
+        ExecutableNetwork ret;
+
+        sts = plugin->LoadNetwork(ret, network, {
+                {KEY_LOG_LEVEL, LOG_DEBUG},
+                {KEY_VPU_MYRIAD_WATCHDOG, NO},
+        }, &response);
+
+        ASSERT_NE(StatusCode::OK, sts) << response.msg;
+
+        ASSERT_EQ(getAmountOfBootedDevices(), 2);
+    }
+    ncDeviceResetAll();
+}
+
+const BehTestParams vpuValues[] = {
+        BEH_MYRIAD,
+};
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, MYRIADBoot, ValuesIn(vpuValues), getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/vpu_get_metric_tests.cpp b/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/vpu_get_metric_tests.cpp
new file mode 100644 (file)
index 0000000..890fb3b
--- /dev/null
@@ -0,0 +1,100 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <inference_engine.hpp>
+#include <vpu/vpu_plugin_config.hpp>
+#include "behavior_test_plugin.h"
+
+class VPUGetMetric : public testing::Test {
+protected:
+    InferenceEngine::Core ie;
+
+    std::vector<std::string> getAvailableDevices() {
+        auto result = Parameter{};
+        result = ie.GetMetric("MYRIAD", METRIC_KEY(AVAILABLE_DEVICES));
+
+        return result.as<std::vector<std::string>>();
+    }
+
+    ExecutableNetwork loadNetworkOnDevice(const std::string &deviceName) {
+        auto network = ie.ReadNetwork(FuncTestUtils::TestModel::convReluNormPoolFcModelFP16.model_xml_str,
+                                      FuncTestUtils::TestModel::convReluNormPoolFcModelFP16.weights_blob);
+
+        return ie.LoadNetwork(network, deviceName);
+    }
+};
+
+TEST_F(VPUGetMetric, GetThermalStatsFromNetwork) {
+    const auto exe_network = loadNetworkOnDevice("MYRIAD");
+
+    auto result = Parameter{};
+    ASSERT_NO_THROW(result = exe_network.GetMetric(METRIC_KEY(DEVICE_THERMAL)));
+
+    ASSERT_FALSE(result.empty());
+    ASSERT_GT(result.as<float>(), 0);
+}
+
+TEST_F(VPUGetMetric, GetThermalStatsFromPlugin) {
+    std::vector<std::string> availableDevices;
+    ASSERT_NO_THROW(availableDevices = getAvailableDevices());
+    ASSERT_TRUE(!availableDevices.empty());
+
+    for (const auto &availableDevice : availableDevices) {
+        const auto deviceName = "MYRIAD." + availableDevice;
+        ASSERT_NO_THROW(loadNetworkOnDevice(deviceName));
+
+        auto result = Parameter{};
+        ASSERT_NO_THROW(result = ie.GetMetric(deviceName, METRIC_KEY(DEVICE_THERMAL)));
+
+        ASSERT_FALSE(result.empty());
+        ASSERT_GT(result.as<float>(), 0.f);
+    }
+}
+
+TEST_F(VPUGetMetric, ThermalStatsFromPluginWithIncorrectID) {
+    std::vector<std::string> availableDevices;
+    ASSERT_NO_THROW(availableDevices = getAvailableDevices());
+    ASSERT_TRUE(!availableDevices.empty());
+
+    // Load network with correct device to fill the device pool.
+    const auto deviceName = "MYRIAD." + availableDevices.front();
+    ASSERT_NO_THROW(loadNetworkOnDevice(deviceName));
+
+    // Try to get the DEVICE_THERMAL metric for a device with an incorrect name.
+    // The call must not throw, but the returned parameter must be empty.
+    const auto incorrectDeviceName = "MYRIAD.incorrect_device";
+    auto result = Parameter{};
+    ASSERT_NO_THROW(result = ie.GetMetric(incorrectDeviceName, METRIC_KEY(DEVICE_THERMAL)));
+    ASSERT_TRUE(result.empty());
+}
+
+TEST_F(VPUGetMetric, ThermalStatsFromPluginWithoutLoadedNetwork) {
+    std::vector<std::string> availableDevices;
+    ASSERT_NO_THROW(availableDevices = getAvailableDevices());
+    ASSERT_TRUE(!availableDevices.empty());
+
+    // Try to get the DEVICE_THERMAL metric for a device on which no network is loaded.
+    // The call must not throw, but the returned parameter must be empty.
+    const auto deviceName = "MYRIAD." + availableDevices.front();
+    auto result = Parameter{};
+    ASSERT_NO_THROW(result = ie.GetMetric(deviceName, METRIC_KEY(DEVICE_THERMAL)));
+    ASSERT_TRUE(result.empty());
+}
+
+TEST_F(VPUGetMetric, MyriadGetAvailableDevices) {
+    std::vector<std::string> availableDevices;
+    ASSERT_NO_THROW(availableDevices = getAvailableDevices());
+    ASSERT_TRUE(!availableDevices.empty());
+
+    auto result = Parameter{};
+    auto deviceNames = std::vector<std::string>(availableDevices.size());
+    for (size_t i = 0; i < availableDevices.size(); ++i) {
+        const auto deviceName = "MYRIAD." + availableDevices[i];
+        ASSERT_NO_THROW(result = ie.GetMetric(deviceName, METRIC_KEY(FULL_DEVICE_NAME)));
+
+        deviceNames[i] = result.as<std::string>();
+        ASSERT_TRUE(deviceNames[i] != availableDevices[i]);
+    }
+}
diff --git a/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/vpu_load_network_tests.cpp b/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/vpu_load_network_tests.cpp
new file mode 100644 (file)
index 0000000..eb4baab
--- /dev/null
@@ -0,0 +1,81 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <iostream>
+#include <map>
+#include <gtest/gtest.h>
+#include "behavior_test_plugin.h"
+#include "helpers/myriad_load_network_case.hpp"
+
+TEST_F(MyriadLoadNetworkTestCase, ReloadPlugin) {
+    ASSERT_NO_THROW(LoadNetwork());
+    ASSERT_NO_THROW(LoadNetwork());
+}
+
+TEST_F(MyriadLoadNetworkTestCase, SimpleLoading) {
+    auto devices = getDevicesList();
+    ASSERT_TRUE(devices.size());
+
+    auto device_to_load = devices[0];
+    std::map<std::string, std::string> config = {
+        {KEY_DEVICE_ID, device_to_load},
+    };
+
+    ASSERT_NO_THROW(ExeNetworkPtr exe_network =
+                        ie->LoadNetwork(cnnNetwork, "MYRIAD", config));
+
+    ASSERT_TRUE(!IsDeviceAvailable(device_to_load));
+}
+
+TEST_F(MyriadLoadNetworkTestCase, LoadingAtTheSameDevice) {
+    auto devices = getDevicesList();
+    ASSERT_TRUE(devices.size());
+
+    auto device_to_load = devices[0];
+    std::map<std::string, std::string> config = {
+        {KEY_DEVICE_ID, device_to_load},
+    };
+
+    ASSERT_NO_THROW(ExeNetworkPtr exe_network =
+                        ie->LoadNetwork(cnnNetwork, "MYRIAD", config));
+
+    ASSERT_TRUE(!IsDeviceAvailable(device_to_load));
+
+    ASSERT_NO_THROW(ExeNetworkPtr exe_network =
+                        ie->LoadNetwork(cnnNetwork, "MYRIAD", config));
+}
+
+TEST_F(MyriadLoadNetworkTestCase, ThrowsExceptionWhenNameIsInvalid) {
+    auto device_to_load = "SomeVeryBadName";
+    std::map<std::string, std::string> config = {
+        {KEY_DEVICE_ID, device_to_load},
+    };
+
+    ASSERT_ANY_THROW(ExeNetworkPtr exe_network =
+        ie->LoadNetwork(cnnNetwork, "MYRIAD", config));
+}
+
+TEST_F(MyriadLoadNetworkTestCase, ThrowsExceptionWhenPlatformConflictsWithProtocol) {
+    std::string wrong_platform;
+    auto devices = getDevicesList();
+    ASSERT_TRUE(devices.size());
+
+    auto device_to_load = devices[0];
+
+    IE_SUPPRESS_DEPRECATED_START
+    if(isMyriadXDevice(device_to_load)) {
+        wrong_platform = VPU_MYRIAD_2450;
+    } else {
+        wrong_platform = VPU_MYRIAD_2480;
+    }
+    IE_SUPPRESS_DEPRECATED_END
+
+    std::map<std::string, std::string> config = {
+        {KEY_DEVICE_ID, device_to_load},
+        {KEY_VPU_MYRIAD_PLATFORM, wrong_platform},
+    };
+
+    ASSERT_ANY_THROW(ExeNetworkPtr exe_network =
+        ie->LoadNetwork(cnnNetwork, "MYRIAD", config));
+}
diff --git a/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/vpu_protocol_tests.cpp b/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/vpu_protocol_tests.cpp
new file mode 100644 (file)
index 0000000..76e201a
--- /dev/null
@@ -0,0 +1,32 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "helpers/myriad_protocol_case.hpp"
+
+std::shared_ptr<InferenceEngine::Core> MyriadProtocolTests::ie = nullptr;
+
+TEST_P(MyriadProtocolTests, CanInferenceWithProtocol) {
+    if (protocol != NC_ANY_PROTOCOL && !getAmountOfDevices(protocol)) {
+        GTEST_SKIP();
+    }
+
+    auto network = ie->ReadNetwork(FuncTestUtils::TestModel::convReluNormPoolFcModelFP16.model_xml_str,
+                                   FuncTestUtils::TestModel::convReluNormPoolFcModelFP16.weights_blob);
+
+    std::map<std::string, std::string> config = getConfigForProtocol(protocol);
+
+    InferenceEngine::IExecutableNetwork::Ptr exe_network =
+            ie->LoadNetwork(network, "MYRIAD", config);
+
+    ASSERT_NO_THROW(statusCode = exe_network->CreateInferRequest(request, &resp));
+    ASSERT_EQ(statusCode, StatusCode::OK) << resp.msg;
+
+    ASSERT_NO_THROW(statusCode = request->Infer(&resp));
+    ASSERT_EQ(statusCode, StatusCode::OK) << resp.msg;
+}
+
+INSTANTIATE_TEST_CASE_P(VPUConfigProtocolTests,
+                        MyriadProtocolTests,
+                        ::testing::ValuesIn(myriadProtocols),
+                        MyriadProtocolTests::getTestCaseName);
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/vpu_watchdog_tests.cpp b/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/vpu_watchdog_tests.cpp
new file mode 100644 (file)
index 0000000..a20cb24
--- /dev/null
@@ -0,0 +1,238 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <behavior_test_plugin.h>
+#include <XLink.h>
+#include <mvnc.h>
+#include <mvnc/include/ncPrivateTypes.h>
+#include <watchdog.h>
+#include <watchdogPrivate.hpp>
+#include <thread>
+#include <file_utils.h>
+#include "vpu_test_data.hpp"
+
+#include "helpers/myriad_devices.hpp"
+#include <details/ie_exception.hpp>
+
+using namespace std;
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
+namespace {
+inline std::string getTestCaseName(testing::TestParamInfo<BehTestParams> obj) {
+    return obj.param.device + "_" + obj.param.input_blob_precision.name()
+        + (obj.param.config.size() ? "_" + obj.param.config.begin()->second : "");
+}
+}
+
+#if (defined(_WIN32) || defined(_WIN64))
+extern "C" void initialize_usb_boot();
+#else
+#define initialize_usb_boot()
+#endif
+
+
+class MYRIADWatchdog :  public BehaviorPluginTest,
+                        public MyriadDevicesInfo {
+ public:
+    typedef std::chrono::high_resolution_clock Time;
+    typedef std::chrono::milliseconds ms;
+
+    void SetUp() override {
+        initialize_usb_boot();
+    }
+
+    struct DevicesState {
+        int booted = 0;
+        int unbooted = 0;
+        int total() const {return booted + unbooted;}
+    };
+
+    DevicesState queryDevices() {
+        DevicesState devicesState;
+        devicesState.booted = getAmountOfBootedDevices(NC_USB);
+        devicesState.unbooted = getAmountOfUnbootedDevices(NC_USB);
+        return devicesState;
+    }
+
+    ncDeviceHandle_t *device = nullptr;
+    void resetOneDevice() {
+        ncDeviceClose(&device);
+        device = nullptr;
+    }
+
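+    // Note: the ptr_in_dll argument is currently unused by this helper.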
+    void bootOneDevice(int watchdogInterval, void* ptr_in_dll) {
+        ncStatus_t statusOpen = NC_ERROR;
+        std::cout << "Opening device" << std::endl;
+#ifdef  _WIN32
+        const char* pathToFw = nullptr;
+#else
+        std::string absPathToFw = getIELibraryPath();
+        const char* pathToFw = absPathToFw.c_str();
+#endif //  _WIN32
+
+        ncDeviceDescr_t deviceDesc = {};
+        deviceDesc.protocol = NC_ANY_PROTOCOL;
+        deviceDesc.platform = NC_ANY_PLATFORM;
+
+        statusOpen = ncDeviceOpen(&device, deviceDesc, watchdogInterval, pathToFw);
+
+        if (statusOpen != NC_OK) {
+            ncDeviceClose(&device);
+        }
+    }
+};
+
+
+#define ASSERT_BOOTED_DEVICES_ONE_MORE() {\
+    std::cout << "Time since boot:" << chrono::duration_cast<ms>(Time::now() - ctime).count() << std::endl;\
+    auto q = queryDevices();\
+    cout << "BOOTED=" << q.booted << "\n";\
+    cout << "TOTAL=" << q.total() << "\n";\
+    ASSERT_EQ(q.booted, startup_devices.booted + 1);\
+    ASSERT_EQ(q.total(), startup_devices.total());\
+}
+
+#define ASSERT_BOOTED_DEVICES_SAME() {\
+    std::cout << "Time since boot:" << chrono::duration_cast<ms>(Time::now() - ctime).count() << std::endl;\
+    auto q = queryDevices();\
+    cout << "BOOTED=" << q.booted << "\n";\
+    cout << "TOTAL=" << q.total() << "\n";\
+    ASSERT_EQ(q.booted, startup_devices.booted);\
+    ASSERT_EQ(q.total(), startup_devices.total());\
+}
+
+TEST_P(MYRIADWatchdog, canDisableWatchdog) {
+
+    auto startup_devices = queryDevices();
+    ASSERT_GE(startup_devices.unbooted, 1);
+
+    auto ctime = Time::now();
+    SharedObjectLoader myriadPlg (make_plugin_name("myriadPlugin").c_str());
+    void *p = myriadPlg.get_symbol(SOCreatorTrait<IInferencePlugin>::name);
+
+    bootOneDevice(0,  p);
+
+    ASSERT_BOOTED_DEVICES_ONE_MORE();
+
+    // wait for longer than the device-side ping interval, which is 12 s
+    for (int j = 0; j != 20; j++) {
+        std::this_thread::sleep_for(std::chrono::milliseconds(1000));
+        std::cout << "Time since boot:" << chrono::duration_cast<ms>(Time::now() - ctime).count() << std::endl;
+        if (queryDevices().booted == startup_devices.booted) {
+            SUCCEED() << "All devices were reset";
+            break;
+        }
+    }
+    ASSERT_BOOTED_DEVICES_ONE_MORE();
+
+    resetOneDevice();
+
+    ASSERT_BOOTED_DEVICES_SAME();
+}
+
+TEST_P(MYRIADWatchdog, canDetectWhenHostSiteStalled) {
+    auto startup_devices = queryDevices();
+    ASSERT_GE(startup_devices.unbooted, 1);
+
+    auto ctime = Time::now();
+
+    SharedObjectLoader myriadPlg (make_plugin_name("myriadPlugin").c_str());
+    void *p = myriadPlg.get_symbol(SOCreatorTrait<IInferencePlugin>::name);
+
+    bootOneDevice(20000, p);
+
+    // with the increased ping interval, the device-side watchdog will abort execution
+    ASSERT_BOOTED_DEVICES_ONE_MORE();
+
+    // wait until the device notices that no ping request arrived and resets itself
+    for (int j = 0; j != 20; j++) {
+        std::this_thread::sleep_for(std::chrono::milliseconds(1000));
+        std::cout << "Time since boot:" << chrono::duration_cast<ms>(Time::now() - ctime).count() << std::endl;
+        if (queryDevices().booted == startup_devices.booted) {
+            SUCCEED() << "All devices were reset";
+            break;
+        }
+    }
+    // after the watchdog resets the device, it needs some time to reappear in the system
+    std::this_thread::sleep_for(std::chrono::milliseconds(2000));
+    ASSERT_BOOTED_DEVICES_SAME();
+
+    resetOneDevice();
+}
+
+TEST_P(MYRIADWatchdog, watchDogIntervalDefault) {
+    auto startup_devices = queryDevices();
+    auto ctime = Time::now();
+    {
+
+        InferenceEngine::Core core;
+        CNNNetwork network = core.ReadNetwork(GetParam().model_xml_str, Blob::CPtr());
+        ASSERT_GE(startup_devices.unbooted, 1);
+
+        ExecutableNetwork ret;
+        ctime = Time::now();
+        ASSERT_THROW(ret = core.LoadNetwork(network, GetParam().device, {
+            {KEY_LOG_LEVEL, LOG_DEBUG}}),
+            InferenceEngine::details::InferenceEngineException);
+
+        ASSERT_BOOTED_DEVICES_ONE_MORE();
+
+        // wait until the device notices that no ping request arrived and resets itself
+        for (int j = 0; j != 20; j++) {
+            std::this_thread::sleep_for(std::chrono::milliseconds(1000));
+            std::cout << "Time since boot:" << chrono::duration_cast<ms>(Time::now() - ctime).count() << std::endl;
+            if (queryDevices().booted == startup_devices.booted) {
+                SUCCEED() << "All devices were reset";
+                break;
+            }
+        }
+        ASSERT_BOOTED_DEVICES_ONE_MORE();
+    }
+    // the device will be reset when the plugin is unloaded
+    // after the watchdog resets the device, it needs some time to reappear in the system
+    std::this_thread::sleep_for(std::chrono::milliseconds(2000));
+    ASSERT_BOOTED_DEVICES_SAME();
+}
+
+TEST_P(MYRIADWatchdog, canTurnoffWatchDogViaConfig) {
+    auto startup_devices = queryDevices();
+    auto ctime = Time::now();
+    {
+        InferenceEngine::Core core;
+        CNNNetwork network = core.ReadNetwork(GetParam().model_xml_str, Blob::CPtr());
+        ASSERT_GE(startup_devices.unbooted, 1);
+
+        ExecutableNetwork ret;
+        ctime = Time::now();
+        ASSERT_THROW(ret = core.LoadNetwork(network, GetParam().device, {
+            {KEY_LOG_LEVEL, LOG_DEBUG},
+            {KEY_VPU_MYRIAD_WATCHDOG, NO}}),
+            InferenceEngine::details::InferenceEngineException);
+
+        ASSERT_BOOTED_DEVICES_ONE_MORE();
+
+        // wait until the device notices that no ping request arrived and resets itself
+        for (int j = 0; j != 20; j++) {
+            std::this_thread::sleep_for(std::chrono::milliseconds(1000));
+            std::cout << "Time since boot:" << chrono::duration_cast<ms>(Time::now() - ctime).count() << std::endl;
+            if (queryDevices().booted == startup_devices.booted) {
+                SUCCEED() << "All devices were reset";
+                break;
+            }
+        }
+        ASSERT_BOOTED_DEVICES_ONE_MORE();
+    }
+    // the device will be reset when the plugin is unloaded
+    // after the watchdog resets the device, it needs some time to reappear in the system
+    std::this_thread::sleep_for(std::chrono::milliseconds(2000));
+    ASSERT_BOOTED_DEVICES_SAME();
+}
+
+const BehTestParams vpuValues[] = {
+    BEH_MYRIAD,
+};
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, MYRIADWatchdog, ValuesIn(vpuValues), getTestCaseName);
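
(Editorial note: the watchdog contract these three tests pin down, restated as a sketch using the same mvnc calls as bootOneDevice above; only the interval value differs between scenarios.)

    // Sketch only: interval 0 disables the watchdog entirely (canDisableWatchdog);
    // 20000 ms overshoots the ~12 s device-side ping window, so the stick resets
    // itself (canDetectWhenHostSiteStalled).
    ncDeviceHandle_t *handle = nullptr;
    ncDeviceDescr_t desc = {};
    desc.protocol = NC_ANY_PROTOCOL;
    desc.platform = NC_ANY_PLATFORM;
    const int watchdogIntervalMs = 0;  // or e.g. 20000 to starve the device of pings
    ncStatus_t status = ncDeviceOpen(&handle, desc, watchdogIntervalMs, /*pathToFw=*/nullptr);
    if (status != NC_OK) {
        ncDeviceClose(&handle);
    }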
diff --git a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/cpp_wrappers/holders_tests.cpp b/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/cpp_wrappers/holders_tests.cpp
new file mode 100644 (file)
index 0000000..eaa3e59
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "holders_tests.hpp"
+
+INSTANTIATE_TEST_CASE_P(ReleaseOrderTests, CPP_HoldersTests, testing::Combine(testing::ValuesIn(std::vector<std::vector<int>> {
+    // 0 - plugin
+    // 1 - executable_network
+    // 2 - infer_request
+    {0,1,2},
+    {0,2,1},
+    {1,0,2},
+    {1,2,0},
+    {2,0,1},
+    {2,1,0},
+}), testing::Values(std::string("MYRIAD"))));
diff --git a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin.cpp b/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin.cpp
new file mode 100644 (file)
index 0000000..4ff0124
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin.h"
+#include "behavior_test_plugins.hpp"
+#include "vpu_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTest, ValuesIn(supportedValues),
+                        getTestCaseName);
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInput, ValuesIn(allInputSupportedValues),
+                        getTestCaseName);
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestOutput, ValuesIn(allOutputSupportedValues),
+                        getOutputTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_config.cpp b/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_config.cpp
new file mode 100644 (file)
index 0000000..b33e8e8
--- /dev/null
@@ -0,0 +1,28 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_config.hpp"
+#include "vpu_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+    BehaviorTest, BehaviorPluginCorrectConfigTest,
+    ValuesIn(
+        BehTestParams::concat(
+            BehTestParams::concat(deviceSpecificConfigurations, deviceAgnosticConfigurations),
+            withCorrectConfValuesPluginOnly
+        )
+    ),
+    getTestCaseName
+);
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginIncorrectConfigTest, ValuesIn(withIncorrectConfValues),
+                        getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginIncorrectConfigTestInferRequestAPI,
+                        ValuesIn(withIncorrectConfKeys),
+                        getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginCorrectConfigTestInferRequestAPI,
+                        ValuesIn(supportedValues),
+                        getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_exec_graph_info.cpp b/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_exec_graph_info.cpp
new file mode 100644 (file)
index 0000000..ab60df5
--- /dev/null
@@ -0,0 +1,15 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_exec_graph_info.hpp"
+#include "vpu_test_data.hpp"
+
+// TODO: currently these tests are not applicable to the myriadPlugin
+#if 0
+INSTANTIATE_TEST_CASE_P(
+        BehaviorTest,
+        BehaviorPluginTestExecGraphInfo,
+        ValuesIn(supportedValues),
+        getTestCaseName);
+#endif
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp b/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request.cpp
new file mode 100644 (file)
index 0000000..d05de67
--- /dev/null
@@ -0,0 +1,8 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_infer_request.hpp"
+#include "vpu_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequest, ValuesIn(requestsSupportedValues), getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_callback.cpp b/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_callback.cpp
new file mode 100644 (file)
index 0000000..24daac1
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_infer_request_callback.hpp"
+#include "vpu_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestCallback, ValuesIn(requestsSupportedValues),
+                        getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp b/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_config.cpp
new file mode 100644 (file)
index 0000000..7b9db6b
--- /dev/null
@@ -0,0 +1,13 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_infer_request_config.hpp"
+#include "vpu_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestConfig,
+                        ValuesIn(BehTestParams::concat(deviceAgnosticConfigurations, withCorrectConfValuesNetworkOnly)),
+                        getConfigTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestConfigExclusiveAsync, ValuesIn(supportedValues),
+                        getConfigTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp b/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_input.cpp
new file mode 100644 (file)
index 0000000..cdde246
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_infer_request_input.hpp"
+#include "vpu_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestInput, ValuesIn(allInputSupportedValues),
+                        getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp b/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_infer_request_output.cpp
new file mode 100644 (file)
index 0000000..fb68122
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_infer_request_output.hpp"
+#include "vpu_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestInferRequestOutput, ValuesIn(allOutputSupportedValues),
+                        getOutputTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_layers.cpp b/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_layers.cpp
new file mode 100644 (file)
index 0000000..86c076e
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_layers.hpp"
+
+pool_test_params roi_pool_test_cases[] = {
+    pool_test_params(CommonTestUtils::DEVICE_MYRIAD, "FP16", pool_case),
+};
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, ROIPoolingLayerTest,
+                        ::testing::ValuesIn(roi_pool_test_cases),
+                        getTestName<pool_test_params>);
+
+memory_test_params memory_test_cases[] = {
+    memory_test_params(CommonTestUtils::DEVICE_MYRIAD, "FP32", memory_case),
+};
+
+// FIXME
+//#if (defined INSTANTIATE_TESTS)
+//INSTANTIATE_TEST_CASE_P(BehaviorTest, MemoryLayerTest,
+//    ::testing::ValuesIn(memory_test_cases),
+//    getTestName<memory_test_params>);
+//#endif
+
diff --git a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_layout.cpp b/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_layout.cpp
new file mode 100644 (file)
index 0000000..c26895a
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_layout.hpp"
+
+layout_test_params power_test_cases[] = {
+    layout_test_params(CommonTestUtils::DEVICE_MYRIAD, "FP16", Layout::C, power_params({ { 3 } }, 2, 2, 2)),
+    layout_test_params(CommonTestUtils::DEVICE_MYRIAD, "FP16", Layout::NC, power_params({ { 1, 3 } }, 2, 2, 2)),
+    layout_test_params(CommonTestUtils::DEVICE_MYRIAD, "FP16", Layout::CHW, power_params({ { 3, 32, 16 } }, 2, 2, 2)),
+    layout_test_params(CommonTestUtils::DEVICE_MYRIAD, "FP16", Layout::NCHW, power_params({ { 1, 3, 16, 16 } }, 2, 2, 2)),
+};
+INSTANTIATE_TEST_CASE_P(BehaviorTest, LayoutTestCanLoadPower,
+    ::testing::ValuesIn(power_test_cases), getTestName);
+
+layout_test_params conv_neg_test_cases[] = {
+    layout_test_params(CommonTestUtils::DEVICE_MYRIAD, "FP16", Layout::C, power_params({ { 3 } }, 2, 2, 2)),
+    layout_test_params(CommonTestUtils::DEVICE_MYRIAD, "FP16", Layout::NC, power_params({ { 1, 3 } }, 2, 2, 2)),
+};
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, LayoutTestCanNotLoadConv,
+    ::testing::ValuesIn(conv_neg_test_cases), getTestName);
+
+layout_test_params conv_test_cases[] = {
+    layout_test_params(CommonTestUtils::DEVICE_MYRIAD, "FP16", Layout::CHW, power_params({ { 3, 32, 16 } }, 2, 2, 2)),
+    layout_test_params(CommonTestUtils::DEVICE_MYRIAD, "FP16", Layout::NCHW, power_params({ { 1, 3, 16, 16 } }, 2, 2, 2)),
+};
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, LayoutTestCanLoadConv,
+    ::testing::ValuesIn(conv_test_cases), getTestName);
diff --git a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_set_preprocess.cpp b/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_set_preprocess.cpp
new file mode 100644 (file)
index 0000000..24a1234
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_set_preprocess.hpp"
+#include "vpu_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest,
+                        BehaviorPluginTestPreProcess,
+                        ValuesIn(supportedValues),
+                        getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_unsupported.cpp b/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_unsupported.cpp
new file mode 100644 (file)
index 0000000..abe5a37
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_unsupported.hpp"
+#include "vpu_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestAllUnsupported, ValuesIn(allUnSupportedValues),
+    getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestTypeUnsupported, ValuesIn(typeUnSupportedValues),
+    getTestCaseName);
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestBatchUnsupported, ValuesIn(batchUnSupportedValues),
+    getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_version.cpp b/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/behavior_test_plugin_version.cpp
new file mode 100644 (file)
index 0000000..db9b742
--- /dev/null
@@ -0,0 +1,8 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin_version.hpp"
+#include "vpu_test_data.hpp"
+
+INSTANTIATE_TEST_CASE_P(BehaviorTest, BehaviorPluginTestVersion, ValuesIn(add_element_into_array(supportedValues, BEH_HETERO)), getTestCaseName);
diff --git a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/vpu_test_data.hpp b/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/plugin_tests/vpu_test_data.hpp
new file mode 100644 (file)
index 0000000..39ae924
--- /dev/null
@@ -0,0 +1,114 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior_test_plugin.h"
+
+// correct params
+#define BEH_MYRIAD BehTestParams("MYRIAD", \
+                                 FuncTestUtils::TestModel::convReluNormPoolFcModelFP16.model_xml_str, \
+                                 FuncTestUtils::TestModel::convReluNormPoolFcModelFP16.weights_blob, \
+                                 Precision::FP32)
+#define BEH_HETERO BehTestParams("HETERO", \
+                                 FuncTestUtils::TestModel::convReluNormPoolFcModelFP32.model_xml_str, \
+                                 FuncTestUtils::TestModel::convReluNormPoolFcModelFP32.weights_blob, \
+                                 Precision::FP32)
+
+// all parameters are unsupported (the reverse of the correct params above)
+#define BEH_US_ALL_MYRIAD  BehTestParams("MYRIAD", \
+                                         FuncTestUtils::TestModel::convReluNormPoolFcModelQ78.model_xml_str, \
+                                         FuncTestUtils::TestModel::convReluNormPoolFcModelQ78.weights_blob, \
+                                         Precision::Q78)
+const BehTestParams supportedValues[] = {
+        BEH_MYRIAD,
+};
+
+const BehTestParams requestsSupportedValues[] = {
+        BEH_MYRIAD,
+};
+
+const BehTestParams allInputSupportedValues[] = {
+        BEH_MYRIAD, BEH_MYRIAD.withIn(Precision::U8), BEH_MYRIAD.withIn(Precision::FP16),
+        // I16 not supported yet
+        // (ISSUE-7979) [IE myriad] The plugin should support I16 format for Input
+        //BEH_MYRIAD.withIn(Precision::I16),
+};
+
+const BehTestParams allOutputSupportedValues[] = {
+        BEH_MYRIAD, BEH_MYRIAD.withOut(Precision::FP16),
+};
+
+const BehTestParams typeUnSupportedValues[] = {
+        BEH_MYRIAD.withIn(Precision::Q78), BEH_MYRIAD.withIn(Precision::U16), BEH_MYRIAD.withIn(Precision::I8),
+        BEH_MYRIAD.withIn(Precision::I16), BEH_MYRIAD.withIn(Precision::I32),
+};
+
+const BehTestParams batchUnSupportedValues[] = {
+        BEH_MYRIAD.withBatchSize(0),
+};
+
+const BehTestParams allUnSupportedValues[] = {
+        BEH_US_ALL_MYRIAD,
+};
+
+const std::vector<BehTestParams> deviceSpecificConfigurations = {
+    BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), VPU_MYRIAD_CONFIG_VALUE(USB)}}),
+    BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), VPU_MYRIAD_CONFIG_VALUE(PCIE)}}),
+
+    BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PLATFORM), VPU_MYRIAD_CONFIG_VALUE(2450)}}),
+    BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PLATFORM), VPU_MYRIAD_CONFIG_VALUE(2480)}}),
+};
+
+const std::vector<BehTestParams> deviceAgnosticConfigurations = {
+    BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(IGNORE_IR_STATISTIC), CONFIG_VALUE(YES)}}),
+    BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(IGNORE_IR_STATISTIC), CONFIG_VALUE(NO)}}),
+
+    BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(YES)}}),
+    BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(NO)}}),
+
+    BEH_MYRIAD.withConfig({{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_NONE)}}),
+    BEH_MYRIAD.withConfig({{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_ERROR)}}),
+    BEH_MYRIAD.withConfig({{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_WARNING)}}),
+    BEH_MYRIAD.withConfig({{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_INFO)}}),
+    BEH_MYRIAD.withConfig({{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_DEBUG)}}),
+    BEH_MYRIAD.withConfig({{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_TRACE)}}),
+
+    BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)}}),
+    BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(NO)}}),
+
+    BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), CONFIG_VALUE(YES)}}),
+    BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), CONFIG_VALUE(NO)}}),
+};
+
+const std::vector<BehTestParams> withCorrectConfValuesPluginOnly = {
+};
+
+const std::vector<BehTestParams> withCorrectConfValuesNetworkOnly = {
+};
+
+const BehTestParams withIncorrectConfValues[] = {
+    BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), "BLUETOOTH"}}),
+    BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), "LAN"}}),
+
+    BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(IGNORE_IR_STATISTIC), "ON"}}),
+    BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(IGNORE_IR_STATISTIC), "OFF"}}),
+
+    BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), "ON"}}),
+    BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), "OFF"}}),
+
+    BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), "ON"}}),
+    BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), "OFF"}}),
+
+    BEH_MYRIAD.withConfig({{CONFIG_KEY(LOG_LEVEL), "VERBOSE"}}),
+
+    BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "-1"}}),
+    BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "0"}}),
+    BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "1"}}),
+
+    BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), "ON"}}),
+    BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), "OFF"}}),
+};
+
+const BehTestParams withIncorrectConfKeys[] = {
+        BEH_MYRIAD.withIncorrectConfigItem(),
+};
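
(Editorial note: the fixtures above treat BehTestParams as a small value-type builder — withIn, withOut, withConfig, and withBatchSize each return a modified copy. Assuming the setters chain the way their array usage suggests, an extra suite could be composed inline; the instantiation below is hypothetical, not part of the patch.)

    const BehTestParams logInfoValues[] = {
        BEH_MYRIAD.withIn(Precision::U8)
                  .withConfig({{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_INFO)}}),
    };

    INSTANTIATE_TEST_CASE_P(HypotheticalBehaviorTest, BehaviorPluginTest,
                            ValuesIn(logInfoValues), getTestCaseName);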
diff --git a/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests_deprecated/behavior/vpu/shared_tests_instances/skip_tests_config.cpp
new file mode 100644 (file)
index 0000000..8db78c1
--- /dev/null
@@ -0,0 +1,13 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include <string>
+
+#include "functional_test_utils/skip_tests_config.hpp"
+
+std::vector<std::string> disabledTestPatterns() {
+    return {
+    };
+}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/fluid_preproc/CMakeLists.txt b/inference-engine/tests_deprecated/fluid_preproc/CMakeLists.txt
new file mode 100644 (file)
index 0000000..c7f9416
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright (C) 2018-2020 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+find_package(OpenCV COMPONENTS gapi QUIET)
+if(NOT OpenCV_FOUND)
+    message(WARNING "No suitable OpenCV version detected, fluid_preproc_tests skipped")
+    return()
+endif()
+
+add_subdirectory(fluid_test_computations)
+
+file(GLOB SOURCES *.cpp common/*.cpp cpu/*.cpp)
+file(GLOB HEADERS *.hpp common/*.hpp cpu/*.hpp)
+
+set(TARGET fluid_preproc_tests)
+add_executable(${TARGET} ${SOURCES} ${HEADERS})
+
+target_include_directories(${TARGET} PRIVATE
+          "${CMAKE_CURRENT_SOURCE_DIR}/common"
+          "${CMAKE_CURRENT_SOURCE_DIR}/cpu"
+          $<TARGET_PROPERTY:inference_engine_plugin_api,INTERFACE_INCLUDE_DIRECTORIES>
+          $<TARGET_PROPERTY:inference_engine_preproc,INTERFACE_INCLUDE_DIRECTORIES>)
+
+target_link_libraries(${TARGET} PRIVATE opencv_core opencv_imgproc inference_engine fluid_test_computations gtest gtest_main)
+
+if(GAPI_TEST_PERF)
+  target_compile_definitions(${TARGET} PRIVATE -DPERF_TEST=1)
+else()
+  target_compile_definitions(${TARGET} PRIVATE -DPERF_TEST=0)
+endif()
diff --git a/inference-engine/tests_deprecated/fluid_preproc/common/fluid_tests.cpp b/inference-engine/tests_deprecated/fluid_preproc/common/fluid_tests.cpp
new file mode 100644 (file)
index 0000000..b38bb5b
--- /dev/null
@@ -0,0 +1,1155 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "fluid_tests.hpp"
+
+#include "blob_factory.hpp"
+#include "blob_transform.hpp"
+#include "ie_preprocess.hpp"
+#include "ie_preprocess_data.hpp"
+#include "ie_compound_blob.h"
+
+#include <opencv2/core.hpp>
+#include <opencv2/imgproc.hpp>
+#include <opencv2/gapi.hpp>
+#include <opencv2/gapi/imgproc.hpp>
+
+#include <cstdarg>
+#include <cstdio>
+#include <ctime>
+
+#include <chrono>
+
+#include <map>
+
+#include <fluid_test_computations.hpp>
+
+// Can be set externally (via CMake) if built with -DGAPI_TEST_PERF=ON
+#ifndef PERF_TEST
+#define PERF_TEST 0 // 1=test performance, 0=don't
+#endif
+
+namespace {
+#if PERF_TEST
+// performance test: iterate function, measure and print milliseconds per call
+template<typename F> void test_ms(F func, int iter, const char format[], ...)
+{
+    using std::chrono::high_resolution_clock;
+
+    std::vector<high_resolution_clock::duration> samples;
+    samples.reserve(iter);
+    if (0 == iter)
+        return;
+
+    for (int i=0; i < iter; i++)
+    {
+        auto start = high_resolution_clock::now();
+        func(); // iterate calls
+        samples.push_back(high_resolution_clock::now() - start);
+    }
+
+    std::sort(samples.begin(), samples.end());
+
+    auto median = samples[samples.size() / 2];
+
+    double median_ms = std::chrono::duration_cast<std::chrono::microseconds>(median).count() * 0.001; // convert to milliseconds
+
+    printf("Performance(ms): %lg ", median_ms);
+
+    va_list args;
+    va_start(args, format);
+    vprintf(format, args);
+    va_end(args);
+
+    printf("\n");
+}
+
+cv::String interpToString(int interp)
+{
+    switch(interp)
+    {
+    case cv::INTER_AREA   : return "INTER_AREA";
+    case cv::INTER_LINEAR : return "INTER_LINEAR";
+    case cv::INTER_NEAREST: return "INTER_NEAREST";
+    }
+    CV_Assert(!"ERROR: unsupported interpolation!");
+    return nullptr;
+}
+
+cv::String depthToString(int depth)
+{
+    switch(depth)
+    {
+    case CV_8U  : return "CV_8U";
+    case CV_32F : return "CV_32F";
+    }
+    CV_Assert(!"ERROR: unsupported depth!");
+    return nullptr;
+}
+
+cv::String typeToString(int type)
+{
+    switch(type)
+    {
+    case CV_8UC1  : return "CV_8UC1";
+    case CV_8UC2  : return "CV_8UC2";
+    case CV_8UC3  : return "CV_8UC3";
+    case CV_8UC4  : return "CV_8UC4";
+    case CV_32FC1 : return "CV_32FC1";
+    case CV_32FC2 : return "CV_32FC2";
+    case CV_32FC3 : return "CV_32FC3";
+    case CV_32FC4 : return "CV_32FC4";
+    }
+    CV_Assert(!"ERROR: unsupported type!");
+    return nullptr;
+}
+
+cv::String colorFormatToString(InferenceEngine::ColorFormat f) {
+    using namespace InferenceEngine;
+    switch (f)
+    {
+        case ColorFormat::RAW: return "RAW";
+        case ColorFormat::RGB: return "RGB";
+        case ColorFormat::BGR: return "BGR";
+        case ColorFormat::RGBX: return "RGBX";
+        case ColorFormat::BGRX: return "BGRX";
+        case ColorFormat::NV12: return "NV12";
+        default: THROW_IE_EXCEPTION << "Unrecognized color format";
+    }
+}
+
+cv::String layoutToString(InferenceEngine::Layout l) {
+    using namespace InferenceEngine;
+    switch (l) {
+    case Layout::NCHW: return "NCHW";
+    case Layout::NHWC: return "NHWC";
+    default: return "?";
+    }
+}
+#endif  // PERF_TEST
+
+test::Mat to_test(cv::Mat& mat) { return {mat.rows, mat.cols, mat.type(), mat.data, mat.step}; }
+std::vector<test::Mat> to_test(std::vector<cv::Mat>& mats)
+{
+    std::vector<test::Mat> test_mats(mats.size());
+    for (size_t i = 0; i < mats.size(); i++) {
+        test_mats[i] = to_test(mats[i]);
+    }
+    return test_mats;
+}
+
+test::Rect to_test(cv::Rect& rect) { return {rect.x, rect.y, rect.width, rect.height}; }
+
+cv::ColorConversionCodes toCvtColorCode(InferenceEngine::ColorFormat in,
+                                     InferenceEngine::ColorFormat out) {
+    using namespace InferenceEngine;
+    static const std::map<std::pair<ColorFormat, ColorFormat>, cv::ColorConversionCodes> types = {
+        {{ColorFormat::RGBX, ColorFormat::BGRX}, cv::COLOR_RGBA2BGRA},
+        {{ColorFormat::RGBX, ColorFormat::BGR}, cv::COLOR_RGBA2BGR},
+        {{ColorFormat::RGBX, ColorFormat::RGB}, cv::COLOR_RGBA2RGB},
+        {{ColorFormat::BGRX, ColorFormat::RGBX}, cv::COLOR_BGRA2RGBA},
+        {{ColorFormat::BGRX, ColorFormat::BGR}, cv::COLOR_BGRA2BGR},
+        {{ColorFormat::BGRX, ColorFormat::RGB}, cv::COLOR_BGRA2RGB},
+        {{ColorFormat::RGB, ColorFormat::RGBX}, cv::COLOR_RGB2RGBA},
+        {{ColorFormat::RGB, ColorFormat::BGRX}, cv::COLOR_RGB2BGRA},
+        {{ColorFormat::RGB, ColorFormat::BGR}, cv::COLOR_RGB2BGR},
+        {{ColorFormat::BGR, ColorFormat::RGBX}, cv::COLOR_BGR2RGBA},
+        {{ColorFormat::BGR, ColorFormat::BGRX}, cv::COLOR_BGR2BGRA},
+        {{ColorFormat::BGR, ColorFormat::RGB}, cv::COLOR_BGR2RGB},
+        {{ColorFormat::NV12, ColorFormat::BGR}, cv::COLOR_YUV2BGR_NV12},
+        {{ColorFormat::NV12, ColorFormat::RGB}, cv::COLOR_YUV2RGB_NV12}
+    };
+    return types.at(std::make_pair(in, out));
+}
+
+cv::ColorConversionCodes toCvtColorCode(InferenceEngine::ColorFormat fmt) {
+    using namespace InferenceEngine;
+    // Note: OpenCV color matrices are BGR by default
+    return toCvtColorCode(ColorFormat::BGR, fmt);
+}
+
+int numChannels(InferenceEngine::ColorFormat fmt) {
+    using namespace InferenceEngine;
+    switch (fmt) {
+        // case ColorFormat::RAW: return 0;  // any number of channels apply
+        case ColorFormat::RGB: return 3;
+        case ColorFormat::BGR: return 3;
+        case ColorFormat::RGBX: return 4;
+        case ColorFormat::BGRX: return 4;
+        default: THROW_IE_EXCEPTION << "Unrecognized color format";
+    }
+}
+
+// FIXME: Copy-paste from cropRoi tests
+template <InferenceEngine::Precision::ePrecision PRC>
+InferenceEngine::Blob::Ptr img2Blob(cv::Mat &img, InferenceEngine::Layout layout) {
+    using namespace InferenceEngine;
+    using data_t = typename PrecisionTrait<PRC>::value_type;
+
+    const size_t channels = img.channels();
+    const size_t height = img.size().height;
+    const size_t width = img.size().width;
+
+    CV_Assert(cv::DataType<data_t>::depth == img.depth());
+
+    SizeVector dims = {1, channels, height, width};
+    Blob::Ptr resultBlob = make_shared_blob<data_t>(TensorDesc(PRC, dims, layout));
+    resultBlob->allocate();
+
+    data_t* blobData = resultBlob->buffer().as<data_t*>();
+
+    switch (layout) {
+        case Layout::NCHW: {
+            for (size_t c = 0; c < channels; c++) {
+                for (size_t h = 0; h < height; h++) {
+                    for (size_t w = 0; w < width; w++) {
+                        blobData[c * width * height + h * width + w] = img.ptr<data_t>(h,w)[c];
+                    }
+                }
+            }
+        }
+        break;
+        case Layout::NHWC: {
+            for (size_t h = 0; h < height; h++) {
+                for (size_t w = 0; w < width; w++) {
+                    for (size_t c = 0; c < channels; c++) {
+                        blobData[h * width * channels + w * channels + c] = img.ptr<data_t>(h,w)[c];
+                    }
+                }
+            }
+        }
+        break;
+        default:
+            THROW_IE_EXCEPTION << "Inconsistent input layout for image processing: " << layout;
+    }
+    return resultBlob;
+}
+
+template <InferenceEngine::Precision::ePrecision PRC>
+void Blob2Img(const InferenceEngine::Blob::Ptr& blobP, cv::Mat& img, InferenceEngine::Layout layout) {
+    using namespace InferenceEngine;
+    using data_t = typename PrecisionTrait<PRC>::value_type;
+
+    const size_t channels = img.channels();
+    const size_t height = img.size().height;
+    const size_t width = img.size().width;
+
+    CV_Assert(cv::DataType<data_t>::depth == img.depth());
+
+    data_t* blobData = blobP->buffer().as<data_t*>();
+
+    switch (layout) {
+        case Layout::NCHW: {
+            for (size_t c = 0; c < channels; c++) {
+                for (size_t h = 0; h < height; h++) {
+                    for (size_t w = 0; w < width; w++) {
+                        img.ptr<data_t>(h,w)[c] = blobData[c * width * height + h * width + w];
+                    }
+                }
+            }
+        }
+        break;
+        case Layout::NHWC: {
+            for (size_t h = 0; h < height; h++) {
+                for (size_t w = 0; w < width; w++) {
+                    for (size_t c = 0; c < channels; c++) {
+                        img.ptr<data_t>(h,w)[c] = blobData[h * width * channels + w * channels + c];
+                    }
+                }
+            }
+        }
+        break;
+        default:
+            THROW_IE_EXCEPTION << "Inconsistent input layout for image processing: " << layout;
+    }
+}
+} // anonymous namespace
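
(Editorial note: the four layout loops in img2Blob/Blob2Img above reduce to two linear offset formulas; a standalone restatement in plain C++, with no Inference Engine types, for reference.)

    #include <cstddef>

    // NCHW (planar): channel-major, rows next, columns innermost.
    inline size_t offsetNCHW(size_t c, size_t h, size_t w, size_t H, size_t W) {
        return c * H * W + h * W + w;
    }

    // NHWC (interleaved): pixel-major, channels innermost.
    inline size_t offsetNHWC(size_t c, size_t h, size_t w, size_t W, size_t C) {
        return (h * W + w) * C + c;
    }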
+
+TEST_P(ResizeTestGAPI, AccuracyTest)
+{
+    int type = 0, interp = 0;
+    cv::Size sz_in, sz_out;
+    double tolerance = 0.0;
+    std::pair<cv::Size, cv::Size> sizes;
+    std::tie(type, interp, sizes, tolerance) = GetParam();
+    std::tie(sz_in, sz_out) = sizes;
+
+    cv::Mat in_mat1 (sz_in, type );
+    cv::Scalar mean = cv::Scalar::all(127);
+    cv::Scalar stddev = cv::Scalar::all(40.f);
+
+    cv::randn(in_mat1, mean, stddev);
+
+    cv::Mat out_mat(sz_out, type);
+    cv::Mat out_mat_ocv(sz_out, type);
+
+    // G-API code //////////////////////////////////////////////////////////////
+    FluidResizeComputation rc(to_test(in_mat1), to_test(out_mat), interp);
+    rc.warmUp();
+
+#if PERF_TEST
+    // iterate testing, and print performance
+    test_ms([&](){ rc.apply(); },
+            100, "Resize GAPI %s %s %dx%d -> %dx%d",
+            interpToString(interp).c_str(), typeToString(type).c_str(),
+            sz_in.width, sz_in.height, sz_out.width, sz_out.height);
+#endif
+
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::resize(in_mat1, out_mat_ocv, sz_out, 0, 0, interp);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_LE(cv::norm(out_mat, out_mat_ocv, cv::NORM_INF), tolerance);
+    }
+}
+
+TEST_P(ResizeRGB8UTestGAPI, AccuracyTest)
+{
+    int type = 0, interp = 0;
+    cv::Size sz_in, sz_out;
+    double tolerance = 0.0;
+    std::pair<cv::Size, cv::Size> sizes;
+    std::tie(type, interp, sizes, tolerance) = GetParam();
+    std::tie(sz_in, sz_out) = sizes;
+
+    cv::Mat in_mat1 (sz_in, type );
+    cv::Scalar mean = cv::Scalar::all(127);
+    cv::Scalar stddev = cv::Scalar::all(40.f);
+
+    cv::randn(in_mat1, mean, stddev);
+
+    cv::Mat out_mat(sz_out, type);
+    cv::Mat out_mat_ocv(sz_out, type);
+
+    // G-API code //////////////////////////////////////////////////////////////
+    FluidResizeRGB8UComputation rc(to_test(in_mat1), to_test(out_mat), interp);
+    rc.warmUp();
+
+#if PERF_TEST
+    // iterate testing, and print performance
+    test_ms([&](){ rc.apply(); },
+            100, "Resize GAPI %s %s %dx%d -> %dx%d",
+            interpToString(interp).c_str(), typeToString(type).c_str(),
+            sz_in.width, sz_in.height, sz_out.width, sz_out.height);
+#endif
+
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::resize(in_mat1, out_mat_ocv, sz_out, 0, 0, interp);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_LE(cv::norm(out_mat, out_mat_ocv, cv::NORM_INF), tolerance);
+    }
+}
+
+TEST_P(ResizeRoiTestGAPI, AccuracyTest)
+{
+    int type = 0, interp = 0;
+    cv::Size sz_in, sz_out;
+    cv::Rect roi;
+    double tolerance = 0.0;
+    std::pair<cv::Size, cv::Size> sizes;
+    std::tie(type, interp, sizes, roi, tolerance) = GetParam();
+    std::tie(sz_in, sz_out) = sizes;
+
+    cv::Mat in_mat1 (sz_in, type);
+    cv::Scalar mean = cv::Scalar::all(127);
+    cv::Scalar stddev = cv::Scalar::all(40.f);
+
+    cv::randn(in_mat1, mean, stddev);
+
+    cv::Mat out_mat(sz_out, type);
+    cv::Mat out_mat_ocv(sz_out, type);
+
+    // G-API code //////////////////////////////////////////////////////////////
+    FluidResizeComputation rc(to_test(in_mat1), to_test(out_mat), interp);
+    rc.warmUp(to_test(roi));
+
+#if PERF_TEST
+    // iterate testing, and print performance
+    test_ms([&](){ rc.apply(); },
+            100, "Resize GAPI %s %s %dx%d -> %dx%d",
+            interpToString(interp).c_str(), typeToString(type).c_str(),
+            sz_in.width, sz_in.height, sz_out.width, sz_out.height);
+#endif
+
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::resize(in_mat1, out_mat_ocv, sz_out, 0, 0, interp);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_LE(cv::norm(out_mat(roi), out_mat_ocv(roi), cv::NORM_INF), tolerance);
+    }
+}
+
+TEST_P(ResizeRGB8URoiTestGAPI, AccuracyTest)
+{
+    int type = 0, interp = 0;
+    cv::Size sz_in, sz_out;
+    cv::Rect roi;
+    double tolerance = 0.0;
+    std::pair<cv::Size, cv::Size> sizes;
+    std::tie(type, interp, sizes, roi, tolerance) = GetParam();
+    std::tie(sz_in, sz_out) = sizes;
+
+    cv::Mat in_mat1 (sz_in, type);
+    cv::Scalar mean = cv::Scalar::all(127);
+    cv::Scalar stddev = cv::Scalar::all(40.f);
+
+    cv::randn(in_mat1, mean, stddev);
+
+    cv::Mat out_mat(sz_out, type);
+    cv::Mat out_mat_ocv(sz_out, type);
+
+    // G-API code //////////////////////////////////////////////////////////////
+    FluidResizeRGB8UComputation rc(to_test(in_mat1), to_test(out_mat), interp);
+    rc.warmUp(to_test(roi));
+
+#if PERF_TEST
+    // iterate testing, and print performance
+    test_ms([&](){ rc.apply(); },
+            100, "Resize GAPI %s %s %dx%d -> %dx%d",
+            interpToString(interp).c_str(), typeToString(type).c_str(),
+            sz_in.width, sz_in.height, sz_out.width, sz_out.height);
+#endif
+
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::resize(in_mat1, out_mat_ocv, sz_out, 0, 0, interp);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_LE(cv::norm(out_mat(roi), out_mat_ocv(roi), cv::NORM_INF), tolerance);
+    }
+}
+
+TEST_P(SplitTestGAPI, AccuracyTest)
+{
+    const auto params = GetParam();
+    int planes  = std::get<0>(params);
+    int depth   = std::get<1>(params);
+    cv::Size sz = std::get<2>(params);
+    double tolerance = std::get<3>(params);
+
+    int srcType = CV_MAKE_TYPE(depth, planes);
+    int dstType = CV_MAKE_TYPE(depth, 1);
+
+    cv::Mat in_mat(sz, srcType);
+    cv::randn(in_mat, cv::Scalar::all(127), cv::Scalar::all(40.f));
+
+    // allocate planes separately: the vector fill-constructor would share one buffer
+    std::vector<cv::Mat> out_mats_gapi(planes);
+    std::vector<cv::Mat> out_mats_ocv (planes);
+    for (int p = 0; p < planes; p++) {
+        out_mats_gapi[p] = cv::Mat::zeros(sz, dstType);
+        out_mats_ocv [p] = cv::Mat::zeros(sz, dstType);
+    }
+
+    // G-API code //////////////////////////////////////////////////////////////
+    FluidSplitComputation sc(to_test(in_mat), to_test(out_mats_gapi));
+    sc.warmUp();
+
+#if PERF_TEST
+    // iterate testing, and print performance
+    test_ms([&](){ sc.apply(); },
+        400, "Split GAPI %s %dx%d", typeToString(srcType).c_str(), sz.width, sz.height);
+#endif
+
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::split(in_mat, out_mats_ocv);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        for (int p = 0; p < planes; p++) {
+            EXPECT_LE(cv::norm(out_mats_ocv[p], out_mats_gapi[p], cv::NORM_INF), tolerance);
+        }
+    }
+}
+
+TEST_P(ChanToPlaneTestGAPI, AccuracyTest)
+{
+    const auto params = GetParam();
+    int planes  = std::get<0>(params);
+    int depth   = std::get<1>(params);
+    cv::Size sz = std::get<2>(params);
+    double tolerance = std::get<3>(params);
+
+    int inType  = CV_MAKE_TYPE(depth, planes);
+    int outType = CV_MAKE_TYPE(depth, 1);
+
+    cv::Mat in_mat(sz, inType);
+    cv::randn(in_mat, cv::Scalar::all(127), cv::Scalar::all(40.f));
+
+    cv::Mat out_mat_gapi(sz, outType);
+    std::vector<cv::Mat> out_mats_ocv;
+
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::split(in_mat, out_mats_ocv);
+    }
+
+    for(int i = 0; i < planes; ++i){
+        // G-API code //////////////////////////////////////////////////////////////
+        FluidChanToPlaneComputation sc(to_test(in_mat), to_test(out_mat_gapi), i);
+        sc.warmUp();
+
+        #if PERF_TEST
+            // run just for a single plane
+            if(i == 0){
+                // iterate testing, and print performance
+                test_ms([&](){ sc.apply(); },
+                        400, "ChanToPlane GAPI %s %dx%d", typeToString(inType).c_str(), sz.width, sz.height);
+            }
+        #endif
+
+        // Comparison //////////////////////////////////////////////////////////////
+        {
+            EXPECT_LE(cv::norm(out_mats_ocv[i], out_mat_gapi, cv::NORM_INF), tolerance);
+        }
+    }
+}
+
+TEST_P(MergeTestGAPI, AccuracyTest)
+{
+    const auto params = GetParam();
+    int planes  = std::get<0>(params);
+    int depth   = std::get<1>(params);
+    cv::Size sz = std::get<2>(params);
+    double tolerance = std::get<3>(params);
+
+    int srcType = CV_MAKE_TYPE(depth, 1);
+    int dstType = CV_MAKE_TYPE(depth, planes);
+
+    // construct planes separately: the vector fill-constructor would share one
+    // buffer, so randn would fill all planes identically
+    std::vector<cv::Mat> in_mats(planes);
+    for (int p = 0; p < planes; p++) {
+        in_mats[p] = cv::Mat(sz, srcType);
+        cv::randn(in_mats[p], cv::Scalar::all(127), cv::Scalar::all(40.f));
+    }
+
+    cv::Mat out_mat_ocv  = cv::Mat::zeros(sz, dstType);
+    cv::Mat out_mat_gapi = cv::Mat::zeros(sz, dstType);
+
+    // G-API code //////////////////////////////////////////////////////////////
+    FluidMergeComputation mc(to_test(in_mats), to_test(out_mat_gapi));
+    mc.warmUp();
+
+#if PERF_TEST
+    // iterate testing, and print performance
+    test_ms([&](){ mc.apply(); },
+        400, "Merge GAPI %s %dx%d", typeToString(dstType).c_str(), sz.width, sz.height);
+#endif
+
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::merge(in_mats, out_mat_ocv);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_LE(cv::norm(out_mat_ocv, out_mat_gapi, cv::NORM_INF), tolerance);
+    }
+}
+
+TEST_P(NV12toRGBTestGAPI, AccuracyTest)
+{
+    const auto params = GetParam();
+    cv::Size sz = std::get<0>(params);
+    double tolerance = std::get<1>(params);
+
+    cv::Mat in_mat_y(sz, CV_8UC1);
+    cv::Mat in_mat_uv(cv::Size(sz.width / 2, sz.height / 2), CV_8UC2);
+    cv::randn(in_mat_y, cv::Scalar::all(127), cv::Scalar::all(40.f));
+    cv::randn(in_mat_uv, cv::Scalar::all(127), cv::Scalar::all(40.f));
+
+    cv::Mat out_mat_gapi(cv::Mat::zeros(sz, CV_8UC3));
+    cv::Mat out_mat_ocv (cv::Mat::zeros(sz, CV_8UC3));
+
+    // G-API code //////////////////////////////////////////////////////////////
+    FluidNV12toRGBComputation cc(to_test(in_mat_y), to_test(in_mat_uv), to_test(out_mat_gapi));
+    cc.warmUp();
+
+#if PERF_TEST
+    // iterate testing, and print performance
+    test_ms([&](){ cc.apply(); },
+        400, "NV12toRGB GAPI %s %dx%d", typeToString(CV_8UC3).c_str(), sz.width, sz.height);
+#endif
+
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::cvtColorTwoPlane(in_mat_y,in_mat_uv,out_mat_ocv,cv::COLOR_YUV2RGB_NV12);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_LE(cv::norm(out_mat_ocv, out_mat_gapi, cv::NORM_INF), tolerance);
+        EXPECT_EQ(sz, out_mat_gapi.size());
+    }
+}
+
+
+TEST_P(I420toRGBTestGAPI, AccuracyTest)
+{
+    const auto params = GetParam();
+    cv::Size sz = std::get<0>(params);
+    double tolerance = std::get<1>(params);
+
+    cv::Mat in_mat_y(sz, CV_8UC1);
+    cv::Mat in_mat_u(cv::Size(sz.width / 2, sz.height / 2), CV_8UC1);
+    cv::Mat in_mat_v(cv::Size(sz.width / 2, sz.height / 2), CV_8UC1);
+    cv::randn(in_mat_y, cv::Scalar::all(127), cv::Scalar::all(40.f));
+    cv::randn(in_mat_u, cv::Scalar::all(127), cv::Scalar::all(40.f));
+    cv::randn(in_mat_v, cv::Scalar::all(127), cv::Scalar::all(40.f));
+
+    cv::Mat out_mat_gapi(cv::Mat::zeros(sz, CV_8UC3));
+    cv::Mat out_mat_ocv (cv::Mat::zeros(sz, CV_8UC3));
+
+    // G-API code //////////////////////////////////////////////////////////////
+    FluidI420toRGBComputation cc(to_test(in_mat_y), to_test(in_mat_u), to_test(in_mat_v), to_test(out_mat_gapi));
+    cc.warmUp();
+
+#if PERF_TEST
+    // iterate testing, and print performance
+    test_ms([&](){ cc.apply(); },
+        400, "I420toRGB GAPI %s %dx%d", typeToString(CV_8UC3).c_str(), sz.width, sz.height);
+#endif
+
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::Mat in_mat_uv = cv::Mat::zeros(in_mat_u.size(), CV_8UC2);
+        std::array<cv::Mat, 2> in_uv = {in_mat_u, in_mat_v};
+        cv::merge(in_uv, in_mat_uv);
+        // cvtColorTwoPlane supports only NV12 at the moment
+        cv::cvtColorTwoPlane(in_mat_y,in_mat_uv,out_mat_ocv,cv::COLOR_YUV2RGB_NV12);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_LE(cv::norm(out_mat_ocv, out_mat_gapi, cv::NORM_INF), tolerance);
+        EXPECT_EQ(sz, out_mat_gapi.size());
+    }
+}
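
(Editorial note: both YUV tests above rely on the same 4:2:0 geometry; a sketch of the plane shapes, assuming the OpenCV headers already included in this file. The frame size is an arbitrary example.)

    cv::Size frame(640, 480);                                  // arbitrary example
    cv::Mat y(frame, CV_8UC1);                                 // full-resolution luma
    cv::Mat uv(cv::Size(frame.width / 2, frame.height / 2),
               CV_8UC2);                                       // NV12: interleaved U/V
    cv::Mat u(cv::Size(frame.width / 2, frame.height / 2),
              CV_8UC1);                                        // I420: planar U ...
    cv::Mat v(u.size(), CV_8UC1);                              // ... and planar V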
+//----------------------------------------------------------------------
+
+TEST_P(ResizeTestIE, AccuracyTest)
+{
+    int type = 0, interp = 0;
+    cv::Size sz_in, sz_out;
+    double tolerance = 0.0;
+    std::pair<cv::Size, cv::Size> sizes;
+    std::tie(type, interp, sizes, tolerance) = GetParam();
+    std::tie(sz_in, sz_out) = sizes;
+
+    cv::Mat in_mat1(sz_in, type );
+    cv::Scalar mean = cv::Scalar::all(127);
+    cv::Scalar stddev = cv::Scalar::all(40.f);
+
+    cv::randn(in_mat1, mean, stddev);
+
+    cv::Mat out_mat(sz_out, type);
+    cv::Mat out_mat_ocv(sz_out, type);
+
+    // Inference Engine code ///////////////////////////////////////////////////
+
+    size_t channels = out_mat.channels();
+    CV_Assert(1 == channels || 3 == channels);
+
+    int depth = CV_MAT_DEPTH(type);
+    CV_Assert(CV_8U == depth || CV_32F == depth);
+
+    CV_Assert(cv::INTER_AREA == interp || cv::INTER_LINEAR == interp);
+
+    ASSERT_TRUE(in_mat1.isContinuous() && out_mat.isContinuous());
+
+    using namespace InferenceEngine;
+
+    size_t  in_height = in_mat1.rows,  in_width = in_mat1.cols;
+    size_t out_height = out_mat.rows, out_width = out_mat.cols;
+    InferenceEngine::SizeVector  in_sv = { 1, channels,  in_height,  in_width };
+    InferenceEngine::SizeVector out_sv = { 1, channels, out_height, out_width };
+
+    // HWC blob: channels are interleaved
+    Precision precision = CV_8U == depth ? Precision::U8 : Precision::FP32;
+    TensorDesc  in_desc(precision,  in_sv, Layout::NHWC);
+    TensorDesc out_desc(precision, out_sv, Layout::NHWC);
+
+    Blob::Ptr in_blob, out_blob;
+    in_blob  = make_blob_with_precision(in_desc , in_mat1.data);
+    out_blob = make_blob_with_precision(out_desc, out_mat.data);
+
+    PreProcessDataPtr preprocess = CreatePreprocDataHelper();
+    preprocess->setRoiBlob(in_blob);
+
+    ResizeAlgorithm algorithm = cv::INTER_AREA == interp ? RESIZE_AREA : RESIZE_BILINEAR;
+    PreProcessInfo info;
+    info.setResizeAlgorithm(algorithm);
+
+    // test once to warm-up cache
+    preprocess->execute(out_blob, info, false);
+
+#if PERF_TEST
+    // iterate testing, and print performance
+    test_ms([&](){ preprocess->execute(out_blob, info, false); },
+            100, "Resize IE %s %s %dx%d -> %dx%d",
+            interpToString(interp).c_str(), typeToString(type).c_str(),
+            sz_in.width, sz_in.height, sz_out.width, sz_out.height);
+#endif
+
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        cv::resize(in_mat1, out_mat_ocv, sz_out, 0, 0, interp);
+    }
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_LE(cv::norm(out_mat_ocv, out_mat, cv::NORM_INF), tolerance);
+    }
+}
+
+TEST_P(ColorConvertTestIE, AccuracyTest)
+{
+    using namespace InferenceEngine;
+    int depth = 0;
+    auto in_fmt = ColorFormat::RAW;
+    auto out_fmt = ColorFormat::BGR;  // for now, always BGR
+    auto in_layout = Layout::ANY;
+    auto out_layout = Layout::ANY;
+    cv::Size size;
+    double tolerance = 0.0;
+    std::tie(depth, in_fmt, in_layout, out_layout, size, tolerance) = GetParam();
+
+    int in_type = CV_MAKE_TYPE(depth, numChannels(in_fmt));
+    int out_type = CV_MAKE_TYPE(depth, numChannels(out_fmt));
+
+    cv::Mat in_mat1(size, in_type);
+    cv::Scalar mean = cv::Scalar::all(127);
+    cv::Scalar stddev = cv::Scalar::all(40.f);
+
+    cv::randn(in_mat1, mean, stddev);
+
+    cv::Mat out_mat(size, out_type);
+    cv::Mat out_mat_ocv(size, out_type);
+
+    // Inference Engine code ///////////////////////////////////////////////////
+
+    if (in_fmt != ColorFormat::RAW && in_fmt != ColorFormat::BGR) {
+        cv::cvtColor(in_mat1, in_mat1, toCvtColorCode(in_fmt));
+    }
+
+    size_t in_channels = in_mat1.channels();
+    CV_Assert(3 == in_channels || 4 == in_channels);
+
+    size_t out_channels = out_mat.channels();
+    CV_Assert(3 == out_channels || 4 == out_channels);
+
+    CV_Assert(CV_8U == depth || CV_32F == depth);
+
+    ASSERT_TRUE(in_mat1.isContinuous() && out_mat.isContinuous());
+
+    size_t  in_height = in_mat1.rows,  in_width = in_mat1.cols;
+    size_t out_height = out_mat.rows, out_width = out_mat.cols;
+    InferenceEngine::SizeVector  in_sv = { 1, in_channels,  in_height,  in_width };
+    InferenceEngine::SizeVector out_sv = { 1, out_channels, out_height, out_width };
+
+    // HWC blob: channels are interleaved
+    Precision precision = CV_8U == depth ? Precision::U8 : Precision::FP32;
+
+    Blob::Ptr in_blob, out_blob;
+    switch (precision)
+    {
+    case Precision::U8:
+        in_blob = img2Blob<Precision::U8>(in_mat1, in_layout);
+        out_blob = img2Blob<Precision::U8>(out_mat, out_layout);
+        break;
+
+    case Precision::FP32:
+        in_blob = img2Blob<Precision::FP32>(in_mat1, in_layout);
+        out_blob = img2Blob<Precision::FP32>(out_mat, out_layout);
+        break;
+
+    default:
+        FAIL() << "Unsupported configuration";
+    }
+
+    PreProcessDataPtr preprocess = CreatePreprocDataHelper();
+    preprocess->setRoiBlob(in_blob);
+
+    PreProcessInfo info;
+    info.setColorFormat(in_fmt);
+
+    // test once to warm-up cache
+    preprocess->execute(out_blob, info, false);
+
+    switch (precision)
+    {
+    case Precision::U8:   Blob2Img<Precision::U8>  (out_blob, out_mat, out_layout); break;
+    case Precision::FP32: Blob2Img<Precision::FP32>(out_blob, out_mat, out_layout); break;
+    default: FAIL() << "Unsupported configuration";
+    }
+
+#if PERF_TEST
+    // iterate testing, and print performance
+    test_ms([&](){ preprocess->execute(out_blob, info, false); },
+            100, "Color Convert IE %s %s %s %dx%d %s->%s",
+            depthToString(depth).c_str(),
+            layoutToString(in_layout).c_str(), layoutToString(out_layout).c_str(),
+            size.width, size.height,
+            colorFormatToString(in_fmt).c_str(), colorFormatToString(out_fmt).c_str());
+#endif
+
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        if (in_fmt != out_fmt) {
+            cv::cvtColor(in_mat1, out_mat_ocv, toCvtColorCode(in_fmt, out_fmt));
+        } else {
+            // only reorder is done
+            out_mat_ocv = in_mat1;
+        }
+    }
+
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_LE(cv::norm(out_mat_ocv, out_mat, cv::NORM_INF), tolerance);
+    }
+}
+
+TEST_P(ColorConvertYUV420TestIE, AccuracyTest)
+{
+    using namespace InferenceEngine;
+    const int depth = CV_8U;
+    auto in_fmt = ColorFormat::NV12;
+    const auto out_fmt = ColorFormat::BGR;  // for now, always BGR
+    const auto in_layout = Layout::NCHW;
+    auto out_layout = Layout::ANY;
+    cv::Size size;
+    double tolerance = 0.0;
+    std::tie(in_fmt, out_layout, size, tolerance) = GetParam();
+
+    cv::Mat in_mat_y(size, CV_MAKE_TYPE(depth, 1));
+    cv::Mat in_mat_uv(cv::Size(size.width / 2, size.height / 2), CV_MAKE_TYPE(depth, 2));
+    cv::Scalar mean = cv::Scalar::all(127);
+    cv::Scalar stddev = cv::Scalar::all(40.f);
+
+    cv::randn(in_mat_y, mean, stddev);
+    cv::randn(in_mat_uv, mean / 2, stddev / 2);
+
+    int out_type = CV_MAKE_TYPE(depth, numChannels(out_fmt));
+    cv::Mat out_mat(size, out_type);
+    cv::Mat out_mat_ocv(size, out_type);
+
+    // Inference Engine code ///////////////////////////////////////////////////
+
+    size_t out_channels = out_mat.channels();
+    CV_Assert(3 == out_channels || 4 == out_channels);
+
+    ASSERT_TRUE(in_mat_y.isContinuous() && out_mat.isContinuous());
+
+    const Precision precision = Precision::U8;
+
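+    // NV12: a full-resolution Y plane plus a half-resolution interleaved UV plane.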
+    auto make_nv12_blob = [&](){
+        auto y_blob = img2Blob<Precision::U8>(in_mat_y, Layout::NHWC);
+        auto uv_blob = img2Blob<Precision::U8>(in_mat_uv, Layout::NHWC);
+        return make_shared_blob<NV12Blob>(y_blob, uv_blob);
+    };
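+    // I420: U and V live in separate half-resolution planes, obtained here by
+    // splitting the interleaved UV data.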
+    auto make_I420_blob = [&](){
+        cv::Mat in_mat_u(cv::Size(size.width / 2, size.height / 2), CV_MAKE_TYPE(depth, 1));
+        cv::Mat in_mat_v(cv::Size(size.width / 2, size.height / 2), CV_MAKE_TYPE(depth, 1));
+
+        std::array<cv::Mat, 2> in_uv = {in_mat_u, in_mat_v};
+        cv::split(in_mat_uv, in_uv);
+
+        auto y_blob = img2Blob<Precision::U8>(in_mat_y, Layout::NHWC);
+        auto u_blob = img2Blob<Precision::U8>(in_mat_u, Layout::NHWC);
+        auto v_blob = img2Blob<Precision::U8>(in_mat_v, Layout::NHWC);
+        return make_shared_blob<I420Blob>(y_blob, u_blob, v_blob);
+    };
+
+    Blob::Ptr in_blob = (in_fmt == ColorFormat::NV12) ? Blob::Ptr{make_nv12_blob()} : Blob::Ptr{make_I420_blob()};
+    auto out_blob = img2Blob<Precision::U8>(out_mat, out_layout);
+
+    PreProcessDataPtr preprocess = CreatePreprocDataHelper();
+    preprocess->setRoiBlob(in_blob);
+
+    PreProcessInfo info;
+    info.setColorFormat(in_fmt);
+
+    // run once to warm up the cache
+    preprocess->execute(out_blob, info, false);
+
+    Blob2Img<Precision::U8>(out_blob, out_mat, out_layout);
+
+#if PERF_TEST
+    // run repeatedly and report timing
+    test_ms([&](){ preprocess->execute(out_blob, info, false); },
+            100, "Color Convert IE %s %s %s %dx%d %s->%s",
+            depthToString(depth).c_str(),
+            layoutToString(in_layout).c_str(), layoutToString(out_layout).c_str(),
+            size.width, size.height,
+            colorFormatToString(in_fmt).c_str(), colorFormatToString(out_fmt).c_str());
+#endif
+
+    // OpenCV code /////////////////////////////////////////////////////////////
+    {
+        // use the NV12 path for both formats: OpenCV's cvtColorTwoPlane only accepts two-plane (NV12) input
+        cv::cvtColorTwoPlane(in_mat_y, in_mat_uv, out_mat_ocv, toCvtColorCode(ColorFormat::NV12, out_fmt));
+    }
+
+    // Comparison //////////////////////////////////////////////////////////////
+    {
+        EXPECT_LE(cv::norm(out_mat_ocv, out_mat, cv::NORM_INF), tolerance);
+    }
+}
+
+TEST_P(SplitTestIE, AccuracyTest)
+{
+    const auto params = GetParam();
+    int type = std::get<0>(params);
+    cv::Size size = std::get<1>(params);
+    double tolerance = std::get<2>(params);
+
+    int depth = CV_MAT_DEPTH(type);
+    CV_Assert(CV_8U == depth || CV_32F == depth);
+
+    int type1 = CV_MAKE_TYPE(depth, 1);
+    int type4 = CV_MAKE_TYPE(depth, 4);
+
+    cv::Scalar mean = cv::Scalar::all(127);
+    cv::Scalar stddev = cv::Scalar::all(40.f);
+
+    cv::Mat in_mat(size, type);
+    cv::randn(in_mat, mean, stddev);
+
+    int channels = in_mat.channels();
+    CV_Assert(2 == channels || 3 == channels || 4 == channels);
+
+    size_t elemsize1 = in_mat.elemSize1();
+    size_t total     = in_mat.total();
+
+    cv::Mat out_mat(size, type4);
+    CV_Assert(in_mat.isContinuous() && out_mat.isContinuous());
+
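+    // Reinterpret out_mat's contiguous buffer as four single-channel planes:
+    // plane i starts at byte offset i * total * elemsize1.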
+    cv::Mat out_mat0(size, type1, out_mat.data + 0*total*elemsize1);
+    cv::Mat out_mat1(size, type1, out_mat.data + 1*total*elemsize1);
+    cv::Mat out_mat2(size, type1, out_mat.data + 2*total*elemsize1);
+    cv::Mat out_mat3(size, type1, out_mat.data + 3*total*elemsize1);
+
+    cv::Mat out_mats[] = {out_mat0, out_mat1, out_mat2, out_mat3};
+
+    std::vector<cv::Mat> out_mats_ocv(channels);
+
+    // Inference Engine code ///////////////////////////////////////////////////
+
+    using namespace InferenceEngine;
+
+    size_t width  = size.width;
+    size_t height = size.height;
+    InferenceEngine::SizeVector sv = { 1, (size_t)channels, height,  width };
+
+    Precision precision = CV_8U == depth ? Precision::U8 : Precision::FP32;
+    TensorDesc  in_desc(precision, sv, Layout::NHWC); // interleaved
+    TensorDesc out_desc(precision, sv, Layout::NCHW); // color planes
+
+    Blob::Ptr in_blob, out_blob;
+    in_blob  = make_blob_with_precision( in_desc,  in_mat.data);
+    out_blob = make_blob_with_precision(out_desc, out_mat.data);
+
+    // test once
+    blob_copy(in_blob, out_blob);
+
+#if PERF_TEST
+    // run repeatedly and report timing
+    test_ms([&]() { blob_copy(in_blob, out_blob); },
+        400, "Split IE %s %dx%d", typeToString(type).c_str(), size.width, size.height);
+#endif
+
+    // OpenCV code /////////////////////////////////////////////////////////////
+
+    cv::split(in_mat, out_mats_ocv);
+
+    // Comparison //////////////////////////////////////////////////////////////
+
+    for (int i = 0; i < channels; i++)
+    {
+        EXPECT_LE(cv::norm(out_mats_ocv[i], out_mats[i], cv::NORM_INF), tolerance);
+    }
+}
+
+TEST_P(MergeTestIE, AccuracyTest)
+{
+    const auto params = GetParam();
+    int type = std::get<0>(params);
+    cv::Size size = std::get<1>(params);
+    double tolerance = std::get<2>(params);
+
+    int depth = CV_MAT_DEPTH(type);
+    CV_Assert(CV_8U == depth || CV_32F == depth);
+
+    int type1 = CV_MAKE_TYPE(depth, 1);
+    int type4 = CV_MAKE_TYPE(depth, 4);
+
+    cv::Mat out_mat(size, type), out_mat_ocv;
+
+    cv::Mat in_mat(size, type4);
+
+    int channels = out_mat.channels();
+    CV_Assert(2 == channels || 3 == channels || 4 == channels);
+
+    size_t elemsize1 = out_mat.elemSize1();
+    size_t total     = out_mat.total();
+
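+    // Four single-channel input planes laid out back-to-back in in_mat's contiguous buffer.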
+    cv::Mat in_mat0(size, type1, in_mat.data + 0*total*elemsize1);
+    cv::Mat in_mat1(size, type1, in_mat.data + 1*total*elemsize1);
+    cv::Mat in_mat2(size, type1, in_mat.data + 2*total*elemsize1);
+    cv::Mat in_mat3(size, type1, in_mat.data + 3*total*elemsize1);
+
+    cv::Mat in_mats[] = { in_mat0, in_mat1, in_mat2, in_mat3 };
+
+    cv::Scalar mean = cv::Scalar::all(127);
+    cv::Scalar stddev = cv::Scalar::all(40.f);
+
+    for (int i = 0; i < 4; i++)
+    {
+        cv::randn(in_mats[i], mean, stddev);
+    }
+
+    CV_Assert(in_mat.isContinuous() && out_mat.isContinuous());
+
+    // Inference Engine code ///////////////////////////////////////////////////
+
+    using namespace InferenceEngine;
+
+    size_t width  = size.width;
+    size_t height = size.height;
+    InferenceEngine::SizeVector sv = { 1, (size_t)channels, height,  width };
+
+    Precision precision = CV_8U == depth ? Precision::U8 : Precision::FP32;
+    TensorDesc  in_desc(precision, sv, Layout::NCHW); // color planes
+    TensorDesc out_desc(precision, sv, Layout::NHWC); // interleaved
+
+    Blob::Ptr in_blob, out_blob;
+    in_blob  = make_blob_with_precision( in_desc,  in_mat.data);
+    out_blob = make_blob_with_precision(out_desc, out_mat.data);
+
+    // test once
+    blob_copy(in_blob, out_blob);
+
+#if PERF_TEST
+    // run repeatedly and report timing
+    test_ms([&]() { blob_copy(in_blob, out_blob); },
+        400, "Merge IE %s %dx%d", typeToString(type).c_str(), size.width, size.height);
+#endif
+
+    // OpenCV code /////////////////////////////////////////////////////////////
+
+    cv::merge(in_mats, channels, out_mat_ocv);
+
+    // Comparison //////////////////////////////////////////////////////////////
+
+    EXPECT_LE(cv::norm(out_mat_ocv, out_mat, cv::NORM_INF), tolerance);
+}
+
+TEST_P(PreprocTest, Performance)
+{
+    using namespace InferenceEngine;
+    Precision prec;
+    ResizeAlgorithm interp;
+    Layout in_layout, out_layout;
+    std::pair<int, int> ocv_channels{-1, -1};
+    std::pair<cv::Size, cv::Size> sizes;
+    ColorFormat in_fmt = ColorFormat::RAW;
+    ColorFormat out_fmt = ColorFormat::BGR;
+    std::tie(prec, interp, in_fmt, in_layout, out_layout, ocv_channels, sizes) = GetParam();
+    cv::Size in_size, out_size;
+    std::tie(in_size, out_size) = sizes;
+    int in_ocv_chan = -1, out_ocv_chan = -1;
+    std::tie(in_ocv_chan, out_ocv_chan) = ocv_channels;
+    double tolerance = prec == Precision::U8 ? 1 : 0.015;
+
+    const int ocv_depth = prec == Precision::U8 ? CV_8U :
+        prec == Precision::FP32 ? CV_32F : -1;
+    const int in_ocv_type = CV_MAKETYPE(ocv_depth, in_ocv_chan);
+    const int out_ocv_type = CV_MAKETYPE(ocv_depth, out_ocv_chan);
+    initMatrixRandU(in_ocv_type, in_size, in_ocv_type, false);
+
+    cv::Mat out_mat(out_size, out_ocv_type);
+
+    // Convert the input mat to the required color format, if any. NV12, being a planar
+    // format, is handled separately below.
+    if (in_fmt != ColorFormat::RAW && in_fmt != ColorFormat::BGR && in_fmt != ColorFormat::NV12) {
+        cv::cvtColor(in_mat1, in_mat1, toCvtColorCode(in_fmt));
+    }
+    // create additional cv::Mat in NV12 case
+    if (in_fmt == ColorFormat::NV12) {
+        in_mat2 = cv::Mat(cv::Size(in_mat1.cols / 2, in_mat1.rows / 2), CV_8UC2);
+        cv::randu(in_mat2, cv::Scalar::all(0), cv::Scalar::all(255));
+    }
+
+    Blob::Ptr in_blob, out_blob;
+    switch (prec)
+    {
+    case Precision::U8:
+        if (in_fmt == ColorFormat::NV12) {
+            auto y_blob = img2Blob<Precision::U8>(in_mat1, Layout::NHWC);
+            auto uv_blob = img2Blob<Precision::U8>(in_mat2, Layout::NHWC);
+            in_blob = make_shared_blob<NV12Blob>(y_blob, uv_blob);
+        } else {
+            in_blob = img2Blob<Precision::U8>(in_mat1, in_layout);
+        }
+        out_blob = img2Blob<Precision::U8>(out_mat, out_layout);
+        break;
+
+    case Precision::FP32:
+        in_blob = img2Blob<Precision::FP32>(in_mat1, in_layout);
+        out_blob = img2Blob<Precision::FP32>(out_mat, out_layout);
+        break;
+
+    default:
+        FAIL() << "Unsupported configuration";
+    }
+
+    PreProcessDataPtr preprocess = CreatePreprocDataHelper();
+    preprocess->setRoiBlob(in_blob);
+
+    PreProcessInfo info;
+    info.setResizeAlgorithm(interp);
+    info.setColorFormat(in_fmt);
+
+    // run once to warm up the cache
+    preprocess->execute(out_blob, info, false);
+
+    switch (prec)
+    {
+    case Precision::U8:   Blob2Img<Precision::U8>  (out_blob, out_mat, out_layout); break;
+    case Precision::FP32: Blob2Img<Precision::FP32>(out_blob, out_mat, out_layout); break;
+    default: FAIL() << "Unsupported configuration";
+    }
+
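+    // ocv_out_mat starts as a shallow copy sharing in_mat1's data (in_mat1 is not used afterwards)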
+    cv::Mat ocv_out_mat(in_mat1);
+
+    if (in_fmt != ColorFormat::RAW && in_fmt != out_fmt && in_fmt != ColorFormat::NV12) {
+        cv::cvtColor(ocv_out_mat, ocv_out_mat, toCvtColorCode(in_fmt, out_fmt));
+    } else if (in_fmt == ColorFormat::NV12) {
+        cv::cvtColorTwoPlane(ocv_out_mat, in_mat2, ocv_out_mat, toCvtColorCode(in_fmt, out_fmt));
+    }
+
+    auto cv_interp = interp == RESIZE_AREA ? cv::INTER_AREA : cv::INTER_LINEAR;
+    cv::resize(ocv_out_mat, ocv_out_mat, out_size, 0, 0, cv_interp);
+
+    EXPECT_LE(cv::norm(ocv_out_mat, out_mat, cv::NORM_INF), tolerance);
+
+#if PERF_TEST
+    // run repeatedly and report timing
+    const auto type_str = depthToString(ocv_depth);
+    const auto interp_str = interp == RESIZE_AREA ? "AREA"
+        : interp == RESIZE_BILINEAR ? "BILINEAR" : "?";
+    const auto in_layout_str = layoutToString(in_layout);
+    const auto out_layout_str = layoutToString(out_layout);
+
+    test_ms([&]() { preprocess->execute(out_blob, info, false); },
+            300,
+            "Preproc %s %s %d %s %dx%d %d %s %dx%d %s->%s",
+            type_str.c_str(),
+            interp_str,
+            in_ocv_chan,
+            in_layout_str.c_str(), in_size.width, in_size.height,
+            out_ocv_chan,
+            out_layout_str.c_str(), out_size.width, out_size.height,
+            colorFormatToString(in_fmt).c_str(), colorFormatToString(out_fmt).c_str());
+#endif // PERF_TEST
+}
diff --git a/inference-engine/tests_deprecated/fluid_preproc/common/fluid_tests.hpp b/inference-engine/tests_deprecated/fluid_preproc/common/fluid_tests.hpp
new file mode 100644 (file)
index 0000000..c76f67f
--- /dev/null
@@ -0,0 +1,59 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#ifndef FLUID_TESTS_HPP
+#define FLUID_TESTS_HPP
+
+#include "fluid_tests_common.hpp"
+#include "ie_preprocess.hpp"
+
+#include <gtest/gtest.h>
+
+struct ResizeTestGAPI: public testing::TestWithParam<std::tuple<int, int, std::pair<cv::Size, cv::Size>, double>> {};
+struct ResizeRGB8UTestGAPI: public testing::TestWithParam<std::tuple<int, int, std::pair<cv::Size, cv::Size>, double>> {};
+struct SplitTestGAPI: public TestParams<std::tuple<int, int, cv::Size, double>> {};
+struct ChanToPlaneTestGAPI: public TestParams<std::tuple<int, int, cv::Size, double>> {};
+struct MergeTestGAPI: public TestParams<std::tuple<int, int, cv::Size, double>> {};
+struct NV12toRGBTestGAPI: public TestParams<std::tuple<cv::Size, double>> {};
+struct I420toRGBTestGAPI: public TestParams<std::tuple<cv::Size, double>> {};
+struct ResizeRoiTestGAPI: public testing::TestWithParam<std::tuple<int, int, std::pair<cv::Size, cv::Size>, cv::Rect, double>> {};
+struct ResizeRGB8URoiTestGAPI: public testing::TestWithParam<std::tuple<int, int, std::pair<cv::Size, cv::Size>, cv::Rect, double>> {};
+
+//------------------------------------------------------------------------------
+
+struct ResizeTestIE: public testing::TestWithParam<std::tuple<int, int, std::pair<cv::Size, cv::Size>, double>> {};
+
+struct SplitTestIE: public TestParams<std::tuple<int, cv::Size, double>> {};
+struct MergeTestIE: public TestParams<std::tuple<int, cv::Size, double>> {};
+
+struct ColorConvertTestIE:
+    public testing::TestWithParam<std::tuple<int,  // matrix depth
+                                             InferenceEngine::ColorFormat,  // input color format
+                                             InferenceEngine::Layout,  // input layout
+                                             InferenceEngine::Layout,  // output layout
+                                             cv::Size,  // matrix size (input and output)
+                                             double>>  // tolerance
+{};
+
+struct ColorConvertYUV420TestIE:
+    public testing::TestWithParam<std::tuple<InferenceEngine::ColorFormat,  // input color format NV12 or I420
+                                             InferenceEngine::Layout,       // output layout
+                                             cv::Size,                      // matrix size (input and output)
+                                             double>>                       // tolerance
+{};
+
+//------------------------------------------------------------------------------
+
+using PreprocParams = std::tuple< InferenceEngine::Precision        // input-output data type
+                                , InferenceEngine::ResizeAlgorithm  // resize algorithm, if needed
+                                , InferenceEngine::ColorFormat      // input color format, if needed
+                                , InferenceEngine::Layout           // input tensor layout
+                                , InferenceEngine::Layout           // output tensor layout
+                                , std::pair<int, int>               // number of input and output channels
+                                , std::pair<cv::Size, cv::Size>     // input and output sizes
+                                >;
+
+struct PreprocTest: public TestParams<PreprocParams> {};
+
+#endif //FLUID_TESTS_HPP
diff --git a/inference-engine/tests_deprecated/fluid_preproc/common/fluid_tests_common.hpp b/inference-engine/tests_deprecated/fluid_preproc/common/fluid_tests_common.hpp
new file mode 100644 (file)
index 0000000..4de8ccd
--- /dev/null
@@ -0,0 +1,79 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <iostream>
+
+#include "opencv2/core.hpp"
+#include "opencv2/gapi/cpu/core.hpp"
+
+#include <gtest/gtest.h>
+
+namespace
+{
+    std::ostream& operator<<(std::ostream& o, const cv::GCompileArg& arg)
+    {
+        return o << (arg.tag.empty() ? "empty" : arg.tag);
+    }
+}
+
+class TestFunctional
+{
+public:
+    cv::Mat in_mat1;
+    cv::Mat in_mat2;
+    cv::Mat out_mat_gapi;
+    cv::Mat out_mat_ocv;
+
+    cv::Scalar sc;
+
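+    // initMatsRandU fills two random-uniform inputs; initMatrixRandU (below) fills only in_mat1.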
+    void initMatsRandU(int type, cv::Size sz_in, int dtype, bool createOutputMatrices = true)
+    {
+        in_mat1 = cv::Mat(sz_in, type);
+        in_mat2 = cv::Mat(sz_in, type);
+
+        auto& rng = cv::theRNG();
+        sc = cv::Scalar(rng(100),rng(100),rng(100),rng(100));
+        cv::randu(in_mat1, cv::Scalar::all(0), cv::Scalar::all(255));
+        cv::randu(in_mat2, cv::Scalar::all(0), cv::Scalar::all(255));
+
+        if (createOutputMatrices && dtype != -1)
+        {
+            out_mat_gapi = cv::Mat (sz_in, dtype);
+            out_mat_ocv = cv::Mat (sz_in, dtype);
+        }
+    }
+
+    void initMatrixRandU(int type, cv::Size sz_in, int dtype, bool createOutputMatrices = true)
+    {
+        in_mat1 = cv::Mat(sz_in, type);
+
+        auto& rng = cv::theRNG();
+        sc = cv::Scalar(rng(100),rng(100),rng(100),rng(100));
+
+        cv::randu(in_mat1, cv::Scalar::all(0), cv::Scalar::all(255));
+
+        if (createOutputMatrices && dtype != -1)
+        {
+            out_mat_gapi = cv::Mat (sz_in, dtype);
+            out_mat_ocv = cv::Mat (sz_in, dtype);
+        }
+    }
+
+    void initMatsRandN(int type, cv::Size sz_in, int dtype, bool createOutputMatrices = true)
+    {
+        in_mat1  = cv::Mat(sz_in, type);
+        cv::randn(in_mat1, cv::Scalar::all(127), cv::Scalar::all(40.f));
+
+        if (createOutputMatrices  && dtype != -1)
+        {
+            out_mat_gapi = cv::Mat(sz_in, dtype);
+            out_mat_ocv = cv::Mat(sz_in, dtype);
+        }
+    }
+};
+
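+// Couples the shared test mats above with gtest's value-parameterized test interface.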
+template<class T>
+class TestParams: public TestFunctional, public testing::TestWithParam<T>{};
diff --git a/inference-engine/tests_deprecated/fluid_preproc/cpu/fluid_tests_cpu.cpp b/inference-engine/tests_deprecated/fluid_preproc/cpu/fluid_tests_cpu.cpp
new file mode 100644 (file)
index 0000000..e28c612
--- /dev/null
@@ -0,0 +1,356 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "fluid_tests.hpp"
+
+#include <opencv2/opencv.hpp>
+
+#include <gtest/gtest.h>
+
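+// The macros below expand to comma-separated lists that are spliced directly into
+// testing::Values(...) in the INSTANTIATE_TEST_CASE_P calls further down.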
+#define TEST_SIZES        \
+    cv::Size(3840, 2160), \
+    cv::Size(1920, 1080), \
+    cv::Size(1280,  720), \
+    cv::Size(1280,  960), \
+    cv::Size( 960,  720), \
+    cv::Size( 640,  480), \
+    cv::Size( 320,  200), \
+    cv::Size( 113,   71)
+
+#define TEST_RESIZE_DOWN \
+    std::make_pair(cv::Size(3840, 2160), cv::Size(1920, 1080)), \
+    std::make_pair(cv::Size(3840, 2160), cv::Size(1280,  720)), \
+    std::make_pair(cv::Size(1920, 1080), cv::Size(1280,  720)), \
+    std::make_pair(cv::Size(1920, 1080), cv::Size( 640,  480)), \
+    std::make_pair(cv::Size(1280,  720), cv::Size( 640,  480)), \
+    std::make_pair(cv::Size(1280,  720), cv::Size( 320,  200)), \
+    std::make_pair(cv::Size( 640,  480), cv::Size( 320,  200)), \
+    std::make_pair(cv::Size( 640,  480), cv::Size( 113,   71)), \
+    std::make_pair(cv::Size( 320,  200), cv::Size( 113,   71))
+
+#define TEST_RESIZE_UP \
+    std::make_pair(cv::Size(1920, 1080), cv::Size(3840, 2160)), \
+    std::make_pair(cv::Size(1280,  720), cv::Size(3840, 2160)), \
+    std::make_pair(cv::Size(1280,  720), cv::Size(1920, 1080)), \
+    std::make_pair(cv::Size( 640,  480), cv::Size(1920, 1080)), \
+    std::make_pair(cv::Size( 640,  480), cv::Size(1280,  720)), \
+    std::make_pair(cv::Size( 320,  200), cv::Size(1280,  720)), \
+    std::make_pair(cv::Size( 320,  200), cv::Size( 640,  480)), \
+    std::make_pair(cv::Size( 113,   71), cv::Size( 640,  480)), \
+    std::make_pair(cv::Size( 113,   71), cv::Size( 320,  200))
+
+#define TEST_RESIZE_HORZ \
+    std::make_pair(cv::Size(3840, 2160), cv::Size(1920, 2160)), \
+    std::make_pair(cv::Size(1920, 1080), cv::Size(3840, 1080)), \
+    std::make_pair(cv::Size(1920, 1080), cv::Size(1280, 1080)), \
+    std::make_pair(cv::Size(1280,  720), cv::Size(1920,  720)), \
+    std::make_pair(cv::Size(1280,  720), cv::Size( 640,  720)), \
+    std::make_pair(cv::Size( 640,  480), cv::Size(1280,  480)), \
+    std::make_pair(cv::Size( 640,  480), cv::Size( 320,  480)), \
+    std::make_pair(cv::Size( 320,  200), cv::Size( 640,  200)), \
+    std::make_pair(cv::Size( 320,  200), cv::Size( 113,  200)), \
+    std::make_pair(cv::Size( 113,   71), cv::Size( 320,   71))
+
+#define TEST_RESIZE_VERT \
+    std::make_pair(cv::Size(3840, 2160), cv::Size(3840, 1080)), \
+    std::make_pair(cv::Size(1920, 1080), cv::Size(1920, 2160)), \
+    std::make_pair(cv::Size(1920, 1080), cv::Size(1920,  720)), \
+    std::make_pair(cv::Size(1280,  720), cv::Size(1280, 1080)), \
+    std::make_pair(cv::Size(1280,  720), cv::Size(1280,  480)), \
+    std::make_pair(cv::Size( 640,  480), cv::Size( 640,  720)), \
+    std::make_pair(cv::Size( 640,  480), cv::Size( 640,  200)), \
+    std::make_pair(cv::Size( 320,  200), cv::Size( 320,  480)), \
+    std::make_pair(cv::Size( 320,  200), cv::Size( 320,   71)), \
+    std::make_pair(cv::Size( 113,   71), cv::Size( 113,  200))
+
+#define TEST_RESIZE_COPY \
+    std::make_pair(cv::Size(3840, 2160), cv::Size(3840, 2160)), \
+    std::make_pair(cv::Size(1920, 1080), cv::Size(1920, 1080)), \
+    std::make_pair(cv::Size(1280,  720), cv::Size(1280,  720)), \
+    std::make_pair(cv::Size( 640,  480), cv::Size( 640,  480)), \
+    std::make_pair(cv::Size( 320,  200), cv::Size( 320,  200)), \
+    std::make_pair(cv::Size( 113,   71), cv::Size( 113,   71))
+
+#define TEST_RESIZE_SPECIAL \
+    std::make_pair(cv::Size(300, 300), cv::Size(300, 199)), \
+    std::make_pair(cv::Size(300, 300), cv::Size(199, 300)), \
+    std::make_pair(cv::Size(300, 300), cv::Size(199, 199)), \
+    std::make_pair(cv::Size(199, 199), cv::Size(300, 300)), \
+    std::make_pair(cv::Size(199, 300), cv::Size(300, 300)), \
+    std::make_pair(cv::Size(300, 199), cv::Size(300, 300))
+
+#define TEST_RESIZE_PAIRS \
+    TEST_RESIZE_DOWN, \
+    TEST_RESIZE_UP, \
+    TEST_RESIZE_HORZ, \
+    TEST_RESIZE_VERT, \
+    TEST_RESIZE_COPY, \
+    TEST_RESIZE_SPECIAL
+
+#define TEST_SIZES_PREPROC \
+    std::make_pair(cv::Size(1920, 1080), cv::Size(1024, 1024)), \
+    std::make_pair(cv::Size(1280,  720), cv::Size( 544,  320)), \
+    std::make_pair(cv::Size( 640,  480), cv::Size( 896,  512)), \
+    std::make_pair(cv::Size( 200,  400), cv::Size( 128,  384)), \
+    std::make_pair(cv::Size( 256,  256), cv::Size(  72,   72)), \
+    std::make_pair(cv::Size(  96,  256), cv::Size( 128,  384))
+
+using namespace testing;
+
+INSTANTIATE_TEST_CASE_P(ResizeTestFluid_U8, ResizeTestGAPI,
+                        Combine(Values(CV_8UC1, CV_8UC3),
+                                Values(cv::INTER_LINEAR, cv::INTER_AREA),
+                                Values(TEST_RESIZE_PAIRS),
+                                Values(1))); // error not more than 1 unit
+
+INSTANTIATE_TEST_CASE_P(ResizeTestFluid_F32, ResizeTestGAPI,
+                        Combine(Values(CV_32FC1, CV_32FC3),
+                                Values(cv::INTER_LINEAR, cv::INTER_AREA),
+                                Values(TEST_RESIZE_PAIRS),
+                                Values(0.015))); // accuracy like ~1.5%
+
+INSTANTIATE_TEST_CASE_P(ResizeRGB8UTestFluid_U8, ResizeRGB8UTestGAPI,
+                        Combine(Values(CV_8UC3, CV_8UC4),
+                                Values(cv::INTER_LINEAR),
+                                Values(TEST_RESIZE_PAIRS),
+                                Values(1))); // error not more than 1 unit
+
+INSTANTIATE_TEST_CASE_P(SplitTestFluid, SplitTestGAPI,
+                        Combine(Values(2, 3, 4),
+                                Values(CV_8U, CV_32F),
+                                Values(TEST_SIZES),
+                                Values(0)));
+
+INSTANTIATE_TEST_CASE_P(ChanToPlaneTestFluid, ChanToPlaneTestGAPI,
+                        Combine(Values(1, 3),
+                                Values(CV_8U, CV_32F),
+                                Values(TEST_SIZES),
+                                Values(0)));
+
+INSTANTIATE_TEST_CASE_P(MergeTestFluid, MergeTestGAPI,
+                        Combine(Values(2, 3, 4),
+                                Values(CV_8U, CV_32F),
+                                Values(TEST_SIZES),
+                                Values(0)));
+
+INSTANTIATE_TEST_CASE_P(NV12toRGBTestFluid, NV12toRGBTestGAPI,
+                        Combine(Values(cv::Size(3840, 2160),
+                                       cv::Size(1920, 1080),
+                                       cv::Size(1280,  720),
+                                       cv::Size(1280,  960),
+                                       cv::Size( 960,  720),
+                                       cv::Size( 640,  480),
+                                       cv::Size( 300,  300),
+                                       cv::Size( 320,  200)),
+                                Values(0)));
+
+INSTANTIATE_TEST_CASE_P(I420toRGBTestFluid, I420toRGBTestGAPI,
+                        Combine(Values(cv::Size(3840, 2160),
+                                       cv::Size(1920, 1080),
+                                       cv::Size(1280,  720),
+                                       cv::Size(1280,  960),
+                                       cv::Size( 960,  720),
+                                       cv::Size( 640,  480),
+                                       cv::Size( 300,  300),
+                                       cv::Size( 320,  200)),
+                                Values(0)));
+
+
+INSTANTIATE_TEST_CASE_P(ResizeRoiTestFluid, ResizeRoiTestGAPI,
+                        Combine(Values(CV_8UC1, CV_8UC3),
+                                Values(cv::INTER_LINEAR),
+                                Values(std::make_pair(cv::Size(24, 24), cv::Size(12, 12))),
+                                Values(cv::Rect{0, 0, 12, 3},
+                                       cv::Rect{0, 3, 12, 3},
+                                       cv::Rect{0, 6, 12, 3},
+                                       cv::Rect{0, 9, 12, 3}),
+                                Values(1))); // error not more than 1 unit
+
+INSTANTIATE_TEST_CASE_P(ResizeRGB8URoiTestFluid, ResizeRGB8URoiTestGAPI,
+                        Combine(Values(CV_8UC3, CV_8UC4),
+                                Values(cv::INTER_LINEAR),
+                                Values(std::make_pair(cv::Size(24, 24), cv::Size(12, 12))),
+                                Values(cv::Rect{0, 0, 12, 3},
+                                       cv::Rect{0, 3, 12, 3},
+                                       cv::Rect{0, 6, 12, 3},
+                                       cv::Rect{0, 9, 12, 3}),
+                                Values(1))); // error not more than 1 unit
+
+//----------------------------------------------------------------------
+
+INSTANTIATE_TEST_CASE_P(ResizeTestFluid_U8, ResizeTestIE,
+                        Combine(Values(CV_8UC1, CV_8UC3),
+                                Values(cv::INTER_LINEAR, cv::INTER_AREA),
+                                Values(TEST_RESIZE_PAIRS),
+                                Values(1))); // error not more than 1 unit
+
+INSTANTIATE_TEST_CASE_P(ResizeTestFluid_F32, ResizeTestIE,
+                        Combine(Values(CV_32FC1, CV_32FC3),
+                                Values(cv::INTER_LINEAR, cv::INTER_AREA),
+                                Values(TEST_RESIZE_PAIRS),
+                                Values(0.05))); // error within 0.05 units
+
+INSTANTIATE_TEST_CASE_P(SplitTestFluid, SplitTestIE,
+                        Combine(Values(CV_8UC2, CV_8UC3, CV_8UC4,
+                                       CV_32FC2, CV_32FC3, CV_32FC4),
+                                Values(TEST_SIZES),
+                                Values(0)));
+
+INSTANTIATE_TEST_CASE_P(MergeTestFluid, MergeTestIE,
+                        Combine(Values(CV_8UC2, CV_8UC3, CV_8UC4,
+                                       CV_32FC2, CV_32FC3, CV_32FC4),
+                                Values(TEST_SIZES),
+                                Values(0)));
+
+INSTANTIATE_TEST_CASE_P(ColorConvertFluid_3ch, ColorConvertTestIE,
+                        Combine(Values(CV_8U, CV_32F),
+                                Values(InferenceEngine::ColorFormat::RGB),
+                                Values(InferenceEngine::NHWC, InferenceEngine::NCHW),
+                                Values(InferenceEngine::NHWC, InferenceEngine::NCHW),
+                                Values(TEST_SIZES),
+                                Values(0)));
+
+INSTANTIATE_TEST_CASE_P(ColorConvertFluid_4ch, ColorConvertTestIE,
+                        Combine(Values(CV_8U, CV_32F),
+                                Values(InferenceEngine::ColorFormat::RGBX,
+                                       InferenceEngine::ColorFormat::BGRX),
+                                Values(InferenceEngine::NHWC),
+                                Values(InferenceEngine::NHWC, InferenceEngine::NCHW),
+                                Values(TEST_SIZES),
+                                Values(0)));
+
+INSTANTIATE_TEST_CASE_P(ColorConvertYUV420Fluid, ColorConvertYUV420TestIE,
+                        Combine(Values(InferenceEngine::NV12, InferenceEngine::I420),
+                                Values(InferenceEngine::NHWC, InferenceEngine::NCHW),
+                                Values(cv::Size(3840, 2160),
+                                       cv::Size(1920, 1080),
+                                       cv::Size(1280,  720),
+                                       cv::Size(1280,  960),
+                                       cv::Size( 960,  720),
+                                       cv::Size( 640,  480),
+                                       cv::Size( 320,  200),
+                                       cv::Size( 300,  300),
+                                       cv::Size( 150,  150)),
+                                Values(0)));
+
+INSTANTIATE_TEST_CASE_P(Reorder_HWC2CHW, ColorConvertTestIE,
+                        Combine(Values(CV_8U, CV_32F),
+                                Values(InferenceEngine::ColorFormat::BGR),
+                                Values(InferenceEngine::NHWC),
+                                Values(InferenceEngine::NCHW),
+                                Values(TEST_SIZES),
+                                Values(0)));
+
+INSTANTIATE_TEST_CASE_P(Reorder_CHW2HWC, ColorConvertTestIE,
+                        Combine(Values(CV_8U, CV_32F),
+                                Values(InferenceEngine::ColorFormat::BGR),
+                                Values(InferenceEngine::NCHW),
+                                Values(InferenceEngine::NHWC),
+                                Values(TEST_SIZES),
+                                Values(0)));
+
+//------------------------------------------------------------------------------
+
+namespace IE = InferenceEngine;
+
+static const auto FRAME_SIZES =
+   Values(std::make_pair(cv::Size(1920,1080),
+                         cv::Size(1024,1024)), // person-vehicle-bike-detection-crossroad-0078
+          std::make_pair(cv::Size(1024, 768),
+                         cv::Size( 992, 544)), // person-detection-retail-0001
+          std::make_pair(cv::Size(1280, 720),
+                         cv::Size( 896, 512)), // road-segmentation-adas-0001
+          std::make_pair(cv::Size(3840, 2160),
+                         cv::Size(2048, 1024)), // semantic-segmentation-adas-0001
+          std::make_pair(cv::Size(1270, 720),
+                         cv::Size(2048, 1024)), // semantic-segmentation-adas-0001 (UPSCALE)
+          std::make_pair(cv::Size( 640, 480),
+                         cv::Size( 544, 320)));  // 320 - face-person-detection-retail-0002,
+                                                 // 320 - person-detection-retail-10013
+                                                 // 300 - face-detection-retail-0004
+
+static const auto PATCH_SIZES =
+    Values(std::make_pair(cv::Size(200,400),
+                          cv::Size(128,384)),  // person-reidentification-retail-0076
+           std::make_pair(cv::Size( 96,256),
+                          cv::Size(128,384)),  // person-reidentification-retail-0076 (UPSCALE)
+           std::make_pair(cv::Size(340,340),
+                          cv::Size(320,256)),  // vehicle-license-plate-detection-barrier-0007
+           std::make_pair(cv::Size(256,256),
+                          cv::Size( 72,72)),   // vehicle-attributes-recognition-barrier-0039
+           std::make_pair(cv::Size(96,96),
+                          cv::Size(64,64)),    // 60 - head-pose-estimation-adas-0001
+                                               // 62 - age-gender-recognition-retail-0013
+                                               // 64 - emotions-recognition-retail-0003
+           std::make_pair(cv::Size(128,48),
+                          cv::Size( 94,24)),   // license-plate-recognition-barrier-0001
+           std::make_pair(cv::Size(120,200),
+                          cv::Size(80, 160))); // 80 - person-attributes-recognition-crossroad-0031
+                                               // 64 - person-reidentification-retail-0079
+
+INSTANTIATE_TEST_CASE_P(ReorderResize_Frame, PreprocTest,
+                        Combine(Values(IE::Precision::U8, IE::Precision::FP32),
+                                Values(IE::ResizeAlgorithm::RESIZE_BILINEAR), // AREA is not there yet
+                                Values(IE::ColorFormat::RAW),
+                                Values(IE::Layout::NHWC),
+                                Values(IE::Layout::NCHW),
+                                Values(std::make_pair(1, 1), std::make_pair(3, 3)),
+                                FRAME_SIZES));
+
+INSTANTIATE_TEST_CASE_P(Scale3ch_Frame, PreprocTest,
+                        Combine(Values(IE::Precision::U8, IE::Precision::FP32),
+                                Values(IE::ResizeAlgorithm::RESIZE_BILINEAR), // AREA is not there yet
+                                Values(IE::ColorFormat::RAW),
+                                Values(IE::Layout::NHWC),
+                                Values(IE::Layout::NHWC),
+                                Values(std::make_pair(3, 3)),
+                                FRAME_SIZES));
+
+INSTANTIATE_TEST_CASE_P(ReorderResize_Patch, PreprocTest,
+                        Combine(Values(IE::Precision::U8, IE::Precision::FP32),
+                                Values(IE::ResizeAlgorithm::RESIZE_BILINEAR), // AREA is not there yet
+                                Values(IE::ColorFormat::RAW),
+                                Values(IE::Layout::NHWC),
+                                Values(IE::Layout::NCHW),
+                                Values(std::make_pair(1, 1), std::make_pair(3, 3)),
+                                PATCH_SIZES));
+
+INSTANTIATE_TEST_CASE_P(Everything_Resize, PreprocTest,
+                        Combine(Values(IE::Precision::U8, IE::Precision::FP32),
+                                Values(IE::ResizeAlgorithm::RESIZE_BILINEAR, IE::ResizeAlgorithm::RESIZE_AREA),
+                                Values(IE::ColorFormat::RAW),
+                                Values(IE::Layout::NHWC, IE::Layout::NCHW),
+                                Values(IE::Layout::NHWC, IE::Layout::NCHW),
+                                Values(std::make_pair(1, 1),
+                                       std::make_pair(2, 2),
+                                       std::make_pair(3, 3),
+                                       std::make_pair(4, 4)),
+                                Values(TEST_SIZES_PREPROC)));
+
+INSTANTIATE_TEST_CASE_P(ColorFormats_3ch, PreprocTest,
+                        Combine(Values(IE::Precision::U8, IE::Precision::FP32),
+                                Values(IE::ResizeAlgorithm::RESIZE_BILINEAR, IE::ResizeAlgorithm::RESIZE_AREA),
+                                Values(IE::ColorFormat::RGB),
+                                Values(IE::Layout::NHWC, IE::Layout::NCHW),
+                                Values(IE::Layout::NHWC, IE::Layout::NCHW),
+                                Values(std::make_pair(3, 3)),
+                                Values(TEST_SIZES_PREPROC)));
+
+INSTANTIATE_TEST_CASE_P(ColorFormats_4ch, PreprocTest,
+                        Combine(Values(IE::Precision::U8, IE::Precision::FP32),
+                                Values(IE::ResizeAlgorithm::RESIZE_BILINEAR, IE::ResizeAlgorithm::RESIZE_AREA),
+                                Values(IE::ColorFormat::BGRX, IE::ColorFormat::RGBX),
+                                Values(IE::Layout::NHWC),
+                                Values(IE::Layout::NHWC, IE::Layout::NCHW),
+                                Values(std::make_pair(4, 3)),
+                                Values(TEST_SIZES_PREPROC)));
+
+INSTANTIATE_TEST_CASE_P(ColorFormat_NV12, PreprocTest,
+                        Combine(Values(IE::Precision::U8),
+                                Values(IE::ResizeAlgorithm::RESIZE_BILINEAR, IE::ResizeAlgorithm::RESIZE_AREA),
+                                Values(IE::ColorFormat::NV12),
+                                Values(IE::Layout::NCHW),
+                                Values(IE::Layout::NHWC, IE::Layout::NCHW),
+                                Values(std::make_pair(1, 3)),
+                                Values(TEST_SIZES_PREPROC)));
diff --git a/inference-engine/tests_deprecated/fluid_preproc/fluid_test_computations/CMakeLists.txt b/inference-engine/tests_deprecated/fluid_preproc/fluid_test_computations/CMakeLists.txt
new file mode 100644 (file)
index 0000000..94b935f
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (C) 2018-2020 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+file(GLOB SRC *.cpp)
+file(GLOB HDR *.hpp)
+
+add_library(fluid_test_computations SHARED ${SRC} ${HDR})
+
+target_include_directories(fluid_test_computations PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}")
+
+target_link_libraries(fluid_test_computations PRIVATE inference_engine_preproc_s inference_engine fluid)
+
+target_compile_definitions(fluid_test_computations PRIVATE IMPLEMENT_FLUID_COMPUTATION_API)
diff --git a/inference-engine/tests_deprecated/fluid_preproc/fluid_test_computations/fluid_test_computations.cpp b/inference-engine/tests_deprecated/fluid_preproc/fluid_test_computations/fluid_test_computations.cpp
new file mode 100644 (file)
index 0000000..8ec8bd9
--- /dev/null
@@ -0,0 +1,215 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <fluid_test_computations.hpp>
+#include <opencv2/gapi.hpp>
+#include <ie_preprocess_gapi_kernels.hpp>
+#include <opencv2/gapi/fluid/gfluidkernel.hpp>
+
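+// Channel count encoded in an OpenCV type flag (cf. CV_MAT_CN; assumes no extra flag bits are set).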
+#define CV_MAT_CHANNELS(flags) (((flags) >> CV_CN_SHIFT) + 1)
+
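+// Pimpl: the G-API computation plus the input/output buffers it is applied to.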
+struct FluidComputation::Priv
+{
+    cv::GComputation m_c;
+    std::vector<cv::gapi::own::Mat> m_v_in;
+    std::vector<cv::gapi::own::Mat> m_v_out;
+};
+
+FluidComputation::FluidComputation(Priv *priv)
+    : m_priv(priv)
+{}
+
+namespace
+{
+    cv::gapi::own::Rect to_own(test::Rect rect) { return {rect.x, rect.y, rect.width, rect.height}; }
+}
+
+void FluidComputation::warmUp(test::Rect roi)
+{
+    if (roi.empty())
+        m_priv->m_c.apply(m_priv->m_v_in, m_priv->m_v_out, cv::compile_args(InferenceEngine::gapi::preprocKernels()));
+    else
+        m_priv->m_c.apply(m_priv->m_v_in, m_priv->m_v_out, cv::compile_args(InferenceEngine::gapi::preprocKernels(), cv::GFluidOutputRois{{to_own(roi)}}));
+}
+
+void FluidComputation::apply()
+{
+    m_priv->m_c.apply(m_priv->m_v_in, m_priv->m_v_out);
+}
+
+namespace
+{
+cv::gapi::own::Mat to_own(test::Mat mat) {
+    return {mat.rows, mat.cols, mat.type, mat.data, mat.step};
+}
+
+std::vector<cv::gapi::own::Mat> to_own(std::vector<test::Mat> mats)
+{
+    std::vector<cv::gapi::own::Mat> own_mats(mats.size());
+    for (size_t i = 0; i < mats.size(); i++) {
+        own_mats[i] = to_own(mats[i]);
+    }
+    return own_mats;
+}
+
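+// Expand a tuple of cv::GMat into a std::vector via an index sequence
+// (Split2/3/4 return tuples of differing arity).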
+template<typename... Ts, int... IIs>
+std::vector<cv::GMat> to_vec_impl(std::tuple<Ts...> &&gmats, cv::detail::Seq<IIs...>) {
+    return { std::get<IIs>(gmats)... };
+}
+
+template<typename... Ts>
+std::vector<cv::GMat> to_vec(std::tuple<Ts...> &&gmats) {
+    return to_vec_impl(std::move(gmats), typename cv::detail::MkSeq<sizeof...(Ts)>::type());
+}
+} // anonymous namespace
+
+static cv::GComputation buildResizeComputation(test::Mat inMat, test::Mat outMat, int interp)
+{
+    cv::gapi::own::Size sz_in  { inMat.cols,  inMat.rows};
+    cv::gapi::own::Size sz_out {outMat.cols, outMat.rows};
+    int type = outMat.type;
+    cv::GMat in, out;
+    switch (CV_MAT_CHANNELS(type)) {
+    case 1:
+        out = InferenceEngine::gapi::ScalePlane::on(in, type, sz_in, sz_out, interp);
+        break;
+    case 3:
+        {
+        int depth = CV_MAT_DEPTH(type);
+        int type1 = CV_MAKE_TYPE(depth, 1);
+        cv::GMat in0, in1, in2, out0, out1, out2;
+        std::tie(in0, in1, in2) = InferenceEngine::gapi::Split3::on(in);
+        out0 = InferenceEngine::gapi::ScalePlane::on(in0, type1, sz_in, sz_out, interp);
+        out1 = InferenceEngine::gapi::ScalePlane::on(in1, type1, sz_in, sz_out, interp);
+        out2 = InferenceEngine::gapi::ScalePlane::on(in2, type1, sz_in, sz_out, interp);
+        out = InferenceEngine::gapi::Merge3::on(out0, out1, out2);
+        }
+        break;
+    default: GAPI_Assert(!"ERROR: unsupported number of channels!");
+    }
+
+    return cv::GComputation(in, out);
+}
+
+FluidResizeComputation::FluidResizeComputation(test::Mat inMat, test::Mat outMat, int interp)
+    : FluidComputation(new Priv{buildResizeComputation(inMat, outMat, interp)
+                               ,{to_own(inMat)}
+                               ,{to_own(outMat)}
+                               })
+{}
+
+static cv::GComputation buildResizeRGB8UComputation(test::Mat inMat, test::Mat outMat, int interp)
+{
+    cv::gapi::own::Size sz_in  { inMat.cols,  inMat.rows};
+    cv::gapi::own::Size sz_out {outMat.cols, outMat.rows};
+    int type = outMat.type;
+    cv::GMat in, out, out_r, out_g, out_b, out_x;
+
+    if (type == CV_8UC3) {
+        std::tie(out_r, out_g, out_b) = InferenceEngine::gapi::ScalePlanes::on(in, type, sz_in, sz_out, interp);
+        out = InferenceEngine::gapi::Merge3::on(out_r, out_g, out_b);
+    }
+    else if (type == CV_8UC4) {
+        std::tie(out_r, out_g, out_b, out_x) = InferenceEngine::gapi::ScalePlanes4::on(in, type, sz_in, sz_out, interp);
+        out = InferenceEngine::gapi::Merge4::on(out_r, out_g, out_b, out_x);
+    } else {
+        GAPI_Assert(!"ERROR: unsupported number of channels!");
+    }
+
+    return cv::GComputation(in, out);
+}
+
+FluidResizeRGB8UComputation::FluidResizeRGB8UComputation(test::Mat inMat, test::Mat outMat, int interp)
+    : FluidComputation(new Priv{buildResizeRGB8UComputation(inMat, outMat, interp)
+                               ,{to_own(inMat)}
+                               ,{to_own(outMat)}
+                               })
+{}
+
+static cv::GComputation buildSplitComputation(int planes)
+{
+    std::vector<cv::GMat> ins(1);
+    std::vector<cv::GMat> outs(planes);
+
+    switch (planes) {
+    case 2: outs = to_vec(InferenceEngine::gapi::Split2::on(ins[0])); break;
+    case 3: outs = to_vec(InferenceEngine::gapi::Split3::on(ins[0])); break;
+    case 4: outs = to_vec(InferenceEngine::gapi::Split4::on(ins[0])); break;
+    default: GAPI_Assert(false);
+    }
+
+    return cv::GComputation(ins, outs);
+}
+
+FluidSplitComputation::FluidSplitComputation(test::Mat inMat, std::vector<test::Mat> outMats)
+    : FluidComputation(new Priv{buildSplitComputation(outMats.size())
+                               ,{to_own(inMat)}
+                               ,to_own(outMats)
+                               })
+{}
+
+static cv::GComputation buildChanToPlaneComputation(int chan)
+{
+    cv::GMat in, out;
+    out = InferenceEngine::gapi::ChanToPlane::on(in, chan);
+    return cv::GComputation(in, out);
+}
+
+FluidChanToPlaneComputation::FluidChanToPlaneComputation(test::Mat inMat, test::Mat outMat, int chan)
+    : FluidComputation(new Priv{buildChanToPlaneComputation(chan)
+                               ,{to_own(inMat)}
+                               ,{to_own(outMat)}
+                               })
+{}
+
+static cv::GComputation buildMergeComputation(int planes)
+{
+    std::vector<cv::GMat> ins(planes);
+    std::vector<cv::GMat> outs(1);
+
+    switch (planes) {
+    case 2: outs[0] = InferenceEngine::gapi::Merge2::on(ins[0], ins[1]); break;
+    case 3: outs[0] = InferenceEngine::gapi::Merge3::on(ins[0], ins[1], ins[2]); break;
+    case 4: outs[0] = InferenceEngine::gapi::Merge4::on(ins[0], ins[1], ins[2], ins[3]); break;
+    default: GAPI_Assert(false);
+    }
+
+    return cv::GComputation(ins, outs);
+}
+
+FluidMergeComputation::FluidMergeComputation(std::vector<test::Mat> inMats, test::Mat outMat)
+    : FluidComputation(new Priv{buildMergeComputation(inMats.size())
+                               ,to_own(inMats)
+                               ,{to_own(outMat)}
+                               })
+{}
+
+static cv::GComputation buildFluidNV12toRGBComputation()
+{
+    cv::GMat in_y, in_uv;
+    cv::GMat out = InferenceEngine::gapi::NV12toRGB::on(in_y,in_uv);
+    return cv::GComputation(cv::GIn(in_y,in_uv), cv::GOut(out));
+}
+
+FluidNV12toRGBComputation::FluidNV12toRGBComputation(test::Mat inMat_y, test::Mat inMat_uv, test::Mat outMat)
+    : FluidComputation(new Priv{buildFluidNV12toRGBComputation()
+                               ,to_own({inMat_y,inMat_uv})
+                               ,{to_own(outMat)}
+                               })
+{}
+
+static cv::GComputation buildFluidI420toRGBComputation()
+{
+    cv::GMat in_y, in_u, in_v;
+    cv::GMat out = InferenceEngine::gapi::I420toRGB::on(in_y, in_u, in_v);
+    return cv::GComputation(cv::GIn(in_y, in_u, in_v), cv::GOut(out));
+}
+
+FluidI420toRGBComputation::FluidI420toRGBComputation(test::Mat inMat_y, test::Mat inMat_u, test::Mat inMat_v, test::Mat outMat)
+    : FluidComputation(new Priv{buildFluidI420toRGBComputation()
+                               ,to_own({inMat_y,inMat_u, inMat_v})
+                               ,{to_own(outMat)}
+                               })
+{}
diff --git a/inference-engine/tests_deprecated/fluid_preproc/fluid_test_computations/fluid_test_computations.hpp b/inference-engine/tests_deprecated/fluid_preproc/fluid_test_computations/fluid_test_computations.hpp
new file mode 100644 (file)
index 0000000..26b9221
--- /dev/null
@@ -0,0 +1,101 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#ifndef FLUID_TEST_COMPUTATIONS_HPP
+#define FLUID_TEST_COMPUTATIONS_HPP
+
+#include <ie_api.h>
+
+#include <memory>
+#include <vector>
+
+#if defined(_WIN32)
+    #ifdef IMPLEMENT_FLUID_COMPUTATION_API
+        #define FLUID_COMPUTATION_VISIBILITY __declspec(dllexport)
+    #else
+        #define FLUID_COMPUTATION_VISIBILITY __declspec(dllimport)
+    #endif
+#else
+    #ifdef IMPLEMENT_FLUID_COMPUTATION_API
+        #define FLUID_COMPUTATION_VISIBILITY __attribute__((visibility("default")))
+    #else
+        #define FLUID_COMPUTATION_VISIBILITY
+    #endif
+#endif
+
+namespace test
+{
+struct Mat
+{
+    int     rows;
+    int     cols;
+    int     type;
+    void*   data;
+    size_t  step;
+};
+struct Rect{
+    int x;
+    int y;
+    int width;
+    int height;
+    bool empty() const {
+        return width == 0 && height == 0;
+    }
+};
+}
+
+class FLUID_COMPUTATION_VISIBILITY FluidComputation
+{
+protected:
+    struct Priv;
+    std::shared_ptr<Priv> m_priv;
+public:
+    FluidComputation(Priv* priv);
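+    // a default-constructed (all-zero) roi compiles the graph without output ROIs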
+    void warmUp(test::Rect roi = {});
+    void apply();
+};
+
+class FLUID_COMPUTATION_VISIBILITY FluidResizeComputation : public FluidComputation
+{
+public:
+    FluidResizeComputation(test::Mat inMat, test::Mat outMat, int interp);
+};
+
+class FLUID_COMPUTATION_VISIBILITY FluidResizeRGB8UComputation : public FluidComputation
+{
+public:
+    FluidResizeRGB8UComputation(test::Mat inMat, test::Mat outMat, int interp);
+};
+
+class FLUID_COMPUTATION_VISIBILITY FluidSplitComputation : public FluidComputation
+{
+public:
+    FluidSplitComputation(test::Mat inMat, std::vector<test::Mat> outMats);
+};
+
+class FLUID_COMPUTATION_VISIBILITY FluidChanToPlaneComputation : public FluidComputation
+{
+public:
+    FluidChanToPlaneComputation(test::Mat inMat, test::Mat outMat, int chan);
+};
+
+class FLUID_COMPUTATION_VISIBILITY FluidMergeComputation : public FluidComputation
+{
+public:
+    FluidMergeComputation(std::vector<test::Mat> inMats, test::Mat outMat);
+};
+
+class FLUID_COMPUTATION_VISIBILITY FluidNV12toRGBComputation : public FluidComputation
+{
+public:
+    FluidNV12toRGBComputation(test::Mat inMat_y, test::Mat inMat_uv, test::Mat outMat);
+};
+
+class FLUID_COMPUTATION_VISIBILITY FluidI420toRGBComputation : public FluidComputation
+{
+public:
+    FluidI420toRGBComputation(test::Mat inMat_y, test::Mat inMat_u, test::Mat inMat_v, test::Mat outMat);
+};
+
+#endif // FLUID_TEST_COMPUTATIONS_HPP
diff --git a/inference-engine/tests_deprecated/functional/CMakeLists.txt b/inference-engine/tests_deprecated/functional/CMakeLists.txt
new file mode 100644 (file)
index 0000000..13b519e
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (C) 2016-2020 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+add_subdirectory(ie_tests)
+add_subdirectory(shared_tests)
+
+disable_deprecated_warnings()
+
+if (ENABLE_MYRIAD)
+    add_subdirectory(vpu)
+endif()
+
+if (ENABLE_CLDNN)
+    add_subdirectory(cldnn)
+endif()
+
+if (ENABLE_GNA)
+    add_subdirectory(gna)
+endif()
+
+if (ENABLE_MKL_DNN)
+    add_subdirectory(mkldnn)
+endif()
diff --git a/inference-engine/tests_deprecated/functional/cldnn/CMakeLists.txt b/inference-engine/tests_deprecated/functional/cldnn/CMakeLists.txt
new file mode 100644 (file)
index 0000000..88d6e04
--- /dev/null
@@ -0,0 +1,73 @@
+# Copyright (C) 2018-2020 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+set(TARGET_NAME ClDnnFunctionalTests)
+
+file(GLOB CLDNN_TEST_SOURCES
+        ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/regression_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/single_layer_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/io_blob_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/input_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/inference_engine_regression_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/lstm/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/common_single_layer_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/ie_class/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/single_layer_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/transformations/*.cpp)
+
+list(APPEND TEST_SRC ${CLDNN_TEST_SOURCES})
+
+list(APPEND CLDNN_LIBS
+        IESharedTests
+        inference_engine_lp_transformations
+        inference_engine_ir_readers
+        ${CLDNN__IOCL_ICD_LIBPATH})
+
+# try to find VA libraries
+include(FindPkgConfig)
+pkg_search_module(LIBVA QUIET libva)
+
+if(LIBVA_FOUND)
+    list(APPEND CLDNN_LIBS ${LIBVA_LINK_LIBRARIES})
+endif()
+
+list(APPEND DEPENDENCIES
+        clDNNPlugin)
+
+if (ENABLE_MKL_DNN)
+    list(APPEND DEPENDENCIES
+            MKLDNNPlugin
+            HeteroPlugin)
+endif()
+
+source_group("src" FILES ${TEST_SRC})
+source_group("include" FILES ${TEST_INCLUDE})
+
+add_executable(${TARGET_NAME}
+        ${TEST_SRC}
+        ${TEST_INCLUDE})
+
+target_compile_definitions(${TARGET_NAME}
+        PRIVATE
+        INSTANTIATE_TESTS=1
+        PUBLIC ${ARGV}
+        DATA_PATH=\"${DATA_PATH}\"
+        MODELS_PATH=\"${MODELS_PATH}\")
+
+if(LIBVA_FOUND)
+    target_compile_definitions(${TARGET_NAME} PRIVATE ENABLE_LIBVA)
+    target_include_directories(${TARGET_NAME} PRIVATE ${LIBVA_INCLUDE_DIRS})
+endif()
+
+target_include_directories(${TARGET_NAME} PRIVATE ${CLDNN__IOCL_ICD_INCDIRS})
+target_link_libraries(${TARGET_NAME} PRIVATE ${CLDNN_LIBS})
+
+add_dependencies(${TARGET_NAME} ${DEPENDENCIES})
+
+add_test(NAME ${TARGET_NAME}
+        COMMAND ${TARGET_NAME})
diff --git a/inference-engine/tests_deprecated/functional/cldnn/regression_tests/regression_reference.cpp b/inference-engine/tests_deprecated/functional/cldnn/regression_tests/regression_reference.cpp
new file mode 100644 (file)
index 0000000..6df50cb
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "regression_reference.hpp"
+
+namespace Regression {
+    namespace Reference {
+        std::map<std::string, std::vector<ClassificationScoringResultsForTests>> values = {};
+    }  // namespace Reference
+}  // namespace Regression
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/common_single_layer_tests/single_layer_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/common_single_layer_tests/single_layer_tests.cpp
new file mode 100644 (file)
index 0000000..9477b8c
--- /dev/null
@@ -0,0 +1,234 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "single_layer_tests.hpp"
+
+static std::vector<PluginParams> pluginParams = {
+        PluginDependentParam{"GPU", Layout::NCHW, Precision::FP32, 0.001f},
+};
+
+
+static CommonTestUtils::conv_common_params convParams =
+        {
+                PropertyVector<unsigned>{{2, 2}},  // stride
+                PropertyVector<unsigned>{{3, 3}},  // kernel
+                {},                                // pad_begin
+                {},                                // pad_end
+                PropertyVector<unsigned>{{1, 1}},  // dilation
+                "same_upper",                      // auto_pad
+                1,                                 // group
+                2                                  // out_c
+        };
+
+static CommonTestUtils::conv_common_params defConvParamsHeavy =
+        {
+                PropertyVector<unsigned>{{1, 1}},  // stride
+                PropertyVector<unsigned>{{3, 3}},  // kernel
+                {},                                // pad_begin
+                {},                                // pad_end
+                PropertyVector<unsigned>{{2, 2}},  // dilation
+                "same_upper",                      // auto_pad
+                1,                                 // group
+                128                                // out_c
+        };
+
+static CommonTestUtils::conv_common_params defConvParamsLight0 =
+        {
+                PropertyVector<unsigned>{{1, 1}},  // stride
+                PropertyVector<unsigned>{{3, 3}},  // kernel
+                {},                                // pad_begin
+                {},                                // pad_end
+                PropertyVector<unsigned>{{2, 2}},  // dilation
+                "same_upper",                      // auto_pad
+                1,                                 // group
+                4                                  // out_c
+        };
+
+static CommonTestUtils::conv_common_params defConvParamsLight1 =
+        {
+                PropertyVector<unsigned>{{2, 2}},  // stride
+                PropertyVector<unsigned>{{3, 3}},  // kernel
+                {},                                // pad_begin
+                {},                                // pad_end
+                PropertyVector<unsigned>{{1, 1}},  // dilation
+                "same_upper",                      // auto_pad
+                1,                                 // group
+                16                                 // out_c
+        };
+
+
+static CommonTestUtils::conv_common_params defConvParamsLight2 =
+        {
+                PropertyVector<unsigned>{{2, 2}},  // stride
+                PropertyVector<unsigned>{{3, 3}},  // kernel
+                {},                                // pad_begin
+                {},                                // pad_end
+                PropertyVector<unsigned>{{2, 2}},  // dilation
+                "same_upper",                      // auto_pad
+                1,                                 // group
+                15                                 // out_c
+        };
+
+
+static CommonTestUtils::conv_common_params defConvParamsLight3 =
+        {
+                PropertyVector<unsigned>{{1, 1}},  // stride
+                PropertyVector<unsigned>{{3, 3}},  // kernel
+                {},                                // pad_begin
+                {},                                // pad_end
+                PropertyVector<unsigned>{{2, 2}},  // dilation
+                "same_upper",                      // auto_pad
+                2,                                 // group
+                4                                  // out_c
+        };
+
+static CommonTestUtils::pool_common_params poolParams =
+        {
+                PropertyVector<unsigned>{{2, 2}},  // stride
+                PropertyVector<unsigned>{{3, 3}},  // kernel
+                {},                                // pad_begin
+                {},                                // pad_end
+                "same_upper",                      // auto_pad
+                true,                              // avg
+                false                              // exclude_pad
+        };
+
+std::string
+getTestCaseName(testing::TestParamInfo<std::tuple<InitialShapes, NewShapes, PluginParams, Helper>> obj) {
+    auto params = obj.param;
+    LayerTestHelper::Ptr helper = std::get<3>(params);
+    return "CLDNN" + helper->getType();
+}
+
+#if (defined INSTANTIATE_TESTS)
+
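+// With auto_pad "same_upper" the output spatial size is ceil(input / stride), so at
+// stride 2 both the 16x16 and 15x15 inputs below land on the same 8x8 output; the
+// deconvolution case simply inverts that mapping.
+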
+INSTANTIATE_TEST_CASE_P(
+        Conv_nightly, CommonSingleLayerTest,
+        ::testing::Combine(
+        ::testing::Values(InitialShapes({
+                                                {{1, 2, 16, 16}},           // input
+                                                {{1, 2, 8,  8}}             // output
+                                        })),
+        ::testing::Values(NewShapes({
+                                            {{1, 2, 15, 15}},               // input
+                                            {{1, 2, 8,  8}}                 // output
+                                    })),
+        ::testing::ValuesIn(pluginParams),
+        ::testing::Values(Helper(std::make_shared<ConvolutionTestHelper>(convParams)))
+), getTestCaseName
+);
+
+INSTANTIATE_TEST_CASE_P(
+        Deconv_nightly, CommonSingleLayerTest,
+        ::testing::Combine(
+        ::testing::Values(InitialShapes({
+                                                {{1, 2, 8,  8}},             // input
+                                                {{1, 2, 16, 16}}              // output
+                                        })),
+        ::testing::Values(NewShapes({
+                                            {{1, 2, 7,  7}},                  // input
+                                            {{1, 2, 14, 14}}                  // output
+                                    })),
+        ::testing::ValuesIn(pluginParams),
+        ::testing::Values(Helper(std::make_shared<DeconvolutionTestHelper>(convParams)))
+), getTestCaseName
+);
+
+INSTANTIATE_TEST_CASE_P(
+        Pool_nightly, CommonSingleLayerTest,
+        ::testing::Combine(
+        ::testing::Values(InitialShapes({
+                                                {{1, 2, 16, 16}},           // input
+                                                {{1, 2, 8,  8}}             // output
+                                        })),
+        ::testing::Values(NewShapes({
+                                            {{1, 2, 15, 15}},               // input
+                                            {{1, 2, 8,  8}}                 // output
+                                    })),
+        ::testing::ValuesIn(pluginParams),
+        ::testing::Values(Helper(std::make_shared<PoolingTestHelper>(poolParams)))
+), getTestCaseName
+);
+
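+// In the deformable-convolution cases below, the second DeformableConvolutionTestHelper
+// argument is read here as the deformable-group count: the trans input carries
+// 2 * deformable_groups * kernel_h * kernel_w channels (2 * 2 * 3 * 3 = 36 for
+// DefConvLight0, 2 * 4 * 3 * 3 = 72 for DefConvHeavy). This reading is inferred from
+// the shapes, not stated by the helper itself.
+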
+INSTANTIATE_TEST_CASE_P(
+        DefConvLight0_nightly, CommonSingleLayerTest,
+        ::testing::Combine(
+                ::testing::Values(InitialShapes({
+                                                        {{1, 4, 4, 4}, {1, 36, 4, 4}}, // input, trans
+                                                        {{1, 4, 4, 4}}                 // output
+                                                })),
+                ::testing::Values(NewShapes({
+                                                    {{1, 4, 4, 4}, {1, 36, 4, 4}}, // input, trans
+                                                    {{1, 4, 4, 4}}                 // output
+                                            })),
+                ::testing::ValuesIn(pluginParams),
+                ::testing::Values(Helper(std::make_shared<DeformableConvolutionTestHelper>(defConvParamsLight0, 2)))
+        ), getTestCaseName
+);
+
+INSTANTIATE_TEST_CASE_P(
+        DefConvLight1_WithBatch_nightly, CommonSingleLayerTest,
+        ::testing::Combine(
+                ::testing::Values(InitialShapes({
+                                                        {{2, 4, 8, 8}, {2, 36, 4, 4}}, // input, trans
+                                                        {{2, 16, 4, 4}}                // output
+                                                })),
+                ::testing::Values(NewShapes({
+                                                    {{2, 4, 8, 8}, {2, 36, 4, 4}}, // input, trans
+                                                    {{2, 16, 4, 4}}                // output
+                                            })),
+                ::testing::ValuesIn(pluginParams),
+                ::testing::Values(Helper(std::make_shared<DeformableConvolutionTestHelper>(defConvParamsLight1, 2)))
+        ), getTestCaseName
+);
+
+INSTANTIATE_TEST_CASE_P(
+        DefConvLight2_WithBatch_nightly, CommonSingleLayerTest,
+        ::testing::Combine(
+                ::testing::Values(InitialShapes({
+                                                        {{2, 4, 8, 8}, {2, 18, 4, 4}}, // input, trans
+                                                        {{2, 15, 4, 4}}                // output
+                                                })),
+                ::testing::Values(NewShapes({
+                                                    {{2, 4, 8, 8}, {2, 18, 4, 4}}, // input, trans
+                                                    {{2, 15, 4, 4}}                // output
+                                            })),
+                ::testing::ValuesIn(pluginParams),
+                ::testing::Values(Helper(std::make_shared<DeformableConvolutionTestHelper>(defConvParamsLight2, 1)))
+        ), getTestCaseName
+);
+
+INSTANTIATE_TEST_CASE_P(
+        DefConvLight3_WithGroups_nightly, CommonSingleLayerTest,
+        ::testing::Combine(
+                ::testing::Values(InitialShapes({
+                                                        {{1, 4, 4, 4}, {1, 18, 4, 4}}, // input, trans
+                                                        {{1, 4, 4, 4}}                 // output
+                                                })),
+                ::testing::Values(NewShapes({
+                                                    {{1, 4, 4, 4}, {1, 18, 4, 4}}, // input, trans
+                                                    {{1, 4, 4, 4}}                 // output
+                                            })),
+                ::testing::ValuesIn(pluginParams),
+                ::testing::Values(Helper(std::make_shared<DeformableConvolutionTestHelper>(defConvParamsLight3, 1)))
+        ), getTestCaseName
+);
+
+INSTANTIATE_TEST_CASE_P(
+        DefConvHeavy_nightly, CommonSingleLayerTest,
+        ::testing::Combine(
+                ::testing::Values(InitialShapes({
+                                                        {{1, 512, 38, 38}, {1, 72, 38, 38}}, // input, trans
+                                                        {{1, 128, 38, 38}}                   // output
+                                                })),
+                ::testing::Values(NewShapes({
+                                                    {{1, 512, 38, 38}, {1, 72, 38, 38}}, // input, trans
+                                                    {{1, 128, 38, 38}}                   // output
+                                            })),
+                ::testing::ValuesIn(pluginParams),
+                ::testing::Values(Helper(std::make_shared<DeformableConvolutionTestHelper>(defConvParamsHeavy, 4)))
+        ), getTestCaseName
+);
+
+#endif
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/ie_class/ie_class.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/ie_class/ie_class.cpp
new file mode 100644 (file)
index 0000000..1db1828
--- /dev/null
@@ -0,0 +1,148 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "ie_class.hpp"
+
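+// GPU remote-context interop headers: DirectX on Windows, VA-API when libva is
+// enabled, OpenCL unconditionally.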
+#ifdef _WIN32
+# include "gpu/gpu_context_api_dx.hpp"
+#elif defined ENABLE_LIBVA
+# include <gpu/gpu_context_api_va.hpp>
+#endif
+#include "gpu/gpu_context_api_ocl.hpp"
+
+//
+// IE Class Common tests with <pluginName, deviceName params>
+//
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassCommon, IEClassBasicTestP,
+        ::testing::Values(std::make_pair("clDNNPlugin", "GPU")));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassNetworkTestP, IEClassNetworkTestP,
+        ::testing::Values("GPU"));
+
+//
+// IE Class GetMetric
+//
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS,
+        ::testing::Values("GPU", "MULTI", "HETERO"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_METRICS,
+        ::testing::Values("GPU", "MULTI", "HETERO"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassGetMetricTest, IEClassGetMetricTest_AVAILABLE_DEVICES,
+        ::testing::Values("GPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassGetMetricTest, IEClassGetMetricTest_FULL_DEVICE_NAME,
+        ::testing::Values("GPU", "MULTI", "HETERO"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassGetMetricTest, IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES,
+        ::testing::Values("GPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassGetMetricTest, IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS,
+        ::testing::Values("GPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassGetMetricTest, IEClassGetMetricTest_RANGE_FOR_STREAMS,
+        ::testing::Values("GPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassGetMetricTest, IEClassGetMetricTest_ThrowUnsupported,
+        ::testing::Values("GPU", "MULTI", "HETERO"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassGetConfigTest, IEClassGetConfigTest_ThrowUnsupported,
+        ::testing::Values("GPU", "MULTI", "HETERO"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassGetAvailableDevices, IEClassGetAvailableDevices,
+        ::testing::Values("GPU"));
+
+//
+// IE Class GetConfig
+//
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassGetConfigTest, IEClassGetConfigTest,
+        ::testing::Values("GPU"));
+
+//
+// Executable Network GetMetric
+//
+
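+// "MULTI:GPU" and "HETERO:GPU" run the same checks through the MULTI and HETERO
+// meta-plugins with GPU as the sole underlying device.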
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS,
+        ::testing::Values("GPU", "MULTI:GPU", "HETERO:GPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
+        ::testing::Values("GPU", "MULTI:GPU", "HETERO:GPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
+        ::testing::Values("GPU", "MULTI:GPU", "HETERO:GPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_NETWORK_NAME,
+        ::testing::Values("GPU", "MULTI:GPU", "HETERO:GPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported,
+        ::testing::Values("GPU", "MULTI:GPU", "HETERO:GPU"));
+
+//
+// Executable Network GetConfig / SetConfig
+//
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassExecutableNetworkGetConfigTest, IEClassExecutableNetworkGetConfigTest,
+        ::testing::Values("GPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassExecutableNetworkSetConfigTest, IEClassExecutableNetworkSetConfigTest,
+        ::testing::Values("GPU"));
+
+//
+// Hetero Executable Network GetMetric
+//
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
+        ::testing::Values("GPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
+        ::testing::Values("GPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME,
+        ::testing::Values("GPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK,
+        ::testing::Values("GPU"));
+
+// IE Class Query network
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassQueryNetworkTest, IEClassQueryNetworkTest,
+        ::testing::Values("GPU"));
+
+// IE Class Load network
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassLoadNetworkTest, IEClassLoadNetworkTest,
+        ::testing::Values("GPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassLoadNetworkAfterCoreRecreateTest, IEClassLoadNetworkAfterCoreRecreateTest,
+        ::testing::Values("GPU"));
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/inference_engine_regression_tests/common_dyn_batch_regression.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/inference_engine_regression_tests/common_dyn_batch_regression.cpp
new file mode 100644 (file)
index 0000000..f70953f
--- /dev/null
@@ -0,0 +1,16 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "common_dyn_batch_regression.hpp"
+
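+// Each entry is presumably {device, upper-bound batch, batch to infer}: the network is
+// sized for the first batch value, then executed with the smaller second one.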
+std::vector<CommonDynBatchFuncTestParams> supportedDynBatchValues = {
+    { "GPU", 4, 3 },
+    { "GPU", 4, 2 },
+    { "GPU", 4, 1 },
+    { "GPU", 8, 5 },
+    { "GPU", 8, 4 },
+    { "GPU", 8, 3 },
+};
+
+INSTANTIATE_TEST_CASE_P(FunctionalTest_smoke, TestNoRegressionDynBatchFP32, ValuesIn(supportedDynBatchValues), getTestCaseName);
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/input_tests/parser_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/input_tests/parser_tests.cpp
new file mode 100644 (file)
index 0000000..7ba0088
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "parser_tests.hpp"
+
+ir_test_params ir_test_cases[] = {
+        ir_test_params("GPU", "FP16", negative_conv_kernel_x_case),
+        ir_test_params("GPU", "FP16", negative_conv_kernel_y_case),
+        ir_test_params("GPU", "FP16", negative_conv_stride_x_case),
+        ir_test_params("GPU", "FP16", negative_conv_weights_case),
+        ir_test_params("GPU", "FP16", negative_conv_biases_case),
+
+        ir_test_params("GPU", "FP16", negative_fc_out_size_case),
+        ir_test_params("GPU", "FP16", negative_fc_weights_case),
+        ir_test_params("GPU", "FP16", negative_fc_biases_case),
+
+        ir_test_params("GPU", "FP16", negative_deconv_kernel_x_case),
+        ir_test_params("GPU", "FP16", negative_deconv_kernel_y_case),
+        ir_test_params("GPU", "FP16", negative_deconv_stride_x_case),
+        ir_test_params("GPU", "FP16", negative_deconv_weights_case),
+        ir_test_params("GPU", "FP16", negative_deconv_biases_case),
+
+        ir_test_params("GPU", "FP16", negative_pool_kernel_x_case),
+        ir_test_params("GPU", "FP16", negative_pool_kernel_y_case),
+        ir_test_params("GPU", "FP16", negative_pool_stride_x_case),
+        ir_test_params("GPU", "FP16", incorrect_pool_type_case),
+
+        ir_test_params("GPU", "FP16", negative_norm_local_size_case),
+        ir_test_params("GPU", "FP16", negative_norm_k_case)
+};
+
+INSTANTIATE_TEST_CASE_P(FunctionalTest_smoke, IncorrectIRTests,
+        ::testing::ValuesIn(ir_test_cases),
+        getTestName);
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/io_blob_tests/cropResize_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/io_blob_tests/cropResize_tests.cpp
new file mode 100644 (file)
index 0000000..8c0f9e5
--- /dev/null
@@ -0,0 +1,209 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "cropResize_tests.hpp"
+
+#ifdef USE_OPENCV
+#define COMBINE_WITH_DEFAULT(_dims, _in_layouts, _color_formats) \
+    Combine(Values(Precision::FP32), \
+            Values(_dims), \
+            Values(std::make_pair(Precision::FP32, 1e-2), std::make_pair(Precision::U8, 1)), \
+            Values(_in_layouts), \
+            Values(ResizeAlgorithm::RESIZE_BILINEAR, ResizeAlgorithm::RESIZE_AREA), \
+            Values(_color_formats), \
+            Values(ROI({0, 40, 50, 220, 220})), \
+            Values(false, true))
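+
+// COMBINE_WITH_DEFAULT fixes the shared parameters (FP32 network precision, FP32/U8
+// output precisions with matching thresholds, both resize algorithms, one crop ROI,
+// and sync + async inference) and varies only dims, input layouts and color formats.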
+
+// test resize-only for all dims (as before)
+// test resize + color conversion for smaller number of dims (simple upscale/downscale scenarios only)
+namespace smoke {
+static auto params_resize_only = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS(1),
+    MULTI_VALUE(NCHW, NHWC),
+    COLOR_FORMATS_RAW);
+
+static auto params_csc_3ch_and_resize = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS_SMALL(1),
+    MULTI_VALUE(NCHW, NHWC),
+    COLOR_FORMATS_3CH);
+
+static auto params_csc_4ch_and_resize = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS_SMALL(1),
+    NHWC,
+    COLOR_FORMATS_4CH);
+
+// batch preprocessing parameters:
+static auto batch_params_resize_only = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS(2),
+    MULTI_VALUE(NCHW, NHWC),
+    COLOR_FORMATS_RAW);
+
+static auto batch_params_csc_3ch_and_resize = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS_SMALL(2),
+    MULTI_VALUE(NCHW, NHWC),
+    COLOR_FORMATS_3CH);
+
+static auto batch_params_csc_4ch_and_resize = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS_SMALL(2),
+    NHWC,
+    COLOR_FORMATS_4CH);
+}  // namespace smoke
+
+
+// test everything in nightly (as before)
+namespace nightly {
+static auto params_csc_3ch_and_resize = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS(1),
+    MULTI_VALUE(NCHW, NHWC),
+    MULTI_VALUE(COLOR_FORMATS_RAW, COLOR_FORMATS_3CH));
+
+
+static auto params_csc_4ch_and_resize = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS(1),
+    NHWC,
+    COLOR_FORMATS_4CH);
+
+// batch preprocessing parameters:
+static auto batch_params_csc_3ch_and_resize = COMBINE_WITH_DEFAULT(
+    MULTI_VALUE(TESTED_DIMS(2), TESTED_DIMS(3)),
+    MULTI_VALUE(NCHW, NHWC),
+    MULTI_VALUE(COLOR_FORMATS_RAW, COLOR_FORMATS_3CH));
+
+static auto batch_params_csc_4ch_and_resize = COMBINE_WITH_DEFAULT(
+    MULTI_VALUE(TESTED_DIMS(2), TESTED_DIMS(3)),
+    NHWC,
+    COLOR_FORMATS_4CH);
+}  // namespace nightly
+
+// reorder preprocessing parameters:
+static auto reorder_params = Combine(
+        Values(Precision::FP32),  // network precision
+        Values(SizeVector({1, 3, 300, 300})),  // sizes of the network
+        Values(std::make_pair(Precision::FP32, 1e-2), std::make_pair(Precision::U8, 1)),  // precision and threshold
+        Values(std::make_pair(NCHW, NHWC), std::make_pair(NHWC, NCHW)),  // Input/network data layout
+        Values(ResizeAlgorithm::NO_RESIZE),
+        Values(ColorFormat::BGR),
+        Values(ROI({0, 0, 0, 300, 300})),  // cropped ROI params (id, x, y, width, height)
+        Values(false, true)  // Infer mode sync/async
+);
+
+// nv12 preprocessing parameters:
+static auto nv12_params = Combine(
+        Values(Precision::FP32),  // network precision
+        Values(cv::Size(300, 300)),  // input image size
+        Values(TESTED_DIMS(1)),  // sizes of the network
+        Values(std::make_pair(Precision::U8, 1)),  // precision and threshold
+        Values(ResizeAlgorithm::RESIZE_BILINEAR, ResizeAlgorithm::RESIZE_AREA),
+        Values(ColorFormat::NV12),
+        Values(ROI({0, 0, 0, 300, 300}), ROI({0, 15, 10, 210, 210})),  // cropped ROI params (id, x, y, width, height)
+        Values(false, true)  // Infer mode sync/async
+);
+
+static auto random_roi_3c = Combine(
+            Values(Precision::FP32),
+            Values(TESTED_DIMS(1)),
+            Values(std::make_pair(Precision::FP32, 1e-2), std::make_pair(Precision::U8, 1)),
+            Values(MULTI_VALUE(NCHW, NHWC)),
+            Values(ResizeAlgorithm::RESIZE_BILINEAR, ResizeAlgorithm::RESIZE_AREA),
+            Values(COLOR_FORMATS_3CH),
+            Values(ROI({0, 0, 0, 0, 0})),
+            Values(false, true)
+);
+
+static auto random_roi_4c = Combine(
+            Values(Precision::FP32),
+            Values(TESTED_DIMS(1)),
+            Values(std::make_pair(Precision::FP32, 1e-2), std::make_pair(Precision::U8, 1)),
+            Values(NHWC),
+            Values(ResizeAlgorithm::RESIZE_BILINEAR, ResizeAlgorithm::RESIZE_AREA),
+            Values(COLOR_FORMATS_4CH),
+            Values(ROI({0, 0, 0, 0, 0})),
+            Values(false, true)
+);
+
+static auto random_roi_nv12 = Combine(
+            Values(Precision::FP32),
+            Values(TESTED_DIMS(1)),
+            Values(std::make_pair(Precision::U8, 1)),
+            Values(NHWC),
+            Values(ResizeAlgorithm::RESIZE_BILINEAR, ResizeAlgorithm::RESIZE_AREA),
+            Values(ColorFormat::NV12),
+            Values(ROI({0, 0, 0, 0, 0})),
+            Values(false, true)
+);
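+
+// The all-zero ROI in the random_roi_* sets above looks like a sentinel telling
+// RandomROITest to pick its own crop region rather than use a fixed one (an
+// assumption; the test header is not shown here).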
+
+// smoke:RandomROI
+PLUGING_CASE_WITH_SUFFIX(GPU, _gapi_random_roi_c3_smoke, RandomROITest, random_roi_3c);
+PLUGING_CASE_WITH_SUFFIX(GPU, _gapi_random_roi_c4_smoke, RandomROITest, random_roi_4c);
+PLUGING_CASE_WITH_SUFFIX(GPU, _gapi_random_roi_nv12_smoke, RandomROITest, random_roi_nv12);
+
+PLUGING_CASE_WITH_SUFFIX(GPU, _gapi_resize_only_smoke, CropResizeTest, smoke::params_resize_only);
+PLUGING_CASE_WITH_SUFFIX(GPU, _gapi_csc_3ch_and_resize_smoke, CropResizeTest, smoke::params_csc_3ch_and_resize);
+PLUGING_CASE_WITH_SUFFIX(GPU, _gapi_csc_4ch_and_resize_smoke, CropResizeTest, smoke::params_csc_4ch_and_resize);
+
+PLUGING_CASE_WITH_SUFFIX(GPU, _gapi_resize_only_smoke, DynamicBatchResizeTest, smoke::batch_params_resize_only);
+PLUGING_CASE_WITH_SUFFIX(GPU, _gapi_csc_3ch_and_resize_smoke, DynamicBatchResizeTest, smoke::batch_params_csc_3ch_and_resize);
+PLUGING_CASE_WITH_SUFFIX(GPU, _gapi_csc_4ch_and_resize_smoke, DynamicBatchResizeTest, smoke::batch_params_csc_4ch_and_resize);
+
+PLUGING_CASE_WITH_SUFFIX(GPU, _gapi_reorder_smoke, ReorderTest, reorder_params);
+
+//PLUGING_CASE_WITH_SUFFIX(GPU, _gapi_csc_nv12_and_resize_smoke, NV12ColorConvertTest, nv12_params);
+
+#if defined(ENABLE_MKL_DNN)
+    PLUGING_CASE_WITH_SUFFIX(HETERO, _gapi_random_roi_c3_smoke, RandomROITest, random_roi_3c);
+    PLUGING_CASE_WITH_SUFFIX(HETERO, _gapi_random_roi_c4_smoke, RandomROITest, random_roi_4c);
+    PLUGING_CASE_WITH_SUFFIX(HETERO, _gapi_random_roi_nv12_smoke, RandomROITest, random_roi_nv12);
+
+    PLUGING_CASE_WITH_SUFFIX(HETERO, _gapi_resize_only_smoke, CropResizeTest, smoke::params_resize_only);
+    PLUGING_CASE_WITH_SUFFIX(HETERO, _gapi_csc_3ch_and_resize_smoke, CropResizeTest, smoke::params_csc_3ch_and_resize);
+    PLUGING_CASE_WITH_SUFFIX(HETERO, _gapi_csc_4ch_and_resize_smoke, CropResizeTest, smoke::params_csc_4ch_and_resize);
+
+    PLUGING_CASE_WITH_SUFFIX(HETERO, _gapi_resize_only_smoke, BatchResizeTest, smoke::batch_params_resize_only);
+    PLUGING_CASE_WITH_SUFFIX(HETERO, _gapi_csc_3ch_and_resize_smoke, BatchResizeTest, smoke::batch_params_csc_3ch_and_resize);
+    PLUGING_CASE_WITH_SUFFIX(HETERO, _gapi_csc_4ch_and_resize_smoke, BatchResizeTest, smoke::batch_params_csc_4ch_and_resize);
+
+    PLUGING_CASE_WITH_SUFFIX(HETERO, _gapi_reorder_smoke, ReorderTest, reorder_params);
+
+//    PLUGING_CASE_WITH_SUFFIX(HETERO, _gapi_csc_nv12_and_resize_smoke, NV12ColorConvertTest, nv12_params);
+#endif
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// nightly:
+
+// FIXME: enable these once smoke/nightly concepts are introduced in CI
+PLUGING_CASE_WITH_SUFFIX(DISABLED_GPU, _gapi_random_roi_c3_nightly, RandomROITest, random_roi_3c);
+PLUGING_CASE_WITH_SUFFIX(DISABLED_GPU, _gapi_random_roi_c4_nightly, RandomROITest, random_roi_4c);
+PLUGING_CASE_WITH_SUFFIX(DISABLED_GPU, _gapi_random_roi_nv12_nightly, RandomROITest, random_roi_nv12);
+
+PLUGING_CASE_WITH_SUFFIX(DISABLED_GPU, _gapi_csc_3ch_and_resize_nightly, CropResizeTest, nightly::params_csc_3ch_and_resize);
+PLUGING_CASE_WITH_SUFFIX(DISABLED_GPU, _gapi_csc_4ch_and_resize_nightly, CropResizeTest, nightly::params_csc_4ch_and_resize);
+
+PLUGING_CASE_WITH_SUFFIX(DISABLED_GPU, _gapi_csc_3ch_and_resize_nightly, BatchResizeTest, nightly::batch_params_csc_3ch_and_resize);
+PLUGING_CASE_WITH_SUFFIX(DISABLED_GPU, _gapi_csc_4ch_and_resize_nightly, BatchResizeTest, nightly::batch_params_csc_4ch_and_resize);
+
+PLUGING_CASE_WITH_SUFFIX(DISABLED_GPU, _gapi_csc_3ch_and_resize_nightly, DynamicBatchResizeTest, nightly::batch_params_csc_3ch_and_resize);
+PLUGING_CASE_WITH_SUFFIX(DISABLED_GPU, _gapi_csc_4ch_and_resize_nightly, DynamicBatchResizeTest, nightly::batch_params_csc_4ch_and_resize);
+
+PLUGING_CASE_WITH_SUFFIX(DISABLED_GPU, _gapi_reorder_nightly, ReorderTest, reorder_params);
+
+PLUGING_CASE_WITH_SUFFIX(DISABLED_GPU, _gapi_csc_nv12_and_resize_nightly, NV12ColorConvertTest, nv12_params);
+
+#if defined(ENABLE_MKL_DNN)
+    PLUGING_CASE_WITH_SUFFIX(DISABLED_HETERO, _gapi_random_roi_c3_nightly, RandomROITest, random_roi_3c);
+    PLUGING_CASE_WITH_SUFFIX(DISABLED_HETERO, _gapi_random_roi_c4_nightly, RandomROITest, random_roi_4c);
+    PLUGING_CASE_WITH_SUFFIX(DISABLED_HETERO, _gapi_random_roi_nv12_nightly, RandomROITest, random_roi_nv12);
+
+    PLUGING_CASE_WITH_SUFFIX(DISABLED_HETERO, _gapi_csc_3ch_and_resize_nightly, CropResizeTest, nightly::params_csc_3ch_and_resize);
+    PLUGING_CASE_WITH_SUFFIX(DISABLED_HETERO, _gapi_csc_4ch_and_resize_nightly, CropResizeTest, nightly::params_csc_4ch_and_resize);
+
+    PLUGING_CASE_WITH_SUFFIX(DISABLED_HETERO, _gapi_csc_3ch_and_resize_nightly, BatchResizeTest, nightly::batch_params_csc_3ch_and_resize);
+    PLUGING_CASE_WITH_SUFFIX(DISABLED_HETERO, _gapi_csc_4ch_and_resize_nightly, BatchResizeTest, nightly::batch_params_csc_4ch_and_resize);
+
+    PLUGING_CASE_WITH_SUFFIX(DISABLED_HETERO, _gapi_reorder_nightly, ReorderTest, reorder_params);
+
+    PLUGING_CASE_WITH_SUFFIX(DISABLED_HETERO, _gapi_csc_nv12_and_resize_nightly, NV12ColorConvertTest, nv12_params);
+#endif
+
+#endif  // USE_OPENCV
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/io_blob_tests/dims_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/io_blob_tests/dims_tests.cpp
new file mode 100644 (file)
index 0000000..d8099ed
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "dims_tests.hpp"
+
+PLUGING_CASE_WITH_SUFFIX(GPU, _smoke, IO_BlobTest, params);
+
+#if defined(ENABLE_MKL_DNN)
+    PLUGING_CASE(HETERO, IO_BlobTest, params);
+#endif
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/io_blob_tests/layout_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/io_blob_tests/layout_tests.cpp
new file mode 100644 (file)
index 0000000..85ac546
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "layout_tests.hpp"
+
+static auto params = ::testing::Combine(
+        ::testing::Values(conv_p),
+        ::testing::Values(std::make_pair(Precision::FP32, 1e-5)),
+        ::testing::Values(NCHW, NHWC),
+        ::testing::Values(NCHW, NHWC),
+        ::testing::Values(Precision::FP32, Precision::U8, Precision::I16)  // TODO: What about U16/I8/FP16?
+);
+
+PLUGING_CASE_WITH_SUFFIX(GPU, _smoke, LayoutTTTest, params);
+
+#if defined(ENABLE_MKL_DNN)
+    PLUGING_CASE(HETERO, LayoutTTTest, params);
+#endif
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/lstm/lstm_cell_test.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/lstm/lstm_cell_test.cpp
new file mode 100644 (file)
index 0000000..20d264f
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "lstm_cell_test.hpp"
+
+RUN_CASE_P_WITH_SUFFIX(GPU, _smoke, LSTMCellTest, workload);
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/lstm/lstm_ir_test.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/lstm/lstm_ir_test.cpp
new file mode 100644 (file)
index 0000000..54b295e
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "lstm_ir_test.hpp"
+
+RUN_CASE_P_WITH_SUFFIX(GPU, _smoke, LSTM_IR_Test, workload);
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/lstm/rnn_seq_test.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/lstm/rnn_seq_test.cpp
new file mode 100644 (file)
index 0000000..d1f3101
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "rnn_seq_test.hpp"
+
+RUN_CASE_CP_WITH_SUFFIX(GPU, _smoke, RNNSeqTest, workload);
+
+RUN_CASE_CP_WITH_SUFFIX(GPU, _smoke_seq, RNNSeqTest, dyn_seq_workload);
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/activation_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/activation_tests.cpp
new file mode 100644 (file)
index 0000000..ef6c44e
--- /dev/null
@@ -0,0 +1,18 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "activation_tests.hpp"
+
+activation_test_params test_cases[] = {
+        activation_test_params("GPU", case_1, "relu"),
+        activation_test_params("GPU", case_1, "exp"),
+        activation_test_params("GPU", case_1, "not"),
+        activation_test_params("GPU", case_1, "sin"),
+        activation_test_params("GPU", case_1, "sinh"),
+        activation_test_params("GPU", case_1, "cos"),
+        activation_test_params("GPU", case_1, "cosh"),
+};
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_GPU_TestsActivationFunctions, ActivationTest, ::testing::ValuesIn(test_cases), getTestCaseName);
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/arg_max_min_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/arg_max_min_tests.cpp
new file mode 100644 (file)
index 0000000..d5f2cbe
--- /dev/null
@@ -0,0 +1,84 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "arg_max_min_tests.hpp"
+
+static std::vector<float> in_data = { 0.0f, 1.0f,
+                                  20.0f, 12.0f,
+
+                                  12.0f, 0.0f,
+                                  15.0f, 8.0f,
+
+                                  9.0f, 4.0f,
+                                  25.0f, 15.0f,
+
+
+                                  0.0f, 0.0f,
+                                  1.0f, 1.0f,
+
+                                  0.0f, 0.0f,
+                                  24.0f, 12.0f,
+
+                                  8.0f, 9.0f,
+                                  2.0f, 14.0f };
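+
+// 24 values in total, matching the { 2, 3, 2, 2 } input dims used by every case below.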
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_GPU_TestsArgMaxMin, ArgMaxMinTFTests,
+        ::testing::Values(
+                // Params: device_name, in_dim, in_data, has_axis, out_max_val, top_k, axis, ref_dim
+                argMaxMinTF_test_params{ "GPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 1, 0, { 1, 3, 2, 2 } },
+
+                argMaxMinTF_test_params{ "GPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 1, 1, { 2, 1, 2, 2 } },
+
+                argMaxMinTF_test_params{ "GPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 1, 2, { 2, 3, 1, 2 } },
+
+                argMaxMinTF_test_params{ "GPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 1, 3, { 2, 3, 2, 1 } },
+
+                argMaxMinTF_test_params{ "GPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 2, 0, { 2, 3, 2, 2 } },
+
+                argMaxMinTF_test_params{ "GPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 2, 1, { 2, 2, 2, 2 } },
+
+                argMaxMinTF_test_params{ "GPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 2, 2, { 2, 3, 2, 2 } },
+
+                argMaxMinTF_test_params{ "GPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 2, 3, { 2, 3, 2, 2 } },
+
+                argMaxMinTF_test_params{ "GPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 3, 1, { 2, 3, 2, 2 } },
+
+
+                argMaxMinTF_test_params{ "GPU", "ArgMin", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 1, 0, { 1, 3, 2, 2 } },
+
+                argMaxMinTF_test_params{ "GPU", "ArgMin", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 1, 1, { 2, 1, 2, 2 } },
+
+                argMaxMinTF_test_params{ "GPU", "ArgMin", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 1, 2, { 2, 3, 1, 2 } },
+
+                argMaxMinTF_test_params{ "GPU", "ArgMin", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 1, 3, { 2, 3, 2, 1 } },
+
+                argMaxMinTF_test_params{ "GPU", "ArgMin", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 2, 0, { 2, 3, 2, 2 } },
+
+                argMaxMinTF_test_params{ "GPU", "ArgMin", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 2, 1, { 2, 2, 2, 2 } },
+
+                argMaxMinTF_test_params{ "GPU", "ArgMin", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 2, 2, { 2, 3, 2, 2 } },
+
+                argMaxMinTF_test_params{ "GPU", "ArgMin", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 2, 3, { 2, 3, 2, 2 } },
+
+                argMaxMinTF_test_params{ "GPU", "ArgMin", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 3, 1, { 2, 3, 2, 2 } }
+        ));
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/bin_conv_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/bin_conv_tests.cpp
new file mode 100644 (file)
index 0000000..acad533
--- /dev/null
@@ -0,0 +1,28 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "bin_conv_tests.hpp"
+
+bin_conv_test_params bin_conv_only_test_cases[] = {
+        bin_conv_test_params("GPU", case_1),
+        bin_conv_test_params("GPU", case_2),
+        bin_conv_test_params("GPU", case_3),
+        bin_conv_test_params("GPU", case_4),
+        bin_conv_test_params("GPU", case_5),
+        bin_conv_test_params("GPU", case_6),
+        bin_conv_test_params("GPU", case_7),
+        bin_conv_test_params("GPU", case_8),
+        bin_conv_test_params("GPU", case_9),
+        // BinaryConvolutions with groups are not supported in clDNN at this moment
+        // bin_conv_test_params("GPU", case_10),
+        // bin_conv_test_params("GPU", case_11),
+        // bin_conv_test_params("GPU", case_12),
+        // bin_conv_test_params("GPU", case_13),
+        bin_conv_test_params("GPU", case_14),
+        bin_conv_test_params("GPU", case_15),
+        bin_conv_test_params("GPU", case_16)
+};
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_GPU_TestBinaryConvolution, BinaryConvolutionOnlyTest, ::testing::ValuesIn(bin_conv_only_test_cases), getTestCaseName);
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/conv_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/conv_tests.cpp
new file mode 100644 (file)
index 0000000..dcde1d0
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "conv_tests.hpp"
+
+conv_test_params conv_only_test_cases[] = {
+        conv_test_params("GPU", case_1),
+        conv_test_params("GPU", case_2),
+        conv_test_params("GPU", case_3),
+        conv_test_params("GPU", case_4),
+        conv_test_params("GPU", case_5),
+        conv_test_params("GPU", case_6),
+        conv_test_params("GPU", case_7),
+        conv_test_params("GPU", case_8),
+        conv_test_params("GPU", case_9),
+        conv_test_params("GPU", case_10),
+        conv_test_params("GPU", case_11),
+        conv_test_params("GPU", case_12),
+        conv_test_params("GPU", case_13),
+        conv_test_params("GPU", case_14)
+};
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_GPU_TestConvolution, ConvolutionOnlyTest, ::testing::ValuesIn(conv_only_test_cases), getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_GPU_TestConvolutionBlobsAsInputs, ConvolutionBlobsAsInputsTest, ::testing::ValuesIn(conv_only_test_cases), getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_GPU_TestConvolutionSameUpper, ConvolutionReshapeTest,
+        ::testing::Values(conv_test_params("GPU", case_si_1)),
+        getTestCaseName);
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/deformable_psroipooling_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/deformable_psroipooling_tests.cpp
new file mode 100644 (file)
index 0000000..5192b77
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "deformable_psroi_tests.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_TestDeformable, DeformablePSROIOnlyTest,
+        ::testing::Values(
+                deformable_psroi_test_params{"GPU", {1, 7938, 38, 38}, {300, 5}, {300, 162, 7, 7},
+                                             0.0625, 162, 7, 7, 7, 7, 4, true
+                },
+                deformable_psroi_test_params{"GPU", {1, 392, 38, 38}, {300, 5}, {300, 8, 7, 7},
+                                             0.0625, 8, 7, 7, 7, 7, 4, false, 0.1, {300, 2, 7, 7}
+                },
+                deformable_psroi_test_params{"GPU", {1, 98, 38, 38}, {300, 5}, {300, 2, 7, 7},
+                                             0.0625, 2, 7, 7, 7, 7, 4, true
+                },
+                deformable_psroi_test_params{"GPU", {1, 3969, 38, 38}, {300, 5}, {300, 81, 7, 7},
+                                             0.0625, 81, 7, 7, 7, 7, 4, false, 0.1, {300, 162, 7, 7}
+                }
+        ));
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/depth_to_space_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/depth_to_space_tests.cpp
new file mode 100644 (file)
index 0000000..1c70704
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "depth_to_space_tests.hpp"
+
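+// DepthToSpace with block size b maps { N, C, H, W } to { N, C / (b * b), H * b, W * b };
+// each expected output shape below encodes exactly that.
+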
+INSTANTIATE_TEST_CASE_P(
+        smoke_GPU_TestsDepthToSpace, DepthToSpaceTests,
+        ::testing::Values(
+        depth_to_space_test_params{ "GPU", "FP32", { 1, 4, 1, 1 }, 2, { 1, 1, 2, 2 } },
+        depth_to_space_test_params{ "GPU", "FP32", { 1, 4, 2, 1 }, 2, { 1, 1, 4, 2 } },
+        depth_to_space_test_params{ "GPU", "FP32", { 1, 4, 2, 2 }, 2, { 1, 1, 4, 4 } },
+        depth_to_space_test_params{ "GPU", "FP32", { 1, 4, 3, 2 }, 2, { 1, 1, 6, 4 } },
+        depth_to_space_test_params{ "GPU", "FP32", { 1, 9, 3, 3 }, 3, { 1, 1, 9, 9 } },
+        depth_to_space_test_params{ "GPU", "FP32", { 1, 18, 3, 3 }, 3, { 1, 2, 9, 9 } },
+        depth_to_space_test_params{ "GPU", "FP32", { 1, 4, 2048, 512 }, 2, { 1, 1, 4096, 1024 } }
+));
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/eltwise_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/eltwise_tests.cpp
new file mode 100644 (file)
index 0000000..53f68a2
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "eltwise_tests.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_GPU_TestEltwise, EltwiseOnlyTest,
+    ::testing::Values(
+        eltwise_test_params{"GPU", {13, 13, 1}, eltwise_test_params::Sum, 5},
+        eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Max, 3},
+        eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Prod, 3},
+        eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Sub, 3},
+        eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Min, 7},
+        eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Div, 2},
+        eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Squared_diff, 2},
+        eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Equal, 2},
+        eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Not_equal, 2},
+        eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Less, 2},
+        eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Less_equal, 2},
+        eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Greater, 2},
+        eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Greater_equal, 2},
+        eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Logical_AND, 3},
+        eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Logical_OR, 4},
+        eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Logical_XOR, 4},
+        eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Floor_mod, 2},
+        eltwise_test_params{"GPU", {23, 23, 1}, eltwise_test_params::Pow, 2}
+        // TODO: Add tests for 1D/2D/3D blobs
+));
+
+/*** TBD ***/
+
+
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/gather_ftests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/gather_ftests.cpp
new file mode 100644 (file)
index 0000000..37b61ee
--- /dev/null
@@ -0,0 +1,24 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gather_tests.hpp"
+
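+// Param order, as read off the cases below: device, precision, indices shape, indices
+// data, dictionary shape, dictionary data, axis, output shape, reference output.
+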
+INSTANTIATE_TEST_CASE_P(
+        smoke_GPU_TestsGather, GatherTFTests,
+        ::testing::Values(
+        gatherTF_test_params{ "GPU", "FP32", { 1, 4 }, in0,{ 2, 2 }, dict2D, 0, { 1, 4, 2 }, ref_in0_a0_d22 },
+        gatherTF_test_params{ "GPU", "FP32", { 2, 2 }, in0,{ 2, 2, 3 }, dict, 0, { 2, 2, 2, 3 }, ref_in0_a0_d223 },
+        gatherTF_test_params{ "GPU", "FP32", { 2, 2 }, in0,{ 2, 2, 3 }, dict,-3, { 2, 2, 2, 3 }, ref_in0_a0_d223 },
+
+        gatherTF_test_params{ "GPU", "FP32", { 2, 2 }, in1,{ 3, 2, 2 }, dict, 0, { 2, 2, 2, 2 }, ref_in1_a0_d322 },
+        gatherTF_test_params{ "GPU", "FP32", { 2, 2 }, in1,{ 3, 2, 2 }, dict,-3, { 2, 2, 2, 2 }, ref_in1_a0_d322 },
+        gatherTF_test_params{ "GPU", "FP32", { 2, 2 }, in1,{ 2, 3, 2 }, dict, 1, { 2, 2, 2, 2 }, ref_in1_a1_d232 },
+        gatherTF_test_params{ "GPU", "FP32", { 2, 2 }, in1,{ 2, 3, 2 }, dict,-2, { 2, 2, 2, 2 }, ref_in1_a1_d232 },
+
+        gatherTF_test_params{ "GPU", "FP32", { 2, 2 }, in1,{ 2, 2, 3 }, dict, 2, { 2, 2, 2, 2 }, ref_in1_a2_d223 },
+        gatherTF_test_params{ "GPU", "FP32", { 2, 2 }, in1,{ 2, 2, 3 }, dict,-1, { 2, 2, 2, 2 }, ref_in1_a2_d223 },
+        gatherTF_test_params{ "GPU", "FP32", { 2, 2 }, in0,{ 2, 3, 2 }, dict, 2, { 2, 3, 2, 2 }, ref_in0_a2_d232 },
+        gatherTF_test_params{ "GPU", "FP32", { 2, 2 }, in0,{ 2, 3, 2 }, dict,-1, { 2, 3, 2, 2 }, ref_in0_a2_d232 },
+        gatherTF_test_params{ "GPU", "FP32", { 2, 2 }, in0,{ 2, 3, 2 }, dict, 2, { 2, 3, 2, 2 }, ref_in0_a2_d232 }
+));
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/gemm_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/gemm_tests.cpp
new file mode 100644 (file)
index 0000000..b6c487b
--- /dev/null
@@ -0,0 +1,34 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gemm_tests.hpp"
+
+gemm_base_params gemm_smoke_cases[] = {
+    case8, case16, case24, case32,
+    case47
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_GPU_GemmRandomTest, GemmRandomTest,
+    testing::Combine(
+        testing::Values("GPU"),
+        testing::Values("FP32", "FP16"),
+        testing::ValuesIn(gemm_smoke_cases)
+));
+
+gemm_base_params gemm_all_cases[] = {
+    case1,  case2,  case3,  case4,  case5,  case6,  case7,
+    case9,  case10, case11, case12, case13, case14, case15,
+    case17, case18, case19, case20, case21, case22, case23,
+    case25, case26, case27, case28, case29, case30, case31,
+    case33, case34, case35, case36, case37, case38,
+    case39, case40, case41, case42, case43, case44,
+    case45, case46
+};
+
+INSTANTIATE_TEST_CASE_P(nightly_GPU_GemmRandomTest, GemmRandomTest,
+    testing::Combine(
+        testing::Values("GPU"),
+        testing::Values("FP32", "FP16"),
+        testing::ValuesIn(gemm_all_cases)
+));
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/one_hot_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/one_hot_tests.cpp
new file mode 100644 (file)
index 0000000..bb02c58
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "one_hot_tests.hpp"
+
+one_hot_test_params one_hot_test_cases[] = {
+        one_hot_test_params("GPU", case_2d_0),
+        one_hot_test_params("GPU", case_2d_1),
+        one_hot_test_params("GPU", case_2d_2),
+        one_hot_test_params("GPU", case_3d_0),
+        one_hot_test_params("GPU", case_3d_1),
+        one_hot_test_params("GPU", case_3d_2),
+        one_hot_test_params("GPU", case_4d_0),
+        one_hot_test_params("GPU", case_4d_1),
+        one_hot_test_params("GPU", case_4d_2),
+        one_hot_test_params("GPU", case_4d_3),
+        one_hot_test_params("GPU", case_5d_0),
+        one_hot_test_params("GPU", case_5d_1),
+        one_hot_test_params("GPU", case_5d_2),
+        one_hot_test_params("GPU", case_5d_3),
+        one_hot_test_params("GPU", case_5d_4)
+};
+
+INSTANTIATE_TEST_CASE_P(nightly_TestsOneHot, OneHotOnlyTestShared, ::testing::ValuesIn(one_hot_test_cases));
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/pad_ftests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/pad_ftests.cpp
new file mode 100644 (file)
index 0000000..8792ae9
--- /dev/null
@@ -0,0 +1,10 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "pad_tests.hpp"
+
+PLUGING_CASE(GPU, PadTFTests, 1, { 3, 4 }, in, { 2, 2 }, { 1, 3 }, "constant", 0.f, { 6, 9 },  ref_constant);
+PLUGING_CASE(GPU, PadTFTests, 2, { 3, 4 }, in, { 2, 2 }, { 1, 3 },     "edge", 0.f, { 6, 9 },      ref_edge);
+PLUGING_CASE(GPU, PadTFTests, 3, { 3, 4 }, in, { 2, 2 }, { 1, 3 },  "reflect", 0.f, { 6, 9 },   ref_reflect);
+PLUGING_CASE(GPU, PadTFTests, 4, { 3, 4 }, in, { 2, 2 }, { 1, 3 }, "symmetric", 0.f, { 6, 9 }, ref_symmetric);
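+
+// Output dims are input + pads_begin + pads_end per axis: { 3 + 2 + 1, 4 + 2 + 3 } = { 6, 9 }.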
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/permute_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/permute_tests.cpp
new file mode 100644 (file)
index 0000000..aecba48
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "permute_tests.hpp"
+
+permute_test_params permute_only_test_cases[] = {
+        permute_test_params("GPU", case_1),
+        permute_test_params("GPU", case_2),
+        permute_test_params("GPU", case_3),
+        permute_test_params("GPU", case_4),
+        permute_test_params("GPU", case_5),
+        permute_test_params("GPU", case_6),
+        permute_test_params("GPU", case_7),
+        permute_test_params("GPU", case_8),
+        permute_test_params("GPU", case_9),
+        permute_test_params("GPU", case_10),
+        permute_test_params("GPU", case_11),
+        permute_test_params("GPU", case_12),
+        permute_test_params("GPU", case_13),
+        permute_test_params("GPU", case_14),
+        permute_test_params("GPU", case_15),
+        permute_test_params("GPU", case_16)
+};
+
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_GPU_TestPermute, PermuteOnlyTests, ::testing::ValuesIn(permute_only_test_cases));
+
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/quantize_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/quantize_tests.cpp
new file mode 100644 (file)
index 0000000..d8fddac
--- /dev/null
@@ -0,0 +1,34 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "quantize_tests.hpp"
+
+quantize_test_params quantize_only_test_cases[] = {
+        quantize_test_params{"GPU", case_1},
+        quantize_test_params{"GPU", case_2},
+        quantize_test_params{"GPU", case_3},
+        quantize_test_params{"GPU", case_4},
+        quantize_test_params{"GPU", case_5},
+        quantize_test_params{"GPU", case_6},
+        quantize_test_params{"GPU", case_7},
+        quantize_test_params{"GPU", case_8},
+        quantize_test_params{"GPU", case_9},
+        quantize_test_params{"GPU", case_10},
+        quantize_test_params{"GPU", case_11},
+        quantize_test_params{"GPU", case_12},
+        quantize_test_params{"GPU", case_13},
+        quantize_test_params{"GPU", case_14},
+        quantize_test_params{"GPU", case_15},
+        quantize_test_params{"GPU", case_16},
+        quantize_test_params{"GPU", case_17},
+        quantize_test_params{"GPU", case_18},
+        quantize_test_params{"GPU", case_19},
+        quantize_test_params{"GPU", case_20},
+        quantize_test_params{"GPU", case_21},
+        quantize_test_params{"GPU", case_22},
+        quantize_test_params{"GPU", case_23},
+        quantize_test_params{"GPU", case_24},
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_GPU_TestQuantize, QuantizeOnlyTest, ::testing::ValuesIn(quantize_only_test_cases));
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/reduce_ftests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/reduce_ftests.cpp
new file mode 100644 (file)
index 0000000..29009c9
--- /dev/null
@@ -0,0 +1,80 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "reduce_tests.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_GPU_TestsReduceSum, ReduceTestsShared,
+        ::testing::Values(
+        // Params: library_name, reduce_type, keep_dims, in_shape, input_tensor, axes_for_reduction, out_shape, reference
+        reduce_test_params{ "GPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ 0 },{ 1, 3, 4 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ -3 },{ 1, 3, 4 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ 2 },{ 2, 3, 1 },{ 10, 26, 42, 58, 74, 90 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceSum", true,{ 2, 3, 4, 1, 1 },{},{ 2 },{ 2, 3, 1, 1, 1 },{ 10, 26, 42, 58, 74, 90 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ -1 },{ 2, 3, 1 },{ 10, 26, 42, 58, 74, 90 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ 0, 2 },{ 1, 3, 1 },{ 68, 100, 132 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ 1, 2 },{ 2, 1, 1 },{ 78, 222 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ 2, 1 },{ 2, 1, 1 },{ 78, 222 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ 0, 1, 2 },{},{ 300 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ 0, -2, 2 },{},{ 300 } },
+        reduce_test_params{ "GPU", "I32", "ReduceSum", true,{ 2, 3, 4 },{},{ 2, 2, 0, 2, 0 },{ 1, 3, 1 },{ 68, 100, 132 } },
+        reduce_test_params{ "GPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 0 },{ 3, 4 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } },
+        reduce_test_params{ "GPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ -3 },{ 3, 4 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } },
+        reduce_test_params{ "GPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 2 },{ 2, 3 },{ 10, 26, 42, 58, 74, 90 } },
+        reduce_test_params{ "GPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ -1 },{ 2, 3 },{ 10, 26, 42, 58, 74, 90 } },
+        reduce_test_params{ "GPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 0, 2 },{ 3 },{ 68, 100, 132 } },
+        reduce_test_params{ "GPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 1, 2 },{ 2 },{ 78, 222 } },
+        reduce_test_params{ "GPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 2, 1 },{ 2 },{ 78, 222 } },
+        reduce_test_params{ "GPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 0, 1, 2 },{},{ 300 } },
+        reduce_test_params{ "GPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 0, -2, 2 },{},{ 300 } },
+        reduce_test_params{ "GPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 2, 2, 0, 2, 0 },{ 3 },{ 68, 100, 132 } },
+        reduce_test_params{ "GPU", "I32", "ReduceSum", true,{ 1, 2, 3, 4, 1 },{},{ 1 },{ 1, 1, 3, 4, 1 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } },
+        reduce_test_params{ "GPU", "I32", "ReduceSum", false,{ 1, 2, 3, 4, 1 },{},{ 1 },{ 1, 3, 4, 1 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } }
+));
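+
+// Where input_tensor is left empty, the references are consistent with an ascending
+// 1..N fill of the input (e.g. ReduceSum over all axes of the 2x3x4 input gives
+// sum(1..24) = 300); the fill itself comes from the shared test, not this file.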
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_GPU_TestsReduce, ReduceTestsShared,
+        ::testing::Values(
+        // Params: library_name, reduce_type, keep_dims, in_shape, input_tensor, axes_for_reduction, out_shape, reference
+        reduce_test_params{ "GPU", "FP32", "ReduceAnd", true,{ 2, 2, 2 },{1, 0, 1, 1, 0, 1, 1, 0},{ 2 },{ 2, 2, 1 },{ 0, 1, 0, 0} },
+        reduce_test_params{ "GPU", "FP32", "ReduceAnd", false, { 2, 2, 2 },{1, 0, 1, 1, 0, 1, 1, 0},{ 0, 1, 2 },{ },{ 0 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceL1", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{ } },
+        reduce_test_params{ "GPU", "FP32", "ReduceL1", true, { 3, 2, 2 },{},{ 2 },{ 3, 2, 1 },{ 3, 7, 11, 15, 19, 23 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceL1", false, { 3, 2, 2 },{},{ 2 },{ 3, 2 },{ 3, 7, 11, 15, 19, 23 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceL1", false, { 3, 2, 2 },{},{ 0, 1, 2 },{ },{ 78 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceL2", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{} },
+        reduce_test_params{ "GPU", "FP32", "ReduceL2", true,{ 3, 2, 2 },{},{ 2 },{ 3, 2, 1 },{ 2.23606798f, 5.f, 7.81024968f, 10.63014581f, 13.45362405f, 16.2788206f } },
+        reduce_test_params{ "GPU", "FP32", "ReduceL2", false,{ 3, 2, 2 },{},{ 2 },{ 3, 2 },{ 2.23606798f, 5.f, 7.81024968f, 10.63014581f, 13.45362405f, 16.2788206f } },
+        reduce_test_params{ "GPU", "FP32", "ReduceL2", false,{ 3, 2, 2 },{},{ 0, 1, 2 },{ },{ 25.49509757f } },
+        reduce_test_params{ "GPU", "FP32", "ReduceLogSum", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{} },
+        reduce_test_params{ "GPU", "FP32", "ReduceLogSum", true,{ 3, 2, 2 },{ },{ 1 },{ 3, 1, 2 },{ } },
+        reduce_test_params{ "GPU", "FP32", "ReduceLogSum", false,{ 3, 2, 2 },{ },{ 1 },{ 3, 2 },{ } },
+        reduce_test_params{ "GPU", "FP32", "ReduceLogSum", false,{ 3, 2, 2 },{ },{ 0, 1, 2 },{},{ } },
+        reduce_test_params{ "GPU", "FP32", "ReduceLogSumExp", true,{ 5, 5, 2 },{},{ 2 },{ 5, 5, 1 },{} },
+        reduce_test_params{ "GPU", "FP32", "ReduceLogSumExp", true,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 1, 2 },{ 20.f, 2.31326175f, 40.00004578f, 2.31326175f, 60.00671387f, 2.31326175f } },
+        reduce_test_params{ "GPU", "FP32", "ReduceLogSumExp", false,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 2 },{ 20.f, 2.31326175f, 40.00004578f, 2.31326175f, 60.00671387f, 2.31326175f } },
+        reduce_test_params{ "GPU", "FP32", "ReduceLogSumExp", false,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 0, 1, 2 },{},{ 60.00671387f } },
+        reduce_test_params{ "GPU", "FP32", "ReduceMax", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{} },
+        reduce_test_params{ "GPU", "FP32", "ReduceMax", true,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 1, 2 },{ 20, 2, 40, 2, 60, 2 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceMax", false,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 2 },{ 20, 2, 40, 2, 60, 2 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceMax", false,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 0, 1, 2 },{},{ 60 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceMean", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{} },
+        reduce_test_params{ "GPU", "FP32", "ReduceMean", true, { 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 1, 2 },{ 12.5f, 1.5f, 35.f, 1.5f, 57.5f, 1.5f } },
+        reduce_test_params{ "GPU", "FP32", "ReduceMean", false, { 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 2 },{ 12.5f, 1.5f, 35.f, 1.5f, 57.5f, 1.5f } },
+        reduce_test_params{ "GPU", "FP32", "ReduceMean", false, { 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 0, 1, 2 },{ },{ 18.25f } },
+        reduce_test_params{ "GPU", "FP32", "ReduceMin", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{} },
+        reduce_test_params{ "GPU", "FP32", "ReduceMin", true,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 1, 2 },{ 5, 1, 30, 1, 55, 1 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceMin", false,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 2 },{ 5, 1, 30, 1, 55, 1 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceMin", false,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 0, 1, 2 },{},{ 1 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceOr", true,{ 2, 2, 2 },{1, 0, 1, 1, 0, 0, 1, 0},{ 2 },{ 2, 2, 1 },{1, 1, 0, 1 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceOr", false, { 2, 2, 2 },{},{ 0, 1, 2 },{ },{ 1 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceProd", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{} },
+        reduce_test_params{ "GPU", "FP32", "ReduceProd", true,{ 3, 2, 2 },{},{ 1 },{ 3, 1, 2 },{ 3, 8, 35, 48, 99, 120 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceProd", false,{ 3, 2, 2 },{},{ 1 },{ 3, 2 },{ 3, 8, 35, 48, 99, 120 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceProd", false,{ 3, 2, 2 },{},{ 0, 1, 2 },{ },{ 4.790016e+08 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceSumSquare", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{} },
+        reduce_test_params{ "GPU", "FP32", "ReduceSumSquare", true, { 3, 2, 2 },{},{ 1 },{ 3, 1, 2 },{ 10, 20, 74, 100, 202, 244 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceSumSquare", false, { 3, 2, 2 },{},{ 1 },{ 3, 2 },{ 10, 20, 74, 100, 202, 244 } },
+        reduce_test_params{ "GPU", "FP32", "ReduceSumSquare", false, { 3, 2, 2 },{},{ 0, 1, 2 },{ },{ 650 } }
+));
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/resample_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/resample_tests.cpp
new file mode 100644 (file)
index 0000000..cac8c94
--- /dev/null
@@ -0,0 +1,45 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "resample_tests.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        TestsResample, ResampleTests,
+        ::testing::Values(
+                // 4D nearest
+                resample_test_params{"GPU", {2, 64, 15, 25}, 1.f,   "caffe.ResampleParameter.NEAREST"},
+                resample_test_params{"GPU", {2, 64, 10, 20}, 0.25f, "caffe.ResampleParameter.NEAREST"},
+                resample_test_params{"GPU", {1, 1, 10, 20},  0.5f,  "caffe.ResampleParameter.NEAREST"},
+                resample_test_params{"GPU", {2, 3, 15, 25},  1.f,   "caffe.ResampleParameter.NEAREST"},
+                resample_test_params{"GPU", {2, 3, 10, 20},  0.25f, "caffe.ResampleParameter.NEAREST"},
+                resample_test_params{"GPU", {1, 1, 10, 13},  0.52f, "caffe.ResampleParameter.NEAREST"},
+                // 4D linear
+                resample_test_params{"GPU", {2, 64, 15, 25}, 1.f,   "caffe.ResampleParameter.LINEAR"},
+                resample_test_params{"GPU", {2, 64, 10, 20}, 0.25f, "caffe.ResampleParameter.LINEAR"},
+                resample_test_params{"GPU", {1, 1, 15, 25},  0.5f,  "caffe.ResampleParameter.LINEAR"},
+                resample_test_params{"GPU", {1, 3, 15, 25},  0.5f,  "caffe.ResampleParameter.LINEAR"},
+                resample_test_params{"GPU", {2, 5, 3, 3},    3.0f,  "caffe.ResampleParameter.LINEAR"},
+                resample_test_params{"GPU", {2, 4, 10, 20},  2.0f,  "caffe.ResampleParameter.LINEAR"},
+                resample_test_params{"GPU", {2, 20, 30, 30}, 3.0f,  "caffe.ResampleParameter.LINEAR"},
+                resample_test_params{"GPU", {2, 20, 3, 6},   3.0f,  "caffe.ResampleParameter.LINEAR"},
+                // 5D nearest
+                resample_test_params{ "GPU", {1, 64, 20, 15, 25}, 1.f,   "caffe.ResampleParameter.NEAREST" },
+                resample_test_params{ "GPU", {1, 64, 15, 10, 20}, 0.25f, "caffe.ResampleParameter.NEAREST" },
+                resample_test_params{ "GPU", {1, 64, 10, 10, 20}, 0.5f,  "caffe.ResampleParameter.NEAREST" },
+                resample_test_params{ "GPU", {1, 3, 20, 15, 25},  1.f,   "caffe.ResampleParameter.NEAREST" },
+                resample_test_params{ "GPU", {1, 3, 15, 10, 20},  0.25f, "caffe.ResampleParameter.NEAREST" },
+                resample_test_params{ "GPU", {2, 64, 20, 15, 25}, 1.f,   "caffe.ResampleParameter.NEAREST" },
+                resample_test_params{ "GPU", {2, 64, 15, 10, 20}, 0.25f, "caffe.ResampleParameter.NEAREST" },
+                resample_test_params{ "GPU", {2, 64, 10, 10, 20}, 0.5f,  "caffe.ResampleParameter.NEAREST" },
+                resample_test_params{ "GPU", {2, 3, 20, 15, 25},  1.f,   "caffe.ResampleParameter.NEAREST" },
+                resample_test_params{ "GPU", {2, 3, 15, 10, 20},  0.25f, "caffe.ResampleParameter.NEAREST" },
+                // 5D linear
+                resample_test_params{ "GPU", {1, 8, 5, 2, 4},     0.2f,  "caffe.ResampleParameter.LINEAR" },
+                resample_test_params{ "GPU", {1, 8, 10, 10, 20},  0.25f, "caffe.ResampleParameter.LINEAR" },
+                resample_test_params{ "GPU", {1, 2, 16, 12, 20},  4.f,   "caffe.ResampleParameter.LINEAR" },
+                resample_test_params{ "GPU", {2, 16, 15, 10, 20}, 1.f,   "caffe.ResampleParameter.LINEAR" },
+                resample_test_params{ "GPU", {2, 2, 4, 10, 20},   0.25f, "caffe.ResampleParameter.LINEAR" },
+                resample_test_params{ "GPU", {2, 4, 15, 10, 20},  1.f,   "caffe.ResampleParameter.LINEAR" },
+                resample_test_params{ "GPU", {2, 8, 16, 12, 20},  4.f,   "caffe.ResampleParameter.LINEAR" },
+                resample_test_params{ "GPU", {2, 16, 10, 10, 20}, 0.25f, "caffe.ResampleParameter.LINEAR" }));
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/softmax_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/softmax_tests.cpp
new file mode 100644 (file)
index 0000000..03e3667
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "softmax_tests.hpp"
+
+softmax_test_params softmax_only_test_cases[] = {
+        softmax_test_params("GPU", case_1),
+        softmax_test_params("GPU", case_8),
+        softmax_test_params("GPU", case_8_nc, "2D"),
+};
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_GPU_TestsSoftmax, SoftmaxOnlyTest, ::testing::ValuesIn(softmax_only_test_cases)/*, getTestCaseName*/);
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/space_to_depth_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/space_to_depth_tests.cpp
new file mode 100644 (file)
index 0000000..a827f16
--- /dev/null
@@ -0,0 +1,18 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "space_to_depth_tests.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_GPU_TestsSpaceToDepth, SpaceToDepthTests,
+        ::testing::Values(
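+        // Params: plugin_name, precision, in_shape, mode, block_size, out_shape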
+        space_to_depth_test_params{ "GPU", "FP32", { 1, 1, 6, 4 }, "blocks_first", 2, { 1, 4, 3, 2 } },
+        space_to_depth_test_params{ "GPU", "FP32", { 1, 1, 9, 9 }, "blocks_first", 3, { 1, 9, 3, 3 } },
+        space_to_depth_test_params{ "GPU", "FP32", { 1, 2, 9, 9 }, "blocks_first", 3, { 1, 18, 3, 3 } },
+        space_to_depth_test_params{ "GPU", "FP32", { 1, 10, 4096, 1024 }, "blocks_first", 4, { 1, 160, 1024, 256 } },
+        space_to_depth_test_params{ "GPU", "FP32", { 1, 1, 6, 4 }, "depth_first", 2, { 1, 4, 3, 2 } },
+        space_to_depth_test_params{ "GPU", "FP32", { 1, 1, 9, 9 }, "depth_first", 3, { 1, 9, 3, 3 } },
+        space_to_depth_test_params{ "GPU", "FP32", { 1, 2, 9, 9 }, "depth_first", 3, { 1, 18, 3, 3 } },
+        space_to_depth_test_params{ "GPU", "FP32", { 1, 10, 4096, 1024 }, "depth_first", 4, { 1, 160, 1024, 256 } }
+));
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/ti_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/ti_tests.cpp
new file mode 100644 (file)
index 0000000..55b714f
--- /dev/null
@@ -0,0 +1,12 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "ti_tests.hpp"
+
+ti_test_params ti_test_cases[] = {{"GPU", 1, InferenceEngine::Precision(InferenceEngine::Precision::FP32)},
+                                  {"GPU", 1, InferenceEngine::Precision(InferenceEngine::Precision::FP16)}};
+
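+// RUN_CASE_P_WITH_SUFFIX (declared in ti_tests.hpp) instantiates the suite over
+// ti_test_cases, composing the instantiation name from the prefix and suffix.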
+RUN_CASE_P_WITH_SUFFIX(GPU, _smoke, TITest, ti_test_cases);
+
+RUN_CASE_P_WITH_SUFFIX(GPU, _smoke, TITest2, ti_test_cases);
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/topk_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/single_layer_tests/topk_tests.cpp
new file mode 100644 (file)
index 0000000..db0d9fe
--- /dev/null
@@ -0,0 +1,85 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "topk_tests.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_GPU_TestsTopK, topk_test_fp32,
+        ::testing::Values(
+                // Params: plugin_name, in_shape, axis, src_k, sort, mode, out_shape, precision
+                topk_test_params{ "GPU", { 3, 4 }, -1,{ 1 }, "value", "max",{ 3, 1 }, Precision::FP32},
+                topk_test_params{ "GPU", { 3, 4 },  0,{ 1 }, "value", "max",{ 1, 4 }, Precision::FP32},
+                topk_test_params{ "GPU", { 3, 4 }, -1,{ 1 }, "value", "min",{ 3, 1 }, Precision::FP32},
+                topk_test_params{ "GPU", { 3, 4 },  0,{ 1 }, "value", "min",{ 1, 4 }, Precision::FP32},
+                topk_test_params{ "GPU", { 2, 3, 128, 256 }, 1,{ 1 }, "value", "max",{ 2, 1, 128, 256 }, Precision::FP32},
+                topk_test_params{ "GPU", { 3, 5, 128, 256 }, 1,{ 1 }, "index", "max",{ 3, 1, 128, 256 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 3, 129, 257 }, 1,{ 1 }, "value", "max",{ 1, 1, 129, 257 }, Precision::FP32},
+                topk_test_params{ "GPU", { 2, 5, 129, 257 }, 1,{ 1 }, "index", "max",{ 2, 1, 129, 257 }, Precision::FP32},
+                topk_test_params{ "GPU", { 3, 4 }, -1,{ 3 }, "value", "max",{ 3, 3 }, Precision::FP32},
+                topk_test_params{ "GPU", { 3, 4 }, -1,{ 3 }, "value", "min",{ 3, 3 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 5, 1, 2 }, 1,{ 3 }, "value", "max",{ 1, 3, 1, 2 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 5, 1, 2 }, 1,{ 3 }, "value", "min",{ 1, 3, 1, 2 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 5, 1, 2 }, 1,{ 3 }, "index", "min",{ 1, 3, 1, 2 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 5, 1, 2 }, 1,{ 3 }, "index", "min",{ 1, 3, 1, 2 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 20, 12, 12 }, 1,{ 18 }, "value", "min",{ 1, 18, 12, 12 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 20, 129, 129 }, 1,{ 3 }, "value", "max",{ 1, 3, 129, 129 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "value", "max",{ 1, 2, 2, 3 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "index", "max",{ 1, 2, 2, 3 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "value", "min",{ 1, 2, 2, 3 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "index", "min",{ 1, 2, 2, 3 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 1 }, "value", "max",{ 1, 2, 2, 1 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 1 }, "index", "max",{ 1, 2, 2, 1 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 2, 4, 2 }, 2,{ 3 }, "value", "max",{ 1, 2, 3, 2 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 2, 4, 2 }, 2,{ 3 }, "index", "max",{ 1, 2, 3, 2 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 2, 4, 2 }, 2,{ 3 }, "value", "min",{ 1, 2, 3, 2 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 2, 4, 2 }, 2,{ 3 }, "index", "min",{ 1, 2, 3, 2 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "index", "min",{ 1, 2, 2, 3 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "index", "max",{ 1, 2, 2, 3 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "value", "min",{ 1, 2, 2, 3 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "value", "max",{ 1, 2, 2, 3 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 20, 32, 32 }, 1,{ 18 }, "index", "max",{ 1, 18, 32, 32 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 20, 129, 129 }, 1,{ 18 }, "index", "max",{ 1, 18, 129, 129 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 20, 32, 32 }, 1,{ 18 }, "index", "min",{ 1, 18, 32, 32 }, Precision::FP32},
+                topk_test_params{ "GPU", { 1, 20, 129, 129 }, 1,{ 18 }, "index", "min",{ 1, 18, 129, 129 }, Precision::FP32}
+        ));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_GPU_TestsTopK, topk_test_int32,
+        ::testing::Values(
+                // Params: plugin_name, in_shape, axis, src_k, sort, mode, out_shape, precision
+                topk_test_params{ "GPU", { 3, 4 }, -1,{ 1 }, "value", "max",{ 3, 1 }, Precision::I32},
+                topk_test_params{ "GPU", { 3, 4 },  0,{ 1 }, "value", "max",{ 1, 4 }, Precision::I32},
+                topk_test_params{ "GPU", { 3, 4 }, -1,{ 1 }, "value", "min",{ 3, 1 }, Precision::I32},
+                topk_test_params{ "GPU", { 3, 4 },  0,{ 1 }, "value", "min",{ 1, 4 }, Precision::I32},
+                topk_test_params{ "GPU", { 2, 3, 128, 256 }, 1,{ 1 }, "value", "max",{ 2, 1, 128, 256 }, Precision::I32},
+                topk_test_params{ "GPU", { 3, 5, 128, 256 }, 1,{ 1 }, "index", "max",{ 3, 1, 128, 256 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 3, 129, 257 }, 1,{ 1 }, "value", "max",{ 1, 1, 129, 257 }, Precision::I32},
+                topk_test_params{ "GPU", { 2, 5, 129, 257 }, 1,{ 1 }, "index", "max",{ 2, 1, 129, 257 }, Precision::I32},
+                topk_test_params{ "GPU", { 3, 4 }, -1,{ 3 }, "value", "max",{ 3, 3 }, Precision::I32},
+                topk_test_params{ "GPU", { 3, 4 }, -1,{ 3 }, "value", "min",{ 3, 3 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 5, 1, 2 }, 1,{ 3 }, "value", "max",{ 1, 3, 1, 2 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 5, 1, 2 }, 1,{ 3 }, "value", "min",{ 1, 3, 1, 2 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 5, 1, 2 }, 1,{ 3 }, "index", "min",{ 1, 3, 1, 2 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 5, 1, 2 }, 1,{ 3 }, "index", "min",{ 1, 3, 1, 2 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 20, 12, 12 }, 1,{ 18 }, "value", "min",{ 1, 18, 12, 12 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 20, 129, 129 }, 1,{ 3 }, "value", "max",{ 1, 3, 129, 129 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "value", "max",{ 1, 2, 2, 3 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "index", "max",{ 1, 2, 2, 3 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "value", "min",{ 1, 2, 2, 3 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "index", "min",{ 1, 2, 2, 3 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 1 }, "value", "max",{ 1, 2, 2, 1 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 1 }, "index", "max",{ 1, 2, 2, 1 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 2, 4, 2 }, 2,{ 3 }, "value", "max",{ 1, 2, 3, 2 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 2, 4, 2 }, 2,{ 3 }, "index", "max",{ 1, 2, 3, 2 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 2, 4, 2 }, 2,{ 3 }, "value", "min",{ 1, 2, 3, 2 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 2, 4, 2 }, 2,{ 3 }, "index", "min",{ 1, 2, 3, 2 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "index", "min",{ 1, 2, 2, 3 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "index", "max",{ 1, 2, 2, 3 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "value", "min",{ 1, 2, 2, 3 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 2, 2, 4 }, 3,{ 3 }, "value", "max",{ 1, 2, 2, 3 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 20, 32, 32 }, 1,{ 18 }, "index", "max",{ 1, 18, 32, 32 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 20, 129, 129 }, 1,{ 18 }, "index", "max",{ 1, 18, 129, 129 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 20, 32, 32 }, 1,{ 18 }, "index", "min",{ 1, 18, 32, 32 }, Precision::I32},
+                topk_test_params{ "GPU", { 1, 20, 129, 129 }, 1,{ 18 }, "index", "min",{ 1, 18, 129, 129 }, Precision::I32}
+        ));
diff --git a/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/transformations/low_precision_single_layers_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/transformations/low_precision_single_layers_tests.cpp
new file mode 100644 (file)
index 0000000..4ec3122
--- /dev/null
@@ -0,0 +1,437 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+#include <gtest/gtest.h>
+#include <string>
+#include <memory>
+
+using namespace ::testing;
+using namespace InferenceEngine;
+
+
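+// The body is intentionally empty: model construction, transformation and the
+// accuracy checks all run in the shared SingleLayerTransformationsTest fixture
+// from low_precision_transformer_single_layer_tests.hpp.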
+TEST_P(SingleLayerTransformationsTest, LPT) {
+}
+
+INSTANTIATE_TEST_CASE_P(
+        SingleLayerTransformationsTestFP32,
+        SingleLayerTransformationsTest,
+        ::testing::Values(
+                //SingleLayerTransformationsTestParams(
+                //    "GPU",
+                //    SingleLayerTestModel::Ptr(new FullyConnectedAndScaleShiftsOnActivationsTestModel()),
+                //    { { 1, 2048 } },
+                //    { { 1, 1000 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnSignedActivationsAndWeightsPositiveTestModel()),
+                        { { 1, 32, 149, 149 } },
+                        { { 1, 32, 147, 147 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnSignedActivationsAndWeightsNegativeTestModel()),
+                        { { 1, 32, 149, 149 } },
+                        { { 1, 32, 147, 147 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnUnsignedActivationsAndWeightsTestModel()),
+                        { { 1, 32, 149, 149 } },
+                        { { 1, 32, 147, 147 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnSignedActivationsAndInvertedWeightsTestModel()),
+                        { { 1, 32, 149, 149 } },
+                        { { 1, 32, 147, 147 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new FakeQuantizeReshapePoolingTestModelWithConstants()),
+                        { { 1, 1280, 7 } },
+                        { { 1, 1280, 7 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new FakeQuantizeReshapePoolingTestModelWithoutConstants()),
+                        { { 1, 1280, 7 } },
+                        { { 1, 1280, 7 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new FullyConnectedAndQuantizeTestModel()),
+                        { { 1, 32, 1, 1 } },
+                        { { 1, 32, 1, 1 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new FullyConnectedAndScaleShiftsOnActivationsTestModel()),
+                        { { 1, 2048 } },
+                        { { 1, 1000 } }),
+
+//                SingleLayerTransformationsTestParams(
+//                        "GPU",
+//                        SingleLayerTestModel::Ptr(new GemmAndQuantizeTestModel()),
+//                        { { 1, 32, 149, 149 } },
+//                        { { 1, 32, 147, 147 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new PoolingTestModel()),
+                        { { 149, 149, 32, 1 } },
+                        { { 149, 149, 32, 1 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnWeightsWithMultiOutputIntervalsTestModel()),
+                        { { 1, 32, 147, 147 } },
+                        { { 1, 64, 147, 147 } }),
+
+                // Const transformation is disabled
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnWeightsWithoutConstTransformationTestModel()),
+                        { { 1, 32, 149, 149 } },
+                        { { 1, 32, 147, 147 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConvolutionAndPoolingAndQuantizeOnActivationsTestModel()),
+                        { { 1, 64, 147, 147 } },
+                        { { 1, 80, 73,  73  } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnActivationsTestModel()),
+                        { { 1, 3,  299, 299 } },
+                        { { 1, 32, 149, 149 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConvolutionAndDequantizationScaleShiftsOnActivationsTestModel()),
+                        { { 1, 3,  299, 299 } },
+                        { { 1, 32, 149, 149 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConvolutionAndDequantizationScaleShiftAndQuantizeOnActivationsTestModel()),
+                        { { 1, 3, 299, 299 } },
+                        { { 1, 32, 149, 149 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConvolutionDepthwiseTestModel()),
+                        { { 1, 32, 112, 112 } },
+                        { { 1, 32, 112, 112 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConvolutionGroupedTestModel()),
+                        { { 1, 32, 112, 112 } },
+                        { { 1, 32, 112, 112 } }),
+
+//                SingleLayerTransformationsTestParams(
+//                        "GPU",
+//                        SingleLayerTestModel::Ptr(new EltwiseTestModel()),
+//                        { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+//                        { { 1, 3, 299, 299 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new EltwiseCpuTestModel()),
+                        { { 1, 3, 299, 299 } },
+                        { { 1, 3, 299, 299 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConcatTestModel(true, true, true)),
+                        { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+                        { { 1, 6, 299, 299 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConcatTestModel(true, true, false)),
+                        { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+                        { { 1, 6, 299, 299 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConcatTestModel(false)),
+                        { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+                        { { 1, 6, 299, 299 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConcatMultiChannelTestModel()),
+                        { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+                        { { 1, 6, 299, 299 } }),
+
+                //SingleLayerTransformationsTestParams(
+                //    "GPU",
+                //    SingleLayerTestModel::Ptr(new ConcatMultiBranchTestModel()),
+                //    { { 299, 299, 3, 1 }, { 299, 299, 3, 1 } },
+                //    { { 299, 299, 12, 1 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new QuantizationOnWeightsTestModel()),
+                        { { 1, 32, 149, 149 } },
+                        { { 1, 32, 147, 147 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new QuantizationOnInvertedWeightsTestModel()),
+                        { { 1, 32, 149, 149 } },
+                        { { 1, 32, 147, 147 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new FakeQuantizeAsOutputTest()),
+                        { { 1, 32, 149, 149 } },
+                        { { 1, 32, 147, 147 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new FakeQuantizeWithMultiOutputsTest()),
+                        { { 1, 32, 149, 149 } },
+                        { { 1, 32, 147, 147 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new FakeQuantizeAndScaleShiftTestModel()),
+                        { { 1, 3, 299, 299 } },
+                        { { 1, 3, 299, 299 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new FakeQuantizeAndActivationTestModel({ {-10.25, 10.1641} })),
+                        { { 1, 3, 299, 299 } },
+                        { { 1, 3, 299, 299 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new FakeQuantizeAndActivationTestModel({ {-0.00174255, 0.00174255} })),
+                        { { 1, 3, 299, 299 } },
+                        { { 1, 3, 299, 299 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new FakeQuantizeAndActivationTestModel({ {-329.688, 327.188} })),
+                        { { 1, 3, 299, 299 } },
+                        { { 1, 3, 299, 299 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new FakeQuantizeAndActivationWithNegativeScalesTestModel()),
+                        { { 1, 3, 299, 299 } },
+                        { { 1, 3, 299, 299 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new FakeQuantizeAndActivationWithNegativeSlopeTestModel()),
+                        { { 1, 3, 299, 299 } },
+                        { { 1, 3, 299, 299 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ScaleShiftAndFakeQuantizeTestModel()),
+                        { { 1, 3, 299, 299 } },
+                        { { 1, 3, 299, 299 } })
+
+        ),
+        SingleLayerTransformationsTestParams::getLowPrecisionTransformerSingleLayerTestName);
+
+
+INSTANTIATE_TEST_CASE_P(
+        SingleLayerTransformationsTestFP16,
+        SingleLayerTransformationsTest,
+        ::testing::Values(
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new FullyConnectedAndScaleShiftsOnActivationsTestModel()),
+                        { { 1, 2048 } },
+                        { { 1, 1000 } },
+                        "FP16"),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new FullyConnectedAndQuantizeTestModel()),
+                        { { 1, 32, 1, 1 } },
+                        { { 1, 32, 1, 1 } },
+                        "FP16"),
+
+                // TODO: uncomment after fix
+                //SingleLayerTransformationsTestParams(
+                //    "GPU",
+                //    SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnSignedActivationsAndWeightsTestModel()),
+                //    { { 1, 32, 149, 149 } },
+                //    { { 1, 32, 147, 147 } },
+                //    "FP16"),
+
+                // TODO: uncomment after fix
+                //SingleLayerTransformationsTestParams(
+                //    "GPU",
+                //    SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnUnsignedActivationsAndWeightsTestModel()),
+                //    { { 1, 32, 149, 149 } },
+                //    { { 1, 32, 147, 147 } },
+                //    "FP16"),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new FakeQuantizeReshapePoolingTestModelWithConstants()),
+                        { { 1, 1280, 7 } },
+                        { { 1, 1280, 7 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new FakeQuantizeReshapePoolingTestModelWithoutConstants()),
+                        { { 1, 1280, 7 } },
+                        { { 1, 1280, 7 } }),
+
+
+                // Not parametrized yet; executed on FP32.
+
+                //SingleLayerTransformationsTestParams(
+                //    "GPU",
+                //    SingleLayerTestModel::Ptr(new FullyConnectedAndQuantizeTestModel()),
+                //    { { 1, 32, 149, 149 } },
+                //    { { 1, 32, 147, 147 } },
+                //    "FP16"),
+
+                //SingleLayerTransformationsTestParams(
+                //    "GPU",
+                //    SingleLayerTestModel::Ptr(new GemmAndQuantizeTestModel()),
+                //    { { 1, 32, 149, 149 } },
+                //    { { 1, 32, 147, 147 } },
+                //    "FP16"),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new PoolingTestModel()),
+                        { { 149, 149, 32, 1 } },
+                        { { 149, 149, 32, 1 } },
+                        "FP16"),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnWeightsWithMultiOutputIntervalsTestModel()),
+                        { { 1, 32, 147, 147 } },
+                        { { 1, 64, 147, 147 } },
+                        "FP16"),
+
+                // TODO: uncomment after fix
+                //SingleLayerTransformationsTestParams(
+                //    "GPU",
+                //    SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnWeightsWithoutConstTransformationTestModel()),
+                //    { { 1, 32, 149, 149 } },
+                //    { { 1, 32, 147, 147 } },
+                //    "FP16"),
+
+                // TODO: uncomment after fix
+                //SingleLayerTransformationsTestParams(
+                //    "GPU",
+                //    SingleLayerTestModel::Ptr(new ConvolutionAndPoolingAndQuantizeOnActivationsTestModel()),
+                //    { { 1, 64, 147, 147 } },
+                //    { { 1, 80, 73,  73  } },
+                //    "FP16"),
+
+                // TODO: uncomment after fix
+                //SingleLayerTransformationsTestParams(
+                //    "GPU",
+                //    SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnActivationsTestModel()),
+                //    { { 1, 3,  299, 299 } },
+                //    { { 1, 32, 149, 149 } },
+                //    "FP16"),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConvolutionAndDequantizationScaleShiftsOnActivationsTestModel()),
+                        { { 1, 3,  299, 299 } },
+                        { { 1, 32, 149, 149 } },
+                        "FP16"),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConvolutionAndDequantizationScaleShiftAndQuantizeOnActivationsTestModel()),
+                        { { 1, 3, 299, 299 } },
+                        { { 1, 32, 149, 149 } },
+                        "FP16"),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConvolutionDepthwiseTestModel()),
+                        { { 1, 32, 112, 112 } },
+                        { { 1, 32, 112, 112 } },
+                        "FP16"),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConvolutionGroupedTestModel()),
+                        { { 1, 32, 112, 112 } },
+                        { { 1, 32, 112, 112 } }),
+
+//                SingleLayerTransformationsTestParams(
+//                        "GPU",
+//                        SingleLayerTestModel::Ptr(new EltwiseTestModel()),
+//                        { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+//                        { { 1, 3, 299, 299 } },
+//                        "FP16"),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new EltwiseCpuTestModel()),
+                        { { 1, 3, 299, 299 } },
+                        { { 1, 3, 299, 299 } }),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConcatTestModel(true)),
+                        { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+                        { { 1, 6, 299, 299 } },
+                        "FP16"),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConcatTestModel(false)),
+                        { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+                        { { 1, 6, 299, 299 } },
+                        "FP16"),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new ConcatMultiChannelTestModel()),
+                        { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+                        { { 1, 6, 299, 299 } }),
+
+                //SingleLayerTransformationsTestParams(
+                //    "GPU",
+                //    SingleLayerTestModel::Ptr(new ConcatMultiBranchTestModel()),
+                //    { { 299, 299, 3, 1 }, { 299, 299, 3, 1 } },
+                //    { { 299, 299, 12, 1 } },
+                //    "FP16"),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new QuantizationOnWeightsTestModel()),
+                        { { 1, 32, 149, 149 } },
+                        { { 1, 32, 147, 147 } },
+                        "FP16"),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new QuantizationOnInvertedWeightsTestModel()),
+                        { { 1, 32, 149, 149 } },
+                        { { 1, 32, 147, 147 } },
+                        "FP16"),
+
+                SingleLayerTransformationsTestParams(
+                        "GPU",
+                        SingleLayerTestModel::Ptr(new FakeQuantizeAndScaleShiftTestModel()),
+                        { { 1, 3, 299, 299 } },
+                        { { 1, 3, 299, 299 } },
+                        "FP16")
+        ),
+        SingleLayerTransformationsTestParams::getLowPrecisionTransformerSingleLayerTestName);
diff --git a/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/convert_like_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/convert_like_tests.cpp
new file mode 100644 (file)
index 0000000..1840231
--- /dev/null
@@ -0,0 +1,149 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+#include <cmath>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace std;
+
+
+struct convert_like_test_params {
+    std::string device_name;
+    std::string inPrecision;
+    std::string likePrecision;
+    InferenceEngine::SizeVector in_out_shape;
+    InferenceEngine::SizeVector like_shape;
+};
+
+
+
+class ConvertLikeTest : public TestsCommon, public WithParamInterface<convert_like_test_params> {
+    std::string model_t = R"V0G0N(
+<net Name="ConvertLike_net" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="input" type="Input" precision="_INP_" id="0">
+            <output>
+                <port id="0">
+                    _IN_OUT_
+                </port>
+            </output>
+        </layer>
+        <layer name="like" type="Input" precision="_LKP_" id="1">
+            <output>
+                <port id="0">
+                    _LIKE_
+                </port>
+            </output>
+        </layer>
+        <layer name="output" type="ConvertLike" precision="_LKP_" id="2">
+            <input>
+                <port id="0">
+                    _IN_OUT_
+                </port>
+                <port id="1">
+                    _LIKE_
+                </port>
+            </input>
+            <output>
+                <port id="2">
+                    _IN_OUT_
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
+        <edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
+    </edges>
+</net>
+)V0G0N";
+
+
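+    // Builds the IR for one case: _INP_/_LKP_ are replaced with the input and
+    // "like" precisions, _IN_OUT_/_LIKE_ with the corresponding <dim> lists.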
+    std::string getModel(convert_like_test_params p) {
+        std::string model = model_t;
+        std::string in_out_shape, like_shape;
+
+        for (size_t i = 0; i < p.in_out_shape.size(); i++) {
+            in_out_shape += "<dim>";
+            in_out_shape += std::to_string(p.in_out_shape[i]);
+            in_out_shape += "</dim>\n";
+        }
+
+        for (size_t i = 0; i < p.like_shape.size(); i++) {
+            like_shape += "<dim>";
+            like_shape += std::to_string(p.like_shape[i]);
+            like_shape += "</dim>\n";
+        }
+
+        REPLACE_WITH_STR(model, "_INP_", p.inPrecision);
+        REPLACE_WITH_STR(model, "_LKP_", p.likePrecision);
+        REPLACE_WITH_STR(model, "_IN_OUT_", in_out_shape);
+        REPLACE_WITH_STR(model, "_LIKE_", like_shape);
+
+        return model;
+    }
+
+protected:
+    virtual void TearDown() {
+    }
+
+    virtual void SetUp() {
+        try
+        {
+            convert_like_test_params p = ::testing::WithParamInterface<convert_like_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, InferenceEngine::Blob::CPtr());
+
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+
+            // Input Data
+            InputsDataMap inputInfo(net.getInputsInfo());
+            Blob::Ptr input1 = inferRequest.GetBlob("input");
+            input1->allocate();
+            Blob::Ptr input2 = inferRequest.GetBlob("like");
+            input2->allocate();
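+            // The blob contents stay unset on purpose: this test only verifies
+            // that the output precision matches the "like" input's precision.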
+
+            inferRequest.Infer();
+
+            // Output Data
+            OutputsDataMap outputInfo(net.getOutputsInfo());
+            Blob::Ptr outputBlob = inferRequest.GetBlob(outputInfo.begin()->first);
+            auto outputPrecision = outputBlob->getTensorDesc().getPrecision();
+            auto likePrecision = input2->getTensorDesc().getPrecision();
+
+            if (outputPrecision != likePrecision)
+            {
+                FAIL() << "Different output and like precision!";
+            }
+
+        }
+        catch (const InferenceEngine::details::InferenceEngineException &e)
+        {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(ConvertLikeTest, smoke_GPU_TestsConvertLike) {}
+
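+// The "like" shape deliberately differs from the data shape: ConvertLike takes
+// only the element type from its second input, never its dimensions.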
+INSTANTIATE_TEST_CASE_P(
+    smoke_TestsConvertLike, ConvertLikeTest,
+    ::testing::Values(
+        convert_like_test_params{ "GPU", "FP32", "I32", { 3, 5 }, { 2 } },
+        convert_like_test_params{ "GPU", "FP32", "I32", { 10, 10, 10, 5 }, { 2 } },
+        convert_like_test_params{ "GPU", "FP32", "I32", { 3, 5 }, { 2, 4, 5 } },
+        convert_like_test_params{ "GPU", "FP32", "FP16", { 3, 5 }, { 2 } },
+        convert_like_test_params{ "GPU", "I32", "FP16", { 3, 5 }, { 2 } },
+        convert_like_test_params{ "GPU", "I32", "FP32", { 3, 5 }, { 2 } }
+));
diff --git a/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/expand_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/expand_tests.cpp
new file mode 100644 (file)
index 0000000..7833f9a
--- /dev/null
@@ -0,0 +1,165 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+#include <cmath>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+
+
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace std;
+
+
+struct broadcast_test_params {
+    std::string device_name;
+    InferenceEngine::SizeVector in_dim;
+    InferenceEngine::SizeVector out_dim;
+    std::vector<float> ref;
+};
+
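+// The expected Broadcast output is precomputed per test case; this helper only
+// copies the reference vector into the destination blob.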
+template<typename data_t>
+void ref_broadcast(InferenceEngine::TBlob<data_t> &dsts, broadcast_test_params &prm) {
+    data_t *dst_data = dsts.buffer().template as<data_t*>();
+    for (size_t i = 0; i < prm.ref.size(); ++i)
+        dst_data[i] = prm.ref[i];
+}
+
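+// Packs the target shape into a U8 weights blob; the Const layer "input2" in the
+// IR template below reads each output dimension back as a uint32 value.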
+InferenceEngine::TBlob<uint8_t>::Ptr generateWeights(const SizeVector &data) {
+    TensorDesc td(InferenceEngine::Precision::U8, { data.size() * sizeof(uint32_t) }, InferenceEngine::C );
+    TBlob<uint8_t>::Ptr weights;
+    weights = make_shared_blob<uint8_t>(td);
+    weights->allocate();
+    auto wb = weights->buffer().as<uint32_t*>();
+    for (size_t i = 0; i < data.size(); i++) {
+        wb[i] = data[i];
+    }
+    return weights;
+}
+
+
+class BroadcastTests : public TestsCommon, public WithParamInterface<broadcast_test_params> {
+    std::string model_t = R"V0G0N(
+<net Name="broadcast" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="input" type="Input" precision="FP32" id="0">
+            <output>
+                <port id="0">
+                    _IN_
+                </port>
+            </output>
+        </layer>
+        <layer name="input2" type="Const" precision="FP32" id="1">
+            <output>
+                <port id="0">
+                    <dim>4</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="0" size="4"/>
+            </blobs>
+        </layer>
+        <layer name="broadcast" id="2" type="Broadcast" precision="FP32">
+            <input>
+                <port id="0">
+                    _IN_
+                </port>
+                <port id="1">
+                    <dim>4</dim>
+                </port>
+            </input>
+            <output>
+                <port id="1">
+                    _OUT_
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
+        <edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string getModel(broadcast_test_params p) {
+        // Substitute into a copy so the template member stays intact for other cases.
+        std::string model = model_t;
+        std::string in, out;
+
+        for (auto& i : p.in_dim) {
+            in += "<dim>" + std::to_string(i) + "</dim>\n";
+        }
+
+        for (auto& o : p.out_dim) {
+            out += "<dim>" + std::to_string(o) + "</dim>\n";
+        }
+
+        REPLACE_WITH_STR(model, "_IN_", in);
+        REPLACE_WITH_STR(model, "_OUT_", out);
+
+        return model;
+    }
+
+protected:
+    virtual void TearDown() {
+    }
+
+    virtual void SetUp() {
+        try {
+            TestsCommon::SetUp();
+            broadcast_test_params p = ::testing::WithParamInterface<broadcast_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, generateWeights(p.out_dim));
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+
+            // Input Data
+            InputsDataMap inputInfo(net.getInputsInfo());
+            Blob::Ptr inputBlob = inferRequest.GetBlob(inputInfo.begin()->first);
+            float* inputData = inputBlob->buffer().as<float*>();
+            fill_data_dbgval(inputData, inputBlob->size());
+
+            inferRequest.Infer();
+
+            // Output Data
+            OutputsDataMap outputInfo(net.getOutputsInfo());
+            Blob::Ptr outputBlob = inferRequest.GetBlob(outputInfo.begin()->first);
+
+            size_t outSz = outputBlob->size();
+            // Output Reference
+            InferenceEngine::TBlob<float> dst_ref(outputBlob->getTensorDesc());
+            dst_ref.allocate();
+            ref_broadcast<float>(dst_ref, p);
+
+            const float* res = outputBlob->buffer().as<float*>();
+            const float* ref = dst_ref.data();
+            compare(res, ref, outSz);
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(BroadcastTests, smoke_GPU_TestsBroadcast) {}
+
+//  Test data vectors
+std::vector<float> broadcast_ref0 = { 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f };
+std::vector<float> broadcast_ref1 = { 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 2.f, 2.f, 2.f, 2.f, 2.f, 2.f,
+                                      0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 2.f, 2.f, 2.f, 2.f, 2.f, 2.f};
+std::vector<float> broadcast_ref2 = { 0.f, 0.f, 0.f, 0.f,
+                                      1.f, 1.f, 1.f, 1.f,
+                                      2.f, 2.f, 2.f, 2.f};
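+// broadcast_ref1, for example, follows from fill_data_dbgval: the { 1, 1, 3, 1 }
+// input holds 0, 1, 2; broadcasting to { 1, 2, 3, 6 } repeats each value six
+// times along the last axis and duplicates the whole block along axis 1.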
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_TestsBroadcast, BroadcastTests,
+        ::testing::Values(
+                broadcast_test_params{ "GPU", { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, broadcast_ref0 },
+                broadcast_test_params{ "GPU", { 1, 1, 3, 1 }, { 1, 2, 3, 6 }, broadcast_ref1 },
+                broadcast_test_params{ "GPU", { 1, 1, 3, 1 }, { 1, 1, 3, 4 }, broadcast_ref2 }
+        ));
diff --git a/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/gather_tree_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/gather_tree_tests.cpp
new file mode 100644 (file)
index 0000000..cbde1f3
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gather_tree_tests.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+    smoke_GPU_TestsGatherTree, GatherTreeTests,
+    ::testing::Values(
+        // Params: in_out_shape, step_idx, parent_idx, max_seq_len, end_token, reference, device_name
+        gather_tree_test_params{ {3, 2, 3 }, {1, 2, 3, 2, 3, 4, 4, 5, 6, 5, 6, 7, 7, 8, 9, 8, 9, 10}, {0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 0, 2, 1, 2, 2, 1, 1},
+                                  {3, 3 }, {11}, {2, 2, 2, 2, 4, 4, 6, 5, 6, 7, 6, 6, 7, 8, 9, 8, 9, 10}, "GPU"},
+        gather_tree_test_params{ {4, 1, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1}, {0, 0, 0, 0, 1, 1, 2, 1, 2, -1, -1, -1},
+                                  {3}, {10}, {2, 2, 2, 6, 5, 6, 7, 8, 9, 10, 10, 10}, "GPU"},
+        gather_tree_test_params{ {4, 1, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10}, {0, 0, 0, 0, 1, 1, 2, 1, 2, 1, 1, 1},
+                                  {4}, {10}, {2, 2, 2, 5, 5, 5, 8, 8, 8, 10, 10, 10}, "GPU"},
+        gather_tree_test_params{ {5, 1, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 10, 3, 2, 10, 10}, {0, 0, 0, 0, 1, 1, 2, 1, 2, 1, 1, 1, 2, 0, 1},
+                                  {5}, {10}, {2, 2, 2, 5, 5, 5, 8, 8, 8, 3, 1, 10, 2, 10, 10}, "GPU"},
+        gather_tree_test_params{ {4, 2, 3}, {1, 2, 3, 2, 3, 4, 4, 5, 6, 5, 6, 7, 7, 8, 9, 8, 9, 10, 0, 0, 0, 11, 12, 0},
+                                  {0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 2, 1, 2, 2, 0, 1, -1, -1, -1, 0, 1, 0},
+                                  {3, 4}, {11}, {2, 2, 2, 2, 3, 2, 6, 5, 6, 7, 5, 7, 7, 8, 9, 8, 9, 8, 11, 11, 11, 11, 12, 0}, "GPU"}
+));
diff --git a/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/power_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/power_tests.cpp
new file mode 100644 (file)
index 0000000..72614b4
--- /dev/null
@@ -0,0 +1,137 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+#include <cmath>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+
+
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace std;
+
+
+struct test_params {
+    std::string device_name;
+    std::string inPrecision;
+    InferenceEngine::SizeVector in_out_shape;
+    float power;
+    float scale;
+    float shift;
+    std::vector<float> reference;
+};
+
+template<typename data_t>
+void ref_power(InferenceEngine::TBlob<float> &dst, test_params const& prm) {
+    data_t *dst_data = dst.data().as<data_t*>();
+
+    // The Power layer computes (shift + scale * x)^power; fill_data_dbgval makes x == i.
+    // in_out_shape holds dimensions, so the element count is their product, not
+    // the rank (the two only coincide for the shapes used below).
+    size_t data_size = 1;
+    for (size_t dim : prm.in_out_shape)
+        data_size *= dim;
+    for (size_t i = 0; i < data_size; ++i) {
+        dst_data[i] = std::pow(prm.shift + i * prm.scale, prm.power);
+    }
+}
+
+class PowerTests : public TestsCommon, public WithParamInterface<test_params> {
+    std::string model_t = R"V0G0N(
+<net Name="Power_net" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="input" type="Input" precision="FP32" id="1">
+            <output>
+                <port id="1">
+                    _IN_OUT_
+                </port>
+            </output>
+        </layer>
+        <layer name="output" id="2" type="Power" precision="FP32">
+            <data power="_POWER_" scale="_SCALE_" shift="_SHIFT_"/>
+            <input>
+                <port id="1">
+                    _IN_OUT_
+                </port>
+           </input>
+            <output>
+                <port id="2">
+                    _IN_OUT_
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string getModel(test_params p) {
+        std::string model = model_t;
+        std::string in_out_shape;
+
+        for (size_t i = 0; i < p.in_out_shape.size(); i++) {
+            in_out_shape += "<dim>";
+            in_out_shape += std::to_string(p.in_out_shape[i]) + "</dim>\n";
+        }
+        REPLACE_WITH_STR(model, "_IN_OUT_", in_out_shape);
+        REPLACE_WITH_NUM(model, "_POWER_", p.power);
+        REPLACE_WITH_NUM(model, "_SCALE_", p.scale);
+        REPLACE_WITH_NUM(model, "_SHIFT_", p.shift);
+
+        return model;
+    }
+
+protected:
+    virtual void TearDown() {
+    }
+
+    virtual void SetUp() {
+        try {
+            test_params p = ::testing::WithParamInterface<test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+
+            // Input Data
+            InputsDataMap inputInfo(net.getInputsInfo());
+            Blob::Ptr inputBlob = inferRequest.GetBlob(inputInfo.begin()->first);
+            float* inputData = inputBlob->buffer().as<float*>();
+            fill_data_dbgval(inputData, inputBlob->size());
+
+            inferRequest.Infer();
+
+            // Output Data
+            OutputsDataMap outputInfo(net.getOutputsInfo());
+            Blob::Ptr outputBlob = inferRequest.GetBlob(outputInfo.begin()->first);
+
+            // Output Reference
+            InferenceEngine::TBlob<float> dst_ref(outputBlob->getTensorDesc());
+            dst_ref.allocate();
+            ref_power<float>(dst_ref, p);
+
+            //  Check results
+            if (memcmp(dst_ref.data(), &p.reference[0], p.reference.size() * sizeof(float)) != 0)
+                FAIL() << "Wrong result compared to the TF reference!";
+
+            compare(*outputBlob, dst_ref);
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(PowerTests, smoke_GPU_TestsPower) {}
+
+std::vector<float> power_ref_0 = { 0.f, 1.f, 4.f, 9.f };
+std::vector<float> power_ref_1 = { 0.f, 4.f, 16.f, 36.f };
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_TestsPower, PowerTests,
+        ::testing::Values(
+            test_params{ "GPU", "FP32", { 1, 1, 2, 2 }, 2.f, 1.f, 0.f, power_ref_0 },
+            test_params{ "GPU", "FP32", { 1, 1, 2, 2 }, 2.f, 2.f, 0.f, power_ref_1 }
+        ));
diff --git a/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/priorbox_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/priorbox_tests.cpp
new file mode 100644 (file)
index 0000000..39b5058
--- /dev/null
@@ -0,0 +1,369 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+
+struct priorbox_test_params {
+    std::string device_name;
+
+    size_t mb;
+
+    struct {
+        size_t c;
+        size_t h;
+        size_t w;
+    } in1;
+
+    struct {
+        size_t c;
+        size_t h;
+        size_t w;
+    } in2;
+
+    struct {
+        size_t c;
+        size_t h;
+        size_t w;
+    } out;
+
+    int offset;
+    int stride;
+    int min_size;
+    int max_size;
+    bool flip;
+    bool clip;
+};
+
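+// PriorBox takes two inputs: the feature map, whose spatial size defines the
+// grid of generated priors, and the image input, against whose size the box
+// coordinates are normalized.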
+class smoke_PriorBoxOnlyTest: public TestsCommon,
+                              public WithParamInterface<priorbox_test_params> {
+
+    std::string model_t = R"V0G0N(
+<Net Name="PriorBox_Only" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="input1" type="Input" precision="FP32" id="0">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>_IC1_</dim>
+                    <dim>_IH1_</dim>
+                    <dim>_IW1_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="input2" type="Input" precision="FP32" id="1">
+            <output>
+                <port id="1">
+                    <dim>1</dim>
+                    <dim>_IC2_</dim>
+                    <dim>_IH2_</dim>
+                    <dim>_IW2_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="prior" type="PriorBox" precision="FP32" id="2">
+            <data min_size="4.000000" max_size="9.000000" flip="1" clip="1" offset="0" step="0" aspect_ratio="" variance=""/>
+            <input>
+                <port id="2">
+                    <dim>1</dim>
+                    <dim>_IC1_</dim>
+                    <dim>_IH1_</dim>
+                    <dim>_IW1_</dim>
+                </port>
+                <port id="3">
+                    <dim>1</dim>
+                    <dim>_IC2_</dim>
+                    <dim>_IH2_</dim>
+                    <dim>_IW2_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="4">
+                    <dim>1</dim>
+                    <dim>_OC_</dim>
+                    <dim>_OH_</dim>
+                    <dim>_OW_</dim>
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="2" to-port="2"/>
+        <edge from-layer="1" from-port="1" to-layer="2" to-port="3"/>
+    </edges>
+
+</Net>
+)V0G0N";
+
+    std::string getModel(priorbox_test_params p) {
+        std::string model = model_t;
+
+        REPLACE_WITH_NUM(model, "_IW1_", p.in1.w);
+        REPLACE_WITH_NUM(model, "_IH1_", p.in1.h);
+        REPLACE_WITH_NUM(model, "_IC1_", p.in1.c);
+
+        REPLACE_WITH_NUM(model, "_IW2_", p.in2.w);
+        REPLACE_WITH_NUM(model, "_IH2_", p.in2.h);
+        REPLACE_WITH_NUM(model, "_IC2_", p.in2.c);
+
+        REPLACE_WITH_NUM(model, "_OW_", p.out.w);
+        REPLACE_WITH_NUM(model, "_OH_", p.out.h);
+        REPLACE_WITH_NUM(model, "_OC_", p.out.c);
+
+        return model;
+    }
+
+protected:
+    virtual void SetUp() {
+
+        try {
+            priorbox_test_params p = ::testing::WithParamInterface<priorbox_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr());
+            network.setBatchSize(p.mb);
+
+            InputsDataMap inputs = network.getInputsInfo();
+
+            DataPtr inputPtr1 = inputs["input1"]->getInputData();
+            DataPtr inputPtr2 = inputs["input2"]->getInputData();
+
+            InferenceEngine::Blob::Ptr input1 = InferenceEngine::make_shared_blob<float>(inputPtr1->getTensorDesc());
+            input1->allocate();
+
+            InferenceEngine::Blob::Ptr input2 = InferenceEngine::make_shared_blob<float>(inputPtr2->getTensorDesc());
+            input2->allocate();
+
+            InferenceEngine::BlobMap inputBlobs;
+            inputBlobs["input1"] = input1;
+            inputBlobs["input2"] = input2;
+
+            OutputsDataMap outputs = network.getOutputsInfo();
+
+            InferenceEngine::TBlob<float>::Ptr output;
+            output = InferenceEngine::make_shared_blob<float>(outputs["prior"]->getTensorDesc());
+            output->allocate();
+
+            InferenceEngine::BlobMap outputBlobs;
+            outputBlobs["prior"] = output;
+
+            ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.device_name);
+            InferRequest inferRequest = exeNetwork.CreateInferRequest();
+            inferRequest.SetInput(inputBlobs);
+            inferRequest.SetOutput(outputBlobs);
+            inferRequest.Infer();
+
+            // Check results
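+            // The output blob holds two rows of out.h * out.w values: the box
+            // coordinates (xmin, ymin, xmax, ymax per prior) come first, then
+            // the variances, so the variance check below advances the pointer
+            // by out.h * out.w.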
+
+            const TBlob<float>::Ptr outputArray = std::dynamic_pointer_cast<TBlob<float>>(output);
+            float* dst_ptr = outputArray->data();
+
+            const float eps = 1e-6;
+
+            // Pick a few generated priors and compare against the expected values.
+            // first prior
+            EXPECT_NEAR(dst_ptr[0], 0.03, eps);
+            EXPECT_NEAR(dst_ptr[1], 0.03, eps);
+            EXPECT_NEAR(dst_ptr[2], 0.07, eps);
+            EXPECT_NEAR(dst_ptr[3], 0.07, eps);
+            // second prior
+            EXPECT_NEAR(dst_ptr[4], 0.02, eps);
+            EXPECT_NEAR(dst_ptr[5], 0.02, eps);
+            EXPECT_NEAR(dst_ptr[6], 0.08, eps);
+            EXPECT_NEAR(dst_ptr[7], 0.08, eps);
+            // prior in the 5-th row and 5-th col
+            EXPECT_NEAR(dst_ptr[4*10*2*4+4*2*4], 0.43, eps);
+            EXPECT_NEAR(dst_ptr[4*10*2*4+4*2*4+1], 0.43, eps);
+            EXPECT_NEAR(dst_ptr[4*10*2*4+4*2*4+2], 0.47, eps);
+            EXPECT_NEAR(dst_ptr[4*10*2*4+4*2*4+3], 0.47, eps);
+
+            // check variance
+            dst_ptr += p.out.h * p.out.w;
+            for (int d = 0; d < p.out.h * p.out.w; ++d) {
+                EXPECT_NEAR(dst_ptr[d], 0.1, eps);
+            }
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(smoke_PriorBoxOnlyTest, TestsPriorBox) {}
+
+INSTANTIATE_TEST_CASE_P(
+        TestsPriorBox, smoke_PriorBoxOnlyTest,
+        ::testing::Values(
+                priorbox_test_params{ "GPU",
+                    10, {10, 10, 10}, {3, 100, 100}, {2, 1, 800}, 0, 0, 4, 9, true, true }));
+
+
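+// The density variant generates priors from the fixed_size/density attributes
+// instead of min_size/max_size, hence the different expected coordinates.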
+class smoke_PriorBoxDensityTest : public TestsCommon,
+                                  public WithParamInterface<priorbox_test_params> {
+
+    std::string model_t = R"V0G0N(
+<Net Name="PriorBox_Only" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="input1" type="Input" precision="FP32" id="0">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>_IC1_</dim>
+                    <dim>_IH1_</dim>
+                    <dim>_IW1_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="input2" type="Input" precision="FP32" id="1">
+            <output>
+                <port id="1">
+                    <dim>1</dim>
+                    <dim>_IC2_</dim>
+                    <dim>_IH2_</dim>
+                    <dim>_IW2_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="prior" type="PriorBox" precision="FP32" id="2">
+            <data min_size="" fixed_size="4.000000" density="1.000000" flip="1" clip="1" offset="0" step="0" aspect_ratio="1.0"
+             variance=""/>
+            <input>
+                <port id="2">
+                    <dim>1</dim>
+                    <dim>_IC1_</dim>
+                    <dim>_IH1_</dim>
+                    <dim>_IW1_</dim>
+                </port>
+                <port id="3">
+                    <dim>1</dim>
+                    <dim>_IC2_</dim>
+                    <dim>_IH2_</dim>
+                    <dim>_IW2_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="4">
+                    <dim>1</dim>
+                    <dim>_OC_</dim>
+                    <dim>_OH_</dim>
+                    <dim>_OW_</dim>
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="2" to-port="2"/>
+        <edge from-layer="1" from-port="1" to-layer="2" to-port="3"/>
+    </edges>
+
+</Net>
+)V0G0N";
+
+    std::string getModel(priorbox_test_params p) {
+        std::string model = model_t;
+
+        REPLACE_WITH_NUM(model, "_IW1_", p.in1.w);
+        REPLACE_WITH_NUM(model, "_IH1_", p.in1.h);
+        REPLACE_WITH_NUM(model, "_IC1_", p.in1.c);
+
+        REPLACE_WITH_NUM(model, "_IW2_", p.in2.w);
+        REPLACE_WITH_NUM(model, "_IH2_", p.in2.h);
+        REPLACE_WITH_NUM(model, "_IC2_", p.in2.c);
+
+        REPLACE_WITH_NUM(model, "_OW_", p.out.w);
+        REPLACE_WITH_NUM(model, "_OH_", p.out.h);
+        REPLACE_WITH_NUM(model, "_OC_", p.out.c);
+
+        return model;
+    }
+
+protected:
+    virtual void SetUp() {
+
+        try {
+            priorbox_test_params p = ::testing::WithParamInterface<priorbox_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr());
+            network.setBatchSize(p.mb);
+
+            InputsDataMap inputs = network.getInputsInfo();
+
+            DataPtr inputPtr1 = inputs["input1"]->getInputData();
+            DataPtr inputPtr2 = inputs["input2"]->getInputData();
+
+            InferenceEngine::Blob::Ptr input1 = InferenceEngine::make_shared_blob<float>(inputPtr1->getTensorDesc());
+            input1->allocate();
+
+            InferenceEngine::Blob::Ptr input2 = InferenceEngine::make_shared_blob<float>(inputPtr2->getTensorDesc());
+            input2->allocate();
+
+            InferenceEngine::BlobMap inputBlobs;
+            inputBlobs["input1"] = input1;
+            inputBlobs["input2"] = input2;
+
+            OutputsDataMap outputs = network.getOutputsInfo();
+
+            InferenceEngine::TBlob<float>::Ptr output;
+            output = InferenceEngine::make_shared_blob<float>(outputs["prior"]->getTensorDesc());
+            output->allocate();
+
+            InferenceEngine::BlobMap outputBlobs;
+            outputBlobs["prior"] = output;
+
+            ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.device_name);
+            InferRequest inferRequest = exeNetwork.CreateInferRequest();
+            inferRequest.SetInput(inputBlobs);
+            inferRequest.SetOutput(outputBlobs);
+            inferRequest.Infer();
+
+            // Check results
+
+            const TBlob<float>::Ptr outputArray = std::dynamic_pointer_cast<TBlob<float>>(output);
+            float* dst_ptr = outputArray->data();
+
+            // Pick a few generated priors and compare against the expected values.
+            // first prior
+            EXPECT_NEAR(dst_ptr[0], 0.03, 1e-6);
+            EXPECT_NEAR(dst_ptr[1], 0.03, 1e-6);
+            EXPECT_NEAR(dst_ptr[2], 0.07, 1e-6);
+            EXPECT_NEAR(dst_ptr[3], 0.07, 1e-6);
+            // second prior
+            EXPECT_NEAR(dst_ptr[4], 0.03, 0.1);
+            EXPECT_NEAR(dst_ptr[5], 0.03, 0.1);
+            EXPECT_NEAR(dst_ptr[6], 0.17, 0.1);
+            EXPECT_NEAR(dst_ptr[7], 0.03, 0.1);
+            // prior in the 5-th row and 5-th col
+            EXPECT_NEAR(dst_ptr[4 * 10 * 2 * 4 + 4 * 2 * 4], 0.83, 0.1);
+            EXPECT_NEAR(dst_ptr[4 * 10 * 2 * 4 + 4 * 2 * 4 + 1], 0.83, 0.1);
+            EXPECT_NEAR(dst_ptr[4 * 10 * 2 * 4 + 4 * 2 * 4 + 2], 0.84, 0.1);
+            EXPECT_NEAR(dst_ptr[4 * 10 * 2 * 4 + 4 * 2 * 4 + 3], 0.84, 0.1);
+
+            // check variance
+            dst_ptr += p.out.h * p.out.w;
+            for (int d = 0; d < p.out.h * p.out.w; ++d) {
+                EXPECT_NEAR(dst_ptr[d], 0.1, 1e-6);
+            }
+        }
+        catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(smoke_PriorBoxDensityTest, TestsPriorBoxDensity) {}
+
+INSTANTIATE_TEST_CASE_P(
+    TestsPriorBoxDensity, smoke_PriorBoxDensityTest,
+    ::testing::Values(
+        priorbox_test_params{ "GPU",
+        10,{ 10, 10, 10 },{ 3, 100, 100 },{ 2, 1, 400 }, 0, 0, 4, 9, true, true }));
+
diff --git a/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/reverse_sequence_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/reverse_sequence_tests.cpp
new file mode 100644 (file)
index 0000000..dbbcc58
--- /dev/null
@@ -0,0 +1,234 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+#include <cmath>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace std;
+
+
+struct reverse_sequence_test_params {
+    std::string device_name;
+    std::string inPrecision;
+    SizeVector in_out_shape;
+    std::vector<int32_t> seq_lengths;
+    int seq_axis;
+    int batch_axis;
+    std::vector<float> reference;
+};
+
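+// ReverseSequence reverses the first seq_lengths[b] elements along seq_axis
+// independently for every index b along batch_axis; elements beyond
+// seq_lengths[b] keep their original positions.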
+template <typename data_t>
+void ref_reverse_sequence(
+        TBlob<float> &src,
+        TBlob<data_t> &seq_lengths,
+        TBlob<float> &dst,
+        int seq_axis,
+        int batch_axis
+) {
+    size_t i, src_idx;
+    const float *src_data = src.data();
+    SizeVector src_dims = src.getTensorDesc().getDims();
+    SizeVector srcStrides = src.getTensorDesc().getBlockingDesc().getStrides();
+    const data_t *seq_lengths_data = seq_lengths.data();
+    SizeVector seq_lengths_dims = seq_lengths.getTensorDesc().getDims();
+    float* dst_data = dst.data();
+
+    if (seq_axis < 0)
+        seq_axis += src_dims.size();
+
+    if (seq_axis < 0 || seq_axis >= src_dims.size())
+        FAIL() << "Incorrect 'seq_axis' parameter: axis is out of range!";
+
+    if (batch_axis < 0)
+        batch_axis += src_dims.size();
+
+    if (batch_axis < 0 || batch_axis >= src_dims.size())
+        FAIL() << "Incorrect 'batch_axis' parameter: axis is out of range!";
+
+    for (i = 0; i < src_dims[batch_axis]; i++) {
+        if (static_cast<int32_t>(seq_lengths_data[i]) > src_dims[seq_axis])
+            FAIL() << "Incorrect input 'seq_lengths' values!";
+    }
+
+    size_t work_amount_dst = srcStrides[0] * src_dims[0];
+    SizeVector counters(src_dims.size(), 0);
+    for (size_t iwork = 0; iwork < work_amount_dst; ++iwork) {
+        for (i = 0, src_idx = 0; i < src_dims.size(); ++i) {
+            size_t idx = counters[i];
+            if (i == seq_axis && idx < static_cast<int32_t>(seq_lengths_data[counters[batch_axis]])) {
+                idx = static_cast<int32_t>(seq_lengths_data[counters[batch_axis]]) - idx - 1;
+            }
+            src_idx += idx * srcStrides[i];
+        }
+
+        dst_data[iwork] = src_data[src_idx];
+
+        for (int j = src_dims.size() - 1; j >= 0; j--) {
+            counters[j] = (counters[j] + 1) % src_dims[j];
+            if (counters[j] != 0) break;
+        }
+    }
+}
+
+class ReverseSequenceTests : public TestsCommon, public WithParamInterface<reverse_sequence_test_params> {
+    std::string model_t = R"V0G0N(
+<net Name="ReverseSequence_net" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="input" type="Input" precision="_INP_" id="1">
+            <output>
+                <port id="1">
+                    _IN_OUT_
+                </port>
+            </output>
+        </layer>
+        <layer name="seq_lengths" type="Input" precision="FP32" id="2">
+            <output>
+                <port id="2">
+                    <dim>_DIM_SIZE_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="ReverseSequence" id="2" type="ReverseSequence" precision="FP32">
+            <data seq_axis="_SA_" batch_axis="_BA_"/>
+            <input>
+                <port id="1">
+                    _IN_OUT_
+                </port>
+                <port id="2">
+                    <dim>_DIM_SIZE_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="3">
+                    _IN_OUT_
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
+        <edge from-layer="2" from-port="2" to-layer="2" to-port="2"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string getModel(reverse_sequence_test_params p) {
+        std::string model = model_t;
+        std::string in_out_shape;
+        for (size_t i = 0; i < p.in_out_shape.size(); i++) {
+            in_out_shape += "<dim>";
+            in_out_shape += std::to_string(p.in_out_shape[i]) + "</dim>\n";
+        }
+        REPLACE_WITH_STR(model, "_INP_", p.inPrecision);
+        REPLACE_WITH_STR(model, "_IN_OUT_", in_out_shape);
+        REPLACE_WITH_NUM(model, "_DIM_SIZE_", p.seq_lengths.size());
+        REPLACE_WITH_NUM(model, "_SA_", p.seq_axis);
+        REPLACE_WITH_NUM(model, "_BA_", p.batch_axis);
+        return model;
+    }
+
+protected:
+    virtual void TearDown() {
+    }
+
+    virtual void SetUp() {
+        try {
+            reverse_sequence_test_params p = ::testing::WithParamInterface<reverse_sequence_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr());
+
+            // Output Data
+            OutputsDataMap out;
+            out = network.getOutputsInfo();
+            BlobMap outputBlobs;
+
+            std::pair<std::string, DataPtr> item = *out.begin();
+
+            TBlob<float>::Ptr output;
+            output = make_shared_blob<float>(item.second->getTensorDesc());
+            output->allocate();
+
+            // Output Reference
+            TBlob<float> dst_ref(item.second->getTensorDesc());
+            dst_ref.allocate();
+
+            // Input Data
+            auto src = make_shared_blob<float>({ Precision::FP32,
+                p.in_out_shape,
+                TensorDesc::getLayoutByDims(p.in_out_shape) });
+            src->allocate();
+            fill_data_dbgval(src->buffer(), src->size());
+
+            SizeVector seq_lengths_dim(1, p.seq_lengths.size());
+            auto seq_lengthsIdx = make_shared_blob<float>({ Precision::FP32,
+                seq_lengths_dim,
+                TensorDesc::getLayoutByDims(seq_lengths_dim) });
+            seq_lengthsIdx->allocate();
+            for (size_t i = 0; i < p.seq_lengths.size(); i++) {
+                static_cast<float *>(seq_lengthsIdx->buffer())[i] = static_cast<float>(p.seq_lengths[i]);
+            }
+
+            auto * seq_lengthsIdxPtr = dynamic_cast<TBlob<float>*>(seq_lengthsIdx.get());
+            if (seq_lengthsIdxPtr == nullptr)
+                FAIL() << "Cannot cast blob to TBlob<float>.";
+
+            auto * srcPtr = dynamic_cast<TBlob<float>*>(src.get());
+            if (srcPtr == nullptr)
+                FAIL() << "Cannot cast blob to TBlob<float>.";
+
+            ref_reverse_sequence(*srcPtr, *seq_lengthsIdxPtr, dst_ref, p.seq_axis, p.batch_axis);
+            if (p.reference.size()) {
+                if (memcmp(dst_ref.data(), &p.reference[0], p.reference.size() * sizeof(float)) != 0)
+                    FAIL() << "Wrong result compared to the TF reference!";
+            }
+
+            ExecutableNetwork executable_network = ie.LoadNetwork(network, p.device_name);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+
+            inferRequest.SetBlob("input", src);
+            inferRequest.SetBlob("seq_lengths", seq_lengthsIdx);
+
+            inferRequest.SetBlob(item.first, output);
+            inferRequest.Infer();
+
+            // Check results
+            compare(*output, dst_ref);
+        } catch (const details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+//  Test data vectors
+static std::vector<float> test1 = { 3.f,4.f,5.f,0.f,1.f,2.f,6.f,7.f,8.f,12.f,13.f,14.f,9.f,10.f,11.f,15.f,16.f,17.f,21.f,22.f,23.f,18.f,19.f,20.f,24.f,25.f,26.f };
+static std::vector<float> test2 = { 1.f,0.f,2.f,4.f,3.f,5.f,7.f,6.f,8.f,10.f,9.f,11.f,13.f,12.f,14.f,16.f,15.f,17.f,19.f,18.f,20.f,22.f,21.f,23.f,25.f,24.f,26.f };
+static std::vector<float> test3 = { 2.f,1.f,0.f,4.f,3.f,5.f };
+static std::vector<float> test4 = { 0.f,1.f,2.f,3.f,4.f,5.f,6.f,7.f,8.f,12.f,13.f,14.f,9.f,10.f,11.f,15.f,16.f,17.f,24.f,25.f,26.f,21.f,22.f,23.f,18.f,19.f,20.f };
+static std::vector<float> test5 = { 0.f,4.f,8.f,3.f,1.f,5.f,6.f,7.f,2.f,9.f,13.f,17.f,12.f,10.f,14.f,15.f,16.f,11.f,18.f,22.f,26.f,21.f,19.f,23.f,24.f,25.f,20.f };
+static std::vector<float> test6 = { 0.f,1.f,2.f,3.f,4.f,5.f,6.f,7.f,8.f,9.f,10.f,11.f,13.f,12.f,15.f,14.f,17.f,16.f,19.f,18.f,21.f,20.f,23.f,22.f };
+
+TEST_P(ReverseSequenceTests, smoke_GPU_TestsReverseSequence) {}
+INSTANTIATE_TEST_CASE_P(
+        smoke_TestsReverseSequence, ReverseSequenceTests,
+        ::testing::Values(
+        reverse_sequence_test_params{"GPU", "FP32", { 3, 3, 3 },{ 2, 2, 2 },  1, 0, test1 },
+        reverse_sequence_test_params{"GPU", "FP32", { 3, 3, 3 },{ 2, 2, 2 }, -2, 0, test1 },
+        reverse_sequence_test_params{"GPU", "FP32", { 3, 3, 3 },{ 2, 2, 2 },  2, 1, test2 },
+        reverse_sequence_test_params{"GPU", "FP32", { 3, 3, 3 },{ 2, 2, 2 }, -1, 1, test2 },
+        reverse_sequence_test_params{"GPU", "FP32", { 2, 3 },{ 3, 2 }, 1, 0, test3 },
+        reverse_sequence_test_params{"GPU", "FP32", { 3, 3, 3 },{ 1, 2, 3 },  1, 0, test4 },
+        reverse_sequence_test_params{"GPU", "FP32", { 3, 3, 3 },{ 1, 2, 3 },  1,-3, test4 },
+        reverse_sequence_test_params{"GPU", "FP32", { 3, 3, 3 },{ 1, 2, 3 },  1, 2, test5 },
+        reverse_sequence_test_params{"GPU", "FP32", { 2, 2, 3, 2 },{ 1, 2 }, 3, 0, test6 }
+));
diff --git a/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/select_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/select_tests.cpp
new file mode 100644 (file)
index 0000000..3b41e7a
--- /dev/null
@@ -0,0 +1,145 @@
+// Copyright (C) 2018-2019 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "select_tests.hpp"
+
+TEST_P(SelectTests, smoke_GPU_TestsSelectNoneBroadcast) {}
+
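+// Select picks each output element from one of two data inputs depending on a
+// boolean mask. "none" requires the shapes to match exactly, while "numpy"
+// enables numpy-style broadcasting; the trailing bool flags cases that are
+// expected to fail (see select_tests.hpp for the parameter layout).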
+INSTANTIATE_TEST_CASE_P(
+    smoke_TestsSelectNoneBroadcast, SelectTests,
+    ::testing::Values(
+          select_params{ "GPU", {1}, {1}, {1}, "none", false },
+          select_params{ "GPU", {17}, {17}, {17}, "none", false  },
+          select_params{ "GPU", {33, 35}, {33, 35}, {33, 35}, "none", false  },
+          select_params{ "GPU", {6, 7, 8}, {6, 7, 8}, {6, 7, 8}, "none", false  },
+          select_params{ "GPU", {2, 3, 4, 5}, {2, 3, 4, 5}, {2, 3, 4, 5}, "none", false  },
+          select_params{ "GPU", {3, 24, 35, 9}, {3, 24, 35, 9}, {3, 24, 35, 9}, "none", false  },
+          select_params{ "GPU", {8, 14, 32, 12}, {8, 14, 32, 12}, {8, 14, 32, 12}, "none", false  },
+          select_params{ "GPU", {16, 32, 15, 54}, {16, 32, 15, 54}, {16, 32, 15, 54}, "none", false  }
+));
+
+INSTANTIATE_TEST_CASE_P(
+    smoke_TestsSelectNumpyBroadcast, SelectTests,
+    ::testing::Values(
+          select_params{ "GPU", {1}, {1}, {1}, "numpy", false },
+          select_params{ "GPU", {17}, {17}, {17}, "numpy", false },
+          select_params{ "GPU", {33, 35}, {33, 35}, {33, 35}, "numpy", false },
+          select_params{ "GPU", {6, 7, 8}, {6, 7, 8}, {6, 7, 8}, "numpy", false },
+          select_params{ "GPU", {2, 3, 4, 5}, {2, 3, 4, 5}, {2, 3, 4, 5}, "numpy", false },
+          select_params{ "GPU", {3, 24, 35, 9}, {3, 24, 35, 9}, {3, 24, 35, 9}, "numpy", false },
+          select_params{ "GPU", {8, 14, 32, 12}, {8, 14, 32, 12}, {8, 14, 32, 12}, "numpy", false },
+          select_params{ "GPU", {16, 32, 15, 54}, {16, 32, 15, 54}, {16, 32, 15, 54}, "numpy", false },
+
+          select_params{ "GPU", {17}, {1}, {17}, "numpy", false },
+          select_params{ "GPU", {1}, {17}, {17}, "numpy", false },
+          select_params{ "GPU", {17}, {17}, {1}, "numpy", false },
+          select_params{ "GPU", {17}, {1}, {1}, "numpy", false },
+          select_params{ "GPU", {1}, {17}, {1}, "numpy", false },
+          select_params{ "GPU", {33, 1}, {33, 35}, {33, 35}, "numpy", false },
+          select_params{ "GPU", {33, 35}, {33, 35}, {35}, "numpy", false },
+          select_params{ "GPU", {33, 35}, {33, 35}, {1}, "numpy", false },
+          select_params{ "GPU", {35}, {33, 1}, {35}, "numpy", false },
+          select_params{ "GPU", {35, 9}, {24, 35, 9}, {24, 35, 9}, "numpy", false },
+          select_params{ "GPU", {24, 35, 9}, {24, 35, 9}, {35, 9}, "numpy", false },
+          select_params{ "GPU", {9}, {24, 35, 1}, {35, 9}, "numpy", false },
+          select_params{ "GPU", {24, 35, 1}, {35, 9}, {24, 35, 1}, "numpy", false },
+          select_params{ "GPU", {24, 1, 9}, {9}, {24, 1, 9}, "numpy", false },
+          select_params{ "GPU", {24, 1, 9}, {24, 35, 1}, {1}, "numpy", false },
+          select_params{ "GPU", {24, 35, 9}, {24, 35, 9}, {24, 1, 9}, "numpy", false },
+          select_params{ "GPU", {24, 1, 9}, {24, 35, 1}, {24, 35, 9}, "numpy", false },
+          select_params{ "GPU", {16, 32, 15, 54}, {16, 1, 15, 54}, {16, 32, 15, 54}, "numpy", false },
+          select_params{ "GPU", {1}, {16, 32, 15, 54}, {16, 32, 1, 54}, "numpy", false },
+          select_params{ "GPU", {3, 24, 35, 9}, {24, 35, 9}, {3, 1, 35, 9}, "numpy", false },
+          select_params{ "GPU", {3, 24, 35, 9}, {9}, {3, 24, 35, 9}, "numpy", false },
+          select_params{ "GPU", {16, 1, 15, 54}, {16, 32, 15, 54}, {16, 32, 1, 54}, "numpy", false },
+          select_params{ "GPU", {16, 32, 1, 1}, {16, 32, 15, 54}, {16, 32, 15, 54}, "numpy", false },
+          select_params{ "GPU", {8, 14, 32, 1}, {8, 14, 32, 12}, {32, 12}, "numpy", false },
+          select_params{ "GPU", {16, 32, 15, 54}, {16, 1, 1, 1}, {16, 32, 1, 54}, "numpy", false },
+          select_params{ "GPU", {16, 1, 15, 54}, {16, 32, 1, 54}, {16, 32, 15, 1}, "numpy", false },
+          select_params{ "GPU", {35, 9}, {3, 24, 1, 1}, {3, 24, 35, 9}, "numpy", false },
+          select_params{ "GPU", {3, 24, 1, 1}, {35, 9}, {35, 9}, "numpy", false },
+          select_params{ "GPU", {9}, {3, 1, 1, 1}, {3, 1, 1, 1}, "numpy", false }
+));
+
+INSTANTIATE_TEST_CASE_P(
+    smoke_TestsSelectNoneBroadcastError, SelectTests,
+    ::testing::Values(
+          select_params{ "GPU", {1, 32, 15, 54}, {1, 32, 15, 54}, {16, 32, 15, 54}, "none", true },
+          select_params{ "GPU", {16, 1, 15, 54}, {16, 1, 15, 54}, {16, 32, 15, 54}, "none", true },
+          select_params{ "GPU", {16, 32, 15, 54}, {16, 32, 15, 54}, {16, 32, 16, 54}, "none", true },
+          select_params{ "GPU", {16, 32, 15, 1}, {16, 32, 15, 1}, {16, 32, 15, 54}, "none", true },
+          select_params{ "GPU", {15, 32, 15, 54}, {16, 32, 15, 54}, {15, 32, 15, 54}, "none", true },
+          select_params{ "GPU", {16, 33, 15, 54}, {16, 32, 15, 54}, {16, 32, 15, 54}, "none", true },
+          select_params{ "GPU", {16, 32, 16, 54}, {16, 32, 15, 54}, {16, 32, 16, 54}, "none", true },
+          select_params{ "GPU", {16, 32, 15, 54}, {16, 32, 15, 54}, {16, 32, 15, 56}, "none", true },
+          select_params{ "GPU", {3, 5, 35, 9}, {3, 24, 35, 7}, {3, 24, 35, 9}, "none", true },
+          select_params{ "GPU", {11, 24, 35, 9}, {3, 24, 35, 9}, {3, 24, 7, 9}, "none", true },
+          select_params{ "GPU", {3, 24, 35, 9}, {3, 24, 35, 9}, {3, 24, 35, 9}, "none", true },
+          select_params{ "GPU", {11, 24, 35, 11}, {7, 13, 35, 9}, {3, 24, 27, 17}, "none", true },
+          select_params{ "GPU", {1}, {1}, {9}, "none", true },
+
+          select_params{ "GPU", {32, 15, 54}, {16, 32, 15, 54}, {15, 32, 15, 54}, "none", true },
+          select_params{ "GPU", {16, 32, 15, 54}, {16, 1, 15, 54}, {16, 33, 15, 54}, "none", true },
+          select_params{ "GPU", {16, 32, 1, 54}, {16, 32, 15, 1}, {16, 32, 2, 3}, "none", true },
+          select_params{ "GPU", {7, 1, 14}, {7, 14, 14}, {7, 7, 14, 14}, "none", true },
+          select_params{ "GPU", {7, 1, 14}, {7, 14, 14}, {7, 1, 1, 14}, "none", true },
+          select_params{ "GPU", {35, 9}, {35, 1}, {24, 35, 9}, "none", true },
+          select_params{ "GPU", {1}, {9}, {35, 9}, "none", true },
+
+          select_params{ "GPU", {17}, {1}, {17}, "none", true },
+          select_params{ "GPU", {1}, {17}, {17}, "none", true },
+          select_params{ "GPU", {17}, {17}, {1}, "none", true },
+          select_params{ "GPU", {17}, {1}, {1}, "none", true },
+          select_params{ "GPU", {1}, {17}, {1}, "none", true },
+          select_params{ "GPU", {33, 1}, {33, 35}, {33, 35}, "none", true },
+          select_params{ "GPU", {33, 35}, {33, 35}, {35}, "none", true },
+          select_params{ "GPU", {33, 35}, {33, 35}, {1}, "none", true },
+          select_params{ "GPU", {35}, {33, 1}, {35}, "none", true },
+          select_params{ "GPU", {35, 9}, {24, 35, 9}, {24, 35, 9}, "none", true },
+          select_params{ "GPU", {24, 35, 9}, {24, 35, 9}, {35, 9}, "none", true },
+          select_params{ "GPU", {9}, {24, 35, 1}, {35, 9}, "none", true },
+          select_params{ "GPU", {24, 35, 1}, {35, 9}, {24, 35, 1}, "none", true },
+          select_params{ "GPU", {24, 1, 9}, {9}, {24, 1, 9}, "none", true },
+          select_params{ "GPU", {24, 1, 9}, {24, 35, 1}, {1}, "none", true },
+          select_params{ "GPU", {24, 35, 9}, {24, 35, 9}, {24, 1, 9}, "none", true },
+          select_params{ "GPU", {24, 1, 9}, {24, 35, 1}, {24, 35, 9}, "none", true },
+          select_params{ "GPU", {16, 32, 15, 54}, {16, 1, 15, 54}, {16, 32, 15, 54}, "none", true },
+          select_params{ "GPU", {1}, {16, 32, 15, 54}, {16, 32, 1, 54}, "none", true },
+          select_params{ "GPU", {3, 24, 35, 9}, {24, 35, 9}, {3, 1, 35, 9}, "none", true },
+          select_params{ "GPU", {3, 24, 35, 9}, {9}, {3, 24, 35, 9}, "none", true },
+          select_params{ "GPU", {16, 1, 15, 54}, {16, 32, 15, 54}, {16, 32, 1, 54}, "none", true },
+          select_params{ "GPU", {16, 32, 1, 1}, {16, 32, 15, 54}, {16, 32, 15, 54}, "none", true },
+          select_params{ "GPU", {8, 14, 32, 1}, {8, 14, 32, 12}, {32, 12}, "none", true },
+          select_params{ "GPU", {16, 32, 15, 54}, {16, 1, 1, 1}, {16, 32, 1, 54}, "none", true },
+          select_params{ "GPU", {16, 1, 15, 54}, {16, 32, 1, 54}, {16, 32, 15, 1}, "none", true },
+          select_params{ "GPU", {35, 9}, {3, 24, 1, 1}, {3, 24, 35, 9}, "none", true },
+          select_params{ "GPU", {3, 24, 1, 1}, {35, 9}, {35, 9}, "none", true },
+          select_params{ "GPU", {9}, {3, 1, 1, 1}, {3, 1, 1, 1}, "none", true }
+));
+
+INSTANTIATE_TEST_CASE_P(
+    smoke_TestsSelectNumpyBroadcastError, SelectTests,
+    ::testing::Values(
+          select_params{ "GPU", {1, 32, 15, 54}, {1, 32, 15, 54}, {16, 32, 15, 54}, "numpy", true },
+          select_params{ "GPU", {16, 1, 15, 54}, {16, 1, 15, 54}, {16, 32, 15, 54}, "numpy", true },
+          select_params{ "GPU", {16, 32, 15, 54}, {16, 32, 15, 54}, {16, 32, 16, 54}, "numpy", true },
+          select_params{ "GPU", {16, 32, 15, 1}, {16, 32, 15, 1}, {16, 32, 15, 54}, "numpy", true },
+          select_params{ "GPU", {15, 32, 15, 54}, {16, 32, 15, 54}, {15, 32, 15, 54}, "numpy", true },
+          select_params{ "GPU", {16, 33, 15, 54}, {16, 32, 15, 54}, {16, 32, 15, 54}, "numpy", true },
+          select_params{ "GPU", {16, 32, 16, 54}, {16, 32, 15, 54}, {16, 32, 16, 54}, "numpy", true },
+          select_params{ "GPU", {16, 32, 15, 54}, {16, 32, 15, 54}, {16, 32, 15, 56}, "numpy", true },
+          select_params{ "GPU", {3, 5, 35, 9}, {3, 24, 35, 7}, {3, 24, 35, 9}, "numpy", true },
+          select_params{ "GPU", {11, 24, 35, 9}, {3, 24, 35, 9}, {3, 24, 7, 9}, "numpy", true },
+          select_params{ "GPU", {3, 24, 35, 9}, {3, 24, 35, 9}, {3, 24, 35, 9}, "numpy", true },
+          select_params{ "GPU", {11, 24, 35, 11}, {7, 13, 35, 9}, {3, 24, 27, 17}, "numpy", true },
+          select_params{ "GPU", {1}, {1}, {9}, "numpy", true },
+
+          select_params{ "GPU", {32, 15, 54}, {16, 32, 15, 54}, {15, 32, 15, 54}, "numpy", true },
+          select_params{ "GPU", {16, 32, 15, 54}, {16, 1, 15, 54}, {16, 33, 15, 54}, "numpy", true },
+          select_params{ "GPU", {16, 32, 1, 54}, {16, 32, 15, 1}, {16, 32, 2, 3}, "numpy", true },
+          select_params{ "GPU", {7, 1, 14}, {7, 14, 14}, {7, 7, 14, 14}, "numpy", true },
+          select_params{ "GPU", {7, 1, 14}, {7, 14, 14}, {7, 1, 1, 14}, "numpy", true },
+          select_params{ "GPU", {35, 9}, {35, 1}, {24, 35, 9}, "numpy", true },
+          select_params{ "GPU", {1}, {9}, {35, 9}, "numpy", true }
+));
diff --git a/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/shuffle_channels_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/shuffle_channels_tests.cpp
new file mode 100644 (file)
index 0000000..a6b5dbb
--- /dev/null
@@ -0,0 +1,189 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+#include <cmath>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace std;
+
+
+struct shuffle_channels_test_params {
+    std::string device_name;
+    std::string inPrecision;
+    SizeVector in_out_shape;
+    int axis;
+    int group;
+    std::vector<float> reference;
+};
+
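+// ShuffleChannels reshapes the axis dimension into (group, axis_size / group),
+// swaps the two sub-dimensions and flattens them back, so the reference moves
+// whole blocks of dataLength elements at a time.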
+void ref_shuffle_channels(TBlob<float> &src, TBlob<float> &dst, int axis, int group) {
+    size_t i;
+    const float *src_data = src.data();
+    float* dst_data = dst.data();
+    SizeVector dst_dims = dst.getTensorDesc().getDims();
+    SizeVector dstStrides = dst.getTensorDesc().getBlockingDesc().getStrides();
+
+    if (axis < 0)
+        axis += dst_dims.size();
+
+    if (axis < 0 || axis >= dst_dims.size())
+        FAIL() << "Incorrect 'axis' parameter: axis is out of range!";
+
+    if (dst_dims[axis] % group)
+        FAIL() << "Group parameter must evenly divide the channel dimension!";
+
+    //  Find number of dictionaries, index range and data length
+    size_t numDictionaries = 1;
+    for (i = 0; i <= axis; i++)
+        numDictionaries *= dst_dims[i];
+
+    size_t channelsNum = dst_dims[axis] / group;
+
+    size_t dataLength = 1;
+    for (i = axis + 1; i < dst_dims.size(); i++)
+        dataLength *= dst_dims[i];
+
+    if (dataLength == 0)
+        FAIL() << "Incorrect input dimensions: inner data length must be non-zero!";
+
+    size_t j, k;
+    for (j = 0, k = 0; j < numDictionaries; j += dst_dims[axis]) {
+        for (i = 0; i < (dst_dims[axis] * channelsNum); i += channelsNum, k += dataLength) {
+            int idx = j + i / dst_dims[axis] + i % dst_dims[axis];
+            memcpy(&dst_data[k], &src_data[dataLength * idx], sizeof(float) * dataLength);
+        }
+    }
+}
+
+class ShuffleChannelsTests : public TestsCommon, public WithParamInterface<shuffle_channels_test_params> {
+    std::string model_t = R"V0G0N(
+<net Name="ShuffleChannels_net" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="input" type="Input" precision="FP32" id="1">
+            <output>
+                <port id="1">
+                    _IN_OUT_
+                </port>
+            </output>
+        </layer>
+        <layer name="output" id="2" type="ShuffleChannels" precision="FP32">
+            <data axis="_AX_" group="_GR_"/>
+            <input>
+                <port id="1">
+                    _IN_OUT_
+                </port>
+           </input>
+            <output>
+                <port id="2">
+                    _IN_OUT_
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string getModel(shuffle_channels_test_params p) {
+        std::string model = model_t;
+        std::string in_out_shape;
+
+        for (size_t i = 0; i < p.in_out_shape.size(); i++) {
+            in_out_shape += "<dim>";
+            in_out_shape += std::to_string(p.in_out_shape[i]) + "</dim>\n";
+        }
+        REPLACE_WITH_STR(model, "_IN_OUT_", in_out_shape);
+        REPLACE_WITH_NUM(model, "_AX_", p.axis);
+        REPLACE_WITH_NUM(model, "_GR_", p.group);
+
+        return model;
+    }
+
+protected:
+    virtual void TearDown() {
+    }
+
+    virtual void SetUp() {
+        try {
+            shuffle_channels_test_params p = ::testing::WithParamInterface<shuffle_channels_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
+
+            // Output Data
+            OutputsDataMap out = net.getOutputsInfo();
+            auto item = *out.begin();
+
+            // Input Data
+            Blob::Ptr src = make_shared_blob<float>({Precision::FP32,
+                p.in_out_shape,
+                TensorDesc::getLayoutByDims(p.in_out_shape)});
+            src->allocate();
+            fill_data_dbgval(src->buffer(), src->size());
+            auto * srcPtr = dynamic_cast<TBlob<float>*>(src.get());
+            if (srcPtr == nullptr)
+                FAIL() << "Cannot cast input blob to TBlob<float>.";
+
+            // Output Reference
+            TBlob<float> dst_ref(item.second->getTensorDesc());
+            dst_ref.allocate();
+            ref_shuffle_channels(*srcPtr, dst_ref, p.axis, p.group);
+
+            // Check results
+            if (memcmp(dst_ref.data(), &p.reference[0], p.reference.size() * sizeof(float)) != 0)
+                FAIL() << "Wrong result compared to the TF reference!";
+
+            // Infer
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+            inferRequest.SetBlob("input", src);
+            auto output = inferRequest.GetBlob(item.first);
+
+            inferRequest.Infer();
+
+            // Check results
+            auto * dstPtr = dynamic_cast<TBlob<float>*>(output.get());
+            if (dstPtr == nullptr)
+                FAIL() << "Cannot cast output blob to TBlob<float>.";
+            compare(*dstPtr, dst_ref);
+        } catch (const details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(ShuffleChannelsTests, smoke_GPU_TestsShuffleChannels) {}
+
+static std::vector<float> test0 = { 0.f, 1.f, 2.f, 3.f, 12.f, 13.f, 14.f, 15.f, 24.f, 25.f, 26.f, 27.f, 36.f, 37.f, 38.f, 39.f, 48.f, 49.f, 50.f, 51.f,
+                                    4.f, 5.f, 6.f, 7.f, 16.f, 17.f, 18.f, 19.f, 28.f, 29.f, 30.f, 31.f, 40.f, 41.f, 42.f, 43.f, 52.f, 53.f, 54.f, 55.f,
+                                    8.f, 9.f, 10.f, 11.f, 20.f, 21.f, 22.f, 23.f, 32.f, 33.f, 34.f, 35.f, 44.f, 45.f, 46.f, 47.f, 56.f, 57.f, 58.f, 59.f };
+static std::vector<float> test4 = { 0.f, 2.f, 4.f, 1.f, 3.f, 5.f, 6.f, 8.f, 10.f, 7.f, 9.f, 11.f, 12.f, 14.f, 16.f, 13.f, 15.f, 17.f, 18.f, 20.f, 22.f, 19.f, 21.f, 23.f };
+static std::vector<float> test5 = { 0.f, 1.f, 4.f, 5.f, 8.f, 9.f, 2.f, 3.f, 6.f, 7.f, 10.f, 11.f, 12.f, 13.f, 16.f, 17.f, 20.f, 21.f, 14.f, 15.f, 18.f, 19.f, 22.f, 23.f };
+static std::vector<float> test6 = { 0.f, 3.f, 1.f, 4.f, 2.f, 5.f, 6.f, 9.f, 7.f, 10.f, 8.f, 11.f, 12.f, 15.f, 13.f, 16.f, 14.f, 17.f, 18.f, 21.f, 19.f, 22.f, 20.f, 23.f };
+static std::vector<float> test7 = { 0.f, 1.f, 6.f, 7.f, 2.f, 3.f, 8.f, 9.f, 4.f, 5.f, 10.f, 11.f, 12.f, 13.f, 18.f, 19.f, 14.f, 15.f, 20.f, 21.f, 16.f, 17.f, 22.f, 23.f };
+static std::vector<float> test8 = { 0.f, 3.f, 1.f, 4.f, 2.f, 5.f };
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_TestsShuffleChannels, ShuffleChannelsTests,
+        ::testing::Values(
+                shuffle_channels_test_params{ "GPU", "FP32", { 1, 15, 2, 2 }, 1, 5, test0 },
+                shuffle_channels_test_params{ "GPU", "FP32", { 1, 15, 2, 2 }, -3, 5, test0 },
+                shuffle_channels_test_params{ "GPU", "FP32", { 15, 2, 2 }, 0, 5, test0 },
+                shuffle_channels_test_params{ "GPU", "FP32", { 15, 2, 2 }, -3, 5, test0 },
+                shuffle_channels_test_params{ "GPU", "FP32", { 2, 2, 6 }, -1, 3, test4 },
+                shuffle_channels_test_params{ "GPU", "FP32", { 2, 6, 2 }, -2, 3, test5 },
+                shuffle_channels_test_params{ "GPU", "FP32", { 2, 2, 6 }, -1, 2, test6 },
+                shuffle_channels_test_params{ "GPU", "FP32", { 2, 6, 2 }, -2, 2, test7 }
+        ));
+
diff --git a/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/squeeze_unsqueeze_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/squeeze_unsqueeze_tests.cpp
new file mode 100644 (file)
index 0000000..b3f219c
--- /dev/null
@@ -0,0 +1,178 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+#include <cmath>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+
+
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace std;
+
+
+struct squeeze_unsqueeze_test_params {
+    std::string device_name;
+    std::string layerType;
+    InferenceEngine::SizeVector in_dim;
+    std::vector<int> squeeze_dims;
+    InferenceEngine::SizeVector ref_dim;
+    std::vector<float> ref;
+};
+
+template<typename data_t>
+void ref_squeeze_unsqueeze(InferenceEngine::TBlob<float>& dst, squeeze_unsqueeze_test_params& prm) {
+    data_t* dst_data = dst.buffer().template as<data_t*>();
+
+    for (size_t i = 0; i < prm.ref.size(); ++i)
+        dst_data[i] = prm.ref[i];
+}
+
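+// Serializes the squeeze/unsqueeze axes into a raw U8 weights blob; the
+// "Input2" Const layer of the IR above reads them back starting at offset 0.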
+template<typename data_t>
+InferenceEngine::TBlob<uint8_t>::Ptr generateWeights(const std::vector<int> &data) {
+    InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(
+        {InferenceEngine::Precision::U8,{ data.size() * sizeof(data_t) }, InferenceEngine::C}
+    );
+    weights->allocate();
+    for (size_t i = 0; i < data.size(); i++) {
+        ((data_t*) weights->buffer())[i] = data[i];
+    }
+    return InferenceEngine::TBlob<uint8_t>::Ptr(weights);
+}
+
+class SqueezeUnsqueezeTests : public TestsCommon, public WithParamInterface<squeeze_unsqueeze_test_params> {
+    std::string model_t = R"V0G0N(
+<net Name="squeeze_unsqueeze" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="Input1" type="Input" precision="FP32" id="1">
+            <output>
+                <port id="1">
+                    _IN_
+                </port>
+            </output>
+        </layer>
+        <layer id="2" name="Input2" precision="FP32" type="Const">
+            <output>
+                <port id="0">
+                    <dim>_INPUT_COUNT_</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="0" size="4"/>
+            </blobs>
+        </layer>
+        <layer name="squeeze_unsqueeze" id="5" type="_LAYER_" precision="FP32">
+            <input>
+                <port id="5">
+                    _IN_
+                </port>
+                <port id="6">
+                    <dim>_INPUT_COUNT_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="9">
+                    _OUT_
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="1" from-port="1" to-layer="5" to-port="5"/>
+        <edge from-layer="2" from-port="0" to-layer="5" to-port="6"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string getModel(squeeze_unsqueeze_test_params p) {
+        std::string model = model_t;
+        std::string in, out;
+
+        for (auto& i : p.in_dim) {
+            in += "<dim>" + std::to_string(i) + "</dim>\n";
+        }
+
+        for (auto& o : p.ref_dim) {
+            out += "<dim>" + std::to_string(o) + "</dim>\n";
+        }
+
+        REPLACE_WITH_STR(model, "_LAYER_", p.layerType);
+        REPLACE_WITH_STR(model, "_IN_", in);
+        REPLACE_WITH_STR(model, "_OUT_", out);
+        REPLACE_WITH_NUM(model, "_INPUT_COUNT_", p.squeeze_dims.size());
+
+        return model;
+    }
+
+protected:
+    virtual void TearDown() {
+    }
+
+    virtual void SetUp() {
+        try {
+            TestsCommon::SetUp();
+            squeeze_unsqueeze_test_params p = ::testing::WithParamInterface<squeeze_unsqueeze_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, generateWeights<float>(p.squeeze_dims));
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+
+            InferenceEngine::OutputsDataMap out;
+            out = net.getOutputsInfo();
+
+            std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
+
+            InferenceEngine::TBlob<float>::Ptr output;
+            output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
+            output->allocate();
+            inferRequest.SetBlob(item.first, output);
+
+            // Output Reference
+            InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
+            dst_ref.allocate();
+
+            // Input Data
+            InferenceEngine::Blob::Ptr src;
+            src = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.in_dim, InferenceEngine::TensorDesc::getLayoutByDims(p.in_dim) });
+            src->allocate();
+            fill_data_dbgval(src->buffer(), src->size());
+            auto * srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
+            if (srcPtr == nullptr)
+                FAIL() << "Cannot cast blob to TBlob<float>.";
+
+            ref_squeeze_unsqueeze<float>(dst_ref, p);
+
+            inferRequest.SetBlob("Input1", src);
+            inferRequest.Infer();
+
+            compare(*output, dst_ref);
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(SqueezeUnsqueezeTests, smoke_GPU_TestsSqueezeUnsqueeze) {}
+
+//  Test data vectors
+std::vector<float> squeeze_ref1 = { 0.f, 1.f, 2.f, 3.f, 4.f, 5.f };
+std::vector<float> squeeze_ref2 = { 0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f };
+std::vector<float> squeeze_ref3 = { 0.f, 1.f, 2.f };
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_TestsSqueezeUnsqueeze, SqueezeUnsqueezeTests,
+        ::testing::Values(
+                squeeze_unsqueeze_test_params{ "GPU", "Squeeze", { 1, 1, 3, 2 }, { 0, 1 }, { 3, 2, 1, 1 }, squeeze_ref1 },
+                squeeze_unsqueeze_test_params{ "GPU", "Squeeze", { 3, 1, 3, 1 }, { 1 }, { 3, 3, 1, 1 }, squeeze_ref2 },
+                squeeze_unsqueeze_test_params{ "GPU", "Squeeze", { 3, 1, 3, 1 }, { 3 }, { 3, 1, 3, 1 }, squeeze_ref2 },
+                squeeze_unsqueeze_test_params{ "GPU", "Unsqueeze", { 3, 1, 1, 1 }, { 0, 2, 3 }, { 1, 3, 1, 1 }, squeeze_ref3 },
+                squeeze_unsqueeze_test_params{ "GPU", "Unsqueeze", { 1, 1, 3, 1 }, { 0 }, { 1, 1, 1, 3 }, squeeze_ref3 },
+                squeeze_unsqueeze_test_params{ "GPU", "Unsqueeze", { 1, 3, 1, 1 }, { 0, 1 }, { 1, 1, 1, 3 }, squeeze_ref3 },
+                squeeze_unsqueeze_test_params{ "GPU", "Unsqueeze", { 3, 1, 1, 1 }, { 0, 1, 2 }, { 1, 1, 1, 3 }, squeeze_ref3 }
+        ));
diff --git a/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/strided_slice_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/strided_slice_tests.cpp
new file mode 100644 (file)
index 0000000..31330ff
--- /dev/null
@@ -0,0 +1,355 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+#include <cmath>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+
+
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace std;
+
+
+struct strided_slice_test_params {
+    std::string device_name;
+    InferenceEngine::SizeVector in_dim;
+    std::vector<int> begin;
+    std::vector<int> end;
+    std::vector<int> strides;
+    InferenceEngine::SizeVector ref_dim;
+    std::vector<float> ref;
+};
+
+inline void clipping(int *idx, const int min, const int max) {
+    (*idx) = ((*idx) > min) ? (*idx) : min;
+    (*idx) = ((*idx) < max) ? (*idx) : (max - 1);
+    return;
+}
+
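+// Reference StridedSlice. begin/end/stride describe the slice taken per axis;
+// a zero in begin_mask/end_mask makes the corresponding bound implicit (the
+// full range), ellipsis_mask expands to full slices over the skipped axes,
+// new_axis_mask inserts size-1 axes and shrink_axis_mask drops the sliced
+// axis from the output shape.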
+void ref_strided_slice(
+        InferenceEngine::TBlob<float> &src,
+        InferenceEngine::TBlob<float> &dst,
+        InferenceEngine::SizeVector &out_dims,
+        std::vector<int> begin,
+        std::vector<int> end,
+        std::vector<int> stride,
+        InferenceEngine::SizeVector begin_mask,
+        InferenceEngine::SizeVector end_mask,
+        InferenceEngine::SizeVector ellipsis_mask,
+        InferenceEngine::SizeVector new_axis_mask,
+        InferenceEngine::SizeVector shrink_axis_mask
+) {
+    size_t i;
+    const float *src_data = src.data();
+    InferenceEngine::SizeVector src_dims = src.getTensorDesc().getDims();
+    InferenceEngine::SizeVector srcStrides = src.getTensorDesc().getBlockingDesc().getStrides();
+    float* dst_data = dst.data();
+    InferenceEngine::SizeVector dst_dims = dst.getTensorDesc().getDims();
+    InferenceEngine::SizeVector dstStrides = dst.getTensorDesc().getBlockingDesc().getStrides();
+
+    int new_axis = 0;
+    for (auto& na : new_axis_mask)
+        new_axis += na;
+
+    int shrink_axis = 0;
+    for (auto& sa : shrink_axis_mask)
+        shrink_axis += sa;
+    int max_dims = src_dims.size() + new_axis;
+
+    //  Check begin/end/stride vector sizes
+    int bounds_size = 0;
+    if (begin.size() && end.size() && begin.size() != end.size()) FAIL() << "Begin vector size should be equal to end vector size";
+    if (begin.size() && stride.size() && stride.size() != begin.size()) FAIL() << "Stride vector size should be equal to begin vector size";
+    if (end.size() && stride.size() && stride.size() != end.size()) FAIL() << "Stride vector size should be equal to end vector size";
+
+    if (begin.size()) bounds_size = begin.size();
+    if (end.size()) bounds_size = end.size();
+    if (stride.size()) bounds_size = stride.size();
+
+    //  ellipsis_mask may have at most one bit set (a single ellipsis), so take the position of its first set element
+    int ellipsis_pos1, ellipsis_pos2;
+    ellipsis_pos1 = ellipsis_pos2 = max_dims;
+    for (i = 0; i < ellipsis_mask.size(); i++) {
+        if (ellipsis_mask[i] > 0) {
+            ellipsis_pos1 = i;
+            break;
+        }
+    }
+    bounds_size -= ellipsis_pos1;
+    if (bounds_size > 0 && (max_dims - bounds_size) > ellipsis_pos1)
+        ellipsis_pos2 = max_dims - bounds_size;
+
+    std::vector<int> begin_dms(max_dims, 0);
+    std::vector<int> end_dms(max_dims, -1);
+    std::vector<int> stride_dms(max_dims, 1);
+
+    int j, k, bj, ej, sj;
+    InferenceEngine::SizeVector our_dims;
+    for (i = 0, j = 0, k = 0, bj = 0, ej = 0, sj = 0; i < max_dims; i++) {
+        if (i >= ellipsis_pos1 && i < ellipsis_pos2) {
+            if (!(new_axis_mask.size() > i && new_axis_mask[i] == 1)) {
+                end_dms[i] = end_dms[i] >= 0 ? end_dms[i] : src_dims[j++] + end_dms[i];
+            } else {
+                end_dms[i] = begin_dms[i];
+            }
+            out_dims.push_back(static_cast<int>(ceil(static_cast<float>(abs(end_dms[i] - begin_dms[i]) + 1) / static_cast<float>(abs(stride_dms[i])))));
+            our_dims.push_back(static_cast<int>(ceil(static_cast<float>(abs(end_dms[i] - begin_dms[i]) + 1) / static_cast<float>(abs(stride_dms[i])))));
+            k = ellipsis_pos1;
+            continue;
+        }
+        stride_dms[i] = (stride.size() > sj && stride[sj] != 0) ? stride[sj++] : 1;
+
+        if (!(begin_mask.size() > j && begin_mask[j] == 0))
+            begin_dms[i] = begin.size() > bj ? begin[bj] : (stride_dms[i] > 0 ? 0 : -1);
+        else
+            begin_dms[i] = stride_dms[i] > 0 ? 0 : -1;
+        bj++;
+        begin_dms[i] = begin_dms[i] >= 0 ? begin_dms[i] : src_dims[j] + begin_dms[i];
+        //  Clipping 'begin'
+        clipping(&begin_dms[i], 0, src_dims[j]);
+
+        if (!(end_mask.size() > j && end_mask[j] == 0)) {
+            int end_dms_tmp = end.size() > ej ? (stride_dms[i] > 0 ? end[ej] - 1 : end[ej] + 1) : end_dms[i];
+            end_dms[i] = end.size() > ej ? end_dms_tmp : (stride_dms[i] > 0 ? -1 : 0);
+        }
+        else {
+            end_dms[i] = stride_dms[i] > 0 ? -1 : 0;
+        }
+        ej++;
+        end_dms[i] = end_dms[i] >= 0 ? end_dms[i] : src_dims[j] + end_dms[i];
+        //  Clipping 'end'
+        clipping(&end_dms[i], 0, src_dims[j]);
+
+        if (!(new_axis_mask.size() > i && new_axis_mask[i] == 1))
+            j++;
+        else
+            end_dms[i] = 0;
+
+        if (shrink_axis_mask.size() > k && shrink_axis_mask[k] == 1)
+            end_dms[i] = begin_dms[i];
+        else
+            out_dims.push_back(static_cast<int>(ceil(static_cast<float>(abs(end_dms[i] - begin_dms[i]) + 1) / static_cast<float>(abs(stride_dms[i])))));
+
+        our_dims.push_back(static_cast<int>(ceil(static_cast<float>(abs(end_dms[i] - begin_dms[i]) + 1) / static_cast<float>(abs(stride_dms[i])))));
+        k++;
+    }
+
+    size_t work_amount_dst = dstStrides[0] * dst_dims[0];
+    InferenceEngine::SizeVector counters(max_dims, 0);
+
+    for (size_t iwork = 0, dst_idx = 0; iwork < work_amount_dst; ++iwork) {
+        int src_idx = 0;
+        for (i = 0, j = 0; i < max_dims; ++i) {
+            src_idx += (begin_dms[i] + counters[i] * stride_dms[i]) * srcStrides[j];
+            if (!(new_axis_mask.size() > i && new_axis_mask[i] == 1)) j++;
+        }
+
+        dst_data[dst_idx++] = src_data[src_idx];
+
+        for (j = max_dims - 1; j >= 0; j--) {
+            counters[j] = (counters[j] + 1) % our_dims[j];
+            if (counters[j] != 0) break;
+        }
+    }
+}
+
+template<typename data_t>
+void ref_strided_slice(std::vector<Blob::Ptr> &dsts, strided_slice_test_params& prm) {
+    data_t *dst_data = dsts[0]->buffer().as<data_t*>();
+
+    for (size_t i = 0; i < prm.ref.size(); ++i)
+        dst_data[i] = prm.ref[i];
+}
+
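+//  Packs the given integer vectors back to back into a single U8 blob,
+//  storing every value as a uint32; the StridedSlice test below uses it to
+//  supply the begin/end/stride constants.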
+InferenceEngine::TBlob<uint8_t>::Ptr generateWeights(const std::vector<std::vector<int>> &data) {
+    size_t totalSize = 0;
+    for (size_t i = 0; i < data.size(); ++i)
+        totalSize += data[i].size();
+    InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(
+        { InferenceEngine::Precision::U8,{ totalSize * sizeof(uint32_t) }, Layout::C }
+        );
+    weights->allocate();
+    size_t vectorCounter = 0;
+    size_t innerVectorCounter = 0;
+    for (size_t i = 0; i < totalSize; i++) {
+        if (innerVectorCounter >= data[vectorCounter].size()) {
+            ++vectorCounter;
+            innerVectorCounter = 0;
+        }
+        ((uint32_t*) weights->buffer())[i] = data[vectorCounter][innerVectorCounter];
+        ++innerVectorCounter;
+    }
+    return InferenceEngine::TBlob<uint8_t>::Ptr(weights);
+}
+
+class StridedSliceTests : public TestsCommon, public WithParamInterface<strided_slice_test_params> {
+    std::string model_t = R"V0G0N(
+<net Name="strided_slice" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="Input1" type="Input" precision="FP32" id="1">
+            <output>
+                <port id="1">
+                    _IN_
+                </port>
+            </output>
+        </layer>
+        <layer id="2" name="Input2" precision="FP32" type="Const">
+            <output>
+                <port id="0">
+                    <dim>4</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="0" size="4"/>
+            </blobs>
+        </layer>
+        <layer id="3" name="Input3" precision="FP32" type="Const">
+            <output>
+                <port id="0">
+                    <dim>4</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="16" size="4"/>
+            </blobs>
+        </layer>
+        <layer id="4" name="Input4" precision="FP32" type="Const">
+            <output>
+                <port id="0">
+                    <dim>4</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="32" size="4"/>
+            </blobs>
+        </layer>
+        <layer name="strided_slice" id="5" type="StridedSlice" precision="FP32">
+            <data begin_mask=""
+                  end_mask=""
+                  ellipsis_mask=""
+                  new_axis_mask=""
+                  shrink_axis_mask=""/>
+            <input>
+                <port id="5">
+                    _IN_
+                </port>
+                <port id="6">
+                    <dim>4</dim>
+                </port>
+                <port id="7">
+                    <dim>4</dim>
+                </port>
+                <port id="8">
+                    <dim>4</dim>
+                </port>
+            </input>
+            <output>
+                <port id="9">
+                    _OUT_
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="1" from-port="1" to-layer="5" to-port="5"/>
+        <edge from-layer="2" from-port="0" to-layer="5" to-port="6"/>
+        <edge from-layer="3" from-port="0" to-layer="5" to-port="7"/>
+        <edge from-layer="4" from-port="0" to-layer="5" to-port="8"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string getModel(strided_slice_test_params p) {
+        std::string in, out;
+
+        for (auto& i : p.in_dim) {
+            in += "<dim>" + std::to_string(i) + "</dim>\n";
+        }
+
+        for (auto& o : p.ref_dim) {
+            out += "<dim>" + std::to_string(o) + "</dim>\n";
+        }
+
+        REPLACE_WITH_STR(model_t, "_IN_", in);
+        REPLACE_WITH_STR(model_t, "_OUT_", out);
+
+        return model_t;
+    }
+
+protected:
+    virtual void TearDown() {
+    }
+
+    virtual void SetUp() {
+        try {
+            TestsCommon::SetUp();
+            strided_slice_test_params p = ::testing::WithParamInterface<strided_slice_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, generateWeights({ p.begin, p.end, p.strides }));
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+
+            InferenceEngine::OutputsDataMap out;
+            out = net.getOutputsInfo();
+
+            std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
+
+            InferenceEngine::TBlob<float>::Ptr output;
+            output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
+            output->allocate();
+            inferRequest.SetBlob(item.first, output);
+
+            // Output Reference
+            InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
+            dst_ref.allocate();
+
+            // Input Data
+            InferenceEngine::Blob::Ptr src;
+            src = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.in_dim, InferenceEngine::TensorDesc::getLayoutByDims(p.in_dim) });
+            src->allocate();
+            fill_data_dbgval(src->buffer(), src->size());
+
+            auto * srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
+            if (srcPtr == nullptr)
+                FAIL() << "Cannot cast blob to TBlob<float>.";
+
+            ref_strided_slice(*srcPtr, dst_ref, p.ref_dim, p.begin, p.end, p.strides, {}, {}, {}, {}, {});
+
+            inferRequest.SetBlob("Input1", src);
+
+            inferRequest.Infer();
+
+            compare(*output, dst_ref);
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(StridedSliceTests, smoke_GPU_TestsStridedSlice) {}
+
+//  Test data vectors
+std::vector<float> ref1 = { 0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 13.f, 14.f, 15.f };
+std::vector<float> ref2 = { 15.f };
+std::vector<float> ref3 = { 0.f, 1.f, 2.f, 6.f, 7.f, 8.f, 12.f, 13.f, 14.f, 18.f, 19.f, 20.f, 24.f, 25.f, 26.f, 30.f, 31.f, 32.f, 36.f, 37.f, 38.f, 42.f, 43.f, 44.f };
+std::vector<float> ref4 = { 33.f, 34.f, 35.f, 41.f, 42.f, 43.f, 49.f, 50.f, 51.f, 57.f, 58.f, 59.f };
+std::vector<float> ref5 = { 0.f, 1.f, 2.f, 8.f, 9.f, 10.f, 12.f, 13.f, 14.f, 20.f, 21.f, 22.f, 24.f, 25.f, 26.f, 32.f, 33.f, 34.f, 36.f, 37.f, 38.f, 44.f, 45.f, 46.f };
+
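+//  Each params entry: device, input dims, begin, end, strides, expected
+//  output dims, expected output values.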
+INSTANTIATE_TEST_CASE_P(
+        smoke_TestsStridedSlice, StridedSliceTests,
+        ::testing::Values(
+                strided_slice_test_params{ "GPU", { 2, 2, 2, 2 }, { 0, 0, 0, 0 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, ref1 },
+                strided_slice_test_params{ "GPU", { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, { 2, 2, 2, 2 }, { 1, 1, 1, 1 }, { 1, 1, 1, 1 }, ref2 },
+                strided_slice_test_params{ "GPU", { 2, 2, 4, 3 }, { 0, 0, 0, 0 }, { 2, 2, 4, 3 }, { 1, 1, 2, 1 }, { 2, 2, 2, 3 }, ref3 },
+                strided_slice_test_params{ "GPU", { 2, 2, 4, 4 }, { 1, 0, 0, 1 }, { 2, 2, 4, 4 }, { 1, 1, 2, 1 }, { 1, 2, 2, 3 }, ref4 },
+                strided_slice_test_params{ "GPU", { 2, 2, 3, 4 }, { 0, 0, 0, 0 }, { 2, 2, 4, 3 }, { 1, 1, 2, 1 }, { 2, 2, 2, 3 }, ref5 }
+        ));
diff --git a/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/transpose_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/transpose_tests.cpp
new file mode 100644 (file)
index 0000000..611aef0
--- /dev/null
@@ -0,0 +1,153 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+#include <cmath>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace std;
+
+
+struct transpose_test_params {
+    std::string device_name;
+    InferenceEngine::SizeVector in_shape;
+    InferenceEngine::SizeVector out_shape;
+    bool secondInput;
+};
+
+
+class TransposeTest : public TestsCommon, public WithParamInterface<transpose_test_params> {
+    std::string model_t = R"V0G0N(
+<net Name="Transpose_net" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="input" type="Input" precision="FP32" id="0">
+            <output>
+                <port id="0">
+                    _IN_
+                </port>
+            </output>
+        </layer>
+        _SND_INP_
+        <layer name="output" type="Transpose" precision="_LKP_" id="2">
+            <input>
+                <port id="0">
+                    _IN_
+                </port>
+                _SND_INPUT_SHAPE_
+            </input>
+            <output>
+                <port id="2">
+                    _OUT_
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
+        _SND_EDGE_
+    </edges>
+</net>
+)V0G0N";
+
+
+    std::string getModel(transpose_test_params p) {
+        std::string model = model_t;
+        std::string in_shape, out_shape, snd_layer, snd_shape, snd_edge;
+        snd_layer = snd_shape = snd_edge = "";
+
+        for (size_t i = 0; i < p.in_shape.size(); i++) {
+            in_shape += "<dim>";
+            in_shape += std::to_string(p.in_shape[i]);
+            in_shape += "</dim>\n";
+        }
+
+        for (size_t i = 0; i < p.out_shape.size(); i++) {
+            out_shape += "<dim>";
+            out_shape += std::to_string(p.out_shape[i]);
+            out_shape += "</dim>\n";
+        }
+
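+        //  When requested, emit a second I32 "order" input layer and the
+        //  edge wiring it into the Transpose layer's second port.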
+        if (p.secondInput)
+        {
+            snd_shape += "<port id=\"1\">\n";
+            snd_shape += std::to_string(p.in_shape.size());
+            snd_shape += "\n</port>\n";
+
+            snd_layer += "<layer name=\"order\" type=\"Input\" precision=\"I32\" id=\"1\">\n";
+            snd_layer += "<output>\n";
+            snd_layer += snd_shape;
+            snd_layer += "</output>\n";
+            snd_layer += "</layer>\n";
+
+            snd_edge += "<edge from-layer=\"1\" from-port=\"1\" to-layer=\"2\" to-port=\"1\"/>";
+        }
+
+        REPLACE_WITH_STR(model, "_IN_", in_shape);
+        REPLACE_WITH_STR(model, "_OUT_", out_shape);
+        REPLACE_WITH_STR(model, "_SND_INP_", snd_layer);
+        REPLACE_WITH_STR(model, "_SND_INPUT_SHAPE_", snd_shape);
+        REPLACE_WITH_STR(model, "_SND_EDGE_", snd_edge);
+
+        return model;
+    }
+
+protected:
+    virtual void TearDown() {
+    }
+
+    virtual void SetUp() {
+        try
+        {
+            transpose_test_params p = ::testing::WithParamInterface<transpose_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+
+            Blob::Ptr src = make_shared_blob<float>({Precision::FP32, p.in_shape,
+                TensorDesc::getLayoutByDims(p.in_shape)});
+            src->allocate();
+
+            auto* srcPtr = dynamic_cast<TBlob<float>*>(src.get());
+
+            if (srcPtr == nullptr)
+                FAIL() << "Cannot cast blob to TBlob<float>.";
+
+            inferRequest.SetBlob("input", src);
+
+            inferRequest.Infer();
+
+            OutputsDataMap outputInfo(net.getOutputsInfo());
+            Blob::Ptr outputBlob = inferRequest.GetBlob(outputInfo.begin()->first);
+            auto outputDims = outputBlob->getTensorDesc().getDims();
+
+            compare(outputDims, p.out_shape);
+
+        }
+        catch (const InferenceEngine::details::InferenceEngineException &e)
+        {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(TransposeTest, smoke_GPU_TestsTranspose) {}
+
+INSTANTIATE_TEST_CASE_P(
+    smoke_TestsTranspose, TransposeTest,
+    ::testing::Values(
+        transpose_test_params{ "GPU", { 2, 3, 4 }, { 4, 3, 2 }, false },
+        transpose_test_params{ "GPU", { 2, 3, 4, 5 }, { 5, 4, 3, 2 }, false },
+        transpose_test_params{ "GPU", { 2, 3, 4 }, { 4, 3, 2 }, true },
+        transpose_test_params{ "GPU", { 2, 3, 4 }, { 4, 2, 3 }, true },
+        transpose_test_params{ "GPU", { 2, 3, 4, 5 }, { 2, 3, 5, 4 }, true }
+));
diff --git a/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/variadic_split_tests.cpp b/inference-engine/tests_deprecated/functional/cldnn/single_layer_tests/variadic_split_tests.cpp
new file mode 100644 (file)
index 0000000..c026478
--- /dev/null
@@ -0,0 +1,13 @@
+#include "variadic_split_tests.hpp"
+
+TEST_P(VariadicSplitTests, smoke_GPU_TestsVariadicSplit) {}
+
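+//  Each params entry: device, split axis, split lengths, input dims,
+//  expected dims of each output.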
+INSTANTIATE_TEST_CASE_P(
+    smoke_TestsVariadicSplit, VariadicSplitTests,
+    ::testing::Values(
+        variadic_split_params{ "GPU", 1, {2, 4}, {1, 6, 22, 22}, {{1, 2, 22, 22}, {1, 4, 22, 22}} },
+        variadic_split_params{ "GPU", 1, {4, 6}, {1, 10, 22, 22}, {{1, 4, 22, 22}, {1, 6, 22, 22}} },
+        variadic_split_params{ "GPU", 1, {2, 4, 1}, {1, 7, 22, 22}, {{1, 2, 22, 22}, {1, 4, 22, 22}, {1, 1, 22, 22}} }, 
+        variadic_split_params{ "GPU", 2, {10, 6}, {1, 10, 16, 22}, {{1, 10, 10, 22}, {1, 10, 6, 22}} },
+        variadic_split_params{ "GPU", 3, {2, 4, 9, 10, 11}, {1, 5, 5, 36}, {{1, 5, 5, 2}, {1, 5, 5, 4}, {1, 5, 5, 9}, {1, 5, 5, 10}, {1, 5, 5, 11}} }
+));
diff --git a/inference-engine/tests_deprecated/functional/cldnn/test_model_repo.cpp b/inference-engine/tests_deprecated/functional/cldnn/test_model_repo.cpp
new file mode 100644 (file)
index 0000000..1cb3045
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "test_model_repo.hpp"
+
+std::string get_model_repo() {
+    return "models:";
+}
+
+const char* TestDataHelpers::getModelPathNonFatal() noexcept {
+    return TestDataHelpers::getModelPathNonFatalDefault();
+}
+
+std::string TestDataHelpers::get_data_path() {
+    return TestDataHelpers::get_data_path_default();
+}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/functional/gna/CMakeLists.txt b/inference-engine/tests_deprecated/functional/gna/CMakeLists.txt
new file mode 100644 (file)
index 0000000..6528fe5
--- /dev/null
@@ -0,0 +1,60 @@
+# Copyright (C) 2018-2020 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+set(TARGET_NAME GnaFunctionalTests)
+
+file(GLOB TEST_SRC
+        ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/io_blob_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/ie_class/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/input_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/lstm/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/single_layer_tests/*.cpp)
+
+list(APPEND DEPENDENCIES
+        HeteroPlugin
+        GNAPlugin)
+
+if(ENABLE_MKL_DNN)
+    list(APPEND DEPENDENCIES
+        MKLDNNPlugin)
+endif()
+
+add_executable(${TARGET_NAME} ${TEST_SRC} ${TEST_INCLUDE})
+
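+# Map the GNA library generation (GNA1/GNA2) onto the numeric value passed
+# to the GNA_LIB_VER compile definition below.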
+if(GNA_LIBRARY_VERSION STREQUAL "GNA2")
+    set(GNA_LIBRARY_VERSION_NUMBER 2)
+else()
+    set(GNA_LIBRARY_VERSION_NUMBER 1)
+endif()
+
+target_compile_definitions(${TARGET_NAME}
+        PRIVATE
+            USE_GNA=ON
+            GNA_LIB_VER=${GNA_LIBRARY_VERSION_NUMBER}
+            INSTANTIATE_TESTS=1
+        PUBLIC ${ARGV}
+            DATA_PATH=\"${DATA_PATH}\"
+            MODELS_PATH=\"${MODELS_PATH}\")
+
+target_link_libraries(${TARGET_NAME}
+        PRIVATE
+            IESharedTests
+        )
+
+target_include_directories(${TARGET_NAME}
+        PRIVATE
+            ${CMAKE_CURRENT_SOURCE_DIR}/include)
+
+add_dependencies(${TARGET_NAME} ${DEPENDENCIES})
+
+add_test(NAME ${TARGET_NAME}
+        COMMAND ${TARGET_NAME})
+
+set_target_properties(${TARGET_NAME} PROPERTIES COMPILE_PDB_NAME ${TARGET_NAME})
+
+if(GNA_LIBRARY_VERSION STREQUAL "GNA1")
+    target_compile_definitions(${TARGET_NAME} PRIVATE GNA1_LIB)
+endif()
diff --git a/inference-engine/tests_deprecated/functional/gna/shared_tests_instance/ie_class/ie_class.cpp b/inference-engine/tests_deprecated/functional/gna/shared_tests_instance/ie_class/ie_class.cpp
new file mode 100644 (file)
index 0000000..b984f0e
--- /dev/null
@@ -0,0 +1,147 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "ie_class.hpp"
+
+//
+// IE Class Common tests with <pluginName, deviceName params>
+//
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassBasicTestP, IEClassBasicTestP,
+        ::testing::Values(std::make_pair("GNAPlugin", "GNA")));
+
+// TODO
+INSTANTIATE_TEST_CASE_P(
+        DISABLED_IEClassNetworkTestP, IEClassNetworkTestP,
+        ::testing::Values("GNA"));
+
+//
+// IE Class GetMetric
+//
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS,
+        ::testing::Values("GNA", "MULTI", "HETERO"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_METRICS,
+        ::testing::Values("GNA", "MULTI", "HETERO"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassGetMetricTest, IEClassGetMetricTest_AVAILABLE_DEVICES,
+        ::testing::Values("GNA"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassGetMetricTest, IEClassGetMetricTest_FULL_DEVICE_NAME,
+        ::testing::Values("GNA", "MULTI", "HETERO"));
+
+// TODO: Issue: 30198
+INSTANTIATE_TEST_CASE_P(
+        DISABLED_IEClassGetMetricTest, IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES,
+        ::testing::Values("GNA"));
+
+// TODO: Issue: 30199
+INSTANTIATE_TEST_CASE_P(
+        DISABLED_IEClassGetMetricTest, IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS,
+        ::testing::Values("GNA"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassGetMetricTest, IEClassGetMetricTest_ThrowUnsupported,
+        ::testing::Values("GNA", "MULTI", "HETERO"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassGetConfigTest, IEClassGetConfigTest_ThrowUnsupported,
+        ::testing::Values("GNA", "MULTI", "HETERO"));
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassGetAvailableDevices, IEClassGetAvailableDevices,
+        ::testing::Values("GNA"));
+
+//
+// IE Class GetConfig
+//
+
+INSTANTIATE_TEST_CASE_P(
+        nightly_IEClassGetConfigTest, IEClassGetConfigTest,
+        ::testing::Values("GNA"));
+
+//
+// Executable Network GetMetric
+//
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
+    ::testing::Values("GNA" /*, "MULTI:GNA", "HETERO:GNA" */));
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
+    ::testing::Values("GNA" /*, "MULTI:GNA",  "HETERO:GNA" */));
+
+// TODO: this metric is not supported by the plugin
+INSTANTIATE_TEST_CASE_P(
+   DISABLED_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_NETWORK_NAME,
+   ::testing::Values("GNA", "MULTI:GNA", "HETERO:GNA"));
+//
+// TODO: this metric is not supported by the plugin
+INSTANTIATE_TEST_CASE_P(
+   DISABLED_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS,
+   ::testing::Values("GNA", "MULTI:GNA", "HETERO:GNA"));
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported,
+    ::testing::Values("GNA", /* "MULTI:GNA", */ "HETERO:GNA"));
+
+//
+// Executable Network GetConfig / SetConfig
+//
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassExecutableNetworkGetConfigTest, IEClassExecutableNetworkGetConfigTest,
+    ::testing::Values("GNA"));
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassExecutableNetworkSetConfigTest, IEClassExecutableNetworkSetConfigTest,
+    ::testing::Values("GNA"));
+
+// IE Class Query network
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassQueryNetworkTest, IEClassQueryNetworkTest,
+    ::testing::Values("GNA"));
+
+// IE Class Load network
+
+INSTANTIATE_TEST_CASE_P(
+   IEClassLoadNetworkTest, IEClassLoadNetworkTest,
+   ::testing::Values("GNA"));
+
+//
+// Hetero Executable Network GetMetric
+//
+
+// TODO: verify hetero interop
+INSTANTIATE_TEST_CASE_P(
+   DISABLED_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
+   ::testing::Values("GNA"));
+
+// TODO: verify hetero interop
+INSTANTIATE_TEST_CASE_P(
+   DISABLED_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
+   ::testing::Values("GNA"));
+
+// TODO: verify hetero interop
+INSTANTIATE_TEST_CASE_P(
+   DISABLED_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME,
+   ::testing::Values("GNA"));
+
+INSTANTIATE_TEST_CASE_P(
+   IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK,
+   ::testing::Values("GNA"));
diff --git a/inference-engine/tests_deprecated/functional/gna/shared_tests_instance/input_tests/parser_tests.cpp b/inference-engine/tests_deprecated/functional/gna/shared_tests_instance/input_tests/parser_tests.cpp
new file mode 100644 (file)
index 0000000..5726a03
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "parser_tests.hpp"
+
+ir_test_params ir_test_cases[] = {
+        ir_test_params("GNA", "FP32", negative_conv_kernel_x_case),
+        ir_test_params("GNA", "FP32", negative_conv_kernel_y_case),
+        ir_test_params("GNA", "FP32", negative_conv_stride_x_case),
+        ir_test_params("GNA", "FP32", negative_conv_weights_case),
+        ir_test_params("GNA", "FP32", negative_conv_biases_case),
+
+        ir_test_params("GNA", "FP32", negative_fc_out_size_case),
+        ir_test_params("GNA", "FP32", negative_fc_weights_case),
+        ir_test_params("GNA", "FP32", negative_fc_biases_case),
+
+        ir_test_params("GNA", "FP32", negative_deconv_kernel_x_case),
+        ir_test_params("GNA", "FP32", negative_deconv_kernel_y_case),
+        ir_test_params("GNA", "FP32", negative_deconv_stride_x_case),
+        ir_test_params("GNA", "FP32", negative_deconv_weights_case),
+        ir_test_params("GNA", "FP32", negative_deconv_biases_case),
+
+        ir_test_params("GNA", "FP32", negative_pool_kernel_x_case),
+        ir_test_params("GNA", "FP32", negative_pool_kernel_y_case),
+        ir_test_params("GNA", "FP32", negative_pool_stride_x_case),
+        ir_test_params("GNA", "FP32", incorrect_pool_type_case),
+
+        ir_test_params("GNA", "FP32", negative_norm_local_size_case),
+        ir_test_params("GNA", "FP32", negative_norm_k_case)
+};
+
+INSTANTIATE_TEST_CASE_P(FunctionalTest_smoke, IncorrectIRTests,
+        ::testing::ValuesIn(ir_test_cases),
+        getTestName);
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/functional/gna/shared_tests_instance/lstm/lstm_cell_test.cpp b/inference-engine/tests_deprecated/functional/gna/shared_tests_instance/lstm/lstm_cell_test.cpp
new file mode 100644 (file)
index 0000000..2ff95f0
--- /dev/null
@@ -0,0 +1,38 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gna/gna_config.hpp>
+#include "lstm_cell_test.hpp"
+
+TEST_P(LSTMCellTestBase, GNA_sw_fp32_single_lstm_test) {
+    runSingleLSTMTest({{"GNA_DEVICE_MODE", "GNA_SW_FP32"}, {"GNA_COMPACT_MODE", "NO"}});
+}
+
+TEST_P(LSTMCellTestBase, GNA_I16_single_lstm_test) {
+    DISABLE_TEST_ON_GNA2
+    runSingleLSTMTest( {
+        {"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
+        {"GNA_COMPACT_MODE", "NO"},
+        {"GNA_PRECISION", "I16"},
+        {"GNA_SCALE_FACTOR_0", "1024"},
+        {"GNA_SCALE_FACTOR_1", "1024"},
+        {"GNA_SCALE_FACTOR_2", "1024"}
+    }, 0.099);
+}
+
+TEST_P(LSTMCellTestBase, GNA_I8_single_lstm_test) {
+    DISABLE_TEST_ON_GNA2
+    runSingleLSTMTest({
+        {"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
+        {"GNA_COMPACT_MODE", "NO"},
+        {"GNA_PRECISION", "I8"},
+        {"GNA_SCALE_FACTOR_0", "1024"},
+        {"GNA_SCALE_FACTOR_1", "1024"},
+        {"GNA_SCALE_FACTOR_2", "1024"}
+    }, 0.011);
+}
+
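+//  StateSize and DataSize are presumably defined in lstm_cell_test.hpp; the
+//  second entry exercises a small 16x16 cell.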
+static const lstm_cell_param gna_workload[] = {{1, StateSize, DataSize}, {1, 16, 16}};
+
+RUN_CASE_P_WITH_SUFFIX(GNA, _smoke, LSTMCellTestBase, gna_workload);
diff --git a/inference-engine/tests_deprecated/functional/gna/shared_tests_instance/single_layer_tests/ti_tests.cpp b/inference-engine/tests_deprecated/functional/gna/shared_tests_instance/single_layer_tests/ti_tests.cpp
new file mode 100644 (file)
index 0000000..eae4908
--- /dev/null
@@ -0,0 +1,56 @@
+// Copyright (C) 2018-2019 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "ti_tests.hpp"
+
+static const ti_test_params ti_test_cases[] = {{"GNA", 8, InferenceEngine::Precision(InferenceEngine::Precision::FP32)}};
+
+static const std::map<std::string, std::string>  config_fp32 =
+        {{"GNA_DEVICE_MODE", "GNA_SW_FP32"}, {"GNA_COMPACT_MODE", "NO"}};
+
+static const std::map<std::string, std::string>  config_I16 = {
+        {"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
+        {"GNA_COMPACT_MODE", "NO"},
+        {"GNA_PRECISION", "I16"},
+        {"GNA_SCALE_FACTOR_0", "1024"},
+        {"GNA_SCALE_FACTOR_1", "1024"},
+        {"GNA_SCALE_FACTOR_2", "1024"}
+};
+
+static const std::map<std::string, std::string>  config_I8 = {
+        {"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
+        {"GNA_COMPACT_MODE", "NO"},
+        {"GNA_PRECISION", "I16"},
+        {"GNA_SCALE_FACTOR_0", "1024"},
+        {"GNA_SCALE_FACTOR_1", "1024"},
+        {"GNA_SCALE_FACTOR_2", "1024"}
+};
+
+TEST_P(TITestBase, GNA_sw_fp32_ti_test) {
+    RunTITest(config_fp32);
+}
+
+TEST_P(TITestBase, GNA_I16_ti_test) {
+    RunTITest(config_I16);
+}
+
+TEST_P(TITestBase, GNA_I8_ti_test) {
+    RunTITest(config_I8);
+}
+
+RUN_CASE_P_WITH_SUFFIX(GNA, _smoke, TITestBase, ti_test_cases);
+
+TEST_P(TITest2Base, GNA_sw_fp32_ti_test) {
+    RunTITest(config_fp32);
+}
+
+TEST_P(TITest2Base, GNA_I16_ti_test) {
+    RunTITest(config_I16);
+}
+
+TEST_P(TITest2Base, GNA_I8_ti_test) {
+    RunTITest(config_I8);
+}
+
+RUN_CASE_P_WITH_SUFFIX(GNA, _smoke, TITest2Base, ti_test_cases);
diff --git a/inference-engine/tests_deprecated/functional/gna/test_model_repo.cpp b/inference-engine/tests_deprecated/functional/gna/test_model_repo.cpp
new file mode 100644 (file)
index 0000000..1cb3045
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "test_model_repo.hpp"
+
+std::string get_model_repo() {
+    return "models:";
+}
+
+const char* TestDataHelpers::getModelPathNonFatal() noexcept {
+    return TestDataHelpers::getModelPathNonFatalDefault();
+}
+
+std::string TestDataHelpers::get_data_path() {
+    return TestDataHelpers::get_data_path_default();
+}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/CMakeLists.txt b/inference-engine/tests_deprecated/functional/ie_tests/CMakeLists.txt
new file mode 100644 (file)
index 0000000..9c22373
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright (C) 2018-2020 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+set(TARGET_NAME ie_tests)
+
+file(GLOB TEST_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp)
+file(GLOB TEST_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp)
+
+# create target
+
+add_library(${TARGET_NAME} STATIC ${TEST_INCLUDE} ${TEST_SRC})
+
+list(APPEND EXPORT_DEPENDENCIES
+        funcTestUtils
+        ieTestHelpers
+        )
+
+target_include_directories(${TARGET_NAME}
+    PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include"
+    PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/src"
+            $<TARGET_PROPERTY:inference_engine_plugin_api,INTERFACE_INCLUDE_DIRECTORIES>)
+
+target_link_libraries(${TARGET_NAME} PUBLIC
+        format_reader
+        ${EXPORT_DEPENDENCIES}
+        )
+
+# developer package
+
+ie_developer_export_targets(${TARGET_NAME} ${EXPORT_DEPENDENCIES})
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/base_matcher.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/base_matcher.hpp
new file mode 100644 (file)
index 0000000..63be7ef
--- /dev/null
@@ -0,0 +1,30 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "regression_config.hpp"
+#include <tests_common.hpp>
+
+namespace Regression { namespace Matchers {
+
+using namespace InferenceEngine;
+
+class BaseMatcher {
+protected:
+    RegressionConfig config;
+public:
+    explicit BaseMatcher(const RegressionConfig &config) : config(config) {
+#ifndef NDEBUG
+        std::cout << "Matching on " << config._device_name << std::endl;
+#endif
+    }
+
+    void checkImgNumber(int dynBatch = -1);
+};
+
+void loadImage(const std::string &imageFilename, InferenceEngine::Blob::Ptr &blob, bool bgr = true, int batchNumber = 1);
+
+}  // namespace Matchers
+}  // namespace Regression
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/classification_matcher.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/classification_matcher.hpp
new file mode 100644 (file)
index 0000000..bc3479b
--- /dev/null
@@ -0,0 +1,44 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <inference_engine.hpp>
+#include "base_matcher.hpp"
+#include "regression_reference.hpp"
+#include <precision_utils.h>
+#include <ie_iexecutable_network.hpp>
+#include "label_probability.hpp"
+
+namespace Regression { namespace Matchers {
+
+// One more version of the classification matcher, targeting the new sync/async request API.
+class ClassificationMatcher : public BaseMatcher {
+private:
+    size_t checkResultNumber;
+    std::vector<std::shared_ptr<InferenceEngine::IExecutableNetwork>> _executableNetworks;
+    std::vector <std::vector<Reference::LabelProbability>> _results;
+    ResponseDesc _resp;
+    InferenceEngine::InputsDataMap _inputsInfo;
+    InferenceEngine::OutputsDataMap _outputsInfo;
+ public:
+    explicit ClassificationMatcher(RegressionConfig &config);
+    void to(std::string modelType);
+    void to(const std::vector <Regression::Reference::ClassificationScoringResultsForTests> &expected);
+
+
+ private:
+    void readLabels(std::string labelFilePath);
+    int getIndexByLabel(const std::string &label);
+    std::string getLabel(unsigned int index);
+    void checkResult(size_t checkNumber,
+                     const std::vector <Regression::Reference::ClassificationScoringResultsForTests> &expected);
+    virtual void match(size_t top);
+    void match_n(size_t top, int index);
+    void saveResults(const std::vector<unsigned> &topIndexes, const std::vector<float> &probs, size_t top);
+
+    size_t top = 5;
+};
+
+} }  // namespace Regression::Matchers
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/custom_matcher.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/custom_matcher.hpp
new file mode 100644 (file)
index 0000000..0218d0f
--- /dev/null
@@ -0,0 +1,80 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "base_matcher.hpp"
+#include <cmath>
+#include <cpp/ie_cnn_network.h>
+
+IE_SUPPRESS_DEPRECATED_START
+namespace Regression { namespace Matchers {
+
+using namespace InferenceEngine;
+
+class CustomMatcher : public BaseMatcher {
+ protected:
+    InferenceEngine::CNNNetwork network;
+    InferenceContext ctx;
+    bool match_in_dctor = false;
+    int precision;
+
+ public:
+
+    explicit CustomMatcher(const RegressionConfig &config, bool match_in_dctor = false)
+            : BaseMatcher(config),
+              match_in_dctor(match_in_dctor),
+              precision(4) {
+        if (!match_in_dctor) {
+            matchCustom();
+            checkResult();
+        }
+    }
+    ~CustomMatcher() {
+        if (match_in_dctor) {
+            matchCustom();
+            checkResult();
+        }
+    }
+
+    CustomMatcher& withAvgDelta(float value) {
+        BaseMatcher::config.nearAvgValue = value;
+        return *this;
+    }
+
+    CustomMatcher& withDelta(float value) {
+        BaseMatcher::config.nearValue = value;
+        return *this;
+    }
+
+    CustomMatcher& setPrecision(int precision) {
+        this->precision = precision;
+        return *this;
+    }
+
+    void matchCustom();
+
+    template<typename TReal>
+    inline bool isApproximatelyEqual(TReal a, TReal b, TReal tolerance = std::numeric_limits<TReal>::epsilon())
+    {
+        TReal diff = std::fabs(a - b);
+        if (diff <= tolerance)
+            return true;
+
+        if (diff < std::fmax(std::fabs(a), std::fabs(b)) * tolerance)
+            return true;
+
+        return false;
+    }
+
+    void checkResult();
+
+ protected:
+    InferenceEngine::ExecutableNetwork createExecutableNetworkFromIR();
+    InferenceEngine::ExecutableNetwork createExecutableNetworkFromAOT();
+};
+
+}  // namespace Matchers
+}  // namespace Regression
+IE_SUPPRESS_DEPRECATED_END
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/ie_core_adapter.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/ie_core_adapter.hpp
new file mode 100644 (file)
index 0000000..6dcba5b
--- /dev/null
@@ -0,0 +1,40 @@
+// Copyright (C) 2018-2019 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <ie_core.hpp>
+#include <ie_common.h>
+
+class IECoreAdapter {
+public:
+    using Ptr = std::shared_ptr<IECoreAdapter>;
+
+    IECoreAdapter(
+        std::shared_ptr<InferenceEngine::Core> ieCore, std::string deviceName);
+
+    // -----------------------------------------
+    // IInferencePlugin API (deprecated). Begin.
+    // - InferenceEngine::ICNNNetwork is replaced by InferenceEngine::CNNNetwork
+    // -----------------------------------------
+
+    InferenceEngine::StatusCode LoadNetwork(
+        InferenceEngine::IExecutableNetwork::Ptr& ret, InferenceEngine::CNNNetwork network,
+        const std::map<std::string, std::string>& config, InferenceEngine::ResponseDesc* resp) noexcept;
+
+    InferenceEngine::StatusCode ImportNetwork(
+        InferenceEngine::IExecutableNetwork::Ptr& ret, const std::string& modelFileName,
+        const std::map<std::string, std::string>& config, InferenceEngine::ResponseDesc* resp) noexcept;
+
+    // -----------------------------------------
+    // IInferencePlugin API (deprecated). End.
+    // -----------------------------------------
+
+    InferenceEngine::ExecutableNetwork ImportNetwork(std::istream& networkModel,
+        const std::map<std::string, std::string>& config = {});
+
+private:
+    std::shared_ptr<InferenceEngine::Core> m_ieCore;
+    std::string m_deviceName;
+};
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/label_probability.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/label_probability.hpp
new file mode 100644 (file)
index 0000000..bc5407e
--- /dev/null
@@ -0,0 +1,70 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <string>
+
+namespace Regression {
+namespace Reference {
+
+/**
+ * @class LabelProbability
+ * @brief A LabelProbability represents predicted data in easy to use format
+ */
+class LabelProbability {
+private:
+    /**
+     * @brief Index of current label
+     */
+    int labelIdx = 0;
+    /**
+     * @brief Name of class from file with labels
+     */
+    std::string className;
+    /**
+     * @brief The probability of prediction
+     */
+    float probability = 0.0f;
+
+public:
+    /**
+     * @brief A constructor of InferenceResults class
+     * @param labelIdx - index of current label
+     * @param probability - the probability of prediction
+     * @param className - name of class from file with labels
+     * @return InferenceResults object
+     */
+    LabelProbability(int labelIdx, float probability, std::string className) : labelIdx(labelIdx),
+                                                                               className(className),
+                                                                               probability(probability) {}
+
+    /**
+     * @brief Gets label index
+     * @return index of current label
+     */
+    const int &getLabelIndex() const {
+        return labelIdx;
+    }
+
+    /**
+     * @brief Gets label name
+     * @return label
+     */
+    const std::string &getLabel() const {
+        return className;
+    }
+
+    /**
+     * @brief Gets probability
+     * @return probability
+     */
+    const float &getProbability() const {
+        return probability;
+    }
+};
+
+}  // namespace Reference
+}  // namespace Regression
+
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/net_model.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/net_model.hpp
new file mode 100644 (file)
index 0000000..1451920
--- /dev/null
@@ -0,0 +1,39 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <string>
+#include <unordered_map>
+
+//------------------------------------------------------------------------------
+// class Model
+//------------------------------------------------------------------------------
+
+class Model {
+public:
+    //Constructors
+    Model() = default;
+    explicit Model(const char *that) {
+        fileName_ = folderName_ = that;
+    }
+
+    Model(const std::string &folderName,
+            const std::string &fileName,
+            const std::string &resolution,
+            const std::string & extension = "xml");
+
+    // Accessors
+    inline std::string folderName() const { return folderName_; }
+    inline std::string fileName() const { return fileName_; }
+    inline std::string resolution() const { return resolutionName_; }
+    inline std::string extension() const { return extensionName_; }
+
+private:
+    std::string folderName_;
+    std::string fileName_;
+    std::string resolutionName_;
+    std::string extensionName_;
+};
+
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/object_detection_matcher.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/object_detection_matcher.hpp
new file mode 100644 (file)
index 0000000..56a2967
--- /dev/null
@@ -0,0 +1,127 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <limits>
+#include <gtest/gtest.h>
+#include "base_matcher.hpp"
+#include <math.h>
+#include <ie_icnn_network.hpp>
+
+namespace Regression {
+namespace Matchers {
+//------------------------------------------------------------------------------
+// class ObjectDetectionMatcher
+//------------------------------------------------------------------------------
+
+class ObjectDetectionMatcher : public BaseMatcher {
+
+public:
+    //Helpers
+    struct DetectedObject;
+    class ImageDescription;
+    class NetworkAdapter;
+
+    using ImageDescriptionPtrVect = std::vector<std::shared_ptr<ImageDescription>>;
+    using ScoreFunction = std::function<ImageDescriptionPtrVect(InferenceEngine::CNNNetwork&)>;
+
+    //Constructor
+    ObjectDetectionMatcher(const RegressionConfig &config);
+
+    //Operations
+    virtual void match(const ScoreFunction&);
+    void checkResult(const std::vector<ImageDescription>& desired);
+
+    void to(const ImageDescription &desired, const std::shared_ptr<NetworkAdapter>& adapter);
+    void to(const std::vector<ImageDescription>& desired, const std::shared_ptr<NetworkAdapter>& adapter);
+
+    void to(const ImageDescription &desired, const NetworkAdapter& adapter);
+    void to(const std::vector<ImageDescription>& desired, const NetworkAdapter& adapter);
+
+private:
+    //Operations
+    void to(const std::vector<ImageDescription>& desired, const ScoreFunction&);
+    //Data section
+    ImageDescriptionPtrVect res_desc_;
+};
+
+using DetectedObject = ObjectDetectionMatcher::DetectedObject;
+using ImageDescription = ObjectDetectionMatcher::ImageDescription;
+using NetworkAdapter = ObjectDetectionMatcher::NetworkAdapter;
+
+//------------------------------------------------------------------------------
+// class DetectedObject
+//------------------------------------------------------------------------------
+
+struct ObjectDetectionMatcher::DetectedObject {
+    //Data section
+    int objectType;
+    float xmin, xmax, ymin, ymax, prob;
+
+    //Constructors
+    DetectedObject(int objectType, float xmin, float ymin, float xmax, float ymax, float prob, int = -1);
+    DetectedObject(const DetectedObject& other);
+
+    static float ioU(const DetectedObject& detected_object_1_, const DetectedObject& detected_object_2_);
+
+    //Operations
+    void printObj();
+};
+
+//------------------------------------------------------------------------------
+// class ImageDescription
+//------------------------------------------------------------------------------
+
+class ObjectDetectionMatcher::ImageDescription {
+public:
+    // Constructors
+    ImageDescription(bool check_probs = false);
+    ImageDescription(const std::list<DetectedObject> &alist, bool check_probs = false);
+    ImageDescription(const ImageDescription& obj);
+
+    //Operations
+    static float ioUMultiple(const ImageDescription &detected_objects, const ImageDescription &desired_objects);
+    void addDetectedObject(const DetectedObject& detected_obj);
+
+    // Accessors
+    inline bool checkProbs() const;
+public:
+    //Data section
+    std::list<DetectedObject> alist;
+
+private:
+    //Data section
+    bool check_probs_;
+};
+
+//------------------------------------------------------------------------------
+// class NetworkAdapter
+//------------------------------------------------------------------------------
+
+class ObjectDetectionMatcher::NetworkAdapter {
+public:
+    //Operations
+    virtual std::vector<std::shared_ptr<ImageDescription>> score(InferenceEngine::CNNNetwork network,
+            std::shared_ptr<InferenceEngine::Core> ie,
+            const std::string& deviceName,
+            const std::map<std::string, std::string>& config,
+            const std::vector<std::string>& images_files_names,
+            bool with_reshape = false,
+            bool useExportImport = false) const = 0;
+
+    //Destructor
+    virtual ~NetworkAdapter() = default;
+};
+
+//------------------------------------------------------------------------------
+// Implementation of methods of class ImageDescription
+//------------------------------------------------------------------------------
+
+inline bool ImageDescription::checkProbs() const {
+    return check_probs_;
+}
+
+}  // namespace Matchers
+}  // namespace Regression
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/optimized_network_matcher.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/optimized_network_matcher.hpp
new file mode 100644 (file)
index 0000000..8604ef8
--- /dev/null
@@ -0,0 +1,58 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "custom_matcher.hpp"
+
+namespace Regression { namespace Matchers {
+
+using namespace InferenceEngine;
+
+class OptimizedNetworkMatcher : public CustomMatcher {
+ protected:
+    std::string path_to_reference_dump;
+    std::vector<uint8_t> firmware;
+    InferenceEngine::ExecutableNetwork executableApi;
+ public:
+
+    explicit OptimizedNetworkMatcher(const RegressionConfig &config)
+        : CustomMatcher(config, true) {
+    }
+    ~OptimizedNetworkMatcher() {
+        if (match_in_dctor) {
+            matchCustom();
+            checkResult();
+            // prevent the base matcher from matching a second time
+            match_in_dctor = false;
+        }
+    }
+
+    void matchCustom();
+
+    void to(std::string path_to_reference_dump);
+    std::vector<uint8_t> readDumpFromFile(std::string path);
+    void checkResult();
+};
+
+class OptimizedNetworkDumper : public OptimizedNetworkMatcher {
+ public:
+    using OptimizedNetworkMatcher::OptimizedNetworkMatcher;
+
+    ~OptimizedNetworkDumper() {
+        if (match_in_dctor) {
+            dump();
+            // prevent the base matcher from matching a second time
+            match_in_dctor = false;
+        }
+    }
+
+    void match() {}
+
+    void dump();
+
+};
+
+}  // namespace Matchers
+}  // namespace Regression
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/raw_matcher.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/raw_matcher.hpp
new file mode 100644 (file)
index 0000000..6d8b88e
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <gtest/gtest.h>
+#include <string>
+#include <map>
+#include "base_matcher.hpp"
+#include <ie_blob.h>
+
+namespace Regression {
+namespace Matchers {
+
+class RawMatcher : public BaseMatcher {
+    InferenceEngine::BlobMap outputBlobs;
+public:
+    RawMatcher(const RegressionConfig &config)
+            : BaseMatcher(config) {
+    }
+
+    virtual void match();
+
+    void checkResult(const std::map<std::string, std::map<size_t, float>> &allExpected);
+
+    void to(const std::map<std::string, std::map<size_t, float>> &allExpected) {
+        ASSERT_NO_FATAL_FAILURE(match());
+        ASSERT_NO_FATAL_FAILURE(checkResult(allExpected));
+    }
+
+};
+
+}  // namespace Matchers
+}  // namespace Regression
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/regression_config.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/regression_config.hpp
new file mode 100644 (file)
index 0000000..17b0001
--- /dev/null
@@ -0,0 +1,187 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <ie_blob.h>
+#include <ie_precision.hpp>
+#include <ie_core.hpp>
+
+namespace Regression {
+using namespace std;
+
+class InferenceContext {
+    InferenceEngine::BlobMap _inputs;
+    std::vector<InferenceEngine::BlobMap> _outputs;
+    std::vector<std::string> _fileNames;
+    std::string _modelPath;
+    InferenceEngine::Precision _prec;
+    int _frameNumber = 0;
+    int _inputIndex = 0;
+ public:
+    std::string modelFilePath() const {
+        return _modelPath;
+    }
+    std::vector<std::string> fileNames() const {
+        return _fileNames;
+    }
+
+    void setModelPath(const std::string &path) {
+        _modelPath = path;
+    }
+
+    void setModelPrecision(InferenceEngine::Precision prec) {
+        _prec = prec;
+    }
+
+    InferenceEngine::Precision getModelPrecision() const {
+        return _prec;
+    }
+
+    void setFileNames(const std::vector<std::string> &fileNames) {
+        _fileNames = fileNames;
+    }
+
+    void setInput(std::string name, InferenceEngine::Blob::Ptr input) {
+        _inputs[name] = input;
+    }
+
+    void setOutput(std::string name, InferenceEngine::Blob::Ptr output) {
+        outputs()[name] = output;
+    }
+
+    InferenceEngine::Blob::Ptr getOutput(std::string name) {
+        return outputs()[name];
+    }
+
+    const InferenceEngine::BlobMap& inputs() const {
+        return _inputs;
+    }
+
+    const InferenceEngine::BlobMap& outputs() const {
+        return const_cast<InferenceContext*>(this)->outputs();
+    }
+
+    std::vector<InferenceEngine::BlobMap>& allOutputs() {
+        return _outputs;
+    }
+
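+    //  Lazily creates the first output map so that results can be written
+    //  before any output has been registered.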
+    InferenceEngine::BlobMap& outputs() {
+        if (_outputs.empty()) {
+            _outputs.push_back(InferenceEngine::BlobMap());
+        }
+        return _outputs.front();
+    }
+
+    InferenceEngine::BlobMap& newOutputs() {
+        _outputs.push_back(InferenceEngine::BlobMap());
+        return _outputs.back();
+    }
+
+    void setFrameNumber(int num) {
+        _frameNumber = num;
+    }
+
+    int getFrameNumber() const {
+        return _frameNumber;
+    }
+
+    void setInputIdx(int num) {
+        _inputIndex = num;
+    }
+
+    size_t getInputIdx() const {
+        return _inputIndex;
+    }
+
+    std::string currentInputFile() const {
+        if (fileNames().empty()) {
+            return "";
+        }
+        return fileNames()[std::min(getInputIdx(), fileNames().size()-1)];
+    }
+
+    const InferenceEngine::Blob::Ptr currentInputs() const {
+        auto input = _inputs.begin();
+        std::advance(input, getInputIdx());
+        return input->second;
+    }
+
+};
+
+struct RegressionConfig {
+    struct InputFetcherResult {
+        bool reset = false;
+        bool fetchMore = false;
+        bool fetched = true;
+        bool hasResult = true;
+        int frameNumber = 0;
+        InputFetcherResult() = default;
+        InputFetcherResult(bool reset, bool fetchMore = false, bool fetched = true, int frameNumber = 0, bool hasResult = true)
+                : reset(reset), fetchMore(fetchMore), fetched(fetched), hasResult(hasResult), frameNumber(frameNumber) {}
+    };
+    using input_fetcher = std::function<InputFetcherResult (const InferenceContext & )>;
+    using model_maker = std::function<void(const InferenceContext & )>;
+    using result_fetcher = std::function<InferenceEngine::Blob::Ptr(const InferenceContext & )>;
+
+    std::vector<input_fetcher> fetch_input;
+    result_fetcher fetch_result;
+    model_maker make_model;
+    string _path_to_models;
+    string _path_to_aot_model;
+    vector<string> _paths_to_images;
+    string _device_name;
+    string _firmware;
+    string _tmp_firmware;
+    string _stat_file;
+    vector<string> labels;
+    double nearValue = 0.0;
+    double nearAvgValue = 0.0;
+    double maxRelativeError = 0.0;
+    double meanRelativeError = 0.0;
+    bool batchMode = false;
+    bool compactMode = true;
+    bool int8Mode = false;
+    bool isAsync = false;
+    int batchSize = 1;
+    //number of async infer requests to create
+    int _nrequests = 1;
+    int topKNumbers = -1;
+    int _numNetworks = 1;
+
+    bool useDynamicBatching = false;
+    int dynBatch = -1;
+    bool print = false;
+    bool useExportImport = false;
+    std::size_t printNum = 0;
+
+    vector<float> referenceOutput;
+    vector<uint8_t> referenceBin;
+
+    InferenceEngine::Blob::Ptr outputBlob;
+    std::string outputLayer;
+    InferenceEngine::Precision _inputPrecision;
+    InferenceEngine::Precision modelPrecision;
+    InferenceEngine::Precision _outputPrecision = InferenceEngine::Precision::UNSPECIFIED;
+    std::map<std::string, InferenceEngine::Precision> _outputBlobPrecision;
+    std::map<std::string, InferenceEngine::InferenceEngineProfileInfo>* perfInfoPtr = nullptr;
+    std::map<std::string, std::string> plugin_config;
+    std::map<std::string, std::string> deviceMapping;
+
+    std::shared_ptr<InferenceEngine::Core> ie_core;
+
+    bool _reshape = false;
+};
+
+enum InputFormat {
+    RGB = 0,
+    BGR = 1
+};
+
+}  // namespace Regression
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/regression_reference.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/regression_reference.hpp
new file mode 100644 (file)
index 0000000..cde008d
--- /dev/null
@@ -0,0 +1,24 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <map>
+#include <vector>
+#include <string>
+#include "label_probability.hpp"
+
+namespace Regression {
+namespace Reference {
+
+struct ClassificationScoringResultsForTests : public LabelProbability{
+    ClassificationScoringResultsForTests(float prob, const std::string & label)
+            : LabelProbability(0, prob, label ){
+    }
+};
+
+extern std::map<std::string, std::vector<ClassificationScoringResultsForTests>> values;
+
+}  // namespace Reference
+}  // namespace Regression
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/regression_tests.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/regression_tests.hpp
new file mode 100644 (file)
index 0000000..fb6e5a9
--- /dev/null
@@ -0,0 +1,695 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <gtest/gtest.h>
+#include <tests_common.hpp>
+#include <tests_file_utils.hpp>
+#include <fstream>
+#include <string>
+#include <vector>
+#include <list>
+#include <algorithm>
+#include <iterator>
+
+#include <streambuf>
+
+#include <format_reader_ptr.h>
+
+#include "regression_reference.hpp"
+#include "regression_config.hpp"
+
+#include "net_model.hpp"
+#include "segmentation_matcher.hpp"
+#include "custom_matcher.hpp"
+#include "raw_matcher.hpp"
+#include "classification_matcher.hpp"
+#include "object_detection_matcher.hpp"
+#include "optimized_network_matcher.hpp"
+
+#include "functional_test_utils/plugin_cache.hpp"
+
+#ifdef near
+#undef near
+#endif
+
+
+namespace Regression {
+using namespace Matchers;
+
+/**
+ * @brief Whether to reset the plugin after feeding this input; default is false.
+ */
+
+#define afterReset(ACTOR) setCustomInput([&](const Regression::InferenceContext & _) -> \
+    Regression::RegressionConfig::InputFetcherResult{return {true, false, false};})
+
+#define withCustomInput(ACTOR) setCustomInput([&](const Regression::InferenceContext & _) -> \
+    Regression::RegressionConfig::InputFetcherResult{ACTOR; return {};})
+
+#define withCustomOutput(ACTOR) setCustomOutput([&](const Regression::InferenceContext & _){ACTOR;})
+#define withCustomModel(ACTOR) setCustomModel([&](const Regression::InferenceContext & _){ACTOR;})
+
+
+enum EMean {
+    eNo,
+    eValues,
+    eImage
+};
+
+static std::string format_mean(EMean isMean) {
+    switch (isMean) {
+        case eNo:return "_no_mean";
+
+        case eImage:return "_mf";
+
+        case eValues:return "";
+    }
+    return "";
+}
+
+inline std::ostream &operator<<(std::ostream &os, EMean mean) {
+    return os << format_mean(mean);
+}
+
+template<typename M>
+class ModelSelector {
+
+    template<typename T>
+    friend class ModelSelector;  // every ModelSelector<T> instantiation is a friend
+
+
+    enum EPrecision {
+        eq78, efp32, efp16, ei16, ei8
+    };
+
+    enum EGroup {
+        eNoGroup, eGroup
+    };
+
+
+    static std::string format_precision(EPrecision precision) {
+        switch (precision) {
+            case efp32:return "fp32";
+
+            case eq78:return "q78";
+
+            case efp16:return "fp16";
+
+            case ei16:return "i16";
+
+            case ei8: return "i8";
+        }
+        return "";
+    }
+
+    static std::string format_group(EGroup isGroup) {
+        switch (isGroup) {
+            case eNoGroup:return "";
+
+            case eGroup:return "_group";
+        }
+        return "";
+    }
+
+    friend std::ostream &operator<<(std::ostream &os, EPrecision precision) {
+        return os << format_precision(precision);
+    }
+
+    friend std::ostream &operator<<(std::ostream &os, EGroup group) {
+        return os << format_group(group);
+    }
+
+
+    Model model, statFile;
+    RegressionConfig config;
+    EMean isMean = eValues;
+    EPrecision precision = eq78;
+    EGroup isGroup = eNoGroup;
+
+ private:
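+    //  Prefixes the configured input images with the test data path and
+    //  builds the model file path from the folder/file/precision/mean/group
+    //  naming conventions.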
+    std::string prepareModelMatching() {
+        std::stringstream path_to_input;
+        path_to_input << TestDataHelpers::get_data_path();
+        path_to_input << kPathSeparator
+                      << model.resolution() << kPathSeparator;
+        for (auto & fileName : config._paths_to_images) {
+            fileName = path_to_input.str() + fileName;
+        }
+
+        if (model.folderName().empty() || model.fileName().empty()) {
+            return "";
+        }
+        ModelsPath path_to_model;
+        path_to_model << kPathSeparator
+                      << model.folderName() << kPathSeparator
+                      << model.fileName() << "_" << precision << isMean << isGroup << "." << model.extension();
+
+        return path_to_model.str();
+    }
+
+    std::string prepareStatMatching() {
+        if (statFile.fileName() == "") return "";
+        ModelsPath path_to_stat;
+        path_to_stat << kPathSeparator
+                      << statFile.folderName() << kPathSeparator
+                      << statFile.fileName();
+
+        return path_to_stat.str();
+    }
+
+    ModelSelector() = default;
+
+    std::string getReferenceResultsLabel() {
+        std::stringstream ss;
+        for (auto&& v: config.ie_core->GetVersions(config._device_name)) {
+            const InferenceEngine::Version& version = v.second;
+            if (nullptr != version.description) {
+                ss << version.description;
+                break;
+            }
+        }
+        std::string pluginName = ss.str();
+        if (pluginName.empty())
+            std::cerr << "getReferenceResultsLabel() failed for device: \"" << config._device_name << "\"" << std::endl;
+
+        return pluginName + "_" + model.folderName() + format_mean(isMean)
+                 + "_" + format_precision(precision) + format_group(isGroup);
+    }
+
+    bool loadBlobFile(const char* fname, std::vector<char>& outData)
+    {
+        if (!fname)
+            return false;
+        FILE *f = fopen(fname, "rb");
+        if (!f) {
+            return false;
+        }
+        fseek(f, 0, SEEK_END);
+        long fsize = ftell(f);
+        fseek(f, 0, SEEK_SET);
+        outData.resize(fsize);
+        size_t bytesRead = fread(outData.data(), 1, fsize, f);
+        fclose(f);  // close the file in both the success and the failure path
+        if (bytesRead != static_cast<size_t>(fsize)) {
+            std::cout << "cannot read file " << fname << std::endl;
+            return false;
+        }
+
+        return true;
+    }
+ public:
+
+    explicit ModelSelector(const RegressionConfig& config) : config(config) {}
+
+    template <class T>
+    explicit ModelSelector(T *oldSelector) {
+        config = oldSelector->config;
+    }
+
+    ModelSelector &And(const std::string &fileName) {
+        config._paths_to_images.push_back(fileName);
+        return *this;
+    }
+
+    ModelSelector &And(const std::vector<std::string> &filesNamesVector) {
+        config._paths_to_images.insert(config._paths_to_images.end(), filesNamesVector.begin(), filesNamesVector.end());
+        return *this;
+    }
+
+    ModelSelector &on(const std::string &fileName) {
+        config._paths_to_images.push_back(fileName);
+        return *this;
+    }
+
+    ModelSelector &print(const std::size_t printNum = 10) {
+        config.print = true;
+        config.printNum = printNum;
+        return *this;
+    }
+
+    ModelSelector &useExportImport() {
+        config.useExportImport = true;
+        return *this;
+    }
+
+    /// @brief Tile the last batch.
+    ModelSelector &onN_infers(int nTimesCopyInputImages) {
+        if (config._paths_to_images.size() != config.batchSize) {
+            THROW_IE_EXCEPTION << "number of input images:"
+                               << config._paths_to_images.size() << " not equal to batch size: " << config.batchSize;
+        }
+        auto first_image =  config._paths_to_images.end();
+        std::advance(first_image, -config.batchSize);
+
+        std::vector<std::string> data_for_last_infer(first_image, config._paths_to_images.end());
+
+        for (;nTimesCopyInputImages > 0; nTimesCopyInputImages--) {
+            config._paths_to_images.insert(config._paths_to_images.end(), data_for_last_infer.begin(), data_for_last_infer.end());
+        }
+        return *this;
+    }
+    /**
+     * @brief Tile the last input image.
+     * @param nTimesCopyLastImagePlusOne number of times the last image will be tiled, plus one
+     * @deprecated
+     */
+    ModelSelector &times(int nTimesCopyLastImagePlusOne) {
+        tile(nTimesCopyLastImagePlusOne - 1);
+        return *this;
+    }
+    /**
+     * @brief Tile the last input image.
+     * @param nTimesCopyLastImage number of times the last image will be tiled
+     * @deprecated
+     */
+    ModelSelector &tile(int nTimesCopyLastImage) {
+        if (config._paths_to_images.empty()) {
+            return *this;
+        }
+        auto lastImage = config._paths_to_images.back();
+        for (;nTimesCopyLastImage > 0; nTimesCopyLastImage--) {
+            config._paths_to_images.push_back(lastImage);
+        }
+        return *this;
+    }
+
+    ModelSelector &onModel(
+        std::string _folderName,
+        std::string _fileName,
+        std::string _resolutionName) {
+        model = {_folderName, _fileName, _resolutionName};
+        return *this;
+    }
+
+    ModelSelector &onArkInput() {
+        model = {model.folderName(), model.fileName(), "ark"};
+        return *this;
+    }
+
+    ModelSelector &onFP32() {
+        precision = efp32;
+        config.modelPrecision = Precision::FP32;
+        return *this;
+    }
+
+    ModelSelector &onI16() {
+        precision = ei16;
+        config.modelPrecision = Precision::I16;
+        return *this;
+    }
+
+    ModelSelector &onFP16() {
+        precision = efp16;
+        config.modelPrecision = Precision::FP16;
+        return *this;
+    }
+
+    ModelSelector &onQ78() {
+        precision = eq78;
+        config.modelPrecision = Precision::Q78;
+        return *this;
+    }
+
+    ModelSelector& onI8() {
+        precision = ei8;
+        config.modelPrecision = Precision::I8;
+        return *this;
+    }
+
+    ModelSelector &withInputPrecision(InferenceEngine::Precision p) {
+        config._inputPrecision = p;
+        return *this;
+    }
+
+    ModelSelector &withOutputPrecision(InferenceEngine::Precision p) {
+        config._outputPrecision = p;
+        return *this;
+    }
+
+    ModelSelector &withOutputPrecision(std::map<std::string, InferenceEngine::Precision> p) {
+        static_assert(std::is_same<M, RawMatcher>::value, "Output precision per blob implemented only in RawMatcher");
+        config._outputBlobPrecision = p;
+        return *this;
+    }
+
+    template <class Q = M>
+    typename enable_if<std::is_base_of<OptimizedNetworkDumper, Q>::value, bool>::type
+    needInput() const {
+        return false;
+    }
+
+    template <class Q = M>
+    typename enable_if<!std::is_base_of<OptimizedNetworkDumper, Q>::value, bool>::type
+    needInput() const {
+        return true;
+    }
+
+    ModelSelector &withBatch() {
+        config.batchMode = true;
+        return *this;
+    }
+
+    ModelSelector &withBatch(int nBatchSize) {
+        config.batchSize = nBatchSize;
+        // assumes that inputs have already been provided to the matcher
+        if (config._paths_to_images.empty() && needInput()) {
+            THROW_IE_EXCEPTION << "withBatch token should follow after setting up inputs";
+        }
+        if (config._paths_to_images.size() < nBatchSize) {
+            tile(nBatchSize - config._paths_to_images.size());
+        }
+
+        return *this;
+    }
+
+    ModelSelector &withDynBatch(int nLimit, int nBatchSize) {
+        config.batchMode = true;
+        config.useDynamicBatching = true;
+        config.batchSize = nLimit;
+        config.dynBatch = nBatchSize;
+        return *this;
+    }
+
+    ModelSelector &withAsyncInferRequests(int nRequests) {
+        config._nrequests = nRequests;
+        return *this;
+    }
+
+    ModelSelector &onMultipleNetworks(int nNetworks) {
+        config._numNetworks = nNetworks;
+        return *this;
+    }
+
+    ModelSelector &setMean(EMean mean) {
+        isMean = mean;
+        return *this;
+    }
+
+    ModelSelector &withoutMean() {
+        isMean = eNo;
+        return *this;
+    }
+
+    ModelSelector &withMeanValues() {
+        isMean = eValues;
+        return *this;
+    }
+
+    ModelSelector &withMeanImage() {
+        isMean = eImage;
+        return *this;
+    }
+
+    ModelSelector &withGroup() {
+        isGroup = eGroup;
+        return *this;
+    }
+
+    ModelSelector &withTopK(int topKNumbers) {
+        config.topKNumbers = topKNumbers;
+        return *this;
+    }
+
+    ModelSelector &withPluginConfig(const std::map<std::string, std::string> & plugin_config) {
+        config.plugin_config = plugin_config;
+        return *this;
+    }
+
+    ModelSelector &addPluginConfig(const std::map<std::string, std::string> & plugin_config) {
+        config.plugin_config.insert(plugin_config.begin(), plugin_config.end());
+        return *this;
+    }
+
+    ModelSelector &withPluginConfigOption(std::string key, std::string value) {
+        config.plugin_config[key] = value;
+        return *this;
+    }
+
+    ModelSelector & withImportedExecutableNetworkFrom(std::string location) {
+        config._path_to_aot_model = location;
+        return *this;
+    }
+
+    template <class T>
+    ModelSelector &modifyConfig(const T & modifier) {
+        modifier(config);
+        return *this;
+    }
+
+    ModelSelector & usingAsync() {
+        config.isAsync = true;
+        return *this;
+    }
+
+    ModelSelector &fromLayer(const std::string & name) {
+        config.outputLayer = name;
+        return *this;
+    }
+
+    ModelSelector& doReshape(bool reshape = true) {
+        config._reshape = reshape;
+        return *this;
+    }
+
+    // type alias: decides whether a builder method keeps ModelSelector<M> or converts it to ModelSelector<CustomMatcher>
+#define CUSTOM_TYPE\
+    typename std::conditional<std::is_base_of<CustomMatcher, M>::value,\
+    ModelSelector<M>&,\
+    ModelSelector<CustomMatcher>>::type
+
+ private:
+    template <class A, class Q = M>
+    typename enable_if<std::is_base_of<CustomMatcher, Q>::value, CUSTOM_TYPE>::type modify_config(const A& action) {
+        action(config);
+        return *this;
+    }
+
+    template <class A, class Q = M>
+    typename enable_if<!std::is_base_of<CustomMatcher, Q>::value, CUSTOM_TYPE>::type modify_config(const A& action) {
+        ModelSelector<CustomMatcher> newSelector(this);
+        action(newSelector.config);
+        return newSelector;
+    }
+
+ public:
+
+    template <class T>
+    CUSTOM_TYPE  setCustomModel(const T& model_maker) {
+        return modify_config([&](RegressionConfig & this_config) {
+            this_config.make_model = model_maker;
+        });
+    }
+
+    template <class T>
+    CUSTOM_TYPE setCustomInput(const T & fetcher) {
+        return modify_config([&](RegressionConfig & this_config) {
+            this_config.fetch_input.push_back(fetcher);
+        });
+    }
+
+    template <class T>
+    CUSTOM_TYPE setCustomOutput(const T & fetcher) {
+        return modify_config([&](RegressionConfig & this_config) {
+            this_config.fetch_result = fetcher;
+        });
+    }
+
+    template <class T >
+    M equalsTo(const std::initializer_list<T> & rhs) {
+        config.referenceOutput.insert(config.referenceOutput.end(), rhs.begin(), rhs.end());
+        return near(0.0);
+    }
+
+    template <class T >
+    M near(double nearValue, const TBlob<T> & rhs) {
+        config.nearValue = nearValue;
+        for (const auto & v : rhs) {
+            config.referenceOutput.push_back(v);
+        }
+        config._path_to_models = prepareModelMatching();
+        config._stat_file = prepareStatMatching();
+        return M(config);
+    }
+
+    M to(Blob::Ptr rhs) {
+        config.outputBlob = rhs;
+        config._path_to_models = prepareModelMatching();
+        config._stat_file = prepareStatMatching();
+        return M(config);
+    }
+
+
+    template <class T >
+    M near(double nearValue, const initializer_list<TBlob<T>> & rhs) {
+        config.nearValue = nearValue;
+
+        for (auto && frame : rhs) {
+            for (auto && data : frame) {
+                config.referenceOutput.push_back(data);
+            }
+        }
+        config._path_to_models = prepareModelMatching();
+        config._stat_file = prepareStatMatching();
+        return M(config);
+    }
+
+    template <class T >
+    M near_avg(double nearAvgValue, const TBlob<T> & rhs) {
+        config.nearAvgValue = nearAvgValue;
+        return near(0.0, rhs);
+    }
+
+    M near(double nearValue, double meanRelativeError = 0, double maxRelativeError = 0) {
+        config.nearValue = nearValue;
+        config.meanRelativeError = meanRelativeError;
+        config.maxRelativeError = maxRelativeError;
+        config._path_to_models = prepareModelMatching();
+        config._stat_file = prepareStatMatching();
+        return M(config);
+    }
+
+    void equalToReferenceWithDelta(double nearValue) {
+        config.nearValue = nearValue;
+        config._path_to_models = prepareModelMatching();
+        config._stat_file = prepareStatMatching();
+        M(config).to(getReferenceResultsLabel());
+    }
+
+    template <class T>
+    M equalToReference(const TBlob<T> & rhs) {
+        for (const auto & v : rhs) {
+            config.referenceOutput.push_back(v);
+        }
+        config._path_to_models = prepareModelMatching();
+        config._stat_file = prepareStatMatching();
+        return M(config, true);
+    }
+
+    // placeholder to run the matcher without providing any reference
+    void possible() {
+        config._path_to_models = prepareModelMatching();
+        config._stat_file = prepareStatMatching();
+        auto tmp = M(config);
+        ASSERT_NO_FATAL_FAILURE(tmp.match());
+    }
+};
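+// A typical fluent chain, for illustration only (model and image names are
+// made up; see prepareModelMatching() above for how the model path is built):
+//
+//   ModelSelector<ClassificationMatcher>(config)
+//       .onModel("alexnet", "alexnet", "227x227")
+//       .onFP32()
+//       .withMeanImage()
+//       .And("cat.bmp")
+//       .withBatch(1)
+//       .near(0.005);  // returns the matcher; matching is then driven by to()/equalTo* calls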
+
+/**
+ * @class PluginVersion
+ * @brief A PluginVersion class stores plugin version and initialization status
+ */
+struct PluginVersion : public InferenceEngine::Version {
+    bool initialized = false;
+
+    explicit PluginVersion(const InferenceEngine::Version *ver) {
+        if (nullptr == ver) {
+            return;
+        }
+        InferenceEngine::Version::operator=(*ver);
+        initialized = true;
+    }
+
+    operator bool() const noexcept {
+        return initialized;
+    }
+};
+
+class Builder {
+private:
+    std::shared_ptr<InferenceEngine::Core> ie;
+    RegressionConfig config;
+
+public:
+    Builder(std::shared_ptr<InferenceEngine::Core> _ie) : ie(_ie) {
+        config.ie_core = ie;
+
+#ifndef NDEBUG
+        auto devices = ie->GetAvailableDevices();
+        std::cout << "Available devices (" << devices.size() << "):" << std::endl;
+        for (auto&& d : devices) {
+            std::cout << "Device: " << d << std::endl;
+            for (auto&& v : ie->GetVersions(d))
+                std::cout << "\t" << v.first << " : " << PluginVersion(&v.second) << std::endl;
+        }
+#endif
+    }
+
+    Builder & usingDevice(const std::string & device_name) {
+        config._device_name = device_name;
+        return *this;
+    }
+
+    Builder& setPerfInfo(std::map<std::string, InferenceEngine::InferenceEngineProfileInfo>& map) {
+        config.perfInfoPtr = &map;
+        config.plugin_config[CONFIG_KEY(PERF_COUNT)] = CONFIG_VALUE(YES);
+        return *this;
+    }
+
+    Builder& setDeviceMapping(const std::map<std::string, std::string> &deviceMapping) {
+        config.deviceMapping = deviceMapping;
+        return *this;
+    }
+
+    ModelSelector<ClassificationMatcher> classificationResults() {
+        return ModelSelector<ClassificationMatcher>(config);
+    }
+
+    ModelSelector<ClassificationMatcher> classificationResultsFor(const std::vector<std::string> & input) {
+        return ModelSelector<ClassificationMatcher>(config).And(input);
+    }
+
+    ModelSelector<OptimizedNetworkMatcher> dumpedOptimizedNetwork() {
+        return ModelSelector<OptimizedNetworkMatcher>(config);
+    }
+
+    ModelSelector<OptimizedNetworkDumper> dumpOptimizedNetworkTo(const std::string & file) {
+        config._path_to_aot_model = file;
+        return ModelSelector<OptimizedNetworkDumper>(config);
+    }
+
+    ModelSelector<ClassificationMatcher> classificationResultsFor(const std::string &input = { }) {
+        auto selector = ModelSelector<ClassificationMatcher>(config);
+        if (!input.empty()) {
+            selector.And(input);
+        }
+        return selector;
+    }
+
+    ModelSelector<SegmentationMatcher> segmentationResultsFor(const std::string &fileName) {
+        return ModelSelector<SegmentationMatcher>(config).And(fileName);
+    }
+    ModelSelector<RawMatcher> rawResultsFor(const std::string &fileName) {
+        return ModelSelector<RawMatcher>(config).And(fileName);
+    }
+    ModelSelector<ObjectDetectionMatcher> objectDetectionResultsFor(const std::string &fileName) {
+        return ModelSelector<ObjectDetectionMatcher>(config).And(fileName);
+    }
+    ModelSelector<ObjectDetectionMatcher> objectDetectionResults() {
+        return ModelSelector<ObjectDetectionMatcher>(config);
+    }
+    ModelSelector<ObjectDetectionMatcher> objectDetectionResultsFor(const vector<std::string> &filesNamesVector) {
+        return ModelSelector<ObjectDetectionMatcher>(config).And(filesNamesVector);
+    }
+};
+
+class RegressionTests : public TestsCommon {
+public:
+    // to force overload
+    virtual std::string getDeviceName() const = 0;
+
+    Builder please() {
+        std::shared_ptr<Core> ie = PluginCache::get().ie(getDeviceName());
+        Builder b(ie);
+        b.usingDevice(getDeviceName());
+        return b;
+    }
+};
+
+}
+
+#define assertThat() SCOPED_TRACE("");please()
+#define saveAfterInfer() SCOPED_TRACE("");please()
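+// End-to-end sketch of a regression test built on this header (the fixture
+// name, device, model and delta are all hypothetical):
+//
+//   class MyRegressionTests : public Regression::RegressionTests {
+//       std::string getDeviceName() const override { return "CPU"; }
+//   };
+//
+//   TEST_F(MyRegressionTests, classifiesCat) {
+//       assertThat().classificationResultsFor("cat.bmp")
+//                   .onModel("alexnet", "alexnet", "227x227")
+//                   .onFP32()
+//                   .equalToReferenceWithDelta(0.005);
+//   }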
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/include/segmentation_matcher.hpp b/inference-engine/tests_deprecated/functional/ie_tests/include/segmentation_matcher.hpp
new file mode 100644 (file)
index 0000000..cdc3b7e
--- /dev/null
@@ -0,0 +1,75 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <gtest/gtest.h>
+#include <vector>
+#include <string>
+
+#include <ie_blob.h>
+#include <inference_engine.hpp>
+#include "base_matcher.hpp"
+
+/**
+ * @class Color
+ * @brief A Color class stores channels of a given color
+ */
+class Color {
+private:
+    unsigned char _r;
+    unsigned char _g;
+    unsigned char _b;
+
+public:
+    /**
+     * A default constructor.
+     * @param r - value for red channel
+     * @param g - value for green channel
+     * @param b - value for blue channel
+     */
+    Color(unsigned char r,
+          unsigned char g,
+          unsigned char b) : _r(r), _g(g), _b(b) {}
+
+    inline unsigned char red() {
+        return _r;
+    }
+
+    inline unsigned char blue() {
+        return _b;
+    }
+
+    inline unsigned char green() {
+        return _g;
+    }
+};
+
+namespace Regression { namespace Matchers {
+
+class SegmentationMatcher : public BaseMatcher {
+ private:
+    InferenceEngine::TBlob<float>::Ptr output;
+    std::vector<std::vector<size_t>> outArray;
+    size_t C = -1;
+
+ public:
+    SegmentationMatcher (const RegressionConfig & config)
+        : BaseMatcher(config) {
+    }
+
+    virtual void match();
+
+    static float compareOutputBmp(std::vector<std::vector<size_t>> data, size_t classesNum, const std::string& inFileName);
+
+    void checkResult(std::string imageFileName);
+
+    SegmentationMatcher& to(std::string imageFileName) {
+        match();
+        checkResult(imageFileName);
+        return *this;
+    }
+};
+
+} }  // namespace Regression::Matchers
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/src/base_matcher.cpp b/inference-engine/tests_deprecated/functional/ie_tests/src/base_matcher.cpp
new file mode 100644 (file)
index 0000000..aea0f48
--- /dev/null
@@ -0,0 +1,118 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "base_matcher.hpp"
+#include <precision_utils.h>
+#include <format_reader_ptr.h>
+
+namespace Regression { namespace Matchers {
+
+using namespace InferenceEngine;
+
+void loadImage(const std::string &imageFilename, InferenceEngine::Blob::Ptr &blob, bool bgr, int batchNumber) {
+    TensorDesc tensDesc = blob->getTensorDesc();
+    if (tensDesc.getPrecision() != InferenceEngine::Precision::FP16
+        && tensDesc.getPrecision() != InferenceEngine::Precision::FP32
+        && tensDesc.getPrecision()!= InferenceEngine::Precision::U8
+        && tensDesc.getPrecision()!= InferenceEngine::Precision::I16) {
+        THROW_IE_EXCEPTION << "loadImage error: Input must have FP16, FP32 or U8 precision";
+    }
+
+    if (tensDesc.getLayout() != NHWC && tensDesc.getLayout() != NCHW) {
+        THROW_IE_EXCEPTION << "loadImage error: Input must have NHWC or NHWC layout";
+    }
+
+    FormatReader::ReaderPtr reader(imageFilename.c_str());
+    if (reader.get() == nullptr) {
+        THROW_IE_EXCEPTION << "loadImage error: image " << imageFilename << " cannot be read!";
+    }
+
+    size_t w = tensDesc.getDims()[3];
+    size_t h = tensDesc.getDims()[2];
+    if (reader->width() != w || reader->height() != h) {
+        THROW_IE_EXCEPTION << "loadImage error: Input sizes mismatch, got " << reader->width() << "x" << reader->height()
+                  << " expecting " << w << "x" << h;
+    }
+
+    auto numBlobChannels = tensDesc.getDims()[1];
+    size_t numImageChannels = reader->size() / (reader->width() * reader->height());
+    if (numBlobChannels != numImageChannels && numBlobChannels != 1) {
+        THROW_IE_EXCEPTION << "loadImage error: Input channels mismatch: image channels " << numImageChannels << ", "
+                  << "network channels " << numBlobChannels << ", expecting count of image channels are equal "
+                  << "to count if network channels or count of network channels are equal to 1";
+    }
+
+    auto nPixels = w * h;
+    uint8_t *BGR8 = reader->getData().get();
+    for (unsigned int i = 0; i < nPixels; i++) {
+        for (unsigned int j = 0; j < numBlobChannels; j++) {
+            uint8_t val = bgr ? BGR8[i * numImageChannels + j] : BGR8[i * numBlobChannels + (numBlobChannels - j - 1)];
+            // parenthesized so the per-batch offset applies to both layouts
+            size_t idx = (tensDesc.getLayout() == NHWC ? (i * numBlobChannels + j) : (j * nPixels + i))
+                + nPixels * numBlobChannels * batchNumber;
+            auto buf = blob->buffer();
+            switch (blob->getTensorDesc().getPrecision()) {
+            case Precision::U8:
+            {
+                auto inputDataPtr = buf.as<uint8_t*>();
+                inputDataPtr[idx] = val;
+                break;
+            }
+            case Precision::I16:
+            {
+                auto *inputDataPtr = buf.as<int16_t*>();
+                inputDataPtr[idx] = val;
+                break;
+            }
+            case Precision::FP16:
+            {
+                ie_fp16 *inputDataPtr = buf.as<ie_fp16*>();
+                inputDataPtr[idx] = InferenceEngine::PrecisionUtils::f32tof16(static_cast<float>(val));
+                break;
+            }
+            case Precision::FP32:
+            {
+                auto inputDataPtr = buf.as<float*>();
+                inputDataPtr[idx] = static_cast<float>(val);
+                break;
+            }
+            default:
+                THROW_IE_EXCEPTION << "Unsupported precision!";
+            }
+        }
+    }
+}
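+// Worked example (shapes invented): for a 1x3xHxW NCHW FP32 blob, pixel i of
+// channel j lands at index j * (W * H) + i plus the per-batch offset
+// (W * H) * 3 * batchNumber, with each U8 sample converted to float.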
+
+void BaseMatcher::checkImgNumber(int dynBatch) {
+    InferenceEngine::Core ieCore;
+    CNNNetwork net = ieCore.ReadNetwork(config._path_to_models);
+    auto numInputs = net.getInputsInfo().size();
+
+    int batch = dynBatch > 0 ? dynBatch : config.batchSize;
+
+    if ((numInputs * batch) > config._paths_to_images.size()) {
+
+        auto readImagesSize = config._paths_to_images.size();
+        size_t diff = (numInputs * batch) / readImagesSize;
+
+        for (size_t i = 1; i < diff; i++) {
+            for (size_t j = 0; j < readImagesSize; j++) {
+                config._paths_to_images.push_back(config._paths_to_images[j]);
+            }
+        }
+        if (readImagesSize * diff != (numInputs * batch)) {
+            for (size_t j = 0; j < (numInputs * batch) - readImagesSize * diff; j++) {
+                config._paths_to_images.push_back(config._paths_to_images.at(j));
+            }
+        }
+    } else if ((numInputs * batch) < config._paths_to_images.size()) {
+        while (config._paths_to_images.size() != batch * numInputs) {
+            auto name = config._paths_to_images.back();
+            std::cout << "[WARNING]: Image " << name << " skipped!" << std::endl;
+            config._paths_to_images.pop_back();
+        }
+    }
+}
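+// Worked example (numbers invented): with one input, batch 4 and images
+// {a, b}, diff == 2, so the list is doubled to {a, b, a, b}; with batch 5 the
+// remainder loop appends `a` once more, yielding {a, b, a, b, a}.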
+
+}  // namespace Matchers
+}  // namespace Regression
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/src/classification_matcher.cpp b/inference-engine/tests_deprecated/functional/ie_tests/src/classification_matcher.cpp
new file mode 100644 (file)
index 0000000..a99446e
--- /dev/null
@@ -0,0 +1,330 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "classification_matcher.hpp"
+#include <gtest/gtest.h>
+#include <xml_helper.hpp>
+
+using namespace Regression;
+using namespace Regression::Matchers;
+
+IE_SUPPRESS_DEPRECATED_START
+ClassificationMatcher::ClassificationMatcher(RegressionConfig &config)
+    : BaseMatcher(config) {
+    // Get file names for files with weights and labels
+    std::string binFileName = testing::FileUtils::fileNameNoExt(config._path_to_models) + ".bin";
+
+    auto cnnNetwork = config.ie_core->ReadNetwork(config._path_to_models, binFileName);
+
+    std::string labelFileName = testing::FileUtils::fileNameNoExt(config._path_to_models) + ".labels";
+
+    // Try to read labels file
+    readLabels(labelFileName);
+
+    if (config._stat_file != "") {
+        InferenceEngine::NetworkStatsMap stat = testing::loadStatisticFromFile(config._stat_file);
+
+        ICNNNetworkStats *pstats;
+        ((ICNNNetwork&)cnnNetwork).getStats(&pstats, nullptr);
+        pstats->setNodesStats(stat);
+
+        // iterating over layers and fixing suppress_normalization->quantization_level
+        // because we have in tests IR which has old name for fp32 layers
+        for (auto& layer : cnnNetwork) {
+            if (layer->params.find("suppress_normalization") != layer->params.end() &&
+                layer->params["suppress_normalization"] == "I8") {
+                layer->params["quantization_level"] = "FP32";
+            }
+        }
+    }
+
+    if (config._reshape) {
+        auto inputShapes = cnnNetwork.getInputShapes();
+        inputShapes.begin()->second[0] = config.batchSize;
+
+        cnnNetwork.reshape(inputShapes);
+    } else if (config.batchSize != 1) {
+        cnnNetwork.setBatchSize(config.batchSize);
+    }
+
+    _inputsInfo = cnnNetwork.getInputsInfo();
+    _outputsInfo = cnnNetwork.getOutputsInfo();
+    for (auto &info : _inputsInfo) {
+        if (config._inputPrecision != InferenceEngine::Precision::UNSPECIFIED) {
+            info.second->setPrecision(config._inputPrecision);
+        }
+    }
+
+    for (auto &info : _outputsInfo) {
+        if (config._outputPrecision != Precision::UNSPECIFIED) {
+            info.second->setPrecision(config._outputPrecision);
+        } else {
+            info.second->setPrecision(config.modelPrecision);
+        }
+    }
+
+    if (config.useDynamicBatching) {
+        config.plugin_config[PluginConfigParams::KEY_DYN_BATCH_ENABLED] = PluginConfigParams::YES;
+        cnnNetwork.setBatchSize(config.batchSize);
+    }
+
+    for (int i=0; i < config._numNetworks; i++) {
+        auto loadedExecutableNetwork = config.ie_core->LoadNetwork(cnnNetwork, config._device_name, config.plugin_config);
+        InferenceEngine::ExecutableNetwork executableNetwork;
+        if (config.useExportImport) {
+            std::stringstream stream;
+            loadedExecutableNetwork.Export(stream);
+            executableNetwork = config.ie_core->ImportNetwork(stream);
+        } else {
+            executableNetwork = loadedExecutableNetwork;
+        }
+        _executableNetworks.push_back(executableNetwork);
+    }
+
+    top = (-1 == config.topKNumbers) ? 5 : config.topKNumbers;
+}
+IE_SUPPRESS_DEPRECATED_END
+
+void ClassificationMatcher::to(const std::vector <Regression::Reference::ClassificationScoringResultsForTests> &expected) {
+    checkResultNumber = 0;
+    match(std::min(top, expected.size()));
+    checkResult(checkResultNumber, expected);
+    checkResultNumber++;
+}
+
+void ClassificationMatcher::to(std::string modelType) {
+    auto batchSize = config.batchSize;
+
+    if (config.useDynamicBatching) {
+        batchSize = config.dynBatch;
+    }
+
+    checkImgNumber(batchSize);
+    ASSERT_NO_FATAL_FAILURE(match(10));  // This method produces top-10 reference results.
+    for (size_t i = 0; i < config._paths_to_images.size(); i++) {
+        const size_t last_slash_idx = config._paths_to_images[i].find_last_of(kPathSeparator);
+        if (std::string::npos != last_slash_idx) {
+            config._paths_to_images[i].erase(0, last_slash_idx + 1);
+        }
+        if (Regression::Reference::values.find(modelType + "_" + config._paths_to_images[i]) ==
+            Regression::Reference::values.end()) {
+            FAIL() << "Reference result for " << modelType + "_" + config._paths_to_images[i] << " cannot be found";
+        }
+        ASSERT_NO_FATAL_FAILURE(checkResult(i, Regression::Reference::values[modelType + "_" + config._paths_to_images[i]]));
+    }
+    checkResultNumber++;
+}
+
+
+void ClassificationMatcher::readLabels(std::string labelFilePath) {
+    std::fstream fs(labelFilePath, std::ios_base::in);
+    if (fs.is_open()) {
+        std::string line;
+        while (getline(fs, line)) {
+            config.labels.push_back(TestsCommon::trim(line));
+        }
+    } else {
+        THROW_IE_EXCEPTION << "cannot open label file: " << labelFilePath;
+
+    }
+}
+
+int ClassificationMatcher::getIndexByLabel(const std::string &label) {
+    auto result = std::find(begin(config.labels), end(config.labels), label);
+    if (result == config.labels.end()) {
+        THROW_IE_EXCEPTION << "cannot locate index for label : " << label;
+    }
+    return static_cast<int>(std::distance(begin(config.labels), result));
+}
+
+std::string ClassificationMatcher::getLabel(unsigned int index) {
+    if (config.labels.empty()) {
+        return "label #" + std::to_string(index);
+    }
+    if (index >= config.labels.size()) {
+        THROW_IE_EXCEPTION << "index out of labels file: " << index;
+    }
+
+    return config.labels[index];
+}
+
+void ClassificationMatcher::checkResult(size_t checkNumber,
+                                         const std::vector <Regression::Reference::ClassificationScoringResultsForTests> &expected) {
+    if (checkNumber >= _results.size()) {
+        FAIL() << "Expected number of results(" << checkNumber << ") is more than real number of results: "
+               << _results.size();
+    }
+    auto result = _results.at(checkNumber);
+
+    std::map<std::string, float> expected_map;
+    int expectedSize = expected.size();
+    int resultSize = result.size();
+
+    if (config.topKNumbers != -1) {
+        expectedSize = config.topKNumbers;
+        resultSize = config.topKNumbers;
+    }
+
+    for (int i = 0; i < expectedSize; ++i) {
+        expected_map[expected[i].getLabel()] = expected[i].getProbability();
+    }
+
+    for (int i = 0; i < resultSize; ++i) {
+        if (expected_map.count(result[i].getLabel())) {
+            ASSERT_NEAR(result[i].getProbability(), expected_map[result[i].getLabel()], config.nearValue)
+                                << "Failed for label \"" << result[i].getLabel() << "\" index "  << i;
+            expected_map.erase(result[i].getLabel());
+        } else {
+            // a label missing from the expected list must not score above the last expected element
+            ASSERT_LE(result[i].getProbability(), expected.back().getProbability() + config.nearValue)
+                                << "Label \"" << result[i].getLabel() << "\" not found or cannot be in expected list";
+        }
+    }
+
+    if (expected_map.size() != 0) {
+        for (auto & elem: expected_map) {
+            std::cout << "Label \"" << elem.first << "\" with probability="
+                      << elem.second << " not found in result list" << std::endl;
+        }
+        FAIL();
+    }
+}
+
+void ClassificationMatcher::match(size_t top) {
+    for (int i = 0; i != _executableNetworks.size(); i++) {
+        match_n(top, i);
+    }
+}
+
+namespace {
+
+template <class T>
+inline void TopResults(unsigned int n, TBlob<T>& input, std::vector<unsigned>& output) {
+    SizeVector dims = input.getTensorDesc().getDims();
+    size_t input_rank = dims.size();
+    if (!input_rank || !dims[0]) THROW_IE_EXCEPTION << "Input blob has incorrect dimensions!";
+    size_t batchSize = dims[0];
+    std::vector<unsigned> indexes(input.size() / batchSize);
+
+    n = static_cast<unsigned>(std::min<size_t>((size_t)n, input.size()));
+
+    output.resize(n * batchSize);
+
+    for (size_t i = 0; i < batchSize; i++) {
+        size_t offset = i * (input.size() / batchSize);
+        T* batchData = input.data();
+        batchData += offset;
+
+        std::iota(std::begin(indexes), std::end(indexes), 0);
+        std::partial_sort(std::begin(indexes), std::begin(indexes) + n, std::end(indexes),
+                          [&batchData](unsigned l, unsigned r) {
+                              return batchData[l] > batchData[r];
+                          });
+        for (unsigned j = 0; j < n; j++) {
+            output.at(i * n + j) = indexes.at(j);
+        }
+    }
+}
+
+}  // namespace
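+// TopResults example (values invented): for a single-batch blob holding
+// {0.1f, 0.7f, 0.2f} and n == 2, `output` becomes {1, 2} -- the indexes of
+// the two largest scores, in descending score order.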
+
+void ClassificationMatcher::match_n(size_t top, int index) {
+    try {
+        InferenceEngine::IInferRequest::Ptr inferRequest;
+        if (_executableNetworks[index]->CreateInferRequest(inferRequest, &_resp) != OK) {
+            THROW_IE_EXCEPTION << "Can not create infer request: " << _resp.msg;
+        }
+        std::string prevImageName = "";
+
+        auto batchSize = config.batchSize;
+
+        if (config.useDynamicBatching) {
+            batchSize = config.dynBatch;
+            InferenceEngine::ResponseDesc resp;
+            inferRequest->SetBatch(batchSize, &resp);
+        }
+
+        if (config._paths_to_images.size() % batchSize != 0) {
+            THROW_IE_EXCEPTION << "Can not process all input images("<< config._paths_to_images.size()
+                               <<") using given batch size of " << batchSize;
+        }
+        // loading images in batches
+        for (int i = 0; i < config._paths_to_images.size(); i += batchSize) {
+
+            // check whether this batch reuses the same image names as the previous one
+            bool areImagesSame = false;
+            if (i > 0)  {
+                areImagesSame = true;
+                for (int j = i;j != i + batchSize; j++) {
+                    if (config._paths_to_images[j] != config._paths_to_images[j - batchSize]) {
+                        areImagesSame = false;
+                        break;
+                    }
+                }
+            }
+            if (!areImagesSame) {
+                for (int j = 0; j != batchSize; j++) {
+                    const auto & imageName  = config._paths_to_images[i + j];
+
+                    InferenceEngine::Blob::Ptr inputBlob;
+                    if (inferRequest->GetBlob(_inputsInfo.begin()->first.c_str(), inputBlob, &_resp) != OK) {
+                        THROW_IE_EXCEPTION << "Can not get input with name: " << _inputsInfo.begin()->first
+                                           << " error message: " << _resp.msg;
+                    }
+                    loadImage(imageName, inputBlob, true, j);
+                }
+            }
+
+            StatusCode status = inferRequest->Infer(&_resp);
+            if (status != OK) {
+                THROW_IE_EXCEPTION << "Can not do infer: " << _resp.msg;
+            }
+
+            InferenceEngine::Blob::Ptr outputBlobPtr;
+            if (inferRequest->GetBlob(_outputsInfo.begin()->first.c_str(), outputBlobPtr, &_resp) != OK) {
+                THROW_IE_EXCEPTION << "Can not get output with name: " << _outputsInfo.begin()->first
+                                   << " error message: " << _resp.msg;
+            }
+
+            InferenceEngine::TBlob<float>::Ptr outputFP32;
+            if (outputBlobPtr->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP16) {
+                TensorDesc desc = { InferenceEngine::Precision::FP32, outputBlobPtr->getTensorDesc().getDims(),
+                    outputBlobPtr->getTensorDesc().getLayout() };
+                outputFP32 = make_shared_blob<float>(desc);
+                outputFP32->allocate();
+                PrecisionUtils::f16tof32Arrays(outputFP32->buffer().as<float *>(), outputBlobPtr->cbuffer().as<short *>(), outputBlobPtr->size());
+            } else if (outputBlobPtr->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP32) {
+                outputFP32 = dynamic_pointer_cast<InferenceEngine::TBlob<float>>(outputBlobPtr);
+            } else {
+                THROW_IE_EXCEPTION << "Unsupported output precision for test. Supported: FP16, FP32";
+            }
+
+            vector<unsigned> topClassesIndexes;
+            TopResults<float>(top, *outputFP32, topClassesIndexes);
+            std::vector<float> probabilities(outputFP32->buffer().as<float *>(),
+                                             outputFP32->buffer().as<float *>() + outputFP32->size());
+
+            saveResults(topClassesIndexes, probabilities, top);
+        }
+    } catch (InferenceEngine::details::InferenceEngineException &e) {
+        FAIL() << e.what();
+    } catch (std::exception &e) {
+        FAIL() << e.what();
+    }
+}
+
+void ClassificationMatcher::saveResults(const std::vector<unsigned> &topIndexes, const std::vector<float> &probs, size_t top) {
+
+    for(auto idx = topIndexes.begin(); idx != topIndexes.end();) {
+        std::vector<Reference::LabelProbability> topResults;
+        for (int i = 0; i != top; i++) {
+            Reference::LabelProbability labelProb(*idx, probs[*idx], getLabel(*idx));
+            std::cout << "index=" << labelProb.getLabelIndex() << ", probability=" << labelProb.getProbability()
+                      << ", class=" << labelProb.getLabel() << "\n";
+            topResults.push_back(labelProb);
+            idx++;
+        }
+        _results.push_back(topResults);
+    }
+}
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/src/custom_matcher.cpp b/inference-engine/tests_deprecated/functional/ie_tests/src/custom_matcher.cpp
new file mode 100644 (file)
index 0000000..1d2c532
--- /dev/null
@@ -0,0 +1,430 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <algorithm>
+#include <cmath>
+#include <gtest/gtest.h>
+#include <ie_plugin_config.hpp>
+#include "custom_matcher.hpp"
+
+using namespace InferenceEngine;
+
+InferenceEngine::ExecutableNetwork Regression::Matchers::CustomMatcher::createExecutableNetworkFromAOT() {
+    ExecutableNetwork executableApi;
+    try {
+        ctx.setFileNames(config._paths_to_images);
+        ctx.setModelPrecision(config.modelPrecision);
+
+        executableApi = config.ie_core->ImportNetwork(config._path_to_aot_model, config._device_name, config.plugin_config);
+    }
+    catch (std::exception &e) {
+        GTEST_MESSAGE_(e.what(), ::testing::TestPartResult::kFatalFailure);
+    }
+
+    return executableApi;
+
+}
+
+InferenceEngine::ExecutableNetwork Regression::Matchers::CustomMatcher::createExecutableNetworkFromIR(){
+    ExecutableNetwork executableApi;
+    try {
+        ctx.setFileNames(config._paths_to_images);
+        ctx.setModelPrecision(config.modelPrecision);
+
+        if (config.make_model) {
+            ctx.setModelPath(config._path_to_models);
+            config.make_model(ctx);
+        }
+
+        std::string binFileName = testing::FileUtils::fileNameNoExt(config._path_to_models) + ".bin";
+        network = config.ie_core->ReadNetwork(config._path_to_models, binFileName);
+
+        // Change the batch size if it is not equal to 1
+        auto inputs = network.getInputsInfo();
+
+        if (config._inputPrecision) {
+            for (auto && input : inputs) {
+                input.second->setPrecision(config._inputPrecision);
+                // NC is the proper layout for a 2D blob unless a different one (e.g. CN) is specified
+                auto layout = input.second->getTensorDesc().getDims().size() == 4 ? NCHW : NC;
+                input.second->getInputData()->setLayout(layout);
+            }
+        }
+
+        // TODO: clarify why this is needed
+        if (inputs.begin()->second->getTensorDesc().getDims().at(0) != 1) {
+            std::cerr << "[WARNING]: Batch size will be equal to 1." << std::endl;
+            network.setBatchSize(1);
+        }
+
+        if (config.batchSize != 1) {
+            network.setBatchSize(config.batchSize);
+        }
+
+        if (!config.outputLayer.empty()) {
+            network.addOutput(config.outputLayer);
+        }
+
+        if (config.useDynamicBatching) {
+            config.plugin_config[PluginConfigParams::KEY_DYN_BATCH_ENABLED] = PluginConfigParams::YES;
+        }
+
+        auto outInfo = network.getOutputsInfo();
+
+        auto loadedExecutableNetwork = config.ie_core->LoadNetwork(network, config._device_name, config.plugin_config);
+        if (config.useExportImport) {
+            std::stringstream stream;
+            loadedExecutableNetwork.Export(stream);
+            executableApi = config.ie_core->ImportNetwork(stream);
+        } else {
+            executableApi = loadedExecutableNetwork;
+        }
+
+    }
+    catch (std::exception &e) {
+        GTEST_MESSAGE_(e.what(), ::testing::TestPartResult::kFatalFailure);
+    }
+
+    return executableApi;
+}
+
+void Regression::Matchers::CustomMatcher::matchCustom() {
+    try {
+        ExecutableNetwork executableApi;
+        std::vector<InferRequest> inferRequests;
+        ConstInputsDataMap constInputs;
+        ConstOutputsDataMap constOutInfo;
+        ResponseDesc dsc;
+        StatusCode sts = OK;
+
+        if (!config._path_to_aot_model.empty()) {
+            ASSERT_NO_FATAL_FAILURE(executableApi = createExecutableNetworkFromAOT());
+        } else {
+            ASSERT_NO_FATAL_FAILURE(executableApi = createExecutableNetworkFromIR());
+        }
+
+        if (executableApi.operator IExecutableNetwork::Ptr &() != nullptr) {
+            for (int i=0; i != config._nrequests; i++ ) {
+                inferRequests.push_back(executableApi.CreateInferRequest());
+            }
+        }
+
+        if (config.useDynamicBatching) {
+            for (auto && req : inferRequests) {
+                req.SetBatch(config.dynBatch);
+            }
+        }
+
+        auto make_unified_endpoints = [&] () {
+            if (executableApi.operator IExecutableNetwork::Ptr &() != nullptr) {
+                return std::make_pair(executableApi.GetInputsInfo(), executableApi.GetOutputsInfo());
+            }
+            auto inputs2 = network.getInputsInfo();
+            ConstInputsDataMap constInputs2;
+            for (const auto & input : inputs2) {
+                constInputs2[input.first] = input.second;
+            }
+            auto output2 = network.getOutputsInfo();
+            ConstOutputsDataMap constOutInfo2;
+            for (const auto & output : output2) {
+                constOutInfo2[output.first] = output.second;
+            }
+            return std::make_pair(constInputs2, constOutInfo2);
+        };
+
+        auto endpoints = make_unified_endpoints();
+
+        for (auto && fetch_input : config.fetch_input) {
+            // each fetcher can be used multiple times
+            for (;;) {
+                // load new input - reset if necessary
+                decltype(fetch_input(ctx)) fetchResult;
+
+                int requestProcessed = 0;
+                for (int i = 0; i != config._nrequests; i++) {
+                    int inputId = 0;
+                    for (auto input : endpoints.first) {
+                        InferenceEngine::Blob::Ptr inputBlb;
+                        inputBlb = inferRequests[i].GetBlob(input.first);
+                        ctx.setInput(input.second->name(), inputBlb);
+                        ctx.setInputIdx(inputId);
+                        decltype(fetch_input(ctx)) fetchResultForInput;
+                        ASSERT_NO_FATAL_FAILURE(fetchResultForInput = fetch_input(ctx));
+                        if (inputId != 0) {
+                            ASSERT_EQ(fetchResult.fetched, fetchResultForInput.fetched);
+                            ASSERT_EQ(fetchResult.fetchMore, fetchResultForInput.fetchMore);
+                            ASSERT_EQ(fetchResult.frameNumber, fetchResultForInput.frameNumber);
+                            ASSERT_EQ(fetchResult.reset, fetchResultForInput.reset);
+                        } else {
+                            fetchResult = fetchResultForInput;
+                        }
+                        inputId++;
+                    }
+
+                    if (fetchResult.fetched) {
+                        // number of requests to infer in parallel
+                        requestProcessed++;
+                        // increase the frame number; this could instead be done in the input fetcher
+                        // if the context were passed by non-const reference
+                        // (the value is used by the read_next_* family of fetchers)
+                        ctx.setFrameNumber(ctx.getFrameNumber() + 1);
+                    }
+                    // cannot spawn more requests due to reset
+                    if (fetchResult.reset) {
+                        break;
+                    }
+                    // end of stream
+                    if (!fetchResult.fetchMore) {
+                        break;
+                    }
+                }
+
+                if (fetchResult.fetched) {
+                    // Infer model
+                    if (requestProcessed == 1) {
+                        inferRequests.front().Infer();
+                        sts = OK;
+                    } else {
+                        for (int i = 0; i != requestProcessed; i++) {
+                            inferRequests[i].StartAsync();
+                        }
+                        for (int i = 0; i != requestProcessed; i++) {
+                            inferRequests[i].Wait(IInferRequest::RESULT_READY);
+                        }
+                        sts = OK;
+                    }
+
+                    if (!fetchResult.hasResult) {
+                        continue;
+                    }
+
+                    // for the infer request case, copy the resulting blob
+                    for (int i = 0; i != requestProcessed; i++) {
+                        auto &outputs = ctx.newOutputs();
+                        for (auto output : endpoints.second) {
+                            auto tblob = dynamic_pointer_cast<TBlob<float>>(inferRequests[i].GetBlob(output.second->getName()));
+                            outputs[output.second->getName()] = make_shared_blob(*tblob);
+                        }
+                    }
+                }
+
+                if (fetchResult.reset) {
+                    auto states = executableApi.QueryState();
+                    ASSERT_FALSE(states.empty());
+                    states.front().Reset();
+                    // also store reset indicator for comparison routine
+                    auto &outputs = ctx.newOutputs();
+                    outputs["reset"] = nullptr;
+                    //continue;
+                }
+
+                //FAIL()<<"stop after one frame";
+
+                // Check errors
+                if (sts == GENERAL_ERROR) {
+                    THROW_IE_EXCEPTION << "Scoring failed! Critical error: " << dsc.msg;
+                } else if (sts == NOT_IMPLEMENTED) {
+                    THROW_IE_EXCEPTION << "Scoring failed! Input data is incorrect and not supported!";
+                } else if (sts == NETWORK_NOT_LOADED) {
+                    THROW_IE_EXCEPTION << "Scoring failed! " << dsc.msg;
+                }
+                if (!fetchResult.fetchMore) break;
+            }
+        }
+    }
+    catch (std::exception &e) {
+        FAIL() << e.what();
+    }
+}
+
+void Regression::Matchers::CustomMatcher::checkResult() {
+    bool cmpNear = !isApproximatelyEqual(config.nearValue, 0.0);
+    bool cmpNearAvg = !isApproximatelyEqual(config.nearAvgValue, 0.0);
+    bool isSaveOutput = !!config.outputBlob;
+
+    /**
+     * In case where external comparison is used
+     */
+    if (isSaveOutput) {
+        if (!config.fetch_result) {
+
+
+            // calculating all outputs size
+            SizeVector dimsMerged;
+            for(auto && output :  ctx.allOutputs()) {
+                auto outputBlobIt = config.outputLayer.empty() ? output.begin() : output.find(config.outputLayer);
+                auto outBlob  = outputBlobIt->second;
+
+                if (dimsMerged.empty()) {
+                    dimsMerged = outBlob->getTensorDesc().getDims();
+                } else {
+                    ASSERT_EQ(dimsMerged.size(), outBlob->getTensorDesc().getDims().size());
+                    int added = 0;
+                    std::transform(begin(dimsMerged),
+                                   end(dimsMerged),
+                                   begin(dimsMerged = outBlob->getTensorDesc().getDims()),
+                                   begin(dimsMerged),
+                                   [&added](size_t l, size_t r) {
+                                       added += l != r;
+                                       return added ? l + r : l;
+                                   });
+                    ASSERT_LE(added,1);
+
+                    if (added == 0 && !dimsMerged.empty()) {
+                        dimsMerged.back() += outBlob->getTensorDesc().getDims().back();
+                    }
+                }
+            }
+
+            config.outputBlob->deallocate();
+            config.outputBlob->getTensorDesc() = TensorDesc(config.outputBlob->getTensorDesc().getPrecision(),
+                                                            dimsMerged,
+                                                            TensorDesc::getLayoutByDims(dimsMerged));
+            config.outputBlob->allocate();
+            float *buff = config.outputBlob->buffer();
+
+            // copying all output frames into allocated blob
+            for(auto && output :  ctx.allOutputs()) {
+
+                auto outputBlobIt = config.outputLayer.empty() ? output.begin() : output.find(config.outputLayer);
+                auto outBlob = dynamic_pointer_cast<TBlob<float>>(outputBlobIt->second);
+
+                for (auto value : *outBlob) {
+                    *(buff++) = value;
+                }
+            }
+
+        } else {
+            auto outBlob = dynamic_pointer_cast<TBlob<float>>(config.fetch_result(ctx));
+
+            config.outputBlob->deallocate();
+            config.outputBlob->getTensorDesc() = TensorDesc(outBlob->getTensorDesc().getPrecision(),
+                                                            outBlob->getTensorDesc().getDims(),
+                                                            TensorDesc::getLayoutByDims(outBlob->getTensorDesc().getDims()));
+            config.outputBlob->allocate();
+            float *buff = config.outputBlob->buffer();
+
+            int i = 0;
+            for (auto value : *outBlob) {
+                buff[i++] = value;
+            }
+        }
+        return;
+    }
+
+    if (cmpNear || cmpNearAvg) {
+        int idx = 0;
+        float avgDiff = 0.0;
+        float sz = 0.0;
+        float maxDiff = 0.0;
+        float maxAverageDiff = 0.0;
+        float rms = 0.0;
+        int nFrame = -1;
+        float avgFrames = 0.0;
+
+        if (!config.fetch_result) {
+            decltype(ctx.allOutputs().begin()) output;
+            for(;;) {
+                avgFrames++;
+                if (nFrame == -1) {
+                    output = ctx.allOutputs().begin();
+                    nFrame = 0;
+                } else {
+                    nFrame++;
+                    ++output;
+                }
+                if (output == ctx.allOutputs().end()) {
+                    break;
+                }
+                auto outputBlobIt = config.outputLayer.empty() ? output->begin() : output->find(config.outputLayer);
+                auto outBlob  = dynamic_pointer_cast<TBlob<float>>(outputBlobIt->second);
+
+                // for the reset case a fake (null) blob pointer is stored
+                if (outBlob == nullptr) {
+                    avgDiff = 0.0;
+                    rms = 0.0;
+                    nFrame--;
+                    avgFrames = 0.0;
+                    continue;
+                }
+                float rmsp = 0.0;
+                float avgDiffp = 0.0;
+                ASSERT_LE(outBlob->size(), config.referenceOutput.size());
+                for (auto value : *outBlob) {
+                    if (cmpNear) {
+                       ASSERT_NEAR(value, config.referenceOutput[idx], config.nearValue) << " at " << idx;
+                    }
+                    auto diff = std::abs(value - config.referenceOutput[idx]);
+                    avgDiffp += diff;
+                    rmsp     += diff*diff;
+                    maxDiff   = std::max(maxDiff, diff);
+                    idx++;
+                }
+
+                rmsp = sqrt(rmsp / outBlob->size());
+                rms += rmsp;
+                avgDiffp /= outBlob->size();
+                avgDiff += avgDiffp;
+                maxAverageDiff = std::max(maxAverageDiff, avgDiff / avgFrames);
+
+                // TODO: add test_log parsing from the command line
+// #define TEST_LOG
+#ifdef TEST_LOG
+                auto threshold_similarity_max = config.nearValue - maxDiff;
+                auto threshold_similarity_avg = config.nearAvgValue - avgDiff / avgFrames;
+
+                cout << "Frame #  " << nFrame << "\n";
+                cout << "MaxDiff   : " << maxDiff << " ("
+                    << std::fixed << std::setprecision(5) << threshold_similarity_max <<")" << "\n";
+                cout << "RMSE      : " << rmsp << "\n";
+                cout << "AvgDiff/f : " << avgDiffp << "\n";
+                cout << "MaxAvgDiff: " << maxAverageDiff
+                    << std::fixed << std::setprecision(5) << " (" << threshold_similarity_avg <<")" << std::endl;
+#endif
+
+                if (cmpNearAvg) {
+                    ASSERT_NEAR(avgDiff / avgFrames, 0, config.nearAvgValue);
+                }
+            }
+        } else {
+            auto ptr = dynamic_pointer_cast<TBlob<float>>(config.fetch_result(ctx));
+
+            for (auto value : *ptr) {
+                if (cmpNear) {
+                    ASSERT_NEAR(value, config.referenceOutput[idx], config.nearValue) << " at " << idx;
+                }
+                if (cmpNearAvg) {
+                    avgDiff += std::abs(value - config.referenceOutput[idx]);
+                }
+                idx++;
+            }
+            if (cmpNearAvg) {
+                avgDiff /= ptr->size();
+            }
+        }
+    } else {
+        // for small expectations lets use string as a compare buddy
+        stringstream ss, ssr;
+
+        if (!config.fetch_result) {
+            for (auto output : ctx.outputs()) {
+                auto outBlob = dynamic_pointer_cast<TBlob<float>>(output.second);
+                for (auto value : *outBlob) {
+                    ss << setprecision(precision) << fixed << (float)value << ".";
+                }
+            }
+        } else {
+            auto ptr = dynamic_pointer_cast<TBlob<float>>(config.fetch_result(ctx));
+
+            for (auto value : *ptr) {
+                ss << setprecision(precision) << fixed << (float)value << ".";
+            }
+        }
+
+        for (auto value : config.referenceOutput) {
+            ssr << setprecision(precision) << fixed << (float)value << ".";
+        }
+
+        ASSERT_STREQ(ssr.str().c_str(), ss.str().c_str());
+    }
+}
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/src/ie_core_adapter.cpp b/inference-engine/tests_deprecated/functional/ie_tests/src/ie_core_adapter.cpp
new file mode 100644 (file)
index 0000000..b81d206
--- /dev/null
@@ -0,0 +1,47 @@
+// Copyright (C) 2018-2019 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "ie_core_adapter.hpp"
+#include "description_buffer.hpp"
+
+using namespace InferenceEngine;
+
+using IECorePtr = std::shared_ptr<InferenceEngine::Core>;
+
+IECoreAdapter::IECoreAdapter(IECorePtr ieCore, std::string deviceName)
+    : m_ieCore(ieCore), m_deviceName(deviceName) {}
+
+StatusCode IECoreAdapter::LoadNetwork(
+    IExecutableNetwork::Ptr& ret, CNNNetwork network,
+    const std::map<std::string, std::string>& config, ResponseDesc* resp) noexcept {
+
+    try {
+        ret = m_ieCore->LoadNetwork(network, m_deviceName, config);
+    } catch (const std::exception& ex) {
+        return DescriptionBuffer(GENERAL_ERROR, resp) << ex.what();
+    }
+
+    return OK;
+}
+
+StatusCode IECoreAdapter::ImportNetwork(
+    IExecutableNetwork::Ptr& ret, const std::string& modelFileName,
+    const std::map<std::string, std::string>& config, ResponseDesc* resp) noexcept {
+
+    try {
+        ret = m_ieCore->ImportNetwork(modelFileName, m_deviceName, config);
+    } catch (const NetworkNotRead& ie_ex) {
+        return DescriptionBuffer(NETWORK_NOT_READ, resp) << ie_ex.what();
+    } catch (const std::exception& ex) {
+        return DescriptionBuffer(GENERAL_ERROR, resp) << ex.what();
+    }
+
+    return OK;
+}
+
+ExecutableNetwork IECoreAdapter::ImportNetwork(
+    std::istream& networkModel,
+    const std::map<std::string, std::string>& config) {
+    return m_ieCore->ImportNetwork(networkModel, m_deviceName, config);
+}
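+
+// Usage sketch (illustrative only; "CPU", `network`, and the empty config map are
+// placeholders, not part of this file):
+//   auto core = std::make_shared<InferenceEngine::Core>();
+//   IECoreAdapter adapter(core, "CPU");
+//   IExecutableNetwork::Ptr exec;
+//   ResponseDesc resp;
+//   if (adapter.LoadNetwork(exec, network, {}, &resp) != OK) std::cerr << resp.msg;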
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/src/net_model.cpp b/inference-engine/tests_deprecated/functional/ie_tests/src/net_model.cpp
new file mode 100644 (file)
index 0000000..c030fef
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "net_model.hpp"
+
+//------------------------------------------------------------------------------
+// Implementation of methods of class Model
+//------------------------------------------------------------------------------
+
+Model::Model(const std::string &folderName,
+             const std::string &fileName,
+             const std::string &resolution,
+             const std::string &extension) :
+        folderName_(folderName),
+        fileName_(fileName),
+        resolutionName_(resolution),
+        extensionName_(extension) {
+}
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/src/object_detection_matcher.cpp b/inference-engine/tests_deprecated/functional/ie_tests/src/object_detection_matcher.cpp
new file mode 100644 (file)
index 0000000..0a794cb
--- /dev/null
@@ -0,0 +1,297 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <xml_helper.hpp>
+#include "object_detection_matcher.hpp"
+
+#include <algorithm>
+
+using namespace Regression::Matchers;
+
+namespace Regression {
+namespace Matchers {
+
+using DetectedObject = ObjectDetectionMatcher::DetectedObject;
+using ImageDescription = ObjectDetectionMatcher::ImageDescription;
+using namespace InferenceEngine;
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// ObjectDetectionMatcher::DetectedObject //////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ObjectDetectionMatcher::DetectedObject::DetectedObject(int objectType,
+                                                       float xmin,
+                                                       float ymin,
+                                                       float xmax,
+                                                       float ymax,
+                                                       float prob,
+                                                       int)
+        : objectType(objectType), xmin(xmin), xmax(xmax), ymin(ymin), ymax(ymax), prob(prob) {
+}
+
+ObjectDetectionMatcher::DetectedObject::DetectedObject(const DetectedObject &other) {
+    this->objectType = other.objectType;
+    this->xmin = other.xmin;
+    this->xmax = other.xmax;
+    this->ymin = other.ymin;
+    this->ymax = other.ymax;
+    this->prob = other.prob;
+}
+
+float ObjectDetectionMatcher::DetectedObject::ioU(const DetectedObject &detected_object_1_,
+                                                  const DetectedObject &detected_object_2_) {
+    // Expand boxes by a small epsilon so zero-area boxes still produce a non-empty intersection
+    float epsilon = 1e-3;
+
+    DetectedObject detectedObject1(detected_object_1_.objectType,
+                                   detected_object_1_.xmin - epsilon,
+                                   detected_object_1_.ymin - epsilon,
+                                   detected_object_1_.xmax + epsilon,
+                                   detected_object_1_.ymax + epsilon, detected_object_1_.prob);
+    DetectedObject detectedObject2(detected_object_2_.objectType,
+                                   detected_object_2_.xmin - epsilon,
+                                   detected_object_2_.ymin - epsilon,
+                                   detected_object_2_.xmax + epsilon,
+                                   detected_object_2_.ymax + epsilon, detected_object_2_.prob);
+
+    if (detectedObject1.objectType != detectedObject2.objectType) {
+        // objects are different, so the result is 0
+        return 0.0f;
+    }
+
+    if (detectedObject1.xmax < detectedObject1.xmin) return 0.0f;
+    if (detectedObject1.ymax < detectedObject1.ymin) return 0.0f;
+    if (detectedObject2.xmax < detectedObject2.xmin) return 0.0f;
+    if (detectedObject2.ymax < detectedObject2.ymin) return 0.0f;
+
+    float xmin = (std::max)(detectedObject1.xmin, detectedObject2.xmin);
+    float ymin = (std::max)(detectedObject1.ymin, detectedObject2.ymin);
+    float xmax = (std::min)(detectedObject1.xmax, detectedObject2.xmax);
+    float ymax = (std::min)(detectedObject1.ymax, detectedObject2.ymax);
+    // intersection
+    float intr;
+
+    if ((xmax >= xmin) && (ymax >= ymin)) {
+        intr = (xmax - xmin) * (ymax - ymin);
+    } else {
+        intr = 0.0f;
+    }
+
+    // union
+    float square1 = (detectedObject1.xmax - detectedObject1.xmin) * (detectedObject1.ymax - detectedObject1.ymin);
+    float square2 = (detectedObject2.xmax - detectedObject2.xmin) * (detectedObject2.ymax - detectedObject2.ymin);
+
+    float unn = square1 + square2 - intr;
+
+    return intr / unn;
+}
+
+void ObjectDetectionMatcher::DetectedObject::printObj() {
+    printf("[%p] objectType=%d, xmin=%f, xmax=%f, ymin=%f, ymax=%f, prob=%f\n",
+           this,
+           objectType,
+           xmin,
+           xmax,
+           ymin,
+           ymax,
+           prob);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// ObjectDetectionMatcher::ImageDescription ////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ObjectDetectionMatcher::ImageDescription::ImageDescription(bool check_probs) :
+        check_probs_(check_probs) {
+}
+
+ObjectDetectionMatcher::ImageDescription::ImageDescription(const std::list<DetectedObject> &alist, bool check_probs)
+        : alist(alist), check_probs_(check_probs) {
+}
+
+ObjectDetectionMatcher::ImageDescription::ImageDescription(const ImageDescription &obj) :
+        check_probs_(obj.checkProbs()) {
+    this->alist = obj.alist;
+}
+
+float ObjectDetectionMatcher::ImageDescription::ioUMultiple(const ImageDescription &detected_objects,
+                                                            const ImageDescription &desired_objects) {
+
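+    // Greedy matching: pair each object from the smaller list with its best-IoU
+    // counterpart in the bigger list, optionally scale each pair's score by the
+    // probability ratio, and normalize the sum by the size of the bigger list.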
+    const ImageDescription *detectedObjectsSmall, *detectedObjectsBig;
+    bool check_probs = desired_objects.checkProbs();
+
+    if (detected_objects.alist.size() < desired_objects.alist.size()) {
+        detectedObjectsSmall = &detected_objects;
+        detectedObjectsBig = &desired_objects;
+    } else {
+        detectedObjectsSmall = &desired_objects;
+        detectedObjectsBig = &detected_objects;
+    }
+
+    std::list<DetectedObject> doS = detectedObjectsSmall->alist;
+    std::list<DetectedObject> doB = detectedObjectsBig->alist;
+
+    float fullScore = 0.0f;
+    while (doS.size() > 0) {
+        float score = 0.0f;
+        std::list<DetectedObject>::iterator bestJ = doB.end();
+        for (auto j = doB.begin(); j != doB.end(); j++) {
+            float curscore = DetectedObject::ioU(*doS.begin(), *j);
+            if (score < curscore) {
+                score = curscore;
+                bestJ = j;
+            }
+        }
+
+        float coeff = 1.0;
+        if (check_probs) {
+            if (bestJ != doB.end()) {
+                float min = std::min(bestJ->prob, doS.begin()->prob);
+                float max = std::max(bestJ->prob, doS.begin()->prob);
+
+                coeff = min / max;
+            }
+        }
+
+        doS.pop_front();
+        if (bestJ != doB.end()) doB.erase(bestJ);
+        fullScore += coeff * score;
+    }
+    fullScore /= detectedObjectsBig->alist.size();
+
+    return fullScore;
+}
+
+void ObjectDetectionMatcher::ImageDescription::addDetectedObject(const DetectedObject &detected_obj) {
+    alist.push_back(detected_obj);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// ObjectDetectionMatcher::ObjectDetectionMatcher //////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ObjectDetectionMatcher::ObjectDetectionMatcher(const RegressionConfig &config)
+        : BaseMatcher(config) {
+}
+
+void ObjectDetectionMatcher::match(const ScoreFunction& score_function) {
+    // Read network
+    string binFileName = testing::FileUtils::fileNameNoExt(config._path_to_models) + ".bin";
+    auto cnnNetwork = config.ie_core->ReadNetwork(config._path_to_models, binFileName);
+
+    if (!config._stat_file.empty()) {
+        InferenceEngine::NetworkStatsMap stat = testing::loadStatisticFromFile(config._stat_file);
+
+        IE_SUPPRESS_DEPRECATED_START
+        ICNNNetworkStats *pstats;
+        ((ICNNNetwork&)cnnNetwork).getStats(&pstats, nullptr);
+        pstats->setNodesStats(stat);
+
+        // iterate over layers and rewrite suppress_normalization to quantization_level,
+        // because the test IRs still use the old attribute name for FP32 layers
+        for (auto layer : cnnNetwork) {
+            if (layer->params.find("suppress_normalization") != layer->params.end() &&
+                layer->params["suppress_normalization"] == "I8") {
+                layer->params["quantization_level"] = "FP32";
+            }
+        }
+        IE_SUPPRESS_DEPRECATED_END
+    }
+
+    if (config._reshape) {
+        auto inputShapes = cnnNetwork.getInputShapes();
+        for (auto & shape : inputShapes) {
+            shape.second[0] = config.batchSize;
+        }
+
+        cnnNetwork.reshape(inputShapes);
+    } else if (config.batchSize != 1) {
+        cnnNetwork.setBatchSize(config.batchSize);
+    }
+
+    res_desc_ = score_function(cnnNetwork);
+
+    if (res_desc_.size() != config.batchSize) {
+        FAIL() << "[ERROR]: Result batch size is not equal to initial.";
+    }
+}
+
+void ObjectDetectionMatcher::checkResult(const std::vector<ImageDescription> &desired) {
+    if ((desired.size() < config.batchSize) || (res_desc_.size() != config.batchSize)) {
+        FAIL() << "[ERROR]: Number of ImageDescription objects less then batch size or result batch size is not equal to initial.\n"
+               << "Batch size: " << config.batchSize << "; Expected outputs number: " << desired.size()
+               << "; Result number: " << res_desc_.size();
+    }
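+    // A batch item passes when its multi-object IoU reaches at least 1.0 - nearValue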
+    string sError;
+    for (int i = 0; i < config.batchSize; i++) {
+        double iou = ImageDescription::ioUMultiple(*res_desc_[i], desired[i]);
+        double minimalScore = 1.0 - config.nearValue;
+        if (iou < minimalScore) {
+            sError += "[ERROR]: Batch #" + std::to_string(i) + ". Similarity is too low: " + std::to_string(iou)
+                      + ". Expected " + std::to_string(minimalScore) + "\n";
+        } else {
+            std::cout << "Batch #" << i << ". Similarity " << iou << " is above the expected value: " << minimalScore
+                      << std::endl;
+        }
+    }
+
+    if (!sError.empty()) {
+        FAIL() << sError;
+    }
+}
+
+void ObjectDetectionMatcher::to(const ImageDescription &desired, const std::shared_ptr<NetworkAdapter>& adapter) {
+    std::vector<ImageDescription> desired_vector = {desired};
+    ASSERT_NO_FATAL_FAILURE(to(desired_vector, adapter));
+}
+
+void ObjectDetectionMatcher::to(const std::vector<ImageDescription> &desired,
+                                const std::shared_ptr<NetworkAdapter>& adapter) {
+    to(desired, [&](CNNNetwork & network) -> ImageDescriptionPtrVect {
+        return adapter->score(network,
+                              config.ie_core,
+                              config._device_name,
+                              config.plugin_config,
+                              config._paths_to_images,
+                              config._reshape,
+                              config.useExportImport);
+    });
+}
+
+void ObjectDetectionMatcher::to(const ImageDescription &desired, const NetworkAdapter& adapter) {
+    std::vector<ImageDescription> desired_vector = {desired};
+    ASSERT_NO_FATAL_FAILURE(to(desired_vector, adapter));
+}
+
+void ObjectDetectionMatcher::to(const std::vector<ImageDescription> &desired,
+                                const NetworkAdapter& adapter) {
+    to(desired, [&](CNNNetwork& network) -> ImageDescriptionPtrVect {
+        return adapter.score(network,
+                             config.ie_core,
+                             config._device_name,
+                             config.plugin_config,
+                             config._paths_to_images,
+                             config._reshape);
+    });
+}
+
+void ObjectDetectionMatcher::to(const std::vector<ImageDescription> &desired, const ScoreFunction& score_function) {
+    // ASSERT_NO_FATAL_FAILURE(checkImgNumber());
+    ASSERT_NO_FATAL_FAILURE(match(score_function));
+    if (desired.size() < config.batchSize) {
+        std::cout << "Number of ImageDescription objects less then batch size" << std::endl;
+        std::vector<ImageDescription> newRef;
+        for (int i = 0; i < config.batchSize; i++) {
+            newRef.push_back(desired[0]);
+        }
+        ASSERT_NO_FATAL_FAILURE(checkResult(newRef));
+    } else {
+        ASSERT_NO_FATAL_FAILURE(checkResult(desired));
+    }
+}
+
+} // namespace Matchers
+} // namespace Regression
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/src/optimized_network_matcher.cpp b/inference-engine/tests_deprecated/functional/ie_tests/src/optimized_network_matcher.cpp
new file mode 100644 (file)
index 0000000..52860bf
--- /dev/null
@@ -0,0 +1,66 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <fstream>
+#include <string>
+#include <gtest/gtest.h>
+#include <ie_plugin_config.hpp>
+#include "optimized_network_matcher.hpp"
+
+using namespace InferenceEngine;
+using namespace InferenceEngine::PluginConfigParams;
+
+
+void Regression::Matchers::OptimizedNetworkMatcher::to(std::string path_to_reference_dump) {
+    ModelsPath path_to_firmware;
+    path_to_firmware << kPathSeparator << config._firmware << kPathSeparator;
+
+    auto compact_token = config.compactMode ? "_compact" : "";
+
+    this->path_to_reference_dump = path_to_firmware + path_to_reference_dump + compact_token + "_firmware.bin";
+}
+
+void Regression::Matchers::OptimizedNetworkMatcher::matchCustom() {
+    ASSERT_NO_FATAL_FAILURE(createExecutableNetworkFromIR());
+    firmware = readDumpFromFile(config._tmp_firmware);
+    ASSERT_NE(firmware.size(), 0);
+}
+
+std::vector<uint8_t> Regression::Matchers::OptimizedNetworkMatcher::readDumpFromFile(std::string path) {
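+    // std::ios::ate opens positioned at end-of-file, so tellg() yields the file size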
+    std::ifstream file(path, std::ios::binary | std::ios::ate);
+    std::streamsize size = file.tellg();
+    if (size <= 0) {
+        return std::vector<uint8_t>();
+    }
+    file.seekg(0, std::ios::beg);
+
+    std::vector<uint8_t> buffer(size);
+    file.read((char*)buffer.data(), size);
+
+    return buffer;
+}
+
+void Regression::Matchers::OptimizedNetworkMatcher::checkResult() {
+    auto refFirmware = readDumpFromFile(path_to_reference_dump);
+
+    ASSERT_EQ(refFirmware.size(), firmware.size()) << "Reference: " << path_to_reference_dump;
+
+    for (size_t i = 0; i < refFirmware.size(); ++i) {
+        ASSERT_EQ(refFirmware[i], firmware[i]) << "firmware mismatch at byte " << i;
+    }
+}
+
+////////////////////////////
+
+void Regression::Matchers::OptimizedNetworkDumper::dump() {
+    ExecutableNetwork executableApi;
+    ASSERT_NO_FATAL_FAILURE(executableApi = createExecutableNetworkFromIR());
+    try {
+        executableApi.Export(config._path_to_aot_model);
+    } catch (const std::exception &e) {
+        FAIL() << e.what();
+    }
+}
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/src/raw_matcher.cpp b/inference-engine/tests_deprecated/functional/ie_tests/src/raw_matcher.cpp
new file mode 100644 (file)
index 0000000..2293fc9
--- /dev/null
@@ -0,0 +1,364 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <format_reader_ptr.h>
+#include <gtest/gtest.h>
+#include "graph_tools.hpp"
+#include "raw_matcher.hpp"
+#include <precision_utils.h>
+
+namespace Regression {
+namespace Matchers {
+
+void RawMatcher::match() {
+    try {
+        // Read network
+        std::string binFileName = testing::FileUtils::fileNameNoExt(config._path_to_models) + ".bin";
+        std::cout << config._path_to_models << std::endl;
+        auto cnnNetwork = config.ie_core->ReadNetwork(config._path_to_models, binFileName);
+
+        InferenceEngine::InputsDataMap networkInputs;
+        networkInputs = cnnNetwork.getInputsInfo();
+        if (networkInputs.size() == 0) {
+            THROW_IE_EXCEPTION << "No inputs detected.";
+        }
+
+        if (config._paths_to_images.size() % (config.batchSize * networkInputs.size()) != 0) {
+            std::cerr << "[WARNING]: Cannot process all input images (" << config._paths_to_images.size()
+                      << ") with the given batch size of " << config.batchSize << "; falling back to batch size 1." << std::endl;
+            config.batchSize = 1;
+        }
+
+        InferenceEngine::DataPtr inputData = cnnNetwork.getInputsInfo().begin()->second->getInputData();
+        InferenceEngine::SizeVector inputDims = inputData->getTensorDesc().getDims();
+
+        if (config._reshape) {
+            auto inputShapes = cnnNetwork.getInputShapes();
+            inputShapes.begin()->second[0] = config.batchSize;
+
+            cnnNetwork.reshape(inputShapes);
+        } else if (config.batchSize != 1) {
+            cnnNetwork.setBatchSize(config.batchSize);
+        }
+
+        // TODO(amalyshe): quick and dirty solution that might not cover all topologies;
+        // it only handles networks whose single input feeds a single layer
+        CNNLayerPtr layer;
+        for (auto input : networkInputs) {
+            InputInfo::Ptr q = input.second;
+            if (config._inputPrecision) q->setPrecision(config._inputPrecision);
+            DataPtr p = q->getInputData();
+            IE_SUPPRESS_DEPRECATED_START
+            layer = p->getInputTo().begin()->second;
+            IE_SUPPRESS_DEPRECATED_END
+        }
+
+        {
+            // Set output precision
+            InferenceEngine::OutputsDataMap out;
+            out = cnnNetwork.getOutputsInfo();
+            for (auto &&item : out) {
+                auto  outputName = item.first;
+                auto& outBlob    = item.second;
+                if (config._outputPrecision) outBlob->setPrecision(config._outputPrecision);
+                if (config._outputBlobPrecision.count(outputName)) outBlob->setPrecision(config._outputBlobPrecision[outputName]);
+            }
+        }
+
+        if (!config.deviceMapping.empty()) {
+            IE_SUPPRESS_DEPRECATED_START
+            CNNNetDFS(layer, [&](const CNNLayerPtr &layer) {
+                auto it = config.deviceMapping.find(layer->name);
+                if (it != config.deviceMapping.end()) {
+                    layer->affinity = it->second;
+                } else {
+                    layer->affinity = "CPU";
+                }
+            });
+            IE_SUPPRESS_DEPRECATED_END
+        }
+
+        // Read image
+        std::vector<std::shared_ptr<unsigned char>> imagesData;
+        unsigned int actualNetSize = 0;
+        for (auto & imageName : config._paths_to_images) {
+            FormatReader::ReaderPtr reader(imageName.c_str());
+            if (reader.get() == nullptr) {
+                THROW_IE_EXCEPTION << "[ERROR]: Image " + imageName + " cannot be read!";
+            }
+            actualNetSize += reader->size();
+            // Store image data
+
+            size_t width = 0, height = 0;
+            SizeVector dims = inputData->getTensorDesc().getDims();
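+            // Pick spatial dims by rank: CHW -> (1,2), NCHW -> (2,3), NCDHW -> (3,4)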
+            if (dims.size() == 3) {
+                height = dims.at(1);
+                width = dims.at(2);
+            } else if (dims.size() == 4) {
+                height = dims.at(2);
+                width = dims.at(3);
+            } else if (dims.size() == 5) {
+                height = dims.at(3);
+                width = dims.at(4);
+            } else {
+                THROW_IE_EXCEPTION << inputData->getName() << " has unsupported layout " << inputData->getTensorDesc().getLayout();
+            }
+
+            std::shared_ptr<unsigned char> data(reader->getData(width, height));
+            if (data.get() != nullptr) {
+                imagesData.push_back(data);
+            } else {
+                THROW_IE_EXCEPTION << "Invalid image '" << imageName << "'";
+            }
+        }
+
+        auto out2 = cnnNetwork.getOutputsInfo();
+        for (auto &&item : out2) {
+            if (config._outputPrecision) item.second->setPrecision(config._outputPrecision);
+            if (config._outputBlobPrecision.count(item.first)) {
+                item.second->setPrecision(config._outputBlobPrecision[item.first]);
+            }
+        }
+
+        auto loadedExecutableNetwork = config.ie_core->LoadNetwork(cnnNetwork, config._device_name, config.plugin_config);
+        InferenceEngine::ExecutableNetwork executableNetwork;
+        if (config.useExportImport) {
+            std::stringstream stream;
+            loadedExecutableNetwork.Export(stream);
+            executableNetwork = config.ie_core->ImportNetwork(stream);
+        } else {
+            executableNetwork = loadedExecutableNetwork;
+        }
+        auto inferRequest = executableNetwork.CreateInferRequest();
+
+        InferenceEngine::BlobMap inputBlobs;
+
+        auto allocateBlob = [](const InferenceEngine::TensorDesc& desc) {
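+            // Choose the blob element type from the descriptor precision;
+            // FP16, Q78 and I16 all use 16-bit storage here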
+            InferenceEngine::Blob::Ptr blob;
+            switch (desc.getPrecision()) {
+                case InferenceEngine::Precision::FP32 :
+                    blob = InferenceEngine::make_shared_blob<float>(desc);
+                    break;
+                case InferenceEngine::Precision::FP16 :
+                case InferenceEngine::Precision::Q78 :
+                case InferenceEngine::Precision::I16 :
+                    blob = InferenceEngine::make_shared_blob<int16_t>(desc);
+                    break;
+                case InferenceEngine::Precision::U8 :
+                    blob = InferenceEngine::make_shared_blob<uint8_t>(desc);
+                    break;
+                default:
+                    THROW_IE_EXCEPTION << "Unsupported blob precision: " << desc.getPrecision();
+            }
+            blob->allocate();
+
+            return blob;
+        };
+
+        for(auto&& inputInfo : cnnNetwork.getInputsInfo()) {
+            std::string inputName = inputInfo.first;
+
+            if (!inferRequest) {
+                // Allocate blobs
+                inputBlobs[inputName] = allocateBlob(inputInfo.second->getTensorDesc());
+            } else {
+                inputBlobs[inputName] = inferRequest.GetBlob(inputName);
+            }
+        }
+
+        {
+            InferenceEngine::OutputsDataMap out;
+            out = cnnNetwork.getOutputsInfo();
+            for (auto &&item : out) {
+                Blob::Ptr output;
+                auto  outputName = item.first;
+                auto& outBlob    = item.second;
+                if (!inferRequest) {
+                    output = allocateBlob(item.second->getTensorDesc());
+                } else {
+                    // TODO(amalyshe): we need to return to GetBlob eventually, after the bug in mkldnnplugin is fixed
+                    output = inferRequest.GetBlob(outputName);
+                    // output = allocateBlob(item.second->getTensorDesc());
+                    // inferRequest.SetBlob(outputName, output);
+                }
+                outputBlobs[outputName] = output;
+            }
+        }
+
+        // loading images in batches
+        for (size_t i = 0; i < config._paths_to_images.size(); i += config.batchSize * inputBlobs.size()) {
+            int k = 0;
+            for(auto&& input: inputBlobs) {
+                for (int j = 0; j != config.batchSize; j++) {
+                    const auto & imageName  = config._paths_to_images[i + j + k];
+                    loadImage(imageName, input.second, true, j);
+                }
+                k++;
+            }
+
+            if (config.isAsync) {
+                inferRequest.StartAsync();
+                inferRequest.Wait(IInferRequest::WaitMode::RESULT_READY);
+            } else {
+                inferRequest.Infer();
+            }
+
+            // Get performance info
+            if (config.perfInfoPtr != nullptr) {
+                *config.perfInfoPtr = inferRequest.GetPerformanceCounts();
+            }
+        }
+    } catch (details::InferenceEngineException &e) {
+        FAIL() << e.what();
+    } catch (std::exception &e) {
+        FAIL() << e.what();
+    }
+}
+
+void RawMatcher::checkResult(const std::map<std::string, std::map<size_t, float>> &allExpected) {
+    auto prepareResults = [&](const Blob::Ptr& output) {
+        std::vector<float> tmp_buffer;
+
+        if (output->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP16) {
+            tmp_buffer.resize(output->size(), 0.f);
+            PrecisionUtils::f16tof32Arrays(tmp_buffer.data(),
+                                           output->buffer().as<int16_t*>(),
+                                           output->size());
+        } else {
+            assert(output->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP32);
+            tmp_buffer.resize(output->size(), 0.f);
+            std::copy_n(output->buffer().as<float*>(), output->size(), tmp_buffer.begin());
+        }
+
+        return tmp_buffer;
+    };
+    if (config.print) {
+        std::cout << "{";
+        for (auto&& out : outputBlobs) {
+            Blob::Ptr& output = out.second;
+            auto results = prepareResults(output);
+            std::cout << "{{\"" << out.first << "\", {\n";
+            for (std::size_t i = 0; i < output->size(); i += (output->size() + config.printNum - 1) / config.printNum) {
+                std::cout << "{" << i << ", " << results[i] << "},\n";
+            }
+            std::cout << "}}},\n";
+        }
+        std::cout << "};" << std::endl;
+    } else {
+        std::stringstream strm;
+        auto generateInfo = [&](const std::vector<float>& results, const std::map<std::size_t, float>& expected) {
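+            // Emit a per-position table of expected vs. actual values together
+            // with absolute and relative errors, plus aggregate statistics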
+            double meanRelative = 0;
+            double maxAbsolute = 0;
+            double maxRelative = 0;
+            strm << std::endl << std::setw(15) << "Position" << std::setw(15) <<
+                "Expected" << std::setw(15) <<
+                "Actual" << std::setw(15) <<
+                "Absolute" << std::setw(15) <<
+                "Relative,%" << std::endl;
+            for (auto e : expected) {
+                double absolute = fabs(e.second - results[e.first]);
+                double relative = fabs(e.second - results[e.first]) / fabs(e.second);
+
+                strm << std::setw(15) << e.first
+                     << std::setw(15) << std::setprecision(6) << e.second
+                     << std::setw(15) << std::setprecision(6) << results[e.first]
+                     << std::setw(15) << std::setprecision(6) << absolute
+                     << std::setw(15) << std::setprecision(6) << relative*100 << std::endl;
+                meanRelative += relative;
+                maxAbsolute = std::max(maxAbsolute, absolute);
+                maxRelative = std::max(maxRelative, relative);
+            }
+            strm << "Max Absolute = " << maxAbsolute
+                 << " Mean Relative = " << meanRelative*100/expected.size()
+                 << " Max Relative = " << maxRelative*100  << '\n';
+        };
+
+        if (0 != config.nearValue) {
+            for (auto expectedPair : allExpected) {
+                Blob::Ptr output = outputBlobs[expectedPair.first];
+                if (!output) {
+                    FAIL() << "Was not able to find expected output " << expectedPair.first;
+                }
+
+                auto results = prepareResults(output);
+
+                const std::map<size_t, float> &expected = expectedPair.second;
+
+                for (auto e : expected) {
+                    if (fabs(e.second - results[e.first]) > config.nearValue) {
+                        strm << "In blob " << expectedPair.first
+                             << " element at " << e.first << " index expected to be " << e.second << " but in fact it is "
+                             << results[e.first] <<
+                             " Delta = " << (fabs(e.second - results[e.first]));
+                        generateInfo(results, expected);
+                        FAIL() << strm.str();
+                    }
+                }
+            }
+        }
+        if (0 != config.meanRelativeError) {
+            for (auto expectedPair : allExpected) {
+                Blob::Ptr output = outputBlobs[expectedPair.first];
+                if (!output) {
+                    FAIL() << "Was not able to find expected output " << expectedPair.first;
+                }
+                auto results = prepareResults(output);
+
+                std::map<std::size_t, float>& expected = expectedPair.second;
+
+                double meanRelative = 0;
+                for (auto e : expected) {
+                    double eps = fabs(e.second - results[e.first]) / fabs(e.second);
+                    meanRelative += eps;
+                }
+                meanRelative /= expected.size();
+                meanRelative *= 100;
+
+                if (meanRelative > config.meanRelativeError) {
+                    strm << "In blob " << expectedPair.first
+                         << " Mean Relative Error = " << meanRelative
+                         << " Expected Mean Relative Error = " << config.meanRelativeError;
+                    generateInfo(results, expected);
+                    FAIL() << strm.str();
+                }
+            }
+        }
+        if (0 != config.maxRelativeError) {
+            for (auto expectedPair : allExpected) {
+                Blob::Ptr output = outputBlobs[expectedPair.first];
+                if (!output) {
+                    FAIL() << "Was not able to find expected output " << expectedPair.first;
+                }
+                auto results = prepareResults(output);
+
+                std::map<std::size_t, float>& expected = expectedPair.second;
+
+                double maxRelative = 0;
+                std::size_t maxPos = 0;
+                for (auto e : expected) {
+                    double eps = fabs(e.second - results[e.first]) / fabs(e.second);
+                    if(eps > maxRelative) {
+                        maxRelative = eps;
+                        maxPos = e.first;
+                    }
+                }
+                maxRelative *= 100;
+
+                if (maxRelative > config.maxRelativeError) {
+                    strm << "In blob " << expectedPair.first << " element at " << maxPos << " index"
+                         << " expected to be " << expected[maxPos] << " but in fact it is " << results[maxPos]
+                         << " Max Relative Error = " << maxRelative
+                         << " Expected Max Relative Error = " << config.maxRelativeError;
+                    generateInfo(results, expected);
+                    FAIL() << strm.str();
+                }
+            }
+        }
+    }
+}
+
+}  // namespace Matchers
+}  // namespace Regression
diff --git a/inference-engine/tests_deprecated/functional/ie_tests/src/segmentation_matcher.cpp b/inference-engine/tests_deprecated/functional/ie_tests/src/segmentation_matcher.cpp
new file mode 100644 (file)
index 0000000..2396eb9
--- /dev/null
@@ -0,0 +1,263 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ctime>
+#include <random>
+#include <fstream>
+#include <format_reader_ptr.h>
+
+#include "segmentation_matcher.hpp"
+
+static std::vector<std::vector<size_t>> blobToImageOutputArray(InferenceEngine::TBlob<float>::Ptr output,
+                                                                      size_t *pWidth, size_t *pHeight,
+                                                                      size_t *pChannels) {
+    std::vector<std::vector<size_t>> outArray;
+    size_t W = 0, C = 0, H = 0;
+
+    auto outputDims = output->getTensorDesc().getDims();
+    if (outputDims.size() == 3) {
+        C = outputDims.at(0);
+        H = outputDims.at(1);
+        W = outputDims.at(2);
+    } else if (outputDims.size() == 4) {
+        C = outputDims.at(1);
+        H = outputDims.at(2);
+        W = outputDims.at(3);
+    } else if (outputDims.size() == 5) {
+        C = outputDims.at(1);
+        H = outputDims.at(3);
+        W = outputDims.at(4);
+    } else {
+        THROW_IE_EXCEPTION << "Output blob has unsupported layout " << output->getTensorDesc().getLayout();
+    }
+
+    // Get classes
+    const float *outData = output->data();
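+    // Per-pixel argmax over channels: the class with the highest score wins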
+    for (unsigned h = 0; h < H; h++) {
+        std::vector<size_t> row;
+        for (unsigned w = 0; w < W; w++) {
+            float max_value = outData[h * W + w];
+            size_t index = 0;
+            for (size_t c = 1; c < C; c++) {
+                size_t dataIndex = c * H * W + h * W + w;
+                if (outData[dataIndex] > max_value) {
+                    index = c;
+                    max_value = outData[dataIndex];
+                }
+            }
+            row.push_back(index);
+        }
+        outArray.push_back(row);
+    }
+
+    if (pWidth != nullptr) *pWidth = W;
+    if (pHeight != nullptr) *pHeight = H;
+    if (pChannels != nullptr) *pChannels = C;
+
+    return outArray;
+}
+
+namespace Regression { namespace Matchers {
+
+void SegmentationMatcher::match() {
+    // Read network
+    std::string binFileName = testing::FileUtils::fileNameNoExt(config._path_to_models) + ".bin";
+    auto network = config.ie_core->ReadNetwork(config._path_to_models, binFileName);
+
+    // Change batch size if it is not equal to 1
+    InferenceEngine::InputsDataMap inputs;
+    inputs = network.getInputsInfo();
+    ASSERT_EQ(inputs.size(), 1);
+    InferenceEngine::InputInfo::Ptr ii = inputs.begin()->second;
+
+    InferenceEngine::SizeVector inputDims = ii->getTensorDesc().getDims();
+    if (inputDims.at(0) != 1) {
+        std::cerr << "[WARNING]: Batch size will be equal 1." << std::endl;
+        network.setBatchSize(1);
+        inputDims = ii->getTensorDesc().getDims();
+    }
+
+    InferenceEngine::OutputsDataMap outInfo;
+    outInfo = network.getOutputsInfo();
+    ASSERT_EQ(outInfo.size(), 1);
+    ASSERT_NE(outInfo.begin()->second, nullptr);
+
+    InferenceEngine::SizeVector outputDims = outInfo.begin()->second->getDims();
+
+    if (outputDims.size() != 4) {
+        THROW_IE_EXCEPTION << "Incorrect output dimensions for Deconvolution model";
+    }
+
+    // Read image
+    FormatReader::ReaderPtr reader(config._paths_to_images[0].c_str());
+    if (reader.get() == nullptr) {
+        THROW_IE_EXCEPTION << "[ERROR]: Image " << config._paths_to_images[0] << " cannot be read!";
+    }
+
+    int inputNetworkSize = static_cast<int>(std::accumulate(
+        inputDims.begin(), inputDims.end(), (size_t)1, std::multiplies<size_t>()));
+
+    if (reader->size() != inputNetworkSize) {
+        THROW_IE_EXCEPTION << "[ERROR]: Input sizes mismatch, got " << reader->size() << " bytes, expecting "
+                           << inputNetworkSize;
+    }
+
+    // Allocate blobs
+    InferenceEngine::Blob::Ptr input;
+    switch (inputs.begin()->second->getPrecision()) {
+        case InferenceEngine::Precision::FP32 :
+            input = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, inputDims, NCHW });
+            break;
+        case InferenceEngine::Precision::Q78 :
+        case InferenceEngine::Precision::I16 :
+            input = InferenceEngine::make_shared_blob<short>({ InferenceEngine::Precision::I16, inputDims, NCHW });
+            break;
+        case InferenceEngine::Precision::U8 :
+            input = InferenceEngine::make_shared_blob<uint8_t>({ InferenceEngine::Precision::U8, inputDims, NCHW });
+            break;
+        default:
+            THROW_IE_EXCEPTION << "Unsupported network precision: " << inputs.begin()->second->getPrecision();
+    }
+    input->allocate();
+
+    output = InferenceEngine::make_shared_blob<float>(outInfo.begin()->second->getTensorDesc());
+    output->allocate();
+
+    // Load image to blob
+    ConvertImageToInput(reader->getData().get(), reader->size(), *input);
+
+    InferenceEngine::ResponseDesc dsc;
+    InferenceEngine::StatusCode sts;
+
+    auto loadedExecutableNetwork = config.ie_core->LoadNetwork(network, config._device_name, config.plugin_config);
+    InferenceEngine::ExecutableNetwork executableNetwork;
+    if (config.useExportImport) {
+        std::stringstream stream;
+        loadedExecutableNetwork.Export(stream);
+        executableNetwork = config.ie_core->ImportNetwork(stream);
+    } else {
+        executableNetwork = loadedExecutableNetwork;
+    }
+
+    InferenceEngine::IInferRequest::Ptr inferRequest;
+    sts = static_cast<IExecutableNetwork::Ptr&>(executableNetwork)->CreateInferRequest(inferRequest, &dsc);
+    if (sts != InferenceEngine::OK) {
+        THROW_IE_EXCEPTION << "Failed CreateInferRequest with error: " << dsc.msg;
+    }
+
+    sts = inferRequest->SetBlob(inputs.begin()->first.c_str(), input, &dsc);
+    if (sts != InferenceEngine::OK) {
+        THROW_IE_EXCEPTION << "Failed SetBlob with error: " << dsc.msg;
+    }
+
+    sts = inferRequest->SetBlob(outInfo.begin()->first.c_str(), output, &dsc);
+    if (sts != InferenceEngine::OK) {
+        THROW_IE_EXCEPTION << "Failed SetBlob with error: " << dsc.msg;
+    }
+
+    // Infer model
+    sts = inferRequest->Infer(&dsc);
+
+    // Check errors
+    if (sts == InferenceEngine::GENERAL_ERROR) {
+        THROW_IE_EXCEPTION << "Scoring failed! Critical error: " << dsc.msg;
+    } else if (sts == InferenceEngine::NOT_IMPLEMENTED) {
+        THROW_IE_EXCEPTION << "Scoring failed! Input data is incorrect and not supported!";
+    } else if (sts == InferenceEngine::NETWORK_NOT_LOADED) {
+        THROW_IE_EXCEPTION << "Scoring failed! " << dsc.msg;
+    }
+
+    // Convert output data and save it to image
+    outArray = blobToImageOutputArray(output, nullptr, nullptr, &C);
+}
+
+float SegmentationMatcher::compareOutputBmp(std::vector<std::vector<size_t>> data, size_t classesNum, const std::string& inFileName) {
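+    // Map each class index to a palette color and count the pixels that exactly
+    // match the reference image; the result is the fraction of matching pixels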
+    unsigned int seed = static_cast<unsigned int>(time(nullptr));
+    std::vector<Color> colors = {
+        {128, 64,  128},
+        {232, 35,  244},
+        {70,  70,  70},
+        {156, 102, 102},
+        {153, 153, 190},
+        {153, 153, 153},
+        {30,  170, 250},
+        {0,   220, 220},
+        {35,  142, 107},
+        {152, 251, 152},
+        {180, 130, 70},
+        {60,  20,  220},
+        {0,   0,   255},
+        {142, 0,   0},
+        {70,  0,   0},
+        {100, 60,  0},
+        {90,  0,   0},
+        {230, 0,   0},
+        {32,  11,  119},
+        {0,   74,  111},
+        {81,  0,   81}
+    };
+    while (classesNum > colors.size()) {
+        static std::mt19937 rng(seed);
+        std::uniform_int_distribution<int> dist(0, 255);
+        Color color(dist(rng), dist(rng), dist(rng));
+        colors.push_back(color);
+    }
+
+    FormatReader::ReaderPtr rd(inFileName.c_str());
+    if (rd.get() == nullptr) {
+        THROW_IE_EXCEPTION << "[ERROR]: Image " << inFileName << " cannot be read!";
+    }
+
+    auto height = data.size();
+    auto width = data.at(0).size();
+
+    if (rd.get()->width() != width || rd.get()->height() != height) {
+        return 0.0;
+    }
+
+    float rate = 0.0;
+
+    unsigned char* pixels = rd.get()->getData().get();
+
+    for (size_t y = 0; y < height; y++) {
+        for (size_t x = 0; x < width; x++) {
+            unsigned char pixel[3];
+            size_t index = data.at(y).at(x);
+            pixel[0] = colors.at(index).red();
+            pixel[1] = colors.at(index).green();
+            pixel[2] = colors.at(index).blue();
+
+            unsigned char pixelR[3];
+            pixelR[0] = pixels[(y*width + x)*3 + 0];
+            pixelR[1] = pixels[(y*width + x)*3 + 1];
+            pixelR[2] = pixels[(y*width + x)*3 + 2];
+
+            if (pixel[0] == pixelR[0] &&
+                pixel[1] == pixelR[1] &&
+                pixel[2] == pixelR[2]) {
+
+                rate++;
+            }
+        }
+    }
+
+    rate /= (width * height);
+    return rate;
+}
+
+void SegmentationMatcher::checkResult(std::string imageFileName) {
+    float rate = compareOutputBmp(outArray, C, TestDataHelpers::get_data_path() + "/test_results/" + imageFileName);
+
+    float dist = 1.0f - rate;
+    if (dist > config.nearValue) {
+        FAIL() << "Comparison distance " << dist << " is greater than " << config.nearValue;
+    } else {
+        std::cout << "Comparison distance " << dist << " is smaller than " << config.nearValue << std::endl;
+    }
+}
+
+} }  // namespace Regression::Matchers
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/CMakeLists.txt b/inference-engine/tests_deprecated/functional/mkldnn/CMakeLists.txt
new file mode 100644 (file)
index 0000000..63a1f58
--- /dev/null
@@ -0,0 +1,67 @@
+# Copyright (C) 2018-2020 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+set(TARGET_NAME MklDnnFunctionalTests)
+
+file(GLOB MKL_DNN_TEST_SOURCES
+        ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/config_param_test/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/extensions_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/network_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/normalization_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/single_layer_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/snippet_test/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/regression_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/graph_tools/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/io_blob_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/int8_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/input_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/inference_engine_regression_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/lstm/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/common_single_layer_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/ie_class/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/single_layer_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/network_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance/transformations/*.cpp
+        )
+
+list(APPEND MKL_DNN_LIBS
+        IESharedTests
+        inference_engine_lp_transformations
+        inference_engine_ir_readers
+        ${Boost_REGEX_LIBRARY})
+
+list(APPEND TEST_SRC ${MKL_DNN_TEST_SOURCES})
+list(APPEND LIBRARIES ${MKL_DNN_LIBS})
+
+list(APPEND DEPENDENCIES
+        MKLDNNPlugin)
+
+source_group("src" FILES ${TEST_SRC})
+source_group("include" FILES ${TEST_INCLUDE})
+
+add_executable(${TARGET_NAME}
+               ${TEST_SRC}
+               ${REGRESSION_TESTS}
+               ${TEST_INCLUDE})
+
+target_compile_definitions(${TARGET_NAME}
+        PRIVATE
+        USE_MKL_DNN=ON
+        INSTANTIATE_TESTS=1
+        DATA_PATH=\"${DATA_PATH}\"
+        MODELS_PATH=\"${MODELS_PATH}\")
+
+target_include_directories(${TARGET_NAME} PRIVATE
+        ${IE_MAIN_SOURCE_DIR}/src/extension
+        ${IE_MAIN_SOURCE_DIR}/src/extension/common)
+
+target_link_libraries(${TARGET_NAME} PRIVATE ${LIBRARIES})
+
+add_dependencies(${TARGET_NAME} ${DEPENDENCIES})
+
+add_test(NAME ${TARGET_NAME}
+         COMMAND ${TARGET_NAME})
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/config_param_test/config_param_test.cpp b/inference-engine/tests_deprecated/functional/mkldnn/config_param_test/config_param_test.cpp
new file mode 100644 (file)
index 0000000..ae9ea61
--- /dev/null
@@ -0,0 +1,51 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <tests_common.hpp>
+#include <tests_common_func.hpp>
+#include <ie_plugin_config.hpp>
+#include <ngraph_functions/subgraph_builders.hpp>
+#include <functional_test_utils/plugin_cache.hpp>
+#include <functional_test_utils/blob_utils.hpp>
+
+using namespace ::testing;
+using namespace InferenceEngine;
+
+class smoke_PropertyTest : public TestsCommon, public TestsCommonFunc {};
+
+TEST_F(smoke_PropertyTest, onSplitConvConcat) {
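+    // Infer the same network with the default config and with
+    // CPU_THROUGHPUT_STREAMS set to AUTO, then compare the raw outputs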
+    auto fnPtr = ngraph::builder::subgraph::makeSplitConvConcat({1, 4, 100, 100});
+
+    CNNNetwork net(fnPtr);
+    auto ieCore = PluginCache::get().ie();
+    InferenceEngine::ExecutableNetwork exeNet = ieCore->LoadNetwork(net, CommonTestUtils::DEVICE_CPU);
+    InferenceEngine::InferRequest inferRequest0 = exeNet.CreateInferRequest();
+
+    auto blob0 = FuncTestUtils::createAndFillBlob(net.getInputsInfo().begin()->second->getTensorDesc());
+
+    inferRequest0.SetBlob(net.getInputsInfo().begin()->first, blob0);
+    inferRequest0.Infer();
+    float* outRawData = inferRequest0.GetBlob(net.getOutputsInfo().begin()->first)->cbuffer().as<float*>();
+
+    exeNet = ieCore->LoadNetwork(net, CommonTestUtils::DEVICE_CPU,
+            {{PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, PluginConfigParams::CPU_THROUGHPUT_AUTO}});
+    InferenceEngine::InferRequest inferRequest1 = exeNet.CreateInferRequest();
+
+    auto blob1 = FuncTestUtils::createAndFillBlob(net.getInputsInfo().begin()->second->getTensorDesc());
+
+    inferRequest1.SetBlob(net.getInputsInfo().begin()->first, blob1);
+    inferRequest1.Infer();
+    float* outRawDataWithConfig = inferRequest1.GetBlob(net.getOutputsInfo().begin()->first)->cbuffer().as<float*>();
+
+    auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32);
+
+    size_t outElementsCount = std::accumulate(begin(fnPtr->get_output_shape(0)), end(fnPtr->get_output_shape(0)), 1,
+                                              std::multiplies<size_t>());
+
+    FuncTestUtils::compareRawBuffers(outRawData, outRawDataWithConfig, outElementsCount,
+                                     outElementsCount,
+                                     thr);
+}
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/extensions_tests/extensions_test.cpp b/inference-engine/tests_deprecated/functional/mkldnn/extensions_tests/extensions_test.cpp
new file mode 100644 (file)
index 0000000..4023b5c
--- /dev/null
@@ -0,0 +1,421 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <tests_common.hpp>
+#include <tests_common_func.hpp>
+#include <memory>
+#include <tests_utils.hpp>
+#include <multi-device/multi_device_config.hpp>
+#include <ie_core.hpp>
+#include <ie_plugin_ptr.hpp>
+#include <ngraph/opsets/opset.hpp>
+#include <ngraph/ngraph.hpp>
+
+using namespace ::testing;
+using namespace InferenceEngine;
+
+struct extension_params {
+    std::string pluginName;
+    std::shared_ptr<IExtension> extension;
+    std::string plugin() { return pluginName + "Plugin"; }
+    // optional config (used for multi-device)
+    std::map<std::string, std::string> config;
+};
+
+using ext_factory = std::function<InferenceEngine::ILayerImplFactory*(const InferenceEngine::CNNLayer *)>;
+
+class FakePrimitiveImpl : public InferenceEngine::ILayerExecImpl {
+public:
+    FakePrimitiveImpl(const InferenceEngine::CNNLayer *layer) {
+        cnnLayer = const_cast<InferenceEngine::CNNLayer *>(layer);
+    }
+    InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf, InferenceEngine::ResponseDesc *resp) noexcept override {
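+        // Advertise a single configuration: one input and one output sharing a
+        // planar descriptor derived from the layer's output tensor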
+        InferenceEngine::LayerConfig config;
+        config.dynBatchSupport = true;
+        if (cnnLayer->outData.size() != 1 || cnnLayer->insData.size() != 1)
+            return InferenceEngine::GENERAL_ERROR;
+        InferenceEngine::DataConfig cfg;
+        cfg.constant = false;
+        cfg.inPlace = 0;
+        InferenceEngine::SizeVector order;
+        for (size_t i = 0; i < cnnLayer->outData[0]->getTensorDesc().getDims().size(); i++) {
+            order.push_back(i);
+        }
+        cfg.desc = InferenceEngine::TensorDesc(cnnLayer->outData[0]->getTensorDesc().getPrecision(),
+                                               cnnLayer->outData[0]->getTensorDesc().getDims(),
+                                               {cnnLayer->outData[0]->getTensorDesc().getDims(), order});
+        config.outConfs.push_back(cfg);
+        config.inConfs.push_back(cfg);
+        conf.push_back(config);
+        return InferenceEngine::OK;
+    }
+    InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc *resp) noexcept override {
+        return InferenceEngine::OK;
+    }
+    InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs, InferenceEngine::ResponseDesc *resp) noexcept override {
+        return InferenceEngine::OK;
+    }
+
+private:
+    InferenceEngine::CNNLayer* cnnLayer;
+};
+
+class FakePrimitiveFactory : public InferenceEngine::ILayerImplFactory {
+public:
+    FakePrimitiveFactory(const InferenceEngine::CNNLayer *layer) {
+        cnnLayer = const_cast<InferenceEngine::CNNLayer *>(layer);
+    }
+    // The first implementation has higher priority than the following ones
+    InferenceEngine::StatusCode getImplementations(std::vector<InferenceEngine::ILayerImpl::Ptr>& impls, InferenceEngine::ResponseDesc *resp) noexcept override {
+        impls.push_back(InferenceEngine::ILayerImpl::Ptr(new FakePrimitiveImpl(cnnLayer)));
+        return InferenceEngine::OK;
+    }
+
+private:
+    InferenceEngine::CNNLayer * cnnLayer;
+};
+
+class TestExtension : public InferenceEngine::IExtension {
+public:
+    TestExtension() {
+        factories["Fake"] = [](const InferenceEngine::CNNLayer * cnnLayer) -> InferenceEngine::ILayerImplFactory* { return new FakePrimitiveFactory(cnnLayer); };
+    }
+    void Release() noexcept override { delete this; }
+
+    void GetVersion(const InferenceEngine::Version *&versionInfo) const noexcept override
+    {
+        static const InferenceEngine::Version VERSION{{}, "", ""};
+        versionInfo = &VERSION;
+    }
+
+    void Unload() noexcept override {}
+    StatusCode getPrimitiveTypes(char**& types, unsigned int& size, ResponseDesc* resp) noexcept override {
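+        // Return a heap-allocated array of C strings, one per registered factory
+        // type; the caller takes ownership of the array and its elements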
+        types = new char *[factories.size()];
+        size = static_cast<unsigned int>(factories.size());
+        size_t count = 0;
+        for (auto it = factories.begin(); it != factories.end(); it++, count++) {
+            types[count] = new char[it->first.size() + 1];
+            std::copy(it->first.begin(), it->first.end(), types[count]);
+            types[count][it->first.size()] = '\0';
+        }
+        return InferenceEngine::OK;
+    }
+
+    StatusCode getFactoryFor(ILayerImplFactory *&factory, const CNNLayer *cnnLayer, ResponseDesc *resp) noexcept override {
+        if (factories.find(cnnLayer->type) == factories.end()) {
+            std::string errorMsg = std::string("Factory for ") + cnnLayer->type + " wasn't found!";
+            auto copied = errorMsg.copy(resp->msg, sizeof(resp->msg) - 1);
+            resp->msg[copied] = '\0';
+            return InferenceEngine::NOT_FOUND;
+        }
+        factory = factories[cnnLayer->type](cnnLayer);
+        return InferenceEngine::OK;
+    }
+
+    StatusCode getShapeInferImpl(IShapeInferImpl::Ptr& impl, const char* type, ResponseDesc* resp) noexcept override {
+        return NOT_IMPLEMENTED;
+    }
+private:
+    std::map<std::string, ext_factory> factories;
+};
+
+class NewFakePrimitiveImpl : public InferenceEngine::ILayerExecImpl {
+public:
+    NewFakePrimitiveImpl(const std::shared_ptr<ngraph::Node>& node): node(node) {}
+
+    InferenceEngine::StatusCode getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf, InferenceEngine::ResponseDesc *resp) noexcept override {
+        InferenceEngine::LayerConfig config;
+        config.dynBatchSupport = true;
+        if (node->outputs().size() != 1 || node->inputs().size() != 1)
+            return InferenceEngine::GENERAL_ERROR;
+        InferenceEngine::DataConfig cfg;
+        cfg.constant = false;
+        cfg.inPlace = 0;
+        InferenceEngine::SizeVector order;
+        auto partialShape = node->get_output_partial_shape(0);
+        if (partialShape.is_dynamic())
+            return InferenceEngine::GENERAL_ERROR;
+        auto shape = node->get_output_shape(0);
+        for (size_t i = 0; i < shape.size(); i++) {
+            order.push_back(i);
+        }
+        cfg.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32,
+                                               shape, {shape, order});
+        config.outConfs.push_back(cfg);
+        config.inConfs.push_back(cfg);
+        conf.push_back(config);
+        return InferenceEngine::OK;
+    }
+    InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, InferenceEngine::ResponseDesc *resp) noexcept override {
+        return InferenceEngine::OK;
+    }
+    InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs, InferenceEngine::ResponseDesc *resp) noexcept override {
+        return InferenceEngine::OK;
+    }
+
+private:
+    const std::shared_ptr<ngraph::Node> node;
+};
+
+class FakeTestOp: public ngraph::op::Op {
+public:
+    static constexpr ngraph::NodeTypeInfo type_info{"Fake", 0};
+    const ngraph::NodeTypeInfo& get_type_info() const override { return type_info;  }
+
+    FakeTestOp() = default;
+    explicit FakeTestOp(const ngraph::Output<ngraph::Node>& arg): Op({arg}) {
+        constructor_validate_and_infer_types();
+    }
+
+    void validate_and_infer_types() override {
+        auto input_shape = get_input_partial_shape(0).to_shape();
+        set_output_type(0, get_input_element_type(0), ngraph::PartialShape(input_shape));
+    }
+
+    std::shared_ptr<ngraph::Node> copy_with_new_args(const ngraph::NodeVector& new_args) const override {
+        if (new_args.size() != 1) {
+            throw ngraph::ngraph_error("Incorrect number of new arguments");
+        }
+
+        return std::make_shared<FakeTestOp>(new_args.at(0));
+    }
+
+    bool visit_attributes(ngraph::AttributeVisitor& visitor) override {
+        return true;
+    }
+};
+
+constexpr ngraph::NodeTypeInfo FakeTestOp::type_info;
+
+class NewTestExtension : public InferenceEngine::IExtension {
+public:
+    NewTestExtension() {
+        impls["Fake"] = [](const std::shared_ptr<ngraph::Node>& node) -> InferenceEngine::ILayerImpl::Ptr {
+            return std::make_shared<NewFakePrimitiveImpl>(node);
+        };
+    }
+    void Release() noexcept override { delete this; }
+
+    void GetVersion(const InferenceEngine::Version *&versionInfo) const noexcept override {
+        static const InferenceEngine::Version VERSION{{}, "", ""};
+        versionInfo = &VERSION;
+    }
+
+    void SetLogCallback(InferenceEngine::IErrorListener &listener) noexcept override {}
+
+    void Unload() noexcept override {}
+
+    std::vector<std::string> getImplTypes(const std::shared_ptr<ngraph::Node>& node) override {
+        if (impls.find(node->description()) == impls.end())
+            return {};
+        return {"CPU"};
+    }
+
+    InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) override {
+        if (impls.find(node->description()) == impls.end() || implType != "CPU")
+            return nullptr;
+        return impls[node->description()](node);
+    }
+
+    std::map<std::string, ngraph::OpSet> getOpSets() override {
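+        // The "experimental" opset name must match the version attribute of the Fake layer in the IR XML.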
+        static std::map<std::string, ngraph::OpSet> opsets;
+        if (opsets.empty()) {
+            ngraph::OpSet opset;
+            opset.insert<FakeTestOp>();
+            opsets["experimental"] = opset;
+        }
+        return opsets;
+    }
+private:
+    std::map<std::string, std::function<InferenceEngine::ILayerImpl::Ptr(const std::shared_ptr<ngraph::Node>)>> impls;
+};
+
+class smoke_ExtensionTest : public TestsCommon,
+                            public TestsCommonFunc {
+
+protected:
+    void checkExtensionRemoved(extension_params p) {
+        try {
+            StatusCode sts;
+            ResponseDesc resp;
+            std::unique_ptr<InferenceEnginePluginPtr> score_engine(
+                    new InferenceEnginePluginPtr(make_plugin_name(p.plugin()).c_str()));
+            sts = (*score_engine)->SetConfig(p.config, &resp);
+            ASSERT_TRUE(sts == OK) << resp.msg;
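+            // use_count == 2: the shared_ptr held by the test body plus the copy inside extension_params.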
+            ASSERT_EQ(p.extension.use_count(), 2);
+
+            sts = (*score_engine)->AddExtension(p.extension, &resp);
+            ASSERT_TRUE(sts == OK) << resp.msg;
+            // multi-device holds an additional reference to the extension ptr
+            ASSERT_EQ(p.extension.use_count(), p.pluginName.find("Multi")==std::string::npos ? 3 : 4);
+            score_engine.reset();
+
+            ASSERT_EQ(p.extension.use_count(), 2);
+        } catch (const InferenceEngine::details::InferenceEngineException& e) {
+            FAIL() << e.what();
+        }
+    }
+    void checkExtensionNotRemovedFromAnotherEngineObject(extension_params p) {
+        try {
+            StatusCode sts;
+            ResponseDesc resp;
+            std::unique_ptr<InferenceEnginePluginPtr> score_engine1(
+                    new InferenceEnginePluginPtr(make_plugin_name(p.plugin()).c_str()));
+            sts = (*score_engine1)->SetConfig(p.config, &resp);
+            ASSERT_TRUE(sts == OK) << resp.msg;
+
+            std::unique_ptr<InferenceEnginePluginPtr> score_engine2(
+                    new InferenceEnginePluginPtr(make_plugin_name(p.plugin()).c_str()));
+            sts = (*score_engine2)->SetConfig(p.config, &resp);
+            ASSERT_TRUE(sts == OK) << resp.msg;
+            ASSERT_EQ(p.extension.use_count(), 2);
+
+            sts = (*score_engine1)->AddExtension(p.extension, &resp);
+            ASSERT_TRUE(sts == OK) << resp.msg;
+            // multi-device holds an additional reference to the extension ptr
+            ASSERT_EQ(p.extension.use_count(), p.pluginName.find("Multi")==std::string::npos ? 3 : 4);
+            score_engine2.reset();
+
+            // multi-device holds an additional reference to the extension ptr
+            ASSERT_EQ(p.extension.use_count(), p.pluginName.find("Multi")==std::string::npos ? 3 : 4);
+            score_engine1.reset();
+            ASSERT_EQ(p.extension.use_count(), 2);
+        } catch (const InferenceEngine::details::InferenceEngineException& e) {
+            FAIL() << e.what();
+        }
+    }
+
+    void checkNotSharedExtensions(std::shared_ptr<IExtension> extension, std::string device) {
+        std::string model = R"V0G0N(
+        <Net Name="DoubleLayer_Only" version="10" precision="FP32" batch="1">
+            <layers>
+                <layer name="in1" type="Parameter" precision="FP32" version="opset1" id="0">
+                    <data element_type="f32" shape="1,3,5,5"/>
+                    <output>
+                        <port id="0" precision="FP32">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>5</dim>
+                            <dim>5</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="fake_layer" id="1" type="Fake" version="experimental" precision="FP32">
+                    <input>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>5</dim>
+                            <dim>5</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="2" precision="FP32">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>5</dim>
+                            <dim>5</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="output" type="Result" id="2" version="opset1">
+                    <input>
+                        <port id="0" precision="FP32">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>5</dim>
+                            <dim>5</dim>
+                        </port>
+                    </input>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
+                <edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
+            </edges>
+        </Net>
+        )V0G0N";
+
+        try {
+            Core ie;
+            ie.AddExtension(extension, "CPU");
+            Core ie2;
+
+            Blob::Ptr weights;
+            CNNNetwork cnnNet1 = ie.ReadNetwork(model, weights);
+            CNNNetwork cnnNet2 = ie2.ReadNetwork(model, weights);
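+            // The extension is registered only with `ie`; `ie2` cannot resolve the
+            // experimental "Fake" op, so loading the second network must fail.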
+            ASSERT_NO_THROW(ie.LoadNetwork(cnnNet1, device));
+            ASSERT_THROW(ie2.LoadNetwork(cnnNet2, device), details::InferenceEngineException);
+        } catch (const InferenceEngine::details::InferenceEngineException& e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+/*************************************************
+ * !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!!
+ * All ref values were obtained from Caffe scoring
+ * !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!!
+ *************************************************/
+#ifndef ENABLE_MKL_DNN
+ #include "disable_tests.hpp"
+#endif
+
+TEST_F(smoke_ExtensionTest, MKLDNN_delete_extension) {
+    std::shared_ptr<IExtension> ext(new TestExtension());
+    checkExtensionRemoved({"MKLDNN", ext});
+}
+
+TEST_F(smoke_ExtensionTest, MKLDNN_no_delete_extension_from_another_engine) {
+    std::shared_ptr<IExtension> ext(new TestExtension());
+    checkExtensionNotRemovedFromAnotherEngineObject({"MKLDNN", ext});
+}
+
+TEST_F(smoke_ExtensionTest, MKLDNN_no_share_extension_between_engines) {
+    std::shared_ptr<IExtension> ext(new TestExtension());
+    checkNotSharedExtensions(ext, "CPU");
+}
+
+TEST_F(smoke_ExtensionTest, MKLDNN_no_share_new_extension_between_engines) {
+    std::shared_ptr<IExtension> ext(new NewTestExtension());
+    checkNotSharedExtensions(ext, "CPU");
+}
+
+TEST_F(smoke_ExtensionTest, MULTI_delete_extension) {
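+    // Skip the MULTI cases below when the MULTI plugin is not available in this build.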
+    try {
+        InferenceEngine::Core ie;
+        ie.GetVersions("MULTI");
+    } catch (...) {
+        GTEST_SKIP();
+    }
+    std::shared_ptr<IExtension> ext(new TestExtension());
+    checkExtensionRemoved({"MultiDevice", ext, {{MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, "CPU"}}});
+}
+
+TEST_F(smoke_ExtensionTest, MULTI_no_delete_extension_from_another_engine) {
+    try {
+        InferenceEngine::Core ie;
+        ie.GetVersions("MULTI");
+    } catch (...) {
+        GTEST_SKIP();
+    }
+    std::shared_ptr<IExtension> ext(new TestExtension());
+    checkExtensionNotRemovedFromAnotherEngineObject({"MultiDevice", ext, {{MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, "CPU"}}});
+}
+
+TEST_F(smoke_ExtensionTest, MULTI_no_share_extension_between_engines) {
+    try {
+        InferenceEngine::Core ie;
+        ie.GetVersions("MULTI");
+    } catch (...) {
+        GTEST_SKIP();
+    }
+    std::shared_ptr<IExtension> ext(new TestExtension());
+    checkNotSharedExtensions(ext, "MULTI:CPU");
+}
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/network_tests/ngraph_network_test.cpp b/inference-engine/tests_deprecated/functional/mkldnn/network_tests/ngraph_network_test.cpp
new file mode 100644 (file)
index 0000000..fd08dd1
--- /dev/null
@@ -0,0 +1,364 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <tests_common.hpp>
+#include <tests_common_func.hpp>
+#include <memory>
+#include "xml_helper.hpp"
+#include <ie_ir_reader.hpp>
+#include <ie_core.hpp>
+
+#define XBYAK_NO_OP_NAMES
+#define XBYAK_UNDEF_JNL
+
+using namespace ::testing;
+using namespace InferenceEngine;
+
+struct ngraph_network_param {
+    std::string modelFile;
+    std::string imageName;
+    std::string ngraphModel;
+
+    std::string model() {
+        ModelsPath result;
+        result += kPathSeparator;
+        result += modelFile;
+        return result;
+    }
+
+    std::string weights() {
+        ModelsPath result;
+        result += kPathSeparator;
+        result += FileUtils::fileNameNoExt(modelFile);
+        result += ".bin";
+        return result;
+    }
+
+    std::string image() {
+        std::string result = TestDataHelpers::get_data_path();
+        result += kPathSeparator;
+        result += imageName;
+        return result;
+    }
+
+    std::string v7model() {
+        ModelsPath result;
+        result += kPathSeparator;
+        result += ngraphModel;
+        return result;
+    }
+};
+
+class smoke_NGraphNetworkTest : public TestsCommon, public TestsCommonFunc {
+protected:
+    Blob::Ptr classifyV7(ngraph_network_param p, size_t batch_size = 1, float threshold = 0.005f) {
+        IRReader reader;
+        auto ngraph = reader.read(p.v7model());
+
+        auto network = CNNNetwork(ngraph);
+
+        Core ie;
+        ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU");
+        InferRequest inferRequest = exeNetwork.CreateInferRequest();
+
+        Blob::Ptr src = readInput(p.image(), batch_size);
+
+        OutputsDataMap outInfo = network.getOutputsInfo();
+        InputsDataMap inputInfo = network.getInputsInfo();
+
+        auto dst = make_shared_blob<float>(outInfo.begin()->second->getTensorDesc());
+        dst->allocate();
+        inferRequest.SetBlob(inputInfo.begin()->first, src);
+        inferRequest.SetBlob(outInfo.begin()->first, dst);
+        inferRequest.Infer();
+
+        return dst;
+    }
+
+    Blob::Ptr classifyV5(ngraph_network_param p, size_t batch_size = 1, float threshold = 0.005f) {
+        Core ie;
+        CNNNetwork network = ie.ReadNetwork(p.model(), p.weights());
+        if (batch_size != 1)
+            network.setBatchSize(batch_size);
+
+        ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU");
+        InferRequest inferRequest = exeNetwork.CreateInferRequest();
+
+        Blob::Ptr src = readInput(p.image(), batch_size);
+
+        OutputsDataMap outInfo = network.getOutputsInfo();
+
+        auto dst = make_shared_blob<float>(outInfo.begin()->second->getTensorDesc());
+        dst->allocate();
+        inferRequest.SetBlob(network.getInputsInfo().begin()->first, src);
+        inferRequest.SetBlob(outInfo.begin()->first, dst);
+        inferRequest.Infer();
+
+        return dst;
+    }
+
+    void classify(ngraph_network_param p) {
+        try {
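+            // Score the same topology through the ngraph reader path (classifyV7) and the
+            // legacy reader path (classifyV5); the outputs must match element for element.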
+            auto v7blb = classifyV7(p);
+            auto v5blb = classifyV5(p);
+
+            auto* v7data = v7blb->buffer().as<float *>();
+            auto* v5data = v5blb->buffer().as<float *>();
+
+            ASSERT_EQ(v7blb->size(), v5blb->size());
+            for (size_t i = 0; i < v7blb->size(); i++) {
+                ASSERT_EQ(v7data[i], v5data[i]);
+            }
+        } catch (const InferenceEngine::details::InferenceEngineException& e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+/*************************************************
+ * !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!!
+ * All ref values were obtained from Caffe scoring
+ * !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!!
+ *************************************************/
+#ifndef ENABLE_MKL_DNN
+ #include "disable_tests.hpp"
+#endif
+
+TEST_F(smoke_NGraphNetworkTest, reshapeLoadTest) {
+    std::string model = R"V0G0N(
+<?xml version="1.0" ?>
+<net batch="1" name="test" precision="FP32" version="10">
+    <layers>
+        <layer id="0" name="data" type="Parameter" version="opset1">
+            <data element_type="f32" shape="1,1,28,28"/>
+            <output>
+                <port id="0" precision="FP32">
+                    <dim>1</dim>
+                    <dim>1</dim>
+                    <dim>28</dim>
+                    <dim>28</dim>
+                </port>
+            </output>
+        </layer>
+        <layer id="1" name="13/Output_0/Data__const" type="Const" version="opset1">
+            <data offset="0" size="2000"/>
+            <output>
+                <port id="1" precision="FP32">
+                    <dim>20</dim>
+                    <dim>1</dim>
+                    <dim>5</dim>
+                    <dim>5</dim>
+                </port>
+            </output>
+        </layer>
+        <layer id="2" name="conv1" type="Convolution" version="opset1">
+            <data dilations="1,1" group="1" output="20" pads_begin="0,0" pads_end="0,0" strides="1,1"/>
+            <input>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>1</dim>
+                    <dim>28</dim>
+                    <dim>28</dim>
+                </port>
+                <port id="1">
+                    <dim>20</dim>
+                    <dim>1</dim>
+                    <dim>5</dim>
+                    <dim>5</dim>
+                </port>
+            </input>
+            <output>
+                <port id="2" precision="FP32">
+                    <dim>1</dim>
+                    <dim>20</dim>
+                    <dim>24</dim>
+                    <dim>24</dim>
+                </port>
+            </output>
+        </layer>
+        <layer id="3" name="conv1/Dims215/copy_const" type="Const" version="opset1">
+            <data offset="2000" size="80"/>
+            <output>
+                <port id="1" precision="FP32">
+                    <dim>1</dim>
+                    <dim>20</dim>
+                    <dim>1</dim>
+                    <dim>1</dim>
+                </port>
+            </output>
+        </layer>
+        <layer id="4" name="conv1/Bias" type="Add" version="opset1">
+            <input>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>20</dim>
+                    <dim>24</dim>
+                    <dim>24</dim>
+                </port>
+                <port id="1">
+                    <dim>1</dim>
+                    <dim>20</dim>
+                    <dim>1</dim>
+                    <dim>1</dim>
+                </port>
+            </input>
+            <output>
+                <port id="2" precision="FP32">
+                    <dim>1</dim>
+                    <dim>20</dim>
+                    <dim>24</dim>
+                    <dim>24</dim>
+                </port>
+            </output>
+        </layer>
+        <layer id="5" name="pool1" type="MaxPool" version="opset1">
+            <data kernel="2,2" pads_begin="0,0" pads_end="0,0" rounding_type="ceil" strides="2,2"/>
+            <input>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>20</dim>
+                    <dim>24</dim>
+                    <dim>24</dim>
+                </port>
+            </input>
+            <output>
+                <port id="1" precision="FP32">
+                    <dim>1</dim>
+                    <dim>20</dim>
+                    <dim>12</dim>
+                    <dim>12</dim>
+                </port>
+            </output>
+        </layer>
+        <layer id="6" name="11/Output_0/Data__const" type="Const" version="opset1">
+            <data offset="2080" size="100000"/>
+            <output>
+                <port id="1" precision="FP32">
+                    <dim>50</dim>
+                    <dim>20</dim>
+                    <dim>5</dim>
+                    <dim>5</dim>
+                </port>
+            </output>
+        </layer>
+        <layer id="7" name="conv2" type="Convolution" version="opset1">
+            <data dilations="1,1" group="1" output="50" pads_begin="0,0" pads_end="0,0" strides="1,1"/>
+            <input>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>20</dim>
+                    <dim>12</dim>
+                    <dim>12</dim>
+                </port>
+                <port id="1">
+                    <dim>50</dim>
+                    <dim>20</dim>
+                    <dim>5</dim>
+                    <dim>5</dim>
+                </port>
+            </input>
+            <output>
+                <port id="2" precision="FP32">
+                    <dim>1</dim>
+                    <dim>50</dim>
+                    <dim>8</dim>
+                    <dim>8</dim>
+                </port>
+            </output>
+        </layer>
+        <layer id="8" name="conv2/Dims209/copy_const" type="Const" version="opset1">
+            <data offset="102080" size="200"/>
+            <output>
+                <port id="1" precision="FP32">
+                    <dim>1</dim>
+                    <dim>50</dim>
+                    <dim>1</dim>
+                    <dim>1</dim>
+                </port>
+            </output>
+        </layer>
+        <layer id="9" name="conv2/Bias" type="Add" version="opset1">
+            <input>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>50</dim>
+                    <dim>8</dim>
+                    <dim>8</dim>
+                </port>
+                <port id="1">
+                    <dim>1</dim>
+                    <dim>50</dim>
+                    <dim>1</dim>
+                    <dim>1</dim>
+                </port>
+            </input>
+            <output>
+                <port id="2" precision="FP32">
+                    <dim>1</dim>
+                    <dim>50</dim>
+                    <dim>8</dim>
+                    <dim>8</dim>
+                </port>
+            </output>
+        </layer>
+        <layer id="10" name="pool2" type="MaxPool" version="opset1">
+            <data kernel="2,2" pads_begin="0,0" pads_end="0,0" rounding_type="ceil" strides="2,2"/>
+            <input>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>50</dim>
+                    <dim>8</dim>
+                    <dim>8</dim>
+                </port>
+            </input>
+            <output>
+                <port id="1" precision="FP32">
+                    <dim>1</dim>
+                    <dim>50</dim>
+                    <dim>4</dim>
+                    <dim>4</dim>
+                </port>
+            </output>
+        </layer>
+        <layer id="11" name="prob/sink_port_0" type="Result" version="opset1">
+            <input>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>50</dim>
+                    <dim>4</dim>
+                    <dim>4</dim>
+                </port>
+            </input>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
+        <edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
+        <edge from-layer="2" from-port="2" to-layer="4" to-port="0"/>
+        <edge from-layer="3" from-port="1" to-layer="4" to-port="1"/>
+        <edge from-layer="4" from-port="2" to-layer="5" to-port="0"/>
+        <edge from-layer="5" from-port="1" to-layer="7" to-port="0"/>
+        <edge from-layer="6" from-port="1" to-layer="7" to-port="1"/>
+        <edge from-layer="7" from-port="2" to-layer="9" to-port="0"/>
+        <edge from-layer="8" from-port="1" to-layer="9" to-port="1"/>
+        <edge from-layer="9" from-port="2" to-layer="10" to-port="0"/>
+        <edge from-layer="10" from-port="1" to-layer="11" to-port="0"/>
+    </edges>
+</net>)V0G0N";
+    InferenceEngine::Blob::Ptr weights = make_shared_blob<uint8_t>({InferenceEngine::Precision::U8, {1724336}, InferenceEngine::C});
+    weights->allocate();
+    fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
+    std::map<std::string, std::vector<size_t>> shape;
+    shape["data"] = {1, 1, 28, 28};
+
+    Core ie;
+    CNNNetwork network = ie.ReadNetwork(model, weights);
+    for (size_t i = 0; i < 10; i++) {
+        network.reshape(shape);
+        ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU");
+    }
+}
+
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/regression_tests/regression_reference.cpp b/inference-engine/tests_deprecated/functional/mkldnn/regression_tests/regression_reference.cpp
new file mode 100644 (file)
index 0000000..1cdaffe
--- /dev/null
@@ -0,0 +1,13 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "regression_reference.hpp"
+
+namespace Regression {
+    namespace Reference {
+
+        std::map<std::string, std::vector<ClassificationScoringResultsForTests>> values = {
+            };
+    }  // namespace Reference
+}  // namespace Regression
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/common_single_layer_tests/single_layer_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/common_single_layer_tests/single_layer_tests.cpp
new file mode 100644 (file)
index 0000000..c593a44
--- /dev/null
@@ -0,0 +1,234 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "single_layer_tests.hpp"
+
+
+static CommonTestUtils::conv_common_params convParams =
+        {
+                PropertyVector<unsigned>{{2, 2}},  // stride
+                PropertyVector<unsigned>{{3, 3}},  // kernel
+                {},                                // pad_begin
+                {},                                // pad_end
+                PropertyVector<unsigned>{{1, 1}},  // dilation
+                "same_upper",                      // auto_pad
+                1,                                 // group
+                2                                  // out_c
+        };
+
+static CommonTestUtils::pool_common_params poolParams =
+        {
+                PropertyVector<unsigned>{{2, 2}},  // stride
+                PropertyVector<unsigned>{{3, 3}},  // kernel
+                {},                                // pad_begin
+                {},                                // pad_end
+                "same_upper",                      // auto_pad
+                true,                              // avg
+                false                              // exclude_pad
+        };
+
+static CommonTestUtils::conv_common_params defConvParamsHeavy =
+        {
+                PropertyVector<unsigned>{{1, 1}},  // stride
+                PropertyVector<unsigned>{{3, 3}},  // kernel
+                {},                                // pad_begin
+                {},                                // pad_end
+                PropertyVector<unsigned>{{2, 2}},  // dilation
+                "same_upper",                      // auto_pad
+                1,                                 // group
+                128                                // out_c
+        };
+
+static CommonTestUtils::conv_common_params defConvParamsLight0 =
+        {
+                PropertyVector<unsigned>{{1, 1}},  // stride
+                PropertyVector<unsigned>{{3, 3}},  // kernel
+                {},                                // pad_begin
+                {},                                // pad_end
+                PropertyVector<unsigned>{{2, 2}},  // dilation
+                "same_upper",                      // auto_pad
+                1,                                 // group
+                4                                  // out_c
+        };
+
+static CommonTestUtils::conv_common_params defConvParamsLight1 =
+        {
+                PropertyVector<unsigned>{{2, 2}},  // stride
+                PropertyVector<unsigned>{{3, 3}},  // kernel
+                {},                                // pad_begin
+                {},                                // pad_end
+                PropertyVector<unsigned>{{1, 1}},  // dilation
+                "same_upper",                      // auto_pad
+                1,                                 // group
+                16                                 // out_c
+        };
+
+
+static CommonTestUtils::conv_common_params defConvParamsLight2 =
+        {
+                PropertyVector<unsigned>{{2, 2}},  // stride
+                PropertyVector<unsigned>{{3, 3}},  // kernel
+                {},                                // pad_begin
+                {},                                // pad_end
+                PropertyVector<unsigned>{{2, 2}},  // dilation
+                "same_upper",                      // auto_pad
+                1,                                 // group
+                15                                 // out_c
+        };
+
+
+static CommonTestUtils::conv_common_params defConvParamsLight3 =
+        {
+                PropertyVector<unsigned>{{1, 1}},  // stride
+                PropertyVector<unsigned>{{3, 3}},  // kernel
+                {},                                // pad_begin
+                {},                                // pad_end
+                PropertyVector<unsigned>{{2, 2}},  // dilation
+                "same_upper",                      // auto_pad
+                2,                                 // group
+                4                                  // out_c
+        };
+
+static std::vector<PluginParams> pluginParams = {
+        PluginDependentParam{"CPU", Layout::NCHW, Precision::FP32, 0.001f}
+};
+
+std::string
+getTestCaseName(testing::TestParamInfo<std::tuple<InitialShapes, NewShapes, PluginParams, Helper>> obj) {
+    auto params = obj.param;
+    LayerTestHelper::Ptr helper = std::get<3>(params);
+    return "MKLDNN" + helper->getType();
+}
+
+#if (defined INSTANTIATE_TESTS)
+
+INSTANTIATE_TEST_CASE_P(
+        Conv_smoke, CommonSingleLayerTest,
+        ::testing::Combine(
+        ::testing::Values(InitialShapes({
+                                                {{1, 2, 16, 16}},           // input
+                                                {{1, 2, 8,  8}}             // output
+                                        })),
+        ::testing::Values(NewShapes({
+                                            {{1, 2, 15, 15}},               // input
+                                            {{1, 2, 8,  8}}                 // output
+                                    })),
+        ::testing::ValuesIn(pluginParams),
+        ::testing::Values(Helper(std::make_shared<ConvolutionTestHelper>(convParams)))
+), getTestCaseName
+);
+
+INSTANTIATE_TEST_CASE_P(
+        Deconv_smoke, CommonSingleLayerTest,
+        ::testing::Combine(
+        ::testing::Values(InitialShapes({
+                                                {{1, 2, 8,  8}},             // input
+                                                {{1, 2, 16, 16}}              // output
+                                        })),
+        ::testing::Values(NewShapes({
+                                            {{1, 2, 7,  7}},                  // input
+                                            {{1, 2, 14, 14}}                  // output
+                                    })),
+        ::testing::ValuesIn(pluginParams),
+        ::testing::Values(Helper(std::make_shared<DeconvolutionTestHelper>(convParams)))
+), getTestCaseName
+);
+
+INSTANTIATE_TEST_CASE_P(
+        Pool_smoke, CommonSingleLayerTest,
+        ::testing::Combine(
+        ::testing::Values(InitialShapes({
+                                                {{1, 2, 16, 16}},           // input
+                                                {{1, 2, 8,  8}}             // output
+                                        })),
+        ::testing::Values(NewShapes({
+                                            {{1, 2, 15, 15}},               // input
+                                            {{1, 2, 8,  8}}                 // output
+                                    })),
+        ::testing::ValuesIn(pluginParams),
+        ::testing::Values(Helper(std::make_shared<PoolingTestHelper>(poolParams)))
+), getTestCaseName
+);
+
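+// For the deformable-convolution cases the second ("trans") input carries sampling offsets;
+// its channel count is 2 * deformable_groups * kernel_h * kernel_w (e.g. 2 * 2 * 3 * 3 = 36
+// below), where the helper's trailing argument appears to be the number of deformable groups.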
+INSTANTIATE_TEST_CASE_P(
+        DefConvLight0_smoke, CommonSingleLayerTest,
+        ::testing::Combine(
+                ::testing::Values(InitialShapes({
+                                                        {{1, 4, 4, 4}, {1, 36, 4, 4}}, // input, trans
+                                                        {{1, 4, 4, 4}}                 // output
+                                                })),
+                ::testing::Values(NewShapes({
+                                                    {{1, 4, 4, 4}, {1, 36, 4, 4}}, // input, trans
+                                                    {{1, 4, 4, 4}}                 // output
+                                            })),
+                ::testing::ValuesIn(pluginParams),
+                ::testing::Values(Helper(std::make_shared<DeformableConvolutionTestHelper>(defConvParamsLight0, 2)))
+        ), getTestCaseName
+);
+
+INSTANTIATE_TEST_CASE_P(
+        DefConvLight1_WithBatch_smoke, CommonSingleLayerTest,
+        ::testing::Combine(
+                ::testing::Values(InitialShapes({
+                                                        {{2, 4, 8, 8}, {2, 36, 4, 4}}, // input, trans
+                                                        {{2, 16, 4, 4}}                // output
+                                                })),
+                ::testing::Values(NewShapes({
+                                                    {{2, 4, 8, 8}, {2, 36, 4, 4}}, // input, trans
+                                                    {{2, 16, 4, 4}}                // output
+                                            })),
+                ::testing::ValuesIn(pluginParams),
+                ::testing::Values(Helper(std::make_shared<DeformableConvolutionTestHelper>(defConvParamsLight1, 2)))
+        ), getTestCaseName
+);
+
+INSTANTIATE_TEST_CASE_P(
+        DefConvLight2_WithBatch_smoke, CommonSingleLayerTest,
+        ::testing::Combine(
+                ::testing::Values(InitialShapes({
+                                                        {{2, 4, 8, 8}, {2, 18, 4, 4}}, // input, trans
+                                                        {{2, 15, 4, 4}}                // output
+                                                })),
+                ::testing::Values(NewShapes({
+                                                    {{2, 4, 8, 8}, {2, 18, 4, 4}}, // input, trans
+                                                    {{2, 15, 4, 4}}                // output
+                                            })),
+                ::testing::ValuesIn(pluginParams),
+                ::testing::Values(Helper(std::make_shared<DeformableConvolutionTestHelper>(defConvParamsLight2, 1)))
+        ), getTestCaseName
+);
+
+INSTANTIATE_TEST_CASE_P(
+        DefConvLight3_WithGroups_smoke, CommonSingleLayerTest,
+        ::testing::Combine(
+                ::testing::Values(InitialShapes({
+                                                        {{1, 4, 4, 4}, {1, 18, 4, 4}}, // input, trans
+                                                        {{1, 4, 4, 4}}                 // output
+                                                })),
+                ::testing::Values(NewShapes({
+                                                    {{1, 4, 4, 4}, {1, 18, 4, 4}}, // input, trans
+                                                    {{1, 4, 4, 4}}                 // output
+                                            })),
+                ::testing::ValuesIn(pluginParams),
+                ::testing::Values(Helper(std::make_shared<DeformableConvolutionTestHelper>(defConvParamsLight3, 1)))
+        ), getTestCaseName
+);
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_DefConvHeavy, CommonSingleLayerTest,
+        ::testing::Combine(
+                ::testing::Values(InitialShapes({
+                                                        {{1, 512, 38, 38}, {1, 72, 38, 38}}, // input, trans
+                                                        {{1, 128, 38, 38}}                   // output
+                                                })),
+                ::testing::Values(NewShapes({
+                                                    {{1, 512, 38, 38}, {1, 72, 38, 38}}, // input, trans
+                                                    {{1, 128, 38, 38}}                   // output
+                                            })),
+                ::testing::ValuesIn(pluginParams),
+                ::testing::Values(Helper(std::make_shared<DeformableConvolutionTestHelper>(defConvParamsHeavy, 4)))
+        ), getTestCaseName
+);
+
+#endif
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/graph_tools/graph_tools_functional_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/graph_tools/graph_tools_functional_tests.cpp
new file mode 100644 (file)
index 0000000..c8752e5
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <ie_core.hpp>
+#include <ngraph_functions/subgraph_builders.hpp>
+#include "graph_tools_functional_tests.hpp"
+
+using namespace testing;
+using namespace InferenceEngine::details;
+using namespace InferenceEngine;
+using namespace std;
+
+TEST_F(GraphToolsFncTest, smoke_canSortSplitConvConcat) {
+    CNNNetwork network(ngraph::builder::subgraph::makeSplitConvConcat());
+    checkSort(CNNNetSortTopologically(network));
+}
+
+
+TEST_F(GraphToolsFncTest, smoke_canSortTIwithLstm) {
+    CNNNetwork network(ngraph::builder::subgraph::makeTIwithLSTMcell());
+    checkSort(CNNNetSortTopologically(network));
+
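+    // A second sort of the same network must still produce a valid topological order.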
+    checkSort(CNNNetSortTopologically(network));
+}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/ie_class/ie_class.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/ie_class/ie_class.cpp
new file mode 100644 (file)
index 0000000..e4c55c0
--- /dev/null
@@ -0,0 +1,158 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "ie_class.hpp"
+
+using namespace InferenceEngine::PluginConfigParams;
+
+//
+// IE Class Common tests with <pluginName, deviceName params>
+//
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassCommon, IEClassBasicTestP,
+        ::testing::Values(std::make_pair("MKLDNNPlugin", "CPU")));
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassNetworkTestP, IEClassNetworkTestP,
+        ::testing::Values("CPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassImportExportTestP, IEClassImportExportTestP,
+        ::testing::Values("HETERO:CPU"));
+
+//
+// IE Class GetMetric
+//
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS,
+        ::testing::Values("CPU", "HETERO"));
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassGetMetricTest, IEClassGetMetricTest_SUPPORTED_METRICS,
+        ::testing::Values("CPU", "HETERO"));
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassGetMetricTest, IEClassGetMetricTest_AVAILABLE_DEVICES,
+        ::testing::Values("CPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassGetMetricTest, IEClassGetMetricTest_FULL_DEVICE_NAME,
+        ::testing::Values("CPU", "HETERO"));
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassGetMetricTest, IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES,
+        ::testing::Values("CPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassGetMetricTest, IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS,
+        ::testing::Values("CPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassGetMetricTest, IEClassGetMetricTest_RANGE_FOR_STREAMS,
+        ::testing::Values("CPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassGetMetricTest, IEClassGetMetricTest_ThrowUnsupported,
+        ::testing::Values("CPU", "HETERO"));
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassGetConfigTest, IEClassGetConfigTest_ThrowUnsupported,
+        ::testing::Values("CPU", "HETERO"));
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassGetAvailableDevices, IEClassGetAvailableDevices,
+        ::testing::Values("CPU"));
+
+//
+// IE Class GetConfig
+//
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassGetConfigTest, IEClassGetConfigTest,
+        ::testing::Values("CPU"));
+
+//
+// Executable Network GetMetric
+//
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
+        ::testing::Values("CPU", "HETERO:CPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
+        ::testing::Values("CPU", "HETERO:CPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_NETWORK_NAME,
+        ::testing::Values("CPU", "HETERO:CPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS,
+        ::testing::Values("CPU", "HETERO:CPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassExecutableNetworkGetMetricTest, IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported,
+        ::testing::Values("CPU", "HETERO:CPU"));
+
+//
+// Executable Network GetConfig / SetConfig
+//
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassExecutableNetworkGetConfigTest, IEClassExecutableNetworkGetConfigTest,
+        ::testing::Values("CPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassExecutableNetworkSetConfigTest, IEClassExecutableNetworkSetConfigTest,
+        ::testing::Values("CPU"));
+
+//
+// Hetero Executable Network GetMetric
+//
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
+        ::testing::Values("CPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
+        ::testing::Values("CPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME,
+        ::testing::Values("CPU"));
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassHeteroExecutableNetworkGetMetricTest, IEClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK,
+        ::testing::Values("CPU"));
+
+//////////////////////////////////////////////////////////////////////////////////////////
+
+TEST_F(IEClassBasicTest, smoke_SetConfigAfterCreatedThrow) {
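+    // Note: despite the "Throw" in the test name, this checks that KEY_CPU_THREADS_NUM
+    // round-trips through SetConfig/GetConfig.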
+    Core ie;
+    std::string value = { };
+
+    ASSERT_NO_THROW(ie.SetConfig({ { KEY_CPU_THREADS_NUM, "1" } }, "CPU"));
+    ASSERT_NO_THROW(value = ie.GetConfig("CPU", KEY_CPU_THREADS_NUM).as<std::string>());
+    ASSERT_EQ("1", value);
+
+    ASSERT_NO_THROW(ie.SetConfig({ { KEY_CPU_THREADS_NUM, "4" } }, "CPU"));
+    ASSERT_NO_THROW(value = ie.GetConfig("CPU", KEY_CPU_THREADS_NUM).as<std::string>());
+    ASSERT_EQ("4", value);
+}
+
+// IE Class Query network
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassQueryNetworkTest, IEClassQueryNetworkTest,
+        ::testing::Values("CPU"));
+
+// IE Class Load network
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassLoadNetworkTest, IEClassLoadNetworkTest,
+        ::testing::Values("CPU"));
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/inference_engine_regression_tests/common_dyn_batch_regression.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/inference_engine_regression_tests/common_dyn_batch_regression.cpp
new file mode 100644 (file)
index 0000000..b896f1f
--- /dev/null
@@ -0,0 +1,16 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "common_dyn_batch_regression.hpp"
+
+std::vector<CommonDynBatchFuncTestParams> supportedDynBatchValues = {
+    { "CPU", 4, 3 },
+    { "CPU", 4, 2 },
+    { "CPU", 4, 1 },
+    { "CPU", 8, 5 },
+    { "CPU", 8, 4 },
+    { "CPU", 8, 3 }
+};
+
+INSTANTIATE_TEST_CASE_P(FunctionalTest_smoke, TestNoRegressionDynBatchFP32, ValuesIn(supportedDynBatchValues), getTestCaseName);
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/input_tests/parser_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/input_tests/parser_tests.cpp
new file mode 100644 (file)
index 0000000..b96b34f
--- /dev/null
@@ -0,0 +1,36 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "parser_tests.hpp"
+
+ir_test_params ir_test_cases[] = {
+        ir_test_params("CPU", "FP32", negative_conv_kernel_x_case),
+        ir_test_params("CPU", "FP32", negative_conv_kernel_y_case),
+        ir_test_params("CPU", "FP32", negative_conv_stride_x_case),
+        ir_test_params("CPU", "FP32", negative_conv_weights_case),
+        ir_test_params("CPU", "FP32", negative_conv_biases_case),
+
+        ir_test_params("CPU", "FP32", negative_fc_out_size_case),
+        ir_test_params("CPU", "FP32", negative_fc_weights_case),
+        ir_test_params("CPU", "FP32", negative_fc_biases_case),
+
+        ir_test_params("CPU", "FP32", negative_deconv_kernel_x_case),
+        ir_test_params("CPU", "FP32", negative_deconv_kernel_y_case),
+        ir_test_params("CPU", "FP32", negative_deconv_stride_x_case),
+        ir_test_params("CPU", "FP32", negative_deconv_weights_case),
+        ir_test_params("CPU", "FP32", negative_deconv_biases_case),
+
+        ir_test_params("CPU", "FP32", negative_pool_kernel_x_case),
+        ir_test_params("CPU", "FP32", negative_pool_kernel_y_case),
+        ir_test_params("CPU", "FP32", negative_pool_stride_x_case),
+        ir_test_params("CPU", "FP32", incorrect_pool_type_case),
+
+        ir_test_params("CPU", "FP32", negative_norm_local_size_case),
+        ir_test_params("CPU", "FP32", negative_norm_k_case)
+};
+
+INSTANTIATE_TEST_CASE_P(FunctionalTest_smoke, IncorrectIRTests,
+        ::testing::ValuesIn(ir_test_cases),
+        getTestName);
+
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/io_blob_tests/cropResize_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/io_blob_tests/cropResize_tests.cpp
new file mode 100644 (file)
index 0000000..4b0aea1
--- /dev/null
@@ -0,0 +1,250 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "cropResize_tests.hpp"
+
+#ifdef USE_OPENCV
+
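+// COMBINE_WITH_DEFAULT fixes the network precision (FP32), the tested I/O precision/threshold
+// pairs, both resize algorithms, a fixed crop ROI and the sync/async infer modes; the tested
+// dims, input layouts and color formats are supplied per instantiation.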
+#define COMBINE_WITH_DEFAULT(_dims, _in_layouts, _color_formats) \
+    Combine(Values(Precision::FP32), \
+            Values(_dims), \
+            Values(std::make_pair(Precision::FP32, 1e-2), std::make_pair(Precision::U8, 1)), \
+            Values(_in_layouts), \
+            Values(ResizeAlgorithm::RESIZE_BILINEAR, ResizeAlgorithm::RESIZE_AREA), \
+            Values(_color_formats), \
+            Values(ROI({0, 40, 50, 220, 220})), \
+            Values(false, true))
+
+// test resize-only for all dims (as before)
+// test resize + color conversion for smaller number of dims (simple upscale/downscale scenarios only)
+namespace smoke {
+static auto params_resize_only = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS(1),
+    MULTI_VALUE(NCHW, NHWC),
+    COLOR_FORMATS_RAW);
+
+static auto params_csc_3ch_and_resize = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS_SMALL(1),
+    MULTI_VALUE(NCHW, NHWC),
+    COLOR_FORMATS_3CH);
+
+static auto params_csc_4ch_and_resize = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS_SMALL(1),
+    NHWC,
+    COLOR_FORMATS_4CH);
+
+// batch preprocessing parameters:
+static auto batch_params_resize_only = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS(2),
+    MULTI_VALUE(NCHW, NHWC),
+    COLOR_FORMATS_RAW);
+
+static auto batch_params_csc_3ch_and_resize = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS_SMALL(2),
+    MULTI_VALUE(NCHW, NHWC),
+    COLOR_FORMATS_3CH);
+
+static auto batch_params_csc_4ch_and_resize = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS_SMALL(2),
+    NHWC,
+    COLOR_FORMATS_4CH);
+}  // namespace smoke
+
+
+// test everything in nightly (as before)
+namespace nightly {
+static auto params_csc_3ch_and_resize = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS(1),
+    MULTI_VALUE(NCHW, NHWC),
+    MULTI_VALUE(COLOR_FORMATS_RAW, COLOR_FORMATS_3CH));
+
+static auto params_csc_4ch_and_resize = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS(1),
+    NHWC,
+    COLOR_FORMATS_4CH);
+
+// batch preprocessing parameters:
+static auto batch_params_csc_3ch_and_resize = COMBINE_WITH_DEFAULT(
+    MULTI_VALUE(TESTED_DIMS(2), TESTED_DIMS(3)),
+    MULTI_VALUE(NCHW, NHWC),
+    MULTI_VALUE(COLOR_FORMATS_RAW, COLOR_FORMATS_3CH));
+
+static auto batch_params_csc_4ch_and_resize = COMBINE_WITH_DEFAULT(
+    MULTI_VALUE(TESTED_DIMS(2), TESTED_DIMS(3)),
+    NHWC,
+    COLOR_FORMATS_4CH);
+}  // namespace nightly
+
+// reorder preprocessing parameters:
+static auto reorder_params = Combine(
+        Values(Precision::FP32),  // network precision
+        Values(SizeVector({1, 3, 300, 300})),  // sizes of the network
+        Values(std::make_pair(Precision::FP32, 1e-2), std::make_pair(Precision::U8, 1)),  // precision and threshold
+        Values(std::make_pair(NCHW, NHWC), std::make_pair(NHWC, NCHW)),  // Input/network data layout
+        Values(ResizeAlgorithm::NO_RESIZE),
+        Values(ColorFormat::BGR),
+        Values(ROI({0, 0, 0, 300, 300})),  // cropped ROI params (id, x, y, width, height)
+        Values(false, true)  // Infer mode sync/async
+);
+
+// nv12 preprocessing parameters:
+static auto nv12_params = Combine(
+        Values(Precision::FP32),  // network precision
+        Values(cv::Size(300, 300)),  // input image size
+        Values(TESTED_DIMS(1)),  // sizes of the network
+        Values(std::make_pair(Precision::U8, 1)),  // precision and threshold
+        Values(ResizeAlgorithm::RESIZE_BILINEAR, ResizeAlgorithm::RESIZE_AREA),
+        Values(ColorFormat::NV12),
+        Values(ROI({0, 0, 0, 300, 300}), ROI({0, 15, 10, 210, 210})),  // cropped ROI params (id, x, y, width, height)
+        Values(false, true)  // Infer mode sync/async
+);
+
+static auto random_roi_3c = Combine(
+            Values(Precision::FP32),
+            Values(TESTED_DIMS(1)),
+            Values(std::make_pair(Precision::FP32, 1e-2), std::make_pair(Precision::U8, 1)),
+            Values(MULTI_VALUE(NCHW, NHWC)),
+            Values(ResizeAlgorithm::RESIZE_BILINEAR, ResizeAlgorithm::RESIZE_AREA),
+            Values(COLOR_FORMATS_3CH),
+            Values(ROI({0, 0, 0, 0, 0})),
+            Values(false, true)
+);
+
+static auto random_roi_4c = Combine(
+            Values(Precision::FP32),
+            Values(TESTED_DIMS(1)),
+            Values(std::make_pair(Precision::FP32, 1e-2), std::make_pair(Precision::U8, 1)),
+            Values(NHWC),
+            Values(ResizeAlgorithm::RESIZE_BILINEAR, ResizeAlgorithm::RESIZE_AREA),
+            Values(COLOR_FORMATS_4CH),
+            Values(ROI({0, 0, 0, 0, 0})),
+            Values(false, true)
+);
+
+static auto random_roi_nv12 = Combine(
+            Values(Precision::FP32),
+            Values(TESTED_DIMS(1)),
+            Values(std::make_pair(Precision::U8, 1)),
+            Values(NHWC),
+            Values(ResizeAlgorithm::RESIZE_BILINEAR, ResizeAlgorithm::RESIZE_AREA),
+            Values(ColorFormat::NV12),
+            Values(ROI({0, 0, 0, 0, 0})),
+            Values(false, true)
+);
+struct PreprocessRegression: public TestsCommon {};
+
+TEST_F(PreprocessRegression, smoke_DifferentSizes) {
+    // Reproduce "object was compiled for different meta" problem.
+    // When G-API/Fluid is used as a preprocessing engine,
+    // its state wasn't updated internally if input dimensions changed.
+    // Thus while graph itself continued working properly on all dimensions,
+    // it wan't reshaped when it had to:
+    // * On first call (frame size = X), _lastCall is initialized with size X
+    // * On second call (frame size = Y), graph is reshaped to size Y but _lastCall is still X
+    // * On third call (frame size = X), graph is NOT reshaped since this X matches _lastCall,
+    //   and an exception is thrown: a graph reshaped to input size Y is asked to process input size X.
+
+    Blob::Ptr in_blob;
+    Blob::Ptr out_blob;
+
+    std::vector<cv::Size> in_sizes = {
+        cv::Size(256, 256),
+        cv::Size(72, 72),
+        cv::Size(256, 256),
+    };
+
+    SizeVector out_dims = {1, 3, 64, 64};
+    out_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, out_dims, Layout::NCHW));
+    out_blob->allocate();
+
+    PreProcessInfo info;
+    info.setResizeAlgorithm(RESIZE_BILINEAR);
+
+    PreProcessDataPtr preprocess = CreatePreprocDataHelper();
+    for (auto sz : in_sizes) {
+        cv::Mat in_mat = cv::Mat::eye(sz, CV_8UC3)*255;
+        in_blob = img2Blob<Precision::U8>(in_mat, Layout::NHWC);
+        preprocess->setRoiBlob(in_blob);
+        EXPECT_NO_THROW(preprocess->execute(out_blob, info, false));
+    }
+
+    // Not thrown = test is green.
+}
+
+struct IEPreprocessTest : public TestsCommon {};
+TEST_F(IEPreprocessTest, smoke_NetworkInputSmallSize) {
+    const size_t num_threads = parallel_get_max_threads();
+
+    std::vector<cv::Size> out_sizes = {
+            cv::Size(num_threads, num_threads - 1),
+            cv::Size(num_threads - 1, num_threads),
+            cv::Size(1, 1),
+            cv::Size(1, 0),
+            cv::Size(0, 1)
+    };
+
+    SizeVector in_dims = {1, 3, num_threads * 2, num_threads * 2};
+    cv::Mat in_mat = cv::Mat::eye(cv::Size(in_dims[3], in_dims[2]), CV_8UC3)*255;
+    Blob::Ptr in_blob = img2Blob<Precision::U8>(in_mat, Layout::NHWC);
+
+    PreProcessInfo info;
+    info.setResizeAlgorithm(RESIZE_BILINEAR);
+
+    PreProcessDataPtr preprocess = CreatePreprocDataHelper();
+    preprocess->setRoiBlob(in_blob);
+
+    for (const auto& sz : out_sizes) {
+        SizeVector out_dims = {1, 3, static_cast<size_t>(sz.height), static_cast<size_t>(sz.width)};
+        Blob::Ptr out_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, out_dims, Layout::NHWC));
+        out_blob->allocate();
+        // FIXME: sz with 0 dims must be a separate test
+        if (sz.width > 0 && sz.height > 0) {
+            EXPECT_NO_THROW(preprocess->execute(out_blob, info, false));
+        } else {
+            EXPECT_THROW(preprocess->execute(out_blob, info, false),
+                         InferenceEngine::details::InferenceEngineException);
+        }
+    }
+}
+
+// smoke:
+PLUGING_CASE_WITH_SUFFIX(CPU, _gapi_random_roi_3c_smoke, RandomROITest, random_roi_3c);
+PLUGING_CASE_WITH_SUFFIX(CPU, _gapi_random_roi_4c_smoke, RandomROITest, random_roi_4c);
+PLUGING_CASE_WITH_SUFFIX(CPU, _gapi_random_roi_nv12_smoke, RandomROITest, random_roi_nv12);
+
+PLUGING_CASE_WITH_SUFFIX(CPU, _gapi_resize_only_smoke, CropResizeTest, smoke::params_resize_only);
+PLUGING_CASE_WITH_SUFFIX(CPU, _gapi_csc_3ch_and_resize_smoke, CropResizeTest, smoke::params_csc_3ch_and_resize);
+PLUGING_CASE_WITH_SUFFIX(CPU, _gapi_csc_4ch_and_resize_smoke, CropResizeTest, smoke::params_csc_4ch_and_resize);
+
+PLUGING_CASE_WITH_SUFFIX(CPU, _gapi_resize_only_smoke, DynamicBatchResizeTest, smoke::batch_params_resize_only);
+PLUGING_CASE_WITH_SUFFIX(CPU, _gapi_csc_3ch_and_resize_smoke, DynamicBatchResizeTest, smoke::batch_params_csc_3ch_and_resize);
+PLUGING_CASE_WITH_SUFFIX(CPU, _gapi_csc_4ch_and_resize_smoke, DynamicBatchResizeTest, smoke::batch_params_csc_4ch_and_resize);
+
+PLUGING_CASE_WITH_SUFFIX(CPU, _gapi_reorder_smoke, ReorderTest, reorder_params);
+
+PLUGING_CASE_WITH_SUFFIX(CPU, _gapi_csc_nv12_and_resize_smoke, NV12ColorConvertTest, nv12_params);
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// nightly:
+
+// FIXME: enable these once smoke/nightly concepts are introduced in CI
+PLUGING_CASE_WITH_SUFFIX(DISABLED_CPU, _gapi_random_roi_3c_nightly, RandomROITest, random_roi_3c);
+PLUGING_CASE_WITH_SUFFIX(DISABLED_CPU, _gapi_random_roi_4c_nightly, RandomROITest, random_roi_4c);
+PLUGING_CASE_WITH_SUFFIX(DISABLED_CPU, _gapi_random_roi_nv12_nightly, RandomROITest, random_roi_nv12);
+
+PLUGING_CASE_WITH_SUFFIX(DISABLED_CPU, _gapi_csc_3ch_and_resize_nightly, CropResizeTest, nightly::params_csc_3ch_and_resize);
+PLUGING_CASE_WITH_SUFFIX(DISABLED_CPU, _gapi_csc_4ch_and_resize_nightly, CropResizeTest, nightly::params_csc_4ch_and_resize);
+
+PLUGING_CASE_WITH_SUFFIX(DISABLED_CPU, _gapi_csc_3ch_and_resize_nightly, BatchResizeTest, nightly::batch_params_csc_3ch_and_resize);
+PLUGING_CASE_WITH_SUFFIX(DISABLED_CPU, _gapi_csc_4ch_and_resize_nightly, BatchResizeTest, nightly::batch_params_csc_4ch_and_resize);
+
+PLUGING_CASE_WITH_SUFFIX(DISABLED_CPU, _gapi_csc_3ch_and_resize_nightly, DynamicBatchResizeTest, nightly::batch_params_csc_3ch_and_resize);
+PLUGING_CASE_WITH_SUFFIX(DISABLED_CPU, _gapi_csc_4ch_and_resize_nightly, DynamicBatchResizeTest, nightly::batch_params_csc_4ch_and_resize);
+
+PLUGING_CASE_WITH_SUFFIX(DISABLED_CPU, _gapi_reorder_nightly, ReorderTest, reorder_params);
+
+PLUGING_CASE_WITH_SUFFIX(DISABLED_CPU, _gapi_csc_nv12_and_resize_nightly, NV12ColorConvertTest, nv12_params);
+
+#endif  // USE_OPENCV
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/io_blob_tests/dims_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/io_blob_tests/dims_tests.cpp
new file mode 100644 (file)
index 0000000..8ec5c6e
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "dims_tests.hpp"
+
+PLUGING_CASE_WITH_SUFFIX(CPU, _smoke, IO_BlobTest, params);
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/io_blob_tests/layout_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/io_blob_tests/layout_tests.cpp
new file mode 100644 (file)
index 0000000..8b59c13
--- /dev/null
@@ -0,0 +1,15 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "layout_tests.hpp"
+
+static auto params = ::testing::Combine(
+        ::testing::Values(conv_p),
+        ::testing::Values(std::make_pair(Precision::FP32, 1e-5)),
+        ::testing::Values(NCHW, NHWC),
+        ::testing::Values(NCHW, NHWC),
+        ::testing::Values(Precision::FP32, Precision::U8, Precision::I16)  // TODO: What about U16/I8/FP16?
+);
+
+PLUGING_CASE_WITH_SUFFIX(CPU, _smoke, LayoutTTTest, params);
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/lstm/lstm_cell_test.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/lstm/lstm_cell_test.cpp
new file mode 100644 (file)
index 0000000..965e0f8
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "lstm_cell_test.hpp"
+
+RUN_CASE_P_WITH_SUFFIX(CPU, _smoke, LSTMCellTest, workload);
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/lstm/lstm_ir_test.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/lstm/lstm_ir_test.cpp
new file mode 100644 (file)
index 0000000..574dfc5
--- /dev/null
@@ -0,0 +1,10 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "lstm_ir_test.hpp"
+
+RUN_CASE_P_WITH_SUFFIX(CPU, _smoke, LSTM_IR_Test, workload);
+
+static std::vector<ModelInfo> hetero_workload { workload };
+RUN_CASE_P_WITH_SUFFIX(HETERO_CPU, _smoke, LSTM_IR_Test, hetero_workload);
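The HETERO_CPU variant replays the same workload through the heterogeneous plugin with CPU as the only target. A hedged sketch of what that device string means at the API level, using the 2020-era Core interface (the model path is a placeholder):

#include <ie_core.hpp>

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("lstm_model.xml");  // placeholder path
    // "HETERO:CPU" asks the hetero plugin to partition the graph across the
    // listed devices; with a single entry every layer falls back to CPU.
    auto executable = core.LoadNetwork(network, "HETERO:CPU");
    auto request = executable.CreateInferRequest();
    request.Infer();
    return 0;
}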
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/lstm/rnn_seq_test.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/lstm/rnn_seq_test.cpp
new file mode 100644 (file)
index 0000000..d0f1ddb
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "rnn_seq_test.hpp"
+
+RUN_CASE_CP_WITH_SUFFIX(CPU, _smoke, RNNSeqTest, workload);
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/network_tests/network_test.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/network_tests/network_test.cpp
new file mode 100644 (file)
index 0000000..c525fcb
--- /dev/null
@@ -0,0 +1,178 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <memory>
+#include <unordered_set>
+
+#include <gtest/gtest.h>
+#include "ie_precision.hpp"
+#include <tests_common_func.hpp>
+#include <multi-device/multi_device_config.hpp>
+#include "low_precision_transformations/transformer.hpp"
+#include "common/validation.hpp"
+#include "ie_util_internal.hpp"
+
+#include "network_i8.hpp"
+
+#define XBYAK_NO_OP_NAMES
+#define XBYAK_UNDEF_JNL
+#include "../../../../thirdparty/mkl-dnn/src/cpu/xbyak/xbyak_util.h"
+
+/*************************************************
+ * !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!!
+ * All reference values were obtained from Caffe scoring
+ * !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!!
+ *************************************************/
+#ifndef ENABLE_MKL_DNN
+#include "disable_tests.hpp"
+#endif
+
+TEST_P(ModelTransformationsTest, LPT) {}
+
+ModelParams getModelParams(const std::string& modelName) {
+    static const std::map<std::string, ModelParams> modelParams = {
+        {
+            "inception_v3_tf",
+            ModelParams(
+                "inception_v3_tf",
+                "inception_v3/inception_v3_i8.xml",
+                "validation_set/299x299/dog.bmp",
+                {{ 157, 10.1683 },  // row 157: 'Blenheim spaniel'
+                 { 219, 5.751 },    // row 219: 'Welsh springer spaniel'
+                 { 153, 4.9502 },   // row 153: 'Japanese spaniel'
+                 { 216, 4.79769 }}
+            )
+        },
+        {
+            "mobilenet_v2_tf_depthwise",
+            ModelParams(
+                "mobilenet_v2_tf_depthwise",
+                "mobilenet_v2_1.4_224/mobilenet_v2_1.4_224_i8.xml",
+                "validation_set/224x224/dog.bmp",
+                {{ 157, 8.63748 },
+                 { 219, 6.29954 },
+                 { 216, 4.7303 },
+                 { 218, 4.69319 },
+                 { 220, 3.67249 }},
+                {},
+                [](const TransformationsParams& transformationsParam, CNNNetworkImplPtr usedNetwork) {
+                    if (transformationsParam.transformationsInTestEnabled && transformationsParam.params.updatePrecisions) {
+                            const static std::vector<std::pair<std::string, std::string>> fakeQuantizeAndConvolutionItems = {
+                            // U8 with shift on activations
+                            {"MobilenetV2/Conv/Conv2D/fq_input_0", ""},
+                            {"MobilenetV2/expanded_conv/project/Conv2D/fq_input_0", "MobilenetV2/expanded_conv/project/BatchNorm/FusedBatchNormV3/variance/Fused_Add_"},
+                            // I8 on activations
+                            {"MobilenetV2/expanded_conv_1/expand/Conv2D/fq_input_0", ""},
+                            {"MobilenetV2/expanded_conv_1/project/Conv2D/fq_input_0", "MobilenetV2/expanded_conv_1/project/BatchNorm/FusedBatchNormV3/variance/Fused_Add_"},
+                            // I8 on activations
+                            {"MobilenetV2/expanded_conv_2/add/fq_input_1", ""},
+                            {"MobilenetV2/expanded_conv_2/project/Conv2D/fq_input_0", "MobilenetV2/expanded_conv_2/project/BatchNorm/FusedBatchNormV3/variance/Fused_Add_"},
+                            // I8 on activations
+                            {"MobilenetV2/expanded_conv_3/expand/Conv2D/fq_input_0", ""}
+                        };
+
+                        for (const auto& item : fakeQuantizeAndConvolutionItems) {
+                            TestsCommonFunc::checkLayerOuputPrecision(*usedNetwork, item.first, Precision::U8);
+                            if (!item.second.empty()) {
+                                TestsCommonFunc::checkLayerInputPrecision(*usedNetwork, item.second, Precision::U8, 0);
+                            }
+                        }
+                    }
+                })
+        },
+        {
+            "resnet_50_tf",
+            ModelParams(
+                "resnet_50_tf",
+                "resnet_v1_50/resnet_v1_50_i8.xml",
+                "validation_set/224x224/dog.bmp",
+                {{ 156, 16.1796 },
+                 { 218, 11.9186 },
+                 { 219, 10.8054 },
+                 { 217, 10.1224 },
+                 { 152, 9.60148 }},
+                {},
+                [](const TransformationsParams& transformationsParam, CNNNetworkImplPtr usedNetwork) {
+                    if (transformationsParam.transformationsInTestEnabled && transformationsParam.params.updatePrecisions) {
+                        const Precision originalPrecision = Precision::FP32;
+                        const Precision targetPrecision = Precision::U8;
+
+                        // Eltwise CPU/GPU specific
+                        TestsCommonFunc::checkLayerOuputPrecision(*usedNetwork, "resnet_v1_50/block1/unit_1/bottleneck_v1/add/fq_input_0", originalPrecision);
+                        TestsCommonFunc::checkLayerOuputPrecision(*usedNetwork, "resnet_v1_50/block1/unit_1/bottleneck_v1/add/fq_input_1", Precision::I8);
+
+                        TestsCommonFunc::checkLayerOuputPrecision(*usedNetwork, "resnet_v1_50/block2/unit_1/bottleneck_v1/add/fq_input_0", originalPrecision);
+                        TestsCommonFunc::checkLayerOuputPrecision(*usedNetwork, "resnet_v1_50/block2/unit_1/bottleneck_v1/add/fq_input_1", Precision::I8);
+                    }
+                })
+        },
+    };
+
+    const auto it = modelParams.find(modelName);
+    if (it == modelParams.end()) {
+        THROW_IE_EXCEPTION << "parameters for model '" << modelName << "' were not found";
+    }
+    return it->second;
+}
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_Inception,
+        ModelTransformationsTest,
+        ::testing::Values(
+                TransformationsParams("MKLDNN", getModelParams("inception_v3_tf"), 1ul, false, false, createParam(), {}, 3ul),
+                TransformationsParams("MKLDNN", getModelParams("inception_v3_tf"), 1ul, false, true, createParamI8I8(), {}, 0, false),
+                TransformationsParams("MKLDNN", getModelParams("inception_v3_tf"), 1ul, false, true, createParamU8I8(), {}, 0),
+                TransformationsParams("MKLDNN", getModelParams("inception_v3_tf"), 1ul, false, true, createParamU8U8(), {}, 0),
+                TransformationsParams("MKLDNN", getModelParams("inception_v3_tf"), 1ul, false, true, createParamCpu().setQuantizedTensorAlignmentOnActivations(LayerTransformation::QuantizedTensorAlignment::UpdateLevel)),
+                TransformationsParams("MKLDNN", getModelParams("inception_v3_tf"), 1ul, false, true, createParamCpu().setQuantizedTensorAlignmentOnActivations(LayerTransformation::QuantizedTensorAlignment::UpdateIntervals)),
+                TransformationsParams("MKLDNN", getModelParams("inception_v3_tf"), 1ul, true, false, createParam()),
+                TransformationsParams("MKLDNN", getModelParams("inception_v3_tf"), 2ul, true, false, createParam())
+        ),
+        TransformationsParams::getLowPrecisionTransformerSingleLayerTestName);
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_MobileNet,
+        ModelTransformationsTest,
+        ::testing::Values(
+                TransformationsParams("MKLDNN", getModelParams("mobilenet_v2_tf_depthwise"), 1ul, false),
+// TODO: eshoguli: fix this issue
+//                TransformationsParams("MKLDNN", getModelParams("mobilenet_v2_tf_depthwise"), 1ul, false, true, createParamI8I8()),
+//                TransformationsParams("MKLDNN", getModelParams("mobilenet_v2_tf_depthwise"), 1ul, false, true, createParamU8I8()),
+//                TransformationsParams("MKLDNN", getModelParams("mobilenet_v2_tf_depthwise"), 1ul, false, true, createParamU8U8(), {}, 2),
+//                TransformationsParams("MKLDNN", getModelParams("mobilenet_v2_tf_depthwise"), 1ul, false, true, createParamCpu(), { "464/Pool", "465/Pool" }),
+                TransformationsParams("MKLDNN", getModelParams("mobilenet_v2_tf_depthwise"), 1ul, true),
+                TransformationsParams("MKLDNN", getModelParams("mobilenet_v2_tf_depthwise"), 2ul, true)
+        ),
+        TransformationsParams::getLowPrecisionTransformerSingleLayerTestName);
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_ResNet,
+        ModelTransformationsTest,
+        ::testing::Values(
+                TransformationsParams("MKLDNN", getModelParams("resnet_50_tf"), 1ul, false),
+                TransformationsParams("MKLDNN", getModelParams("resnet_50_tf"), 1ul, false, true, createParamI8I8(), {
+                        // TODO: remove once eltwise validation is added
+                        "resnet_v1_50/block1/unit_2/bottleneck_v1/act_quant/FakeQuantWithMinMaxVars",
+                        "resnet_v1_50/block2/unit_3/bottleneck_v1/act_quant/FakeQuantWithMinMaxVars"
+                }),
+                TransformationsParams("MKLDNN", getModelParams("resnet_50_tf"), 1ul, false, true, createParamU8I8(), {
+                        // TODO: remove once eltwise validation is added
+                        "resnet_v1_50/block1/unit_2/bottleneck_v1/act_quant/FakeQuantWithMinMaxVars",
+                        "resnet_v1_50/block2/unit_3/bottleneck_v1/act_quant/FakeQuantWithMinMaxVars"
+                }),
+                TransformationsParams("MKLDNN", getModelParams("resnet_50_tf"), 1ul, false, true, createParamU8U8(), {
+                        // TODO: remove once eltwise validation is added
+                        "resnet_v1_50/block1/unit_2/bottleneck_v1/act_quant/FakeQuantWithMinMaxVars",
+                        "resnet_v1_50/block2/unit_3/bottleneck_v1/act_quant/FakeQuantWithMinMaxVars"
+                }),
+                TransformationsParams("MKLDNN", getModelParams("resnet_50_tf"), 1ul, false, true, createParamCpu(), {
+                        // TODO: remove once eltwise validation is added
+                        "resnet_v1_50/block1/unit_2/bottleneck_v1/act_quant/FakeQuantWithMinMaxVars",
+                        "resnet_v1_50/block2/unit_3/bottleneck_v1/act_quant/FakeQuantWithMinMaxVars"
+                }),
+                TransformationsParams("MKLDNN", getModelParams("resnet_50_tf"), 1ul, true),
+                TransformationsParams("MKLDNN", getModelParams("resnet_50_tf"), 2ul, true)
+        ),
+        TransformationsParams::getLowPrecisionTransformerSingleLayerTestName);
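Each ModelParams entry above pins a few {class row, score} pairs taken from Caffe scoring; a run passes when the transformed network reproduces them within tolerance. A minimal sketch of that kind of check (the names are illustrative, not the harness's API):

#include <cmath>
#include <cstddef>
#include <utility>
#include <vector>

bool refScoresMatch(const std::vector<float>& output,
                    const std::vector<std::pair<int, float>>& refs,
                    float tolerance) {
    for (const auto& ref : refs) {
        // ref.first is the class row (e.g. 157 = 'Blenheim spaniel'),
        // ref.second the score expected at that row.
        if (std::fabs(output.at(static_cast<std::size_t>(ref.first)) - ref.second) > tolerance)
            return false;
    }
    return true;
}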
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/activation_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/activation_tests.cpp
new file mode 100644 (file)
index 0000000..8aa9d67
--- /dev/null
@@ -0,0 +1,15 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "activation_tests.hpp"
+
+
+activation_test_params act_test_cases[] = {
+        activation_test_params("CPU", case_1, "relu"),
+        activation_test_params("CPU", case_1, "exp"),
+        activation_test_params("CPU", case_1, "not"),
+};
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_CPU_TestsActivationFunctions, ActivationTest, ::testing::ValuesIn(act_test_cases), getTestCaseName);
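The trailing getTestCaseName argument is a gtest name generator: it turns each activation_test_params into a readable suffix on the test name. A hedged sketch of such a printer; the real one is declared in activation_tests.hpp and may format differently:

#include <gtest/gtest.h>
#include <string>

struct ActParamsSketch {
    std::string device;      // e.g. "CPU"
    std::string activation;  // e.g. "relu"
};

static std::string actTestCaseName(const ::testing::TestParamInfo<ActParamsSketch>& info) {
    return info.param.device + "_" + info.param.activation;  // yields "CPU_relu" etc.
}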
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/arg_max_min_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/arg_max_min_tests.cpp
new file mode 100644 (file)
index 0000000..30da9c6
--- /dev/null
@@ -0,0 +1,56 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "arg_max_min_tests.hpp"
+
+static std::vector<float> in_data = { 0.0f, 1.0f,
+                                  20.0f, 12.0f,
+
+                                  12.0f, 0.0f,
+                                  15.0f, 8.0f,
+
+                                  9.0f, 4.0f,
+                                  25.0f, 15.0f,
+
+
+                                  0.0f, 0.0f,
+                                  1.0f, 1.0f,
+
+                                  0.0f, 0.0f,
+                                  24.0f, 12.0f,
+
+                                  8.0f, 9.0f,
+                                  2.0f, 14.0f };
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_mkldnn_TestsArgMaxMin, ArgMaxMinTFTests,
+        ::testing::Values(
+                // Params: device_name, type, in_dim, in_data, has_axis, out_max_val, top_k, axis, ref_dim, ref_data (defaulted here)
+                argMaxMinTF_test_params{ "CPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 1, 0, { 1, 3, 2, 2 } },
+
+                argMaxMinTF_test_params{ "CPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 1, 1, { 2, 1, 2, 2 } },
+
+                argMaxMinTF_test_params{ "CPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 1, 2, { 2, 3, 1, 2 } },
+
+                argMaxMinTF_test_params{ "CPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 1, 3, { 2, 3, 2, 1 } },
+
+                argMaxMinTF_test_params{ "CPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 2, 0, { 2, 3, 2, 2 } },
+
+                argMaxMinTF_test_params{ "CPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 2, 1, { 2, 2, 2, 2 } },
+
+                argMaxMinTF_test_params{ "CPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 2, 2, { 2, 3, 2, 2 } },
+
+                argMaxMinTF_test_params{ "CPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 2, 3, { 2, 3, 2, 2 } },
+
+                argMaxMinTF_test_params{ "CPU", "ArgMax", { 2, 3, 2, 2 }, in_data,
+                                                                     1, 0, 3, 1, { 2, 3, 2, 2 } }
+        ));
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/bin_conv_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/bin_conv_tests.cpp
new file mode 100644 (file)
index 0000000..5567c79
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "bin_conv_tests.hpp"
+
+bin_conv_test_params bin_conv_only_test_cases[] = {
+        bin_conv_test_params("CPU", case_1),
+        bin_conv_test_params("CPU", case_2),
+        bin_conv_test_params("CPU", case_3),
+        bin_conv_test_params("CPU", case_4),
+        bin_conv_test_params("CPU", case_5),
+        bin_conv_test_params("CPU", case_6),
+        bin_conv_test_params("CPU", case_7),
+        bin_conv_test_params("CPU", case_8),
+        bin_conv_test_params("CPU", case_9),
+        bin_conv_test_params("CPU", case_10),
+        bin_conv_test_params("CPU", case_11),
+        bin_conv_test_params("CPU", case_12),
+        bin_conv_test_params("CPU", case_13),
+        bin_conv_test_params("CPU", case_14),
+        bin_conv_test_params("CPU", case_15),
+        bin_conv_test_params("CPU", case_16)
+};
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_CPU_TestBinaryConvolution, BinaryConvolutionOnlyTest, ::testing::ValuesIn(bin_conv_only_test_cases), getTestCaseName);
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/deformable_psroi_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/deformable_psroi_tests.cpp
new file mode 100644 (file)
index 0000000..0f1ed6e
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "deformable_psroi_tests.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_TestDeformable, DeformablePSROIOnlyTest,
+        ::testing::Values(
+                deformable_psroi_test_params{"CPU", {1, 7938, 38, 38}, {300, 5}, {300, 162, 7, 7},
+                                             0.0625, 162, 7, 7, 7, 7, 4, true
+                },
+                deformable_psroi_test_params{"CPU", {1, 392, 38, 38}, {300, 5}, {300, 8, 7, 7},
+                                             0.0625, 8, 7, 7, 7, 7, 4, false, 0.1, {300, 2, 7, 7}
+                },
+                deformable_psroi_test_params{"CPU", {1, 98, 38, 38}, {300, 5}, {300, 2, 7, 7},
+                                             0.0625, 2, 7, 7, 7, 7, 4, true
+                },
+                deformable_psroi_test_params{"CPU", {1, 3969, 38, 38}, {300, 5}, {300, 81, 7, 7},
+                                             0.0625, 81, 7, 7, 7, 7, 4, false, 0.1, {300, 162, 7, 7}
+                }
+        ));
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/depth_to_space_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/depth_to_space_tests.cpp
new file mode 100644 (file)
index 0000000..f232810
--- /dev/null
@@ -0,0 +1,18 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "depth_to_space_tests.hpp"
+
+//TEST_P(DepthToSpaceTests, TestsDepthToSpace) {}
+
+//INSTANTIATE_TEST_CASE_P(
+//        TestsDepthToSpace, DepthToSpaceTests,
+//        ::testing::Values(
+//        depth_to_space_test_params{ "CPU", "FP32", { 1, 4, 1, 1 }, input0, 2, { 1, 1, 2, 2 }, ref_input0_bs2 },
+//        depth_to_space_test_params{ "CPU", "FP32", { 1, 4, 2, 1 }, input1, 2, { 1, 1, 4, 2 }, ref_input1_bs2 },
+//        depth_to_space_test_params{ "CPU", "FP32", { 1, 4, 2, 2 }, input2, 2, { 1, 1, 4, 4 }, ref_input2_bs2 },
+//        depth_to_space_test_params{ "CPU", "FP32", { 1, 4, 3, 2 }, input3, 2, { 1, 1, 6, 4 }, ref_input3_bs2 },
+//        depth_to_space_test_params{ "CPU", "FP32", { 1, 9, 3, 3 }, input4, 3, { 1, 1, 9, 9 }, ref_input4_bs3 },
+//        depth_to_space_test_params{ "CPU", "FP32", { 1, 18, 3, 3 }, input5, 3, { 1, 2, 9, 9 }, ref_input5_bs3 }
+//));
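Even disabled, these cases encode the DepthToSpace shape rule: with block size b, {N, C, H, W} maps to {N, C/(b*b), H*b, W*b}, which is exactly how {1, 4, 1, 1} with b = 2 becomes {1, 1, 2, 2} and {1, 18, 3, 3} with b = 3 becomes {1, 2, 9, 9}. As a one-liner:

#include <array>

std::array<int, 4> depthToSpaceShape(const std::array<int, 4>& s, int b) {
    return { s[0], s[1] / (b * b), s[2] * b, s[3] * b };
}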
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/eltwise_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/eltwise_tests.cpp
new file mode 100644 (file)
index 0000000..576f446
--- /dev/null
@@ -0,0 +1,42 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "eltwise_tests.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_CPU_TestEltwise, EltwiseOnlyTest,
+        ::testing::Values(
+                eltwise_test_params{"CPU",
+                                    {13, 13, 1}, eltwise_test_params::Sum, 4},
+                eltwise_test_params{"CPU",
+                                    {23, 23, 1}, eltwise_test_params::Max, 3},
+                eltwise_test_params{"CPU",
+                                    {23, 23, 1}, eltwise_test_params::Prod, 5},
+                eltwise_test_params{"CPU",
+                                    {23, 23, 1}, eltwise_test_params::Sub, 4},
+                eltwise_test_params{"CPU",
+                                    {23, 23, 1}, eltwise_test_params::Min, 3},
+                eltwise_test_params{"CPU",
+                                    {23, 23, 1}, eltwise_test_params::Div, 5},
+                eltwise_test_params{"CPU",
+                                    {23, 23, 1}, eltwise_test_params::Squared_diff, 2},
+                eltwise_test_params{"CPU",
+                                    {13, 13, 1}, eltwise_test_params::Equal, 5},
+                eltwise_test_params{"CPU",
+                                    {23, 23, 1}, eltwise_test_params::Not_equal, 5},
+                eltwise_test_params{"CPU",
+                                    {23, 23, 1}, eltwise_test_params::Less, 5},
+                eltwise_test_params{"CPU",
+                                    {23, 23, 1}, eltwise_test_params::Less_equal, 5},
+                eltwise_test_params{"CPU",
+                                    {23, 23, 1}, eltwise_test_params::Greater, 5},
+                eltwise_test_params{"CPU",
+                                    {23, 23, 1}, eltwise_test_params::Greater_equal, 5},
+                eltwise_test_params{"CPU",
+                                    {23, 23, 1}, eltwise_test_params::Logical_AND, 5},
+                eltwise_test_params{"CPU",
+                                    {23, 23, 1}, eltwise_test_params::Logical_OR, 5},
+                eltwise_test_params{"CPU",
+                                    {23, 23, 1}, eltwise_test_params::Logical_XOR, 5}
+        ));
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/gather_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/gather_tests.cpp
new file mode 100644 (file)
index 0000000..f10aa8d
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gather_tests.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_CPU_TestsGather, GatherTFTests,
+        ::testing::Values(
+                gatherTF_test_params{ "CPU", "FP32", { 1, 4 }, in0,{ 2, 2 }, dict2D, 0, { 1, 4, 2 }, ref_in0_a0_d22 },
+                gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in0,{ 2, 2, 3 }, dict, 0, { 2, 2, 2, 3 }, ref_in0_a0_d223 },
+                gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in0,{ 2, 2, 3 }, dict,-3, { 2, 2, 2, 3 }, ref_in0_a0_d223 },
+                gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in1,{ 2, 2, 3 }, dict, 2, { 2, 2, 2, 2 }, ref_in1_a2_d223 },
+                gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in1,{ 2, 2, 3 }, dict,-1, { 2, 2, 2, 2 }, ref_in1_a2_d223 },
+                gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in0,{ 2, 3, 2 }, dict, 2, { 2, 3, 2, 2 }, ref_in0_a2_d232 },
+                gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in0,{ 2, 3, 2 }, dict,-1, { 2, 3, 2, 2 }, ref_in0_a2_d232 },
+                gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in1,{ 3, 2, 2 }, dict, 0, { 2, 2, 2, 2 }, ref_in1_a0_d322 },
+                gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in1,{ 3, 2, 2 }, dict,-3, { 2, 2, 2, 2 }, ref_in1_a0_d322 },
+                gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in1,{ 2, 3, 2 }, dict, 1, { 2, 2, 2, 2 }, ref_in1_a1_d232 },
+                gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in1,{ 2, 3, 2 }, dict,-2, { 2, 2, 2, 2 }, ref_in1_a1_d232 },
+                gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in1,{ 2, 2, 3 }, dict, 2, { 2, 2, 2, 2 }, ref_in1_a2_d223 },
+                gatherTF_test_params{ "CPU", "FP32", { 2, 2 }, in1,{ 2, 2, 3 }, dict,-1, { 2, 2, 2, 2 }, ref_in1_a2_d223 },
+
+                gatherTF_test_params{ "CPU", "I32", { 2, 2 }, in0,{ 2, 2, 3 }, dict, 0, { 2, 2, 2, 3 }, ref_in0_a0_d223 },
+                gatherTF_test_params{ "CPU", "I32", { 2, 2 }, in0,{ 2, 2, 3 }, dict,-3, { 2, 2, 2, 3 }, ref_in0_a0_d223 },
+                gatherTF_test_params{ "CPU", "I32", { 2, 2 }, in0,{ 2, 3, 2 }, dict, 2, { 2, 3, 2, 2 }, ref_in0_a2_d232 }
+        ));
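The paired rows (axis 0 next to -3, axis 2 next to -1) document that negative Gather axes count from the back of the dictionary shape, so they normalize to the same positive axis and share a reference. A sketch of that normalization:

#include <cstddef>

inline std::size_t normalizeAxis(int axis, std::size_t rank) {
    // For a rank-3 dictionary: normalizeAxis(-3, 3) == 0, normalizeAxis(-1, 3) == 2.
    return static_cast<std::size_t>(axis < 0 ? axis + static_cast<int>(rank) : axis);
}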
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/gemm_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/gemm_tests.cpp
new file mode 100644 (file)
index 0000000..4eb804d
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gemm_tests.hpp"
+
+gemm_base_params gemm_smoke_cases[] = {
+    case6, case14, case22, case30,
+    case38
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_CPU_GemmRandomTest, GemmRandomTest,
+    testing::Combine(
+        testing::Values("CPU"),
+        testing::Values("FP32"),
+        testing::ValuesIn(gemm_smoke_cases)
+    ));
+
+gemm_base_params gemm_all_cases[] = {        // 5D cases
+    case1,  case2,  case3,  case4,  case5,   /* case7,  case8,  */
+    case9,  case10, case11, case12, case13,  /* case15, case16, */
+    case17, case18, case19, case20, case21,  /* case23, case24, */
+    case25, case26, case27, case28, case29,  /* case31, case32, */
+    case33, case34, case35, case36, case37, case38,
+    // Cases with mismatched input dimension numbers
+    // case39, case40, case41, case42, case43, case44,
+    // case45, case46, case47
+};
+
+INSTANTIATE_TEST_CASE_P(nightly_CPU_GemmRandomTest, GemmRandomTest,
+    testing::Combine(
+        testing::Values("CPU"),
+        testing::Values("FP32"),
+        testing::ValuesIn(gemm_all_cases)
+    ));
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/pad_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/pad_tests.cpp
new file mode 100644 (file)
index 0000000..02052c2
--- /dev/null
@@ -0,0 +1,10 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "pad_tests.hpp"
+
+PLUGING_CASE(CPU, PadTFTests, 1, { 3, 4 }, in, { 2, 2 }, { 1, 3 }, "constant", 0.f, { 6, 9 },  ref_constant);
+PLUGING_CASE(CPU, PadTFTests, 2, { 3, 4 }, in, { 2, 2 }, { 1, 3 },     "edge", 0.f, { 6, 9 },      ref_edge);
+PLUGING_CASE(CPU, PadTFTests, 3, { 3, 4 }, in, { 2, 2 }, { 1, 3 },  "reflect", 0.f, { 6, 9 },   ref_reflect);
+PLUGING_CASE(CPU, PadTFTests, 4, { 3, 4 }, in, { 2, 2 }, { 1, 3 },"symmetric", 0.f, { 6, 9 }, ref_symmetric);
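All four pad modes share one shape rule, out[i] = pads_begin[i] + in[i] + pads_end[i]; only the fill values differ. That is why every case above turns {3, 4} with begins {2, 2} and ends {1, 3} into {6, 9}:

#include <cstddef>
#include <vector>

std::vector<int> padOutputShape(const std::vector<int>& in,
                                const std::vector<int>& begins,
                                const std::vector<int>& ends) {
    std::vector<int> out(in.size());
    for (std::size_t i = 0; i < in.size(); ++i)
        out[i] = begins[i] + in[i] + ends[i];  // {2 + 3 + 1, 2 + 4 + 3} = {6, 9}
    return out;
}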
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/permute_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/permute_tests.cpp
new file mode 100644 (file)
index 0000000..a9c3f60
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "permute_tests.hpp"
+
+permute_test_params permute_only_test_cases[] = {
+        permute_test_params("CPU", case_1),
+        permute_test_params("CPU", case_2),
+        permute_test_params("CPU", case_3),
+        permute_test_params("CPU", case_4),
+        permute_test_params("CPU", case_5),
+        permute_test_params("CPU", case_6),
+        permute_test_params("CPU", case_7),
+        permute_test_params("CPU", case_8),
+        permute_test_params("CPU", case_9),
+        permute_test_params("CPU", case_10),
+        permute_test_params("CPU", case_11),
+        permute_test_params("CPU", case_12),
+        permute_test_params("CPU", case_13),
+        permute_test_params("CPU", case_14),
+        permute_test_params("CPU", case_15),
+        permute_test_params("CPU", case_16)
+};
+
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_CPU_TestPermute, PermuteOnlyTests, ::testing::ValuesIn(permute_only_test_cases));
+
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/quantize_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/quantize_tests.cpp
new file mode 100644 (file)
index 0000000..116e2b7
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "quantize_tests.hpp"
+
+quantize_test_params quantize_only_test_cases[] = {
+        quantize_test_params{"CPU", case_1},
+        quantize_test_params{"CPU", case_2},
+        quantize_test_params{"CPU", case_3},
+        quantize_test_params{"CPU", case_4},
+        quantize_test_params{"CPU", case_5},
+        quantize_test_params{"CPU", case_6},
+        quantize_test_params{"CPU", case_7},
+        quantize_test_params{"CPU", case_8},
+        quantize_test_params{"CPU", case_9},
+        quantize_test_params{"CPU", case_10},
+        quantize_test_params{"CPU", case_11},
+        quantize_test_params{"CPU", case_12},
+        quantize_test_params{"CPU", case_13},
+        quantize_test_params{"CPU", case_14},
+        quantize_test_params{"CPU", case_15},
+        quantize_test_params{"CPU", case_16},
+        quantize_test_params{"CPU", case_17},
+        quantize_test_params{"CPU", case_18},
+        quantize_test_params{"CPU", case_19},
+        quantize_test_params{"CPU", case_20},
+        quantize_test_params{"CPU", case_21},
+        quantize_test_params{"CPU", case_22},
+        quantize_test_params{"CPU", case_23},
+        quantize_test_params{"CPU", case_24},
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_CPUTestQuantize, QuantizeOnlyTest, ::testing::ValuesIn(quantize_only_test_cases));
+
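The 24 cases sweep range and level combinations through FakeQuantize. As a hedged scalar sketch of the documented per-element semantics (the fixture's reference implementation may broadcast and vectorize):

#include <cmath>

float fakeQuantize(float x, float inLow, float inHigh,
                   float outLow, float outHigh, int levels) {
    if (x <= inLow)  return outLow;
    if (x >  inHigh) return outHigh;
    const float q = std::round((x - inLow) / (inHigh - inLow) * (levels - 1));
    return q / (levels - 1) * (outHigh - outLow) + outLow;
}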
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/reduce_ftests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/reduce_ftests.cpp
new file mode 100644 (file)
index 0000000..93d1e89
--- /dev/null
@@ -0,0 +1,80 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "reduce_tests.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_CPU_TestsReduceSum, ReduceTestsShared,
+        ::testing::Values(
+                // Params: device_name, precision, reduce_type, keep_dims, in_shape, input_tensor, axes_for_reduction, out_shape, reference
+/*                reduce_test_params{ "CPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ 0 },{ 1, 3, 4 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } },
+                reduce_test_params{ "CPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ -3 },{ 1, 3, 4 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } },
+                reduce_test_params{ "CPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ 2 },{ 2, 3, 1 },{ 10, 26, 42, 58, 74, 90 } },
+                reduce_test_params{ "CPU", "FP32", "ReduceSum", true,{ 2, 3, 4, 1, 1 },{},{ 2 },{ 2, 3, 1, 1, 1 },{ 10, 26, 42, 58, 74, 90 } },
+                reduce_test_params{ "CPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ -1 },{ 2, 3, 1 },{ 10, 26, 42, 58, 74, 90 } },
+                reduce_test_params{ "CPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ 0, 2 },{ 1, 3, 1 },{ 68, 100, 132 } },
+                reduce_test_params{ "CPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ 1, 2 },{ 2, 1, 1 },{ 78, 222 } },
+                reduce_test_params{ "CPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ 2, 1 },{ 2, 1, 1 },{ 78, 222 } },
+                reduce_test_params{ "CPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ 0, 1, 2 },{ 1, 1, 1 },{ 300 } },
+                reduce_test_params{ "CPU", "FP32", "ReduceSum", true,{ 2, 3, 4 },{},{ 0, -2, 2 },{ 1, 1, 1 },{ 300 } },*/
+                reduce_test_params{ "CPU", "I32", "ReduceSum", true,{ 2, 3, 4 },{},{ 2, 2, 0, 2, 0 },{ 1, 3, 1 },{ 68, 100, 132 } },
+                reduce_test_params{ "CPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 0 },{ 3, 4 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } },
+                reduce_test_params{ "CPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ -3 },{ 3, 4 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } },
+                reduce_test_params{ "CPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 2 },{ 2, 3 },{ 10, 26, 42, 58, 74, 90 } },
+                reduce_test_params{ "CPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ -1 },{ 2, 3 },{ 10, 26, 42, 58, 74, 90 } },
+                reduce_test_params{ "CPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 0, 2 },{ 3 },{ 68, 100, 132 } },
+                reduce_test_params{ "CPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 1, 2 },{ 2 },{ 78, 222 } },
+                reduce_test_params{ "CPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 2, 1 },{ 2 },{ 78, 222 } },
+                reduce_test_params{ "CPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 0, 1, 2 },{},{ 300 } },
+                reduce_test_params{ "CPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 0, -2, 2 },{},{ 300 } },
+                reduce_test_params{ "CPU", "I32", "ReduceSum", false,{ 2, 3, 4 },{},{ 2, 2, 0, 2, 0 },{ 3 },{ 68, 100, 132 } },
+                reduce_test_params{ "CPU", "I32", "ReduceSum", true,{ 1, 2, 3, 4, 1 },{},{ 1 },{ 1, 1, 3, 4, 1 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } },
+                reduce_test_params{ "CPU", "I32", "ReduceSum", false,{ 1, 2, 3, 4, 1 },{},{ 1 },{ 1, 3, 4, 1 },{ 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36 } }
+        ));
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_CPU_TestsReduce, ReduceTestsShared,
+        ::testing::Values(
+                // Params: device_name, precision, reduce_type, keep_dims, in_shape, input_tensor, axes_for_reduction, out_shape, reference
+                reduce_test_params{ "CPU", "I32", "ReduceAnd", true,{ 2, 2, 2 },{1, 0, 1, 1, 0, 1, 1, 0},{ 2 },{ 2, 2, 1 },{ 0, 1, 0, 0} },
+                reduce_test_params{ "CPU", "I32", "ReduceAnd", false, { 2, 2, 2 },{1, 0, 1, 1, 0, 1, 1, 0},{ 0, 1, 2 },{ },{ 0 } },
+                reduce_test_params{ "CPU", "I32", "ReduceL1", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{ } },
+                reduce_test_params{ "CPU", "I32", "ReduceL1", true, { 3, 2, 2 },{},{ 2 },{ 3, 2, 1 },{ 3, 7, 11, 15, 19, 23 } },
+                reduce_test_params{ "CPU", "I32", "ReduceL1", false, { 3, 2, 2 },{},{ 2 },{ 3, 2 },{ 3, 7, 11, 15, 19, 23 } },
+                reduce_test_params{ "CPU", "I32", "ReduceL1", false, { 3, 2, 2 },{},{ 0, 1, 2 },{ },{ 78 } },
+                reduce_test_params{ "CPU", "I32", "ReduceL2", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{} },
+                reduce_test_params{ "CPU", "I32", "ReduceL2", true,{ 3, 2, 2 },{},{ 2 },{ 3, 2, 1 },{ 2.23606798f, 5.f, 7.81024968f, 10.63014581f, 13.45362405f, 16.2788206f } },
+                reduce_test_params{ "CPU", "I32", "ReduceL2", false,{ 3, 2, 2 },{},{ 2 },{ 3, 2 },{ 2.23606798f, 5.f, 7.81024968f, 10.63014581f, 13.45362405f, 16.2788206f } },
+                reduce_test_params{ "CPU", "I32", "ReduceL2", false,{ 3, 2, 2 },{},{ 0, 1, 2 },{ },{ 25.49509757f } },
+                reduce_test_params{ "CPU", "I32", "ReduceLogSum", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{} },
+                reduce_test_params{ "CPU", "I32", "ReduceLogSum", true,{ 3, 2, 2 },{ },{ 1 },{ 3, 1, 2 },{ } },
+                reduce_test_params{ "CPU", "I32", "ReduceLogSum", false,{ 3, 2, 2 },{ },{ 1 },{ 3, 2 },{ } },
+                reduce_test_params{ "CPU", "I32", "ReduceLogSum", false,{ 3, 2, 2 },{ },{ 0, 1, 2 },{},{ } },
+                reduce_test_params{ "CPU", "I32", "ReduceLogSumExp", true,{ 5, 5, 2 },{},{ 2 },{ 5, 5, 1 },{} },
+                reduce_test_params{ "CPU", "I32", "ReduceLogSumExp", true,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 1, 2 },{ 20.f, 2.31326175f, 40.00004578f, 2.31326175f, 60.00671387f, 2.31326175f } },
+                reduce_test_params{ "CPU", "I32", "ReduceLogSumExp", false,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 2 },{ 20.f, 2.31326175f, 40.00004578f, 2.31326175f, 60.00671387f, 2.31326175f } },
+                reduce_test_params{ "CPU", "I32", "ReduceLogSumExp", false,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 0, 1, 2 },{},{ 60.00671387f } },
+                reduce_test_params{ "CPU", "I32", "ReduceMax", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{} },
+                reduce_test_params{ "CPU", "I32", "ReduceMax", true,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 1, 2 },{ 20, 2, 40, 2, 60, 2 } },
+                reduce_test_params{ "CPU", "I32", "ReduceMax", false,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 2 },{ 20, 2, 40, 2, 60, 2 } },
+                reduce_test_params{ "CPU", "I32", "ReduceMax", false,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 0, 1, 2 },{},{ 60 } },
+                reduce_test_params{ "CPU", "I32", "ReduceMean", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{} },
+                reduce_test_params{ "CPU", "I32", "ReduceMean", true, { 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 1, 2 },{ 12.5f, 1.5f, 35.f, 1.5f, 57.5f, 1.5f } },
+                reduce_test_params{ "CPU", "I32", "ReduceMean", false, { 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 2 },{ 12.5f, 1.5f, 35.f, 1.5f, 57.5f, 1.5f } },
+                reduce_test_params{ "CPU", "I32", "ReduceMean", false, { 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 0, 1, 2 },{ },{ 18.25f } },
+                reduce_test_params{ "CPU", "I32", "ReduceMin", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{} },
+                reduce_test_params{ "CPU", "I32", "ReduceMin", true,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 1, 2 },{ 5, 1, 30, 1, 55, 1 } },
+                reduce_test_params{ "CPU", "I32", "ReduceMin", false,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 1 },{ 3, 2 },{ 5, 1, 30, 1, 55, 1 } },
+                reduce_test_params{ "CPU", "I32", "ReduceMin", false,{ 3, 2, 2 },{ 5, 1, 20, 2, 30, 1, 40, 2, 55, 1, 60, 2 },{ 0, 1, 2 },{},{ 1 } },
+                reduce_test_params{ "CPU", "I32", "ReduceOr", true,{ 2, 2, 2 },{1, 0, 1, 1, 0, 0, 1, 0},{ 2 },{ 2, 2, 1 },{1, 1, 0, 1 } },
+                reduce_test_params{ "CPU", "I32", "ReduceOr", false, { 2, 2, 2 },{},{ 0, 1, 2 },{ },{ 1 } },
+                reduce_test_params{ "CPU", "I32", "ReduceProd", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{} },
+                reduce_test_params{ "CPU", "I32", "ReduceProd", true,{ 3, 2, 2 },{},{ 1 },{ 3, 1, 2 },{ 3, 8, 35, 48, 99, 120 } },
+                reduce_test_params{ "CPU", "I32", "ReduceProd", false,{ 3, 2, 2 },{},{ 1 },{ 3, 2 },{ 3, 8, 35, 48, 99, 120 } },
+                reduce_test_params{ "CPU", "I32", "ReduceProd", false,{ 3, 2, 2 },{},{ 0, 1, 2 },{ },{ 4.790016e+08 } },
+                reduce_test_params{ "CPU", "I32", "ReduceSumSquare", true,{ 10, 10, 2 },{},{ 2 },{ 10, 10, 1 },{} },
+                reduce_test_params{ "CPU", "I32", "ReduceSumSquare", true, { 3, 2, 2 },{},{ 1 },{ 3, 1, 2 },{ 10, 20, 74, 100, 202, 244 } },
+                reduce_test_params{ "CPU", "I32", "ReduceSumSquare", false, { 3, 2, 2 },{},{ 1 },{ 3, 2 },{ 10, 20, 74, 100, 202, 244 } },
+                reduce_test_params{ "CPU", "I32", "ReduceSumSquare", false, { 3, 2, 2 },{},{ 0, 1, 2 },{ },{ 650 } }
+        ));
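The axis-0 ReduceSum rows are easy to verify by hand: reducing a {2, 3, 4} tensor over axis 0 adds elements 12 apart, and with inputs 1..24 that reproduces {14, 16, ..., 36}:

#include <vector>

std::vector<int> reduceSumAxis0_2x3x4(const std::vector<int>& in) {
    std::vector<int> out(12);
    for (int i = 0; i < 12; ++i)
        out[i] = in[i] + in[i + 12];  // e.g. out[0] = 1 + 13 = 14
    return out;
}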
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/resample_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/resample_tests.cpp
new file mode 100644 (file)
index 0000000..5b64d3a
--- /dev/null
@@ -0,0 +1,45 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "resample_tests.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_CPU_TestsResample, ResampleTests,
+        ::testing::Values(
+                // 4D nearest
+                resample_test_params{"CPU", {2, 64, 15, 25}, 1.f,   "caffe.ResampleParameter.NEAREST"},
+                resample_test_params{"CPU", {2, 64, 10, 20}, 0.25f, "caffe.ResampleParameter.NEAREST"},
+                resample_test_params{"CPU", {1, 1, 10, 20},  0.5f,  "caffe.ResampleParameter.NEAREST"},
+                resample_test_params{"CPU", {2, 3, 15, 25},  1.f,   "caffe.ResampleParameter.NEAREST"},
+                resample_test_params{"CPU", {2, 3, 10, 20},  0.25f, "caffe.ResampleParameter.NEAREST"},
+                resample_test_params{"CPU", {1, 1, 10, 13},  0.52f, "caffe.ResampleParameter.NEAREST"},
+                //// 4D linear
+                resample_test_params{"CPU", {2, 64, 15, 25}, 1.f,   "caffe.ResampleParameter.LINEAR"},
+                resample_test_params{"CPU", {2, 64, 10, 20}, 0.25f, "caffe.ResampleParameter.LINEAR"},
+                resample_test_params{"CPU", {1, 1, 15, 25},  0.5,   "caffe.ResampleParameter.LINEAR"},
+                resample_test_params{"CPU", {1, 3, 15, 25},  0.5,   "caffe.ResampleParameter.LINEAR"},
+                resample_test_params{"CPU", {2, 5, 3, 3},    3.0f,  "caffe.ResampleParameter.LINEAR"},
+                resample_test_params{"CPU", {2, 4, 10, 20},  2.0f,  "caffe.ResampleParameter.LINEAR"},
+                resample_test_params{"CPU", {2, 20, 30, 30}, 3.0f,  "caffe.ResampleParameter.LINEAR"},
+                resample_test_params{"CPU", {2, 20, 3, 6},   3.0f,  "caffe.ResampleParameter.LINEAR"},
+                //// 5D nearest
+                resample_test_params{ "CPU", {1, 64, 20, 15, 25}, 1.f,   "caffe.ResampleParameter.NEAREST" },
+                resample_test_params{ "CPU", {1, 64, 15, 10, 20}, 0.25f, "caffe.ResampleParameter.NEAREST" },
+                resample_test_params{ "CPU", {1, 64, 10, 10, 20}, 0.5f,  "caffe.ResampleParameter.NEAREST" },
+                resample_test_params{ "CPU", {1, 3, 20, 15, 25},  1.f,   "caffe.ResampleParameter.NEAREST" },
+                resample_test_params{ "CPU", {1, 3, 15, 10, 20},  0.25f, "caffe.ResampleParameter.NEAREST" },
+                resample_test_params{ "CPU", {2, 64, 20, 15, 25}, 1.f,   "caffe.ResampleParameter.NEAREST" },
+                resample_test_params{ "CPU", {2, 64, 15, 10, 20}, 0.25f, "caffe.ResampleParameter.NEAREST" },
+                resample_test_params{ "CPU", {2, 64, 10, 10, 20}, 0.5f,  "caffe.ResampleParameter.NEAREST" },
+                resample_test_params{ "CPU", {2, 3, 20, 15, 25},  1.f,   "caffe.ResampleParameter.NEAREST" },
+                resample_test_params{ "CPU", {2, 3, 15, 10, 20},  0.25f, "caffe.ResampleParameter.NEAREST" },
+                // 5D linear
+                resample_test_params{ "CPU", {1, 8, 5, 2, 4},     0.2f,  "caffe.ResampleParameter.LINEAR" },
+                resample_test_params{ "CPU", {1, 8, 10, 10, 20},  0.25f, "caffe.ResampleParameter.LINEAR" },
+                resample_test_params{ "CPU", {1, 2, 16, 12, 20},  4.f,   "caffe.ResampleParameter.LINEAR" },
+                resample_test_params{ "CPU", {2, 16, 15, 10, 20}, 1.f,   "caffe.ResampleParameter.LINEAR" },
+                resample_test_params{ "CPU", {2, 2, 4, 10, 20},   0.25f, "caffe.ResampleParameter.LINEAR" },
+                resample_test_params{ "CPU", {2, 4, 15, 10, 20},  1.f,   "caffe.ResampleParameter.LINEAR" },
+                resample_test_params{ "CPU", {2, 8, 16, 12, 20},  4.f,   "caffe.ResampleParameter.LINEAR" },
+                resample_test_params{ "CPU", {2, 16, 10, 10, 20}, 0.25f, "caffe.ResampleParameter.LINEAR" }));
\ No newline at end of file
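For the NEAREST cases, a common reading of the scale factor f (an assumption here, not taken from the fixture) is that each output coordinate samples the input at floor(coord / f); factor 1 is identity and 0.25 shrinks each spatial dim to a quarter:

#include <cstddef>

std::size_t nearestSrcIndex(std::size_t dstCoord, float factor) {
    // Truncation acts as floor because both operands are non-negative.
    return static_cast<std::size_t>(dstCoord / factor);
}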
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/softmax_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/softmax_tests.cpp
new file mode 100644 (file)
index 0000000..2da3b68
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "softmax_tests.hpp"
+
+softmax_test_params softmax_only_test_cases[] = {
+        softmax_test_params("CPU", case_1),
+        softmax_test_params("CPU", case_8),
+        softmax_test_params("CPU", case_8_nc, "2D"),
+};
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_CPU_TestsSoftmax, SoftmaxOnlyTest, ::testing::ValuesIn(softmax_only_test_cases)/*, getTestCaseName*/);
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/ti_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/ti_tests.cpp
new file mode 100644 (file)
index 0000000..c4fc9cf
--- /dev/null
@@ -0,0 +1,15 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "ti_tests.hpp"
+
+ti_test_params ti_test_cases[] = {{std::string("CPU"), 1, InferenceEngine::Precision(InferenceEngine::Precision::FP32)},
+                                  {std::string("CPU"), 8, InferenceEngine::Precision(InferenceEngine::Precision::FP32)},
+                                  {std::string("CPU"), 1, InferenceEngine::Precision(InferenceEngine::Precision::FP16)},
+                                  {std::string("CPU"), 8, InferenceEngine::Precision(InferenceEngine::Precision::FP16)}};
+
+
+RUN_CASE_P_WITH_SUFFIX(CPU, _smoke, TITest, ti_test_cases);
+
+RUN_CASE_P_WITH_SUFFIX(CPU, _smoke, TITest2, ti_test_cases);
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/tile_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/single_layer_tests/tile_tests.cpp
new file mode 100644 (file)
index 0000000..a835a0e
--- /dev/null
@@ -0,0 +1,26 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "tile_tests.hpp"
+
+tile_test_params tile_test_cases[] = {
+        tile_test_params("CPU", case_1),
+        tile_test_params("CPU", case_2),
+        tile_test_params("CPU", case_3),
+        tile_test_params("CPU", case_4),
+        tile_test_params("CPU", case_5),
+        tile_test_params("CPU", case_6),
+        tile_test_params("CPU", case_7),
+        tile_test_params("CPU", case_8),
+        tile_test_params("CPU", case_9),
+        tile_test_params("CPU", case_10),
+        tile_test_params("CPU", case_11),
+        tile_test_params("CPU", case_12),
+        tile_test_params("CPU", case_13),
+        tile_test_params("CPU", case_14),
+        tile_test_params("CPU", case_15),
+        tile_test_params("CPU", case_16),
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_CPU_TestsGeneralTile, TileTest, ::testing::ValuesIn(tile_test_cases));
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/transformations/low_precision_transformer_single_layer_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/transformations/low_precision_transformer_single_layer_tests.cpp
new file mode 100644 (file)
index 0000000..b2ea9be
--- /dev/null
@@ -0,0 +1,763 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+#include <gtest/gtest.h>
+#include <string>
+#include <memory>
+
+using namespace ::testing;
+using namespace InferenceEngine;
+
+
+TEST_P(SingleLayerTransformationsTest, LPT) {
+}
+
+INSTANTIATE_TEST_CASE_P(
+    smoke_SingleLayerTransformationsTestFP32,
+    SingleLayerTransformationsTest,
+    ::testing::Values(
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ResampleTestModel()),
+            { { 1, 32, 149, 149 } },
+            { { 1, 32, 147, 147 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new FullyConnectedAndScaleShiftsOnActivationsTestModel()),
+            { { 1, 2048 } },
+            { { 1, 1000 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnSignedActivationsAndWeightsPositiveTestModel()),
+            { { 1, 32, 149, 149 } },
+            { { 1, 32, 147, 147 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnSignedActivationsAndWeightsNegativeTestModel()),
+            { { 1, 32, 149, 149 } },
+            { { 1, 32, 147, 147 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnUnsignedActivationsAndWeightsTestModel()),
+            { { 1, 32, 149, 149 } },
+            { { 1, 32, 147, 147 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnSignedActivationsAndInvertedWeightsTestModel()),
+            { { 1, 32, 149, 149 } },
+            { { 1, 32, 147, 147 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new FakeQuantizeReshapePoolingTestModelWithConstants()),
+            { { 1, 1280, 7 } },
+            { { 1, 1280, 7 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new FakeQuantizeReshapePoolingTestModelWithoutConstants()),
+            { { 1, 1280, 7 } },
+            { { 1, 1280, 7 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new FakeQuantizeReshapeTestModelWithConstants()),
+            { { 1, 256, 6, 6 } },
+            { { 1, 9216 } }),
+
+        // TODO: fix the asymmetric pattern creation issue for NC layout and uncomment
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new FullyConnectedAndQuantizeTestModel()),
+        //    { { 1, 32, 1, 1 } },
+        //    { { 1, 32, 1, 1 } }),
+
+        // TODO: uncomment once biases correction with absent biases is fixed
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new GemmAndQuantizeTestModel()),
+        //    { { 1, 32, 149, 149 } },
+        //    { { 1, 32, 147, 147 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new PoolingTestModel()),
+            { { 149, 149, 32, 1 } },
+            { { 149, 149, 32, 1 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnWeightsWithMultiOutputIntervalsTestModel()),
+            { { 1, 32, 147, 147 } },
+            { { 1, 64, 147, 147 } }),
+
+        // Const transformation is disabled
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnWeightsWithoutConstTransformationTestModel()),
+            { { 1, 32, 149, 149 } },
+            { { 1, 32, 147, 147 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConvolutionAndPoolingAndQuantizeOnActivationsTestModel()),
+            { { 1, 64, 147, 147 } },
+            { { 1, 80, 73,  73  } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnActivationsTestModel()),
+            { { 1, 3,  299, 299 } },
+            { { 1, 32, 149, 149 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConvolutionAndDequantizationScaleShiftsOnActivationsTestModel()),
+            { { 1, 3,  299, 299 } },
+            { { 1, 32, 149, 149 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConvolutionAndDequantizationScaleShiftAndQuantizeOnActivationsTestModel()),
+            { { 1, 3, 299, 299 } },
+            { { 1, 32, 149, 149 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConvolutionDepthwiseTestModel()),
+            { { 1, 32, 112, 112 } },
+            { { 1, 32, 112, 112 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConvolutionGroupedTestModel()),
+            { { 1, 32, 112, 112 } },
+            { { 1, 32, 112, 112 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConcatMultiChannelTestModel()),
+            { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+            { { 1, 6, 299, 299 } }),
+
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new ConcatMultiBranchTestModel()),
+        //    { { 299, 299, 3, 1 }, { 299, 299, 3, 1 } },
+        //    { { 299, 299, 12, 1 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new QuantizationOnWeightsTestModel()),
+            { { 1, 32, 149, 149 } },
+            { { 1, 32, 147, 147 } }),
+
+        // TODO: fix later
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new QuantizationOnInvertedWeightsTestModel()),
+        //    { { 1, 32, 149, 149 } },
+        //    { { 1, 32, 147, 147 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new FakeQuantizeAsOutputTest()),
+            { { 1, 32, 149, 149 } },
+            { { 1, 32, 147, 147 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new FakeQuantizeWithMultiOutputsTest()),
+            { { 1, 32, 149, 149 } },
+            { { 1, 32, 147, 147 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new FakeQuantizeAndScaleShiftTestModel()),
+            { { 1, 3, 299, 299 } },
+            { { 1, 3, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new FakeQuantizeAndActivationTestModel({ {-10.25, 10.1641} })),
+            { { 1, 3, 299, 299 } },
+            { { 1, 3, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new FakeQuantizeAndActivationTestModel({ {-0.00174255, 0.00174255} })),
+            { { 1, 3, 299, 299 } },
+            { { 1, 3, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new FakeQuantizeAndActivationTestModel({ {-329.688, 327.188} })),
+            { { 1, 3, 299, 299 } },
+            { { 1, 3, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new FakeQuantizeAndActivationWithNegativeScalesTestModel()),
+            { { 1, 3, 299, 299 } },
+            { { 1, 3, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new FakeQuantizeAndActivationWithNegativeSlopeTestModel()),
+            { { 1, 3, 299, 299 } },
+            { { 1, 3, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ScaleShiftAndFakeQuantizeTestModel()),
+            { { 1, 3, 299, 299 } },
+            { { 1, 3, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new FakeQuantizeWithTwoScaleShiftsAsOutput()),
+            { { 1, 32, 28, 28 }, { } },
+            { { } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new MvnTestModel(0ul, 0ul)),
+            { { 1, 4, 128, 128, 128 } },
+            { { 1, 4, 128, 128, 128 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new MvnTestModel(1ul, 0ul)),
+            { { 1, 4, 128, 128, 128 } },
+            { { 1, 4, 128, 128, 128 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new MvnTestModel(0ul, 1ul)),
+            { { 1, 4, 128, 128, 128 } },
+            { { 1, 4, 128, 128, 128 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new MvnTestModel(1ul, 1ul)),
+            { { 1, 4, 128, 128, 128 } },
+            { { 1, 4, 128, 128, 128 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new PrecisionSelectionMultibranchPreservedTestModel(true)),
+            { { 1, 32, 149, 149 } },
+            { { 1, 32, 149, 149 }, { 1, 32, 147, 147 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new PrecisionSelectionMultibranchPreservedTestModel(false)),
+            { { 1, 32, 149, 149 } },
+            { { 1, 32, 149, 149 }, { 1, 32, 147, 147 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new PrecisionSelectionMultibranchNotPreservedTestModel(true)),
+            { { 1, 32, 149, 149 } },
+            { { 1, 32, 149, 149 }, { 1, 32, 147, 147 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new PrecisionSelectionMultibranchNotPreservedTestModel(false)),
+            { { 1, 32, 149, 149 } },
+            { { 1, 32, 149, 149 }, { 1, 32, 147, 147 } })
+    ),
+    SingleLayerTransformationsTestParams::getLowPrecisionTransformerSingleLayerTestName);
+
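+// SingleLayerTransformationsTestParams(device, model, input shapes,
+// expected output shapes[, precision]); the FP32 suites below omit the
+// trailing precision argument.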
+INSTANTIATE_TEST_CASE_P(
+    smoke_EltwiseTestFP32,
+    SingleLayerTransformationsTest,
+    ::testing::Values(
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new EltwiseTestModel(true, "sum", true)),
+            { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+            { { 1, 3, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new EltwiseTestModel(true, "sum", false)),
+            { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+            { { 1, 3, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new EltwiseTestModel(true, "mul", true)),
+            { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+            { { 1, 3, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new EltwiseTestModel(true, "mul", false)),
+            { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+            { { 1, 3, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new EltwiseFqWithChildrenTestModel(true, "sum", true)),
+            { { 1, 3, 299, 299 } },
+            { { 1, 3, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new EltwiseFqWithChildrenTestModel(true, "sum", false)),
+            { { 1, 3, 299, 299 } },
+            { { 1, 3, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new EltwiseFqWithChildrenTestModel(true, "mul", true)),
+            { { 1, 3, 299, 299 } },
+            { { 1, 3, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new EltwiseFqWithChildrenTestModel(true, "mul", false)),
+            { { 1, 3, 299, 299 } },
+            { { 1, 3, 299, 299 } })
+    ),
+    SingleLayerTransformationsTestParams::getLowPrecisionTransformerSingleLayerTestName);
+
+INSTANTIATE_TEST_CASE_P(
+    smoke_ConcatTestFP32,
+    SingleLayerTransformationsTest,
+    ::testing::Values(
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConcatTestModel(true, true, true)),
+            { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+            { { 1, 6, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConcatTestModel(true, true, false)),
+            { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+            { { 1, 6, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConcatTestModel(true, false)),
+            { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+            { { 1, 6, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConcatTestModel(false, true)),
+            { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+            { { 1, 6, 299, 299 } }),
+
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new ConcatTestModel(false, false)),
+        //    { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+        //    { { 1, 6, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConcatTestModel(true, true, true, { 100, 1 })),
+            { { 100, 1 }, { 100, 1 } },
+            { { 100, 2 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConcatTestModel(true, true, false, { 100, 1 })),
+            { { 100, 1 }, { 100, 1 } },
+            { { 100, 2 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConcatTestModel(false, true, true, { 100, 1 })),
+            { { 100, 1 }, { 100, 1 } },
+            { { 100, 2 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConcatTestModel(false, true, false, { 100, 1 })),
+            { { 100, 1 }, { 100, 1 } },
+            { { 100, 2 } })
+    ),
+    SingleLayerTransformationsTestParams::getLowPrecisionTransformerSingleLayerTestName);
+
+INSTANTIATE_TEST_CASE_P(
+    smoke_ScaleShiftToConvolutionFP32,
+    SingleLayerTransformationsTest,
+    ::testing::Values(
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ScaleShiftToConvolutionAfterNotConcatIgnoreTestModel()),
+            { { 1, 64, 112, 112 } },
+            { { 1, 64, 112, 112 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ScaleShiftToConvolutionAfterFakeQuantizeIgnoreTestModel()),
+            { { 1, 64, 112, 112 } },
+            { { 1, 64, 112, 112 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ScaleShiftToConvolutionAfterConcatTestModel(true)),
+            { { 1, 32, 299, 299 }, { 1, 32, 299, 299 } },
+            { { 1, 64, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ScaleShiftToConvolutionAfterConcatTestModel(false)),
+            { { 1, 32, 299, 299 }, { 1, 32, 299, 299 } },
+            { { 1, 64, 299, 299 } })
+    ),
+    SingleLayerTransformationsTestParams::getLowPrecisionTransformerSingleLayerTestName);
+
+INSTANTIATE_TEST_CASE_P(
+    smoke_UpdateBiases,
+    SingleLayerTransformationsTest,
+    ::testing::Values(
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new UpdateBiasesConvolutionTestModel(false)),
+            { { 1, 32, 112, 112 } },
+            { { 1, 32, 112, 112 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new UpdateBiasesConvolutionTestModel(true)),
+            { { 1, 32, 112, 112 } },
+            { { 1, 32, 112, 112 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new UpdateBiasesFullyConnectedTestModel(true)),
+            { { 1, 32, 112, 112 } },
+            { { 1, 100 } })
+    ),
+    SingleLayerTransformationsTestParams::getLowPrecisionTransformerSingleLayerTestName);
+
+INSTANTIATE_TEST_CASE_P(
+    smoke_EltwiseCpuWithPooling,
+    SingleLayerTransformationsTest,
+    ::testing::Values(
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new EltwiseWithPoolingTestModel(true, "mul", false)),
+            { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+            { { 1, 3, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new EltwiseWithPoolingTestModel(true, "mul", true)),
+            { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+            { { 1, 3, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new EltwiseWithPoolingTestModel(true, "sum", false)),
+            { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+            { { 1, 3, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new EltwiseWithPoolingTestModel(true, "sum", true)),
+            { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+            { { 1, 3, 299, 299 } })
+    ),
+    SingleLayerTransformationsTestParams::getLowPrecisionTransformerSingleLayerTestName);
+
+INSTANTIATE_TEST_CASE_P(
+    smoke_Eltwise,
+    SingleLayerTransformationsTest,
+    ::testing::Values(
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new EltwiseWithPoolingTestModel(true, "sum", false)),
+            { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+            { { 1, 3, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new EltwiseWithPoolingTestModel(true, "sum", true)),
+            { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+            { { 1, 3, 299, 299 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new EltwiseCpuTestModel()),
+            { { 1, 3, 299, 299 } },
+            { {} }),
+
+//        SingleLayerTransformationsTestParams(
+//            "CPU",
+//            SingleLayerTestModel::Ptr(new EltwiseTestModel()),
+//            { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+//            { {} },
+//            "FP16"),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new EltwiseCpuTestModel()),
+            { { 1, 3, 299, 299 } },
+            { {} },
+            "FP16"),
+
+//        SingleLayerTransformationsTestParams(
+//            "CPU",
+//            SingleLayerTestModel::Ptr(new EltwiseBroadcastTestModel()),
+//            { { 1, 128, 128 }, { 1, 128, 128 } },
+//            { { 1, 128, 128 } }),
+
+        SingleLayerTransformationsTestParams( // 5
+            "CPU",
+            SingleLayerTestModel::Ptr(new EltwiseBroadcastTestModel()),
+            { { 1, 1,   128 }, { 1, 128, 128 } },
+            { { 1, 128, 128 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new EltwiseBroadcastTestModel()),
+            { { 1, 128, 128 }, { 1, 128, 1 } },
+            { { 1, 128, 128 } }),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new EltwiseBroadcastTestModel()),
+            { { 1, 1,   128 }, { 1, 128, 1 } },
+            { { 1, 128, 128 } })));
+
+INSTANTIATE_TEST_CASE_P(
+    smoke_SingleLayerTransformationsTestFP16,
+    SingleLayerTransformationsTest,
+    ::testing::Values(
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new FullyConnectedAndScaleShiftsOnActivationsTestModel()),
+            { { 1, 2048 } },
+            { { 1, 1000 } },
+            "FP16"),
+
+        // TODO: uncomment after fix
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnSignedActivationsAndWeightsTestModel()),
+        //    { { 1, 32, 149, 149 } },
+        //    { { 1, 32, 147, 147 } },
+        //    "FP16"),
+
+        // TODO: uncomment after fix
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnUnsignedActivationsAndWeightsTestModel()),
+        //    { { 1, 32, 149, 149 } },
+        //    { { 1, 32, 147, 147 } },
+        //    "FP16"),
+
+        // TODO: uncomment after fix
+//        SingleLayerTransformationsTestParams(
+//            "CPU",
+//            SingleLayerTestModel::Ptr(new FakeQuantizeReshapePoolingTestModelWithConstants()),
+//            { { 1, 1280, 7 } },
+//            { { 1, 1280, 7 } },
+//            "FP16"),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new FakeQuantizeReshapePoolingTestModelWithoutConstants()),
+            { { 1, 1280, 7 } },
+            { { 1, 1280, 7 } },
+            "FP16"),
+
+        // TODO: uncomment after fix
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new FakeQuantizeReshapeTestModelWithConstants()),
+        //    { { 1, 256, 6, 6 } },
+        //    { { 1, 9216 } },
+        //    "FP16"),
+
+        // Not parametrized yet. Executed on FP32
+
+        // TODO: fix asymmetric pattern creation issue for NC layout and uncomment
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new FullyConnectedAndQuantizeTestModel()),
+        //    { { 1, 32, 149, 149 } },
+        //    { { 1, 32, 147, 147 } },
+        //    "FP16"),
+
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new GemmAndQuantizeTestModel()),
+        //    { { 1, 32, 149, 149 } },
+        //    { { 1, 32, 147, 147 } },
+        //    "FP16"),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new PoolingTestModel()),
+            { { 149, 149, 32, 1 } },
+            { { 149, 149, 32, 1 } },
+            "FP16"),
+
+        // TODO: failed on I8 on activations - uncomment after fix
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnWeightsWithMultiOutputIntervalsTestModel()),
+        //    { { 1, 32, 147, 147 } },
+        //    { { 1, 64, 147, 147 } },
+        //    "FP16"),
+
+        // TODO: uncomment after fix
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnWeightsWithoutConstTransformationTestModel()),
+        //    { { 1, 32, 149, 149 } },
+        //    { { 1, 32, 147, 147 } },
+        //    "FP16"),
+
+        // TODO: uncomment after fix
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new ConvolutionAndPoolingAndQuantizeOnActivationsTestModel()),
+        //    { { 1, 64, 147, 147 } },
+        //    { { 1, 80, 73,  73  } },
+        //    "FP16"),
+
+        // TODO: uncomment after fix
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new ConvolutionAndQuantizeOnActivationsTestModel()),
+        //    { { 1, 3,  299, 299 } },
+        //    { { 1, 32, 149, 149 } },
+        //    "FP16"),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConvolutionAndDequantizationScaleShiftsOnActivationsTestModel()),
+            { { 1, 3,  299, 299 } },
+            { { 1, 32, 149, 149 } },
+            "FP16"),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConvolutionAndDequantizationScaleShiftAndQuantizeOnActivationsTestModel()),
+            { { 1, 3, 299, 299 } },
+            { { 1, 32, 149, 149 } },
+            "FP16"),
+
+        // TODO: fix and uncomment
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new ConvolutionDepthwiseTestModel()),
+        //    { { 1, 32, 112, 112 } },
+        //    { { 1, 32, 112, 112 } },
+        //    "FP16"),
+
+        // TODO: fix and uncomment
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new ConvolutionGroupedTestModel()),
+        //    { { 1, 32, 112, 112 } },
+        //    { { 1, 32, 112, 112 } },
+        //    "FP16"),
+
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new ConcatTestModel(true)),
+        //    { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+        //    { { 1, 6, 299, 299 } },
+        //    "FP16"),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConcatTestModel(false, true)),
+            { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+            { { 1, 6, 299, 299 } },
+            "FP16"),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new ConcatMultiChannelTestModel()),
+            { { 1, 3, 299, 299 }, { 1, 3, 299, 299 } },
+            { { 1, 6, 299, 299 }, },
+            "FP16"),
+
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new ConcatMultiBranchTestModel()),
+        //    { { 299, 299, 3, 1 }, { 299, 299, 3, 1 } },
+        //    { { 299, 299, 12, 1 } },
+        //    "FP16"),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new QuantizationOnWeightsTestModel()),
+            { { 1, 32, 149, 149 } },
+            { { 1, 32, 147, 147 } },
+            "FP16"),
+
+        // TODO: fix later
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new QuantizationOnInvertedWeightsTestModel()),
+        //    { { 1, 32, 149, 149 } },
+        //    { { 1, 32, 147, 147 } },
+        //    "FP16"),
+
+        SingleLayerTransformationsTestParams(
+            "CPU",
+            SingleLayerTestModel::Ptr(new FakeQuantizeAndScaleShiftTestModel()),
+            { { 1, 3, 299, 299 } },
+            { { 1, 3, 299, 299 } },
+            "FP16")
+
+        // TODO: fix and uncomment
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new ScaleShiftToConvolutionAfterNotConcatIgnoreTestModel()),
+        //    { { 1, 64, 112, 112 } },
+        //    { { 1, 64, 112, 112 } },
+        //    "FP16")
+
+        // TODO: fix and uncomment
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new ScaleShiftToConvolutionAfterFakeQuantizeIgnoreTestModel()),
+        //    { { 1, 64, 112, 112 } },
+        //    { { 1, 64, 112, 112 } },
+        //    "FP16")
+
+        // TODO: fix and uncomment
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new ScaleShiftToConvolutionAfterConcatTestModel()),
+        //    { { 1, 32, 299, 299 }, { 1, 32, 299, 299 } },
+        //    { { 1, 64, 299, 299 } },
+        //    "FP16")
+
+        // TODO: fix and uncomment
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new UpdateBiasesConvolutionTestModel(false)),
+        //    { { 1, 32, 112, 112 } },
+        //    { { 1, 32, 112, 112 } },
+        //    "FP16"),
+
+        // TODO: fix and uncomment
+        //SingleLayerTransformationsTestParams(
+        //    "CPU",
+        //    SingleLayerTestModel::Ptr(new UpdateBiasesConvolutionTestModel(true)),
+        //    { { 1, 32, 112, 112 } },
+        //    { { 1, 32, 112, 112 } },
+        //    "FP16")
+        ),
+    SingleLayerTransformationsTestParams::getLowPrecisionTransformerSingleLayerTestName);
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/argmax_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/argmax_tests.cpp
new file mode 100644 (file)
index 0000000..14e39e6
--- /dev/null
@@ -0,0 +1,211 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "single_layer_common.hpp"
+#include "tests_common.hpp"
+#include "common_test_utils/data_utils.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+
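+// ArgMax test parameters: input/output shapes, whether an explicit axis is set
+// (and its value), whether max values are emitted alongside the indices
+// (out_max_val), and how many top elements are produced (top_k).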
+struct argmax_test_params {
+    std::vector<size_t> src_dims;
+    std::vector<size_t> dst_dims;
+    int has_axis;
+    int axis;
+    int out_max_val;
+    int top_k;
+};
+
+static inline int count(std::vector<size_t> dims, size_t start_ind, size_t end_ind) {
+    size_t count = 1;
+    for (size_t i = start_ind; i < end_ind; i++)
+        count *= dims[i];
+    return static_cast<int>(count);
+}
+
+static inline int count(std::vector<size_t> dims, size_t start_ind = 0) {
+    return count(dims, start_ind, dims.size());
+}
+
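+// Reference ArgMax. A worked example: for src_dims {2, 3}, has_axis=1, axis=1,
+// top_k=1, out_max_val=0, the input {1, 5, 2, 9, 0, 3} produces dst {1, 0},
+// i.e. the index of the maximum along axis 1 for each of the two rows.
+// (A hypothetical case for illustration; it is not one of the instantiations below.)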
+static void ref_argmax(InferenceEngine::TBlob<float> &src, InferenceEngine::TBlob<float> &dst, argmax_test_params p) {
+    float *src_data = src.data();
+    float* dst_data = dst.data();
+
+    int dim, axis_dist;
+    if (p.has_axis) {
+        int axis_ = (p.axis < 0) ? p.axis + static_cast<int>(p.src_dims.size()) : p.axis;
+        dim = static_cast<int>(p.src_dims[axis_]);
+        axis_dist = count(p.src_dims, axis_) / dim;
+    } else {
+        dim = count(p.src_dims, 1);
+        axis_dist = 1;
+    }
+
+    int num = count(p.src_dims) / dim;
+    std::vector<std::pair<float, int> > src_vector(dim);
+
+    for (int i = 0; i < num; ++i) {
+        for (int j = 0; j < dim; ++j) {
+            src_vector[j] = std::make_pair(
+                    src_data[(i / axis_dist * dim + j) * axis_dist + i % axis_dist], j);
+        }
+
+        std::partial_sort(src_vector.begin(), src_vector.begin() + p.top_k,
+                          src_vector.end(), std::greater<std::pair<float, int> >());
+
+        for (int j = 0; j < p.top_k; ++j) {
+            if (p.out_max_val) {
+                if (p.has_axis) {
+                    // Produces max_val per axis
+                    dst_data[(i / axis_dist * p.top_k + j) * axis_dist + i % axis_dist] = src_vector[j].first;
+                } else {
+                    // Produces max_ind and max_val
+                    dst_data[2 * i * p.top_k + j] = src_vector[j].second;
+                    dst_data[2 * i * p.top_k + p.top_k + j] = src_vector[j].first;
+                }
+            } else {
+                // Produces max_ind per axis
+                dst_data[(i / axis_dist * p.top_k + j) * axis_dist + i % axis_dist] = src_vector[j].second;
+            }
+        }
+    }
+}
+
+class smoke_CPU_ArgmaxOnlyTest: public TestsCommon, public WithParamInterface<argmax_test_params> {
+    std::string model_t = R"V0G0N(
+<net name="ArgmaxOnly" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer id="0" name="input" type="Input" precision="FP32" >
+            <output>
+                <port id="0">__SRC_DIMS__
+                </port>
+            </output>
+        </layer>
+        <layer id="1" name="argmax" type="ArgMax" precision="FP32">
+            <data _AXIS_ out_max_val="__OUT_MAX_VAL__" top_k="__TOP_K__"/>
+            <input>
+                <port id="0">__SRC_DIMS__
+                </port>
+            </input>
+            <output>
+                <port id="1">__DST_DIMS__
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+    </edges>
+</net>
+)V0G0N";
+
+protected:
+    std::string getModel(argmax_test_params p) {
+        std::string model = model_t;
+
+        std::string src_dims;
+        for (auto &dim : p.src_dims) {
+            src_dims += "\n                    <dim>";
+            src_dims += std::to_string(dim) + "</dim>";
+        }
+        REPLACE_WITH_STR(model, "__SRC_DIMS__", src_dims);
+
+        std::string dst_dims;
+        for (auto &dim : p.dst_dims) {
+            dst_dims += "\n                    <dim>";
+            dst_dims += std::to_string(dim) + "</dim>";
+        }
+        REPLACE_WITH_STR(model, "__DST_DIMS__", dst_dims);
+
+        std::string axis;
+        if (p.has_axis) {
+            axis += "axis=\"" + std::to_string(p.axis) + "\"";
+        }
+        REPLACE_WITH_STR(model, "_AXIS_", axis);
+
+        REPLACE_WITH_STR(model, "__OUT_MAX_VAL__", std::to_string(p.out_max_val));
+        REPLACE_WITH_STR(model, "__TOP_K__", std::to_string(p.top_k));
+
+        return model;
+    }
+
+    virtual void SetUp() {
+        try {
+            argmax_test_params p = ::testing::WithParamInterface<argmax_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
+
+            Blob::Ptr src = make_shared_blob<float>({Precision::FP32, p.src_dims, Layout::ANY});
+            src->allocate();
+
+            TBlob<float>* srcPtr = dynamic_cast<TBlob<float>*>(src.get());
+
+            if (srcPtr == nullptr)
+                FAIL() << "Cannot cast blob to TBlob<float>.";
+            CommonTestUtils::fill_data_sine(src->buffer(), src->size(), 0.5, 0.5, 1);
+
+            BlobMap srcs;
+            srcs.insert(std::pair<std::string, Blob::Ptr>("input", src));
+
+            OutputsDataMap out;
+            out = net.getOutputsInfo();
+            BlobMap outputBlobs;
+
+            std::pair<std::string, DataPtr> item = *out.begin();
+
+            TBlob<float>::Ptr output;
+            output = make_shared_blob<float>(item.second->getTensorDesc());
+            output->allocate();
+            outputBlobs[item.first] = output;
+
+            TBlob<float> dst_ref(item.second->getTensorDesc());
+            dst_ref.allocate();
+
+            ref_argmax(*srcPtr, dst_ref, p);
+
+            ExecutableNetwork exeNetwork = ie.LoadNetwork(net, "CPU");
+            InferRequest inferRequest = exeNetwork.CreateInferRequest();
+            inferRequest.SetInput(srcs);
+            inferRequest.SetOutput(outputBlobs);
+            inferRequest.Infer();
+
+            compare(*outputBlobs.begin()->second, dst_ref);
+
+        } catch (const details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(smoke_CPU_ArgmaxOnlyTest, TestsArgmax) {}
+
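+// argmax_test_params{src_dims, dst_dims, has_axis, axis, out_max_val, top_k}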
+INSTANTIATE_TEST_CASE_P(
+        TestsArgmax, smoke_CPU_ArgmaxOnlyTest,
+        ::testing::Values(
+                argmax_test_params{{1, 3, 1024, 2048}, {1, 1, 1024, 2048}, 1, 1, 0, 1},
+                argmax_test_params{{1, 5, 1024, 2048}, {1, 1, 1024, 2048}, 1, 1, 1, 1},
+                argmax_test_params{{3, 1, 10, 512}, {3}, 0, 1, 0, 1},
+                argmax_test_params{{3, 1, 10, 512}, {3, 2}, 0, 1, 1, 1},
+                argmax_test_params{{1, 20, 128, 128}, {1, 3, 128, 128}, 1, 1, 0, 3},
+                argmax_test_params{{1, 20, 128, 128}, {1, 3, 128, 128}, 1, 1, 1, 3},
+                argmax_test_params{{3, 1, 10, 512}, {3, 5}, 0, 1, 0, 5},
+                argmax_test_params{{3, 1, 10, 512}, {3, 5, 2}, 0, 1, 1, 5},
+                argmax_test_params{{1, 20, 128, 128}, {1, 18, 128, 128}, 1, 1, 0, 18},
+                argmax_test_params{{1, 20, 128, 128}, {1, 18, 128, 128}, 1, 1, 1, 18}
+        ));
+
+INSTANTIATE_TEST_CASE_P(
+        TestsArgmaxOddDims, smoke_CPU_ArgmaxOnlyTest,
+        ::testing::Values(
+                argmax_test_params{{1, 3, 1025, 2049}, {1, 1, 1025, 2049}, 1, 1, 0, 1},
+                argmax_test_params{{1, 5, 1025, 2049}, {1, 1, 1025, 2049}, 1, 1, 1, 1},
+                argmax_test_params{{1, 20, 129, 129}, {1, 3, 129, 129}, 1, 1, 0, 3},
+                argmax_test_params{{1, 20, 129, 129}, {1, 3, 129, 129}, 1, 1, 1, 3}
+        ));
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/concat_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/concat_tests.cpp
new file mode 100644 (file)
index 0000000..1ac3400
--- /dev/null
@@ -0,0 +1,277 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+
+
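+// Two NCHW input shapes, the expected output shape, and the concatenation axis.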
+struct concat_base_params {
+    struct {
+        size_t n;
+        size_t c;
+        size_t h;
+        size_t w;
+    } in1;
+
+    struct {
+        size_t n;
+        size_t c;
+        size_t h;
+        size_t w;
+    } in2;
+
+    struct {
+        size_t n;
+        size_t c;
+        size_t h;
+        size_t w;
+    } out;
+
+    size_t axis;
+};
+
+struct concat_test_params : concat_base_params {
+    std::string device_name;
+
+    concat_test_params(std::string name, concat_base_params params)
+            : concat_base_params(params), device_name(name) {}
+};
+
+template <typename data_t>
+void check_concat_fwd(const TBlob<data_t> &src, concat_test_params prm)
+{
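+    // Left empty in this snapshot; the element-wise check happens inline in SetUp().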
+}
+
+class smoke_CPU_ConcatOnlyTest: public TestsCommon,
+                    public WithParamInterface<concat_test_params> {
+
+    std::string model_t = R"V0G0N(
+<net name="ConcatOnly" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="in1" type="Input" precision="FP32" id="1">
+            <output>
+                <port id="1">
+                    <dim>_IN1_</dim>
+                    <dim>_IC1_</dim>
+                    <dim>_IH1_</dim>
+                    <dim>_IW1_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="in2" type="Input" precision="FP32" id="2">
+            <output>
+                <port id="2">
+                    <dim>_IN2_</dim>
+                    <dim>_IC2_</dim>
+                    <dim>_IH2_</dim>
+                    <dim>_IW2_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="con" id="3" type="Concat" precision="FP32">
+            <concat_data axis="_AXIS_"/>
+            <input>
+                <port id="1">
+                    <dim>_IN1_</dim>
+                    <dim>_IC1_</dim>
+                    <dim>_IH1_</dim>
+                    <dim>_IW1_</dim>
+                </port>
+                <port id="2">
+                    <dim>_IN2_</dim>
+                    <dim>_IC2_</dim>
+                    <dim>_IH2_</dim>
+                    <dim>_IW2_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="3">
+                    <dim>_ON_</dim>
+                    <dim>_OC_</dim>
+                    <dim>_OH_</dim>
+                    <dim>_OW_</dim>
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="1" from-port="1" to-layer="3" to-port="1"/>
+        <edge from-layer="2" from-port="2" to-layer="3" to-port="2"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string getModel(concat_test_params p) {
+        std::string model = model_t;
+
+        REPLACE_WITH_NUM(model, "_IN1_", p.in1.n);
+        REPLACE_WITH_NUM(model, "_IC1_", p.in1.c);
+        REPLACE_WITH_NUM(model, "_IW1_", p.in1.w);
+        REPLACE_WITH_NUM(model, "_IH1_", p.in1.h);
+
+        REPLACE_WITH_NUM(model, "_IN2_", p.in2.n);
+        REPLACE_WITH_NUM(model, "_IC2_", p.in2.c);
+        REPLACE_WITH_NUM(model, "_IW2_", p.in2.w);
+        REPLACE_WITH_NUM(model, "_IH2_", p.in2.h);
+
+        REPLACE_WITH_NUM(model, "_ON_", p.out.n);
+        REPLACE_WITH_NUM(model, "_OC_", p.out.c);
+        REPLACE_WITH_NUM(model, "_OH_", p.out.h);
+        REPLACE_WITH_NUM(model, "_OW_", p.out.w);
+
+        REPLACE_WITH_NUM(model, "_AXIS_", p.axis);
+        return model;
+    }
+
+protected:
+
+    static void fill_data_ints(float *data, size_t size, int start) {
+        for (size_t i = 0; i < size; i++) {
+            data[i] = (float) (start + i);
+        }
+    }
+
+    virtual void SetUp() {
+
+        try {
+            concat_test_params p = ::testing::WithParamInterface<concat_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr());
+
+            SizeVector dims_src1 = {p.in1.n,
+                                    p.in1.c,
+                                    p.in1.h,
+                                    p.in1.w};
+
+            SizeVector dims_src2 = {p.in2.n,
+                                    p.in2.c,
+                                    p.in2.h,
+                                    p.in2.w};
+
+            SizeVector dims_dst = {p.out.n,
+                                   p.out.c,
+                                   p.out.h,
+                                   p.out.w};
+
+            Blob::Ptr src1 = make_shared_blob<float>({Precision::FP32, dims_src1, Layout::NCHW});
+            src1->allocate();
+            fill_data_ints(src1->buffer(), src1->size(), 0);
+            Blob::Ptr src2 =  make_shared_blob<float>({Precision::FP32, dims_src2, Layout::NCHW});
+            src2->allocate();
+            fill_data_ints(src2->buffer(), src2->size(), 10000);
+            BlobMap srcs;
+            srcs.insert(std::pair<std::string, Blob::Ptr>("in1", src1));
+            srcs.insert(std::pair<std::string, Blob::Ptr>("in2", src2));
+
+            OutputsDataMap out;
+            out = network.getOutputsInfo();
+            InferenceEngine::BlobMap outputBlobs;
+
+            std::pair<std::string, DataPtr> item = *out.begin();
+
+            InferenceEngine::TBlob<float>::Ptr output;
+            output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
+            output->allocate();
+            outputBlobs[item.first] = output;
+
+            ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.device_name);
+            InferRequest inferRequest = exeNetwork.CreateInferRequest();
+            inferRequest.SetInput(srcs);
+            inferRequest.SetOutput(outputBlobs);
+            inferRequest.Infer();
+
+            //compare(src, dst);
+
+            float *src1_ptr = src1->buffer();
+            float *src2_ptr = src2->buffer();
+            float *dst_ptr = output->buffer();
+
+            size_t len1 = 1, len2 = 1, cycles = 1;
+            for (size_t dim = p.axis; dim < output->getTensorDesc().getDims().size(); dim++) {
+                len1 *= src1->getTensorDesc().getDims()[dim];
+                len2 *= src2->getTensorDesc().getDims()[dim];
+            }
+            // The number of interleaved src1/src2 blocks equals the product of the
+            // dimensions preceding the concatenation axis (not the axis index itself).
+            for (size_t dim = 0; dim < p.axis; dim++) {
+                cycles *= output->getTensorDesc().getDims()[dim];
+            }
+
+            size_t index1 = 0, index2 = 0, index = 0;
+            for (size_t cycle = 0; cycle < cycles; cycle++) {
+                for (size_t i1 = 0; i1 < len1; i1++) {
+                    if (src1_ptr[index1] != dst_ptr[index]) {
+                        FAIL() << "index: " << index << " src: " << src1_ptr[index1] << ", dst: " << dst_ptr[index];
+                    }
+                    index1++; index++;
+                }
+                for (size_t i2 = 0; i2 < len2; i2++) {
+                    if (src2_ptr[index2] != dst_ptr[index]) {
+                        FAIL() << "index: " << index << " src: " << src2_ptr[index2] << ", dst: " << dst_ptr[index];
+                    }
+                    index2++; index++;
+                }
+            }
+
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
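+// concat_base_params({in1, in2, out, axis}); e.g. case_3 below concatenates
+// {1, 7, 2, 5} with {1, 13, 2, 5} along the channel axis into {1, 20, 2, 5}.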
+#define case_1 concat_base_params({\
+       {1, 7, 2, 5},\
+       {1, 7, 2, 5},\
+       {2, 7, 2, 5},\
+       0})
+#define case_2 concat_base_params({\
+       {1, 7, 2, 5},\
+       {1, 7, 2, 5},\
+       {1, 7, 4, 5},\
+       2})
+#define case_3 concat_base_params({\
+       {1, 7, 2, 5},\
+       {1, 13, 2, 5},\
+       {1, 20, 2, 5},\
+       1})
+#define case_4 concat_base_params({\
+       {1, 7, 2, 13},\
+       {1, 7, 2, 17},\
+       {1, 7, 2, 30},\
+       3})
+#define case_5 concat_base_params({\
+       {1, 8, 8, 16},\
+       {1, 16, 8, 16},\
+       {1, 24, 8, 16},\
+       1})
+
+TEST_P(smoke_CPU_ConcatOnlyTest, TestsConcat) {
+}
+
+std::string getTestCaseName(testing::TestParamInfo<concat_test_params> obj) {
+    return obj.param.device_name +
+        "_out_w" + std::to_string(obj.param.out.w) +
+        "_out_h" + std::to_string(obj.param.out.h) +
+        "_out_c" + std::to_string(obj.param.out.c) +
+        "_out_n" + std::to_string(obj.param.out.n);
+}
+
+concat_test_params concat_only_test_cases[] = {
+        concat_test_params("CPU", case_1),
+        concat_test_params("CPU", case_2),
+        concat_test_params("CPU", case_3),
+        concat_test_params("CPU", case_4),
+        concat_test_params("CPU", case_5),
+};
+
+INSTANTIATE_TEST_CASE_P(TestConcat, smoke_CPU_ConcatOnlyTest, ::testing::ValuesIn(concat_only_test_cases), getTestCaseName);
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/conv_int8_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/conv_int8_tests.cpp
new file mode 100644 (file)
index 0000000..d5d8150
--- /dev/null
@@ -0,0 +1,373 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include "../common_single_layer_tests/conv_ref.hpp"
+#include <string>
+#include <algorithm>
+
+#include "common_test_utils/common_layers_params.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using std::vector;
+
+struct conv_base_params {
+    vector<size_t> in_dims;
+    vector<size_t> kernel;
+    vector<size_t> strides;
+    vector<size_t> pads_begin;
+    vector<size_t> pads_end;
+    vector<size_t> dilations;
+
+    size_t out_c;
+    size_t grp_c;
+
+    vector<size_t> out_dims;
+};
+
+struct conv_test_params : conv_base_params {
+    std::string device_name;
+
+    conv_test_params(std::string name, conv_base_params params) :
+            conv_base_params(params), device_name(name) {}
+};
+
+template <typename data_t>
+static void fill_int_data_even(data_t *data, size_t size, bool is_signed) {
+    for (size_t i = 0 ; i < size; i++) {
+        data[i] = (i * 13 % 21 - 10 * is_signed) * 2;
+    }
+}
+
+template <typename data_t>
+static void fill_int_data(data_t *data, size_t size, bool is_signed) {
+    for (size_t i = 0 ; i < size; i++) {
+        data[i] = i * 13 % 21 - 10 * is_signed;
+    }
+}
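+// fill_int_data yields values in [-10, 10] for signed fills and [0, 20] otherwise;
+// the *_even variant doubles them so they remain integral after mkl-dnn's halving.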
+
+template <typename src_data_t>
+class smoke_ConvolutionInt8OnlyTest : public TestsCommon,
+                                  public WithParamInterface<conv_test_params> {
+
+    std::string model_t = (std::string)R"V0G0N(
+<net name="Convolution_Only" version="3" precision="FP32" batch="1">
+    <layers>
+        <layer name="in1" type="Input" precision="_IP_" id="0">
+            <output>
+                <port id="0">
+                    _INPUT_DIMS_
+                </port>
+            </output>
+        </layer>
+        <layer name="conv1" id="1" type="Convolution" precision="I8">
+            <convolution strides="_KS_"
+                         pads_begin="_PB_"  pads_end="_PE_"
+                         kernel="_K_"
+                         dilations="_DL_"
+                         output="_OC_"  group="_GC_"/>
+
+            <weights offset="0" size="_S1_" />
+            <biases offset="_S1_" size="_S2_" />
+
+            <input>
+                <port id="1">
+                    _INPUT_DIMS_
+                </port>
+            </input>
+            <output>
+                <port id="2">
+                    _OUTPUT_DIMS_
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
+    </edges>
+</net>
+)V0G0N";
+
+protected:
+
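+    // Standard convolution output size. For example, case_6 below gives
+    // (224 + 2*2 - 7) / 2 + 1 = 111 for both spatial dims, matching {111, 111}.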
+    size_t calculateOutDim(size_t in_dim, size_t kernel, size_t stride, size_t pad_begin) {
+        return (in_dim + 2lu * pad_begin - kernel) / stride + 1lu;
+    }
+
+    void createBlobs(const conv_test_params &p, typename TBlob<src_data_t>::Ptr &src, TBlob<float>::Ptr &dst, TBlob<float>::Ptr &dst_ref) {
+        auto in_size = p.in_dims.size();
+        auto out_size = p.out_dims.size();
+        SizeVector dims_dst = {
+                p.out_dims[out_size - 1] == 0 ?
+                calculateOutDim(p.in_dims[in_size - 1], p.kernel[X_AXIS], p.strides[X_AXIS], p.pads_begin[X_AXIS]) : p.out_dims[out_size - 1],
+                p.out_dims[out_size - 2] == 0 ?
+                calculateOutDim(p.in_dims[in_size - 2], p.kernel[Y_AXIS], p.strides[Y_AXIS], p.pads_begin[Y_AXIS]) : p.out_dims[out_size - 2],
+                p.out_c,
+                1lu};
+        SizeVector dims_src;
+        for (int i = in_size; i > 0; i--) {
+            dims_src.push_back(p.in_dims[i - 1]);
+        }
+
+        Layout layout = NCHW;
+        if (in_size == 5) {
+            layout = NCDHW;
+            dims_dst.insert(dims_dst.begin() + 2, p.out_dims.size() > 2 ?
+                                                  (p.out_dims[out_size - 3] == 0 ?
+                                                   calculateOutDim(p.in_dims[in_size - 3], p.kernel[Z_AXIS], p.strides[Z_AXIS], p.pads_begin[Z_AXIS]) : p.out_dims[out_size - 3]) : 1lu);
+        }
+
+        std::reverse(dims_src.begin(), dims_src.end());
+        std::reverse(dims_dst.begin(), dims_dst.end());
+
+        Precision src_precision = (typeid(src_data_t) == typeid(int8_t)) ? Precision::I8 : Precision::U8;
+        src = make_shared_blob<src_data_t>(TensorDesc({src_precision, dims_src, layout}));
+        src->allocate();
+
+        dst = make_shared_blob<float>(TensorDesc({Precision::FP32, dims_dst, layout}));
+        dst->allocate();
+
+        dst_ref = make_shared_blob<float>(TensorDesc({Precision::FP32, dims_dst, layout}));
+        dst_ref->allocate();
+    }
+
+    TBlob<uint8_t>::Ptr fillWeights(const conv_test_params &p) {
+        auto KZ = p.kernel.size() > Z_AXIS ? p.kernel[Z_AXIS] : 1lu;
+        TBlob<uint8_t> *weights_ptr = new TBlob<uint8_t>(TensorDesc({Precision::U8,
+                                                         {p.kernel[X_AXIS] * p.kernel[Y_AXIS] * KZ * p.out_c * p.in_dims[1] / p.grp_c * sizeof(uint8_t)
+                                                         + p.out_c * sizeof(int32_t)}, C}));
+        weights_ptr->allocate();
+        size_t bias_size = p.out_c;
+        size_t weights_size = (weights_ptr->size() - bias_size * sizeof(int32_t)) / sizeof(uint8_t);
+        int8_t *weights_data = (int8_t *) weights_ptr->buffer();
+        auto *bias_data = (int32_t *)(weights_data + weights_size);
+
+        if (typeid(src_data_t) == typeid(int8_t)) {
+            // mkl-dnn halves the weights internally for signed input, so fill them with even values to keep results exact
+            fill_int_data_even(weights_data, weights_size, true);
+        } else {
+            fill_int_data(weights_data, weights_size, true);
+        }
+        fill_int_data(bias_data, bias_size, true);
+
+        return TBlob<uint8_t>::Ptr(weights_ptr);
+    }
+
+    void calculateRef(const TBlob<uint8_t>::Ptr &weights, const conv_test_params &p, const typename TBlob<src_data_t>::Ptr &src,
+                      TBlob<float>::Ptr &dst_ref) {
+        const int8_t *weights_data = (const int8_t *) weights->buffer();
+        size_t bias_size = p.out_c;
+        size_t weights_size = (weights->size() - bias_size * sizeof(int32_t)) / sizeof(uint8_t);
+        auto *bias_data = (const int32_t *)(weights_data + weights_size);
+        CommonTestUtils::conv_common_params params;
+        for (int i = 0; i < p.kernel.size(); i++)
+            params.kernel.insert(i, p.kernel[i]);
+        for (int i = 0; i < p.strides.size(); i++)
+            params.stride.insert(i, p.strides[i]);
+        for (int i = 0; i < p.pads_begin.size(); i++)
+            params.pads_begin.insert(i, p.pads_begin[i]);
+        for (int i = 0; i < p.dilations.size(); i++)
+            params.dilation.insert(i, p.dilations[i]);
+        params.group = p.grp_c;
+        params.out_c = p.out_c;
+        ref_conv_common<>({ src }, *dst_ref.get(), weights_data, weights_size, bias_data, bias_size, params);
+    }
+
+    void SetUp() override {
+        try {
+            conv_test_params p = ::testing::WithParamInterface<conv_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            typename TBlob<src_data_t>::Ptr src;
+            TBlob<float>::Ptr dst, dst_ref;
+            createBlobs(p, src, dst, dst_ref);
+            auto *src_data = src->cbuffer().template as<src_data_t*>();
+            size_t src_size = src->size() / sizeof(src_data_t);
+            if (typeid(src_data_t) == typeid(int8_t)) {
+                fill_int_data(src_data, src_size, true);
+            } else {
+                fill_int_data(src_data, src_size, false);
+            }
+
+            auto weights = fillWeights(p);
+
+            Core ie;
+            CNNNetwork network = ie.ReadNetwork(model, weights);
+
+            BlobMap srcs;
+            srcs.insert(std::pair<std::string, Blob::Ptr>("in1", src));
+
+            OutputsDataMap out;
+            out = network.getOutputsInfo();
+            InferenceEngine::BlobMap outputBlobs;
+
+            std::pair<std::string, DataPtr> item = *out.begin();
+
+            outputBlobs[item.first] = dst;
+
+            ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.device_name);
+            InferRequest inferRequest = exeNetwork.CreateInferRequest();
+            inferRequest.SetInput(srcs);
+            inferRequest.SetOutput(outputBlobs);
+            inferRequest.Infer();
+
+            calculateRef(weights, p, src, dst_ref);
+            compare(*dst, *dst_ref);
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+
+    virtual std::string getModel(conv_test_params p) {
+        std::string model = model_t;
+
+        auto in_dims_size = p.in_dims.size();
+        std::string input_dims = "<dim>" + std::to_string(p.in_dims[0]) + "</dim>";
+        for (int i = 1; i < in_dims_size; i++) {
+            input_dims += "\n                    <dim>" + std::to_string(p.in_dims[i]) + "</dim>";
+        }
+        REPLACE_WITH_STR(model, "_INPUT_DIMS_", input_dims);
+
+        auto out_dims_size = p.out_dims.size();
+        std::string output_dims = "<dim>" + std::to_string(p.in_dims[0]) + "</dim>";
+        output_dims += "\n                    <dim>" + std::to_string(p.out_c) + "</dim>";
+        if (out_dims_size > 2) {
+            size_t od = (p.out_dims[out_dims_size - 3] == 0 ?
+                         calculateOutDim(p.in_dims[in_dims_size - 3], p.kernel[Z_AXIS], p.strides[Z_AXIS], p.pads_begin[Z_AXIS]) : p.out_dims[out_dims_size - 3]);
+            output_dims += "\n                    <dim>" + std::to_string(od) + "</dim>";
+        }
+        size_t oh = p.out_dims[out_dims_size - 2] == 0 ?
+                    calculateOutDim(p.in_dims[in_dims_size - 2], p.kernel[Y_AXIS], p.strides[Y_AXIS], p.pads_begin[Y_AXIS]) : p.out_dims[out_dims_size - 2];
+        output_dims += "\n                    <dim>" + std::to_string(oh) + "</dim>";
+        size_t ow = p.out_dims[out_dims_size - 1] == 0 ?
+                    calculateOutDim(p.in_dims[in_dims_size - 1], p.kernel[X_AXIS], p.strides[X_AXIS], p.pads_begin[X_AXIS]) : p.out_dims[out_dims_size - 1];
+        output_dims += "\n                    <dim>" + std::to_string(ow) + "</dim>";
+        REPLACE_WITH_STR(model, "_OUTPUT_DIMS_", output_dims);
+
+        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_K_", p.kernel);
+        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_KS_", p.strides);
+        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PB_", p.pads_begin);
+        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PE_", p.pads_begin);
+        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_DL_", p.dilations);
+
+        REPLACE_WITH_NUM(model, "_GC_", p.grp_c);
+        REPLACE_WITH_NUM(model, "_OC_", p.out_c);
+
+        std::string ip = (typeid(src_data_t) == typeid(int8_t)) ? "I8" : "U8";
+        REPLACE_WITH_STR(model, "_IP_", ip);
+
+        size_t KD = p.kernel.size() > Z_AXIS ? p.kernel[Z_AXIS] : 1lu;
+        size_t w_data_size = (p.kernel[X_AXIS] * p.kernel[Y_AXIS] * KD * p.out_c * p.in_dims[1] / p.grp_c) * sizeof(uint8_t);
+        size_t b_data_size = p.out_c;
+        REPLACE_WITH_NUM(model, "_S1_", w_data_size);
+        REPLACE_WITH_NUM(model, "_S2_", b_data_size);
+
+        return model;
+    }
+};
+
+// conv_base_params ({in_dims, kernel, strides, pads_begin, pads_end, dilations, out_c, grp_c, out_dims})
+// If out_dims are zero, they are calculated automatically.
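+// e.g. case_6: a 1x3x224x224 input with a 7x7 kernel, stride 2 and padding 2
+// yields 64 output channels of 111x111.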
+// 2D
+#define case_1  conv_base_params({{1, 9, 16, 32},  {1, 1}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, 17, 1, {0, 0}})
+#define case_2  conv_base_params({{1, 9, 32, 16},  {2, 4}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, 17, 1, {0, 0}})
+#define case_3  conv_base_params({{1, 9, 32, 16},  {2, 4}, {2, 1}, {0, 0}, {0, 0}, {1, 1}, 17, 1, {0, 0}})
+#define case_4  conv_base_params({{1, 3, 40, 40},  {3, 3}, {1, 2}, {0, 0}, {0, 0}, {1, 1}, 20, 1, {0, 0}})
+#define case_5  conv_base_params({{1, 9, 16, 32},  {7, 7}, {2, 2}, {3, 3}, {0, 0}, {1, 1}, 17, 1, {0, 0}})
+#define case_6  conv_base_params({{1, 3, 224, 224}, {7, 7}, {2, 2}, {2, 2}, {0, 0}, {1, 1}, 64, 1, {111, 111}})
+#define case_7  conv_base_params({{1, 16, 40, 40}, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, 16, 16, {0, 0}})
+#define case_8  conv_base_params({{1, 32, 16, 32}, {7, 7}, {2, 2}, {3, 3}, {0, 0}, {1, 1}, 32, 32, {0, 0}})
+#define case_9  conv_base_params({{1, 16, 40, 40}, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {9, 9}, 16, 16, {0, 0}})
+#define case_10 conv_base_params({{1, 32, 16, 32}, {7, 7}, {2, 2}, {3, 3}, {0, 0}, {3, 3}, 32, 32, {2, 10}})
+#define case_11 conv_base_params({{1, 4, 16, 32},  {7, 7}, {2, 2}, {3, 3}, {0, 0}, {2, 2}, 4, 4, {5, 13}})
+#define case_12 conv_base_params({{1, 3, 224, 224}, {10, 10}, {1, 1}, {4, 4}, {0, 0}, {1, 1}, 4, 1, {223, 223}})
+#define case_13 conv_base_params({{1, 32, 1, 15000}, {11, 1}, {1, 1}, {20, 0}, {0, 0}, {4, 1}, 32, 1, {1, 15000}})
+#define case_14 conv_base_params({{1, 16, 40, 40}, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, 16, 8, {0, 0}})
+#define case_15 conv_base_params({{1, 16, 40, 40}, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, 8, 2, {0, 0}})
+#define case_16 conv_base_params({{1, 3, 40, 40}, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, 9, 3, {0, 0}})
+// 3D
+#define case_3d_0 conv_base_params({{1, 3, 16, 32, 32},  {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, {1, 1, 1}, 17, 1, {0, 0, 0}})
+#define case_3d_1 conv_base_params({{1, 3, 16, 32, 32},  {3, 3, 3}, {2, 2, 1}, {0, 0, 0}, {0, 0, 0}, {1, 1, 1}, 64, 1, {0, 0, 0}})
+#define case_3d_2 conv_base_params({{1, 32, 8, 8, 8},  {3, 3, 3}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, {1, 1, 1}, 32, 32, {0, 0, 0}})
+#define case_3d_3 conv_base_params({{1, 32, 10, 10, 10},  {3, 3, 3}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, 32, 32, {0, 0, 0}})
+#define case_3d_4 conv_base_params({{1, 32, 8, 8, 8},  {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, {1, 1, 1}, 32, 32, {0, 0, 0}})
+#define case_3d_5 conv_base_params({{1, 32, 8, 8, 8},  {3, 3, 3}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, {1, 1, 1}, 16, 16, {0, 0, 0}})
+#define case_3d_6 conv_base_params({{1, 32, 10, 10, 10},  {3, 3, 3}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, 16, 8, {0, 0, 0}})
+#define case_3d_7 conv_base_params({{1, 4, 8, 8, 8},  {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, {1, 1, 1}, 16, 4, {0, 0, 0}})
+
+using smoke_conv_u8s32 = smoke_ConvolutionInt8OnlyTest<uint8_t>;
+using smoke_conv_s8s32 = smoke_ConvolutionInt8OnlyTest<int8_t>;
+
+TEST_P(smoke_conv_u8s32, TestsConvolution) {
+}
+
+TEST_P(smoke_conv_s8s32, TestsConvolution) {
+}
+
+std::string getTestCaseInt8Name(testing::TestParamInfo<conv_test_params> obj) {
+    auto in_dims_size = obj.param.in_dims.size();
+    return obj.param.device_name +
+           "_w" + std::to_string(obj.param.in_dims[in_dims_size - 1]) +
+           "_h" + std::to_string(obj.param.in_dims[in_dims_size - 2]) +
+           (obj.param.in_dims.size() > 4 ? "_d" + std::to_string(obj.param.in_dims[in_dims_size - 3]) : "") +
+           "_c" + std::to_string(obj.param.in_dims[1]) +
+           "_kw" + std::to_string(obj.param.kernel[X_AXIS]) +
+           "_kh" + std::to_string(obj.param.kernel[Y_AXIS]) +
+           (obj.param.kernel.size() > Z_AXIS ? "_kd" + std::to_string(obj.param.kernel[Z_AXIS]) : "") +
+           "_sw" + std::to_string(obj.param.strides[X_AXIS]) +
+           "_sh" + std::to_string(obj.param.strides[Y_AXIS]) +
+           (obj.param.strides.size() > Z_AXIS ? "_sd" + std::to_string(obj.param.strides[Z_AXIS]) : "") +
+           "_dilw" + std::to_string(obj.param.dilations[X_AXIS]) +
+           "_dilh" + std::to_string(obj.param.dilations[Y_AXIS]) +
+           (obj.param.dilations.size() > Z_AXIS ? "_dild" + std::to_string(obj.param.dilations[Z_AXIS]) : "") +
+           "_grpc" + std::to_string(obj.param.grp_c);
+}
+
+conv_test_params conv_only_int8_test_cases[] = {
+        conv_test_params("CPU", case_1),
+        conv_test_params("CPU", case_2),
+        conv_test_params("CPU", case_3),
+        conv_test_params("CPU", case_4),
+        conv_test_params("CPU", case_5),
+        conv_test_params("CPU", case_6),
+// TODO: these cases do not work on AVX-512
+//        conv_test_params("CPU", case_7),
+//        conv_test_params("CPU", case_8),
+//        conv_test_params("CPU", case_9),
+//        conv_test_params("CPU", case_10),
+//        conv_test_params("CPU", case_11),
+        conv_test_params("CPU", case_12),
+        conv_test_params("CPU", case_13),
+        conv_test_params("CPU", case_14),
+        conv_test_params("CPU", case_15),
+        conv_test_params("CPU", case_16),
+};
+
+conv_test_params conv_only_int8_3d_test_cases[] = {
+        conv_test_params("CPU", case_3d_0),
+        conv_test_params("CPU", case_3d_1),
+        conv_test_params("CPU", case_3d_2),
+        conv_test_params("CPU", case_3d_3),
+        conv_test_params("CPU", case_3d_4),
+        conv_test_params("CPU", case_3d_5),
+        conv_test_params("CPU", case_3d_6),
+        conv_test_params("CPU", case_3d_7),
+};
+
+INSTANTIATE_TEST_CASE_P(
+        TestConvolution, smoke_conv_u8s32, ::testing::ValuesIn(conv_only_int8_test_cases), getTestCaseInt8Name);
+
+INSTANTIATE_TEST_CASE_P(
+        TestConvolution_3d, smoke_conv_u8s32, ::testing::ValuesIn(conv_only_int8_3d_test_cases), getTestCaseInt8Name);
+
+INSTANTIATE_TEST_CASE_P(
+        TestConvolution, smoke_conv_s8s32, ::testing::ValuesIn(conv_only_int8_test_cases), getTestCaseInt8Name);
+
+INSTANTIATE_TEST_CASE_P(
+        TestConvolution_3d, smoke_conv_s8s32, ::testing::ValuesIn(conv_only_int8_3d_test_cases), getTestCaseInt8Name);
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/conv_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/conv_tests.cpp
new file mode 100644 (file)
index 0000000..00cf2da
--- /dev/null
@@ -0,0 +1,427 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include "../common_single_layer_tests/conv_ref.hpp"
+#include <single_layer_common.hpp>
+#include <string>
+#include "common_test_utils/common_layers_params.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using std::vector;
+
+struct conv_base_params {
+    vector<size_t> in_dims;
+    vector<size_t> kernel;
+    vector<size_t> strides;
+    vector<size_t> pads_begin;
+    vector<size_t> pads_end;
+    vector<size_t> dilations;
+
+    size_t out_c;
+    size_t grp_c;
+
+    vector<size_t> out_dims;
+};
+
+struct conv_test_params : conv_base_params {
+    std::string device_name;
+
+    conv_test_params(std::string name, conv_base_params params) :
+            conv_base_params(params), device_name(name) {}
+};
+
+class smoke_ConvolutionOnlyTest : public TestsCommon,
+                            public WithParamInterface<conv_test_params> {
+
+    std::string model_t_4D = R"V0G0N(
+<net name="Convolution_Only" version="3" precision="FP32" batch="1">
+    <layers>
+        <layer name="in1" type="Input" precision="FP32" id="0">
+            <output>
+                <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="conv1" id="1" type="Convolution" precision="FP32">
+            <convolution strides="_KS_"
+                         pads_begin="_PB_" pads_end="_PE_"
+                         kernel="_K_"
+                         dilations="_DL_"
+                         output="_OC_" group="_GC_"/>
+
+            <weights offset="0" size="_S1_" />
+            <biases offset="_S1_" size="_S2_" />
+
+            <input>
+                <port id="1">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="2">
+                    <dim>_IN_</dim>
+                    <dim>_OC_</dim>
+                    <dim>_OH_</dim>
+                    <dim>_OW_</dim>
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string model_t_5D = R"V0G0N(
+<net name="Convolution_Only" version="3" precision="FP32" batch="1">
+    <layers>
+        <layer name="in1" type="Input" precision="FP32" id="0">
+            <output>
+                <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_ID_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="conv1" id="1" type="Convolution" precision="FP32">
+            <convolution strides="_KS_"
+                         pads_begin="_PB_"  pads_end="_PE_"
+                         kernel="_K_"
+                         dilations="_DL_"
+                         output="_OC_"  group="_GC_"/>
+
+            <weights offset="0" size="_S1_" />
+            <biases offset="_S1_" size="_S2_" />
+
+            <input>
+                <port id="1">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_ID_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="2">
+                    <dim>_IN_</dim>
+                    <dim>_OC_</dim>
+                    <dim>_OD_</dim>
+                    <dim>_OH_</dim>
+                    <dim>_OW_</dim>
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
+    </edges>
+</net>
+)V0G0N";
+
+protected:
+
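+    // Output spatial size of a non-dilated convolution: out = (in + 2 * pad_begin - kernel) / stride + 1 (integer division).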
+    size_t calculateOutDim(size_t in_dim, size_t kernel, size_t stride, size_t pad_begin) {
+        return (in_dim + 2lu * pad_begin - kernel) / stride + 1lu;
+    }
+
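+    // Creates the src/dst/dst_ref blobs; dims are collected innermost-first (W, H, [D], C, N) and reversed when building each TensorDesc.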
+    void createBlobs(const conv_test_params &p, TBlob<float>::Ptr &src, TBlob<float>::Ptr &dst, TBlob<float>::Ptr &dst_ref) {
+        auto in_size = p.in_dims.size();
+        auto out_size = p.out_dims.size();
+        SizeVector dims_dst = {
+                p.out_dims[out_size - 1] == 0 ?
+                    calculateOutDim(p.in_dims[in_size - 1], p.kernel[X_AXIS], p.strides[X_AXIS], p.pads_begin[X_AXIS]) : p.out_dims[out_size - 1],
+                p.out_dims[out_size - 2] == 0 ?
+                    calculateOutDim(p.in_dims[in_size - 2], p.kernel[Y_AXIS], p.strides[Y_AXIS], p.pads_begin[Y_AXIS]) : p.out_dims[out_size - 2],
+                p.out_c,
+                1lu};
+        SizeVector dims_src;
+        for (int i = in_size; i > 0; i--) {
+            dims_src.push_back(p.in_dims[i - 1]);
+        }
+
+        Layout layout = NCHW;
+        if (in_size == 5) {
+            layout = NCDHW;
+            dims_dst.insert(dims_dst.begin() + 2, p.out_dims.size() > 2 ?
+                (p.out_dims[out_size - 3] == 0 ?
+                    calculateOutDim(p.in_dims[in_size - 3], p.kernel[Z_AXIS], p.strides[Z_AXIS], p.pads_begin[Z_AXIS]) : p.out_dims[out_size - 3]) : 1lu);
+        }
+
+        src = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), layout));
+        src->allocate();
+
+        dst = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), layout));
+        dst->allocate();
+
+        dst_ref = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), layout));
+        dst_ref->allocate();
+    }
+
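+    // Allocates a single U8 blob that stores the float kernel weights followed by out_c float biases.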
+    TBlob<uint8_t>::Ptr fillWeights(const conv_test_params &p) {
+        auto KZ = p.kernel.size() > Z_AXIS ? p.kernel[Z_AXIS] : 1lu;
+        TBlob<uint8_t> *weights_ptr = new TBlob<uint8_t>(TensorDesc(Precision::U8,
+                    {(p.kernel[X_AXIS] * p.kernel[Y_AXIS] * KZ * p.out_c * p.in_dims[1] / p.grp_c + p.out_c)
+                     * sizeof(float)}, C));
+        weights_ptr->allocate();
+        fill_data((float *) weights_ptr->buffer(), weights_ptr->size() / sizeof(float));
+        return TBlob<uint8_t>::Ptr(weights_ptr);
+    }
+
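+    // Computes the reference result with ref_conv_common; the bias data sits directly after the weights in the shared blob.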
+    void calculateRef(const TBlob<uint8_t>::Ptr &weights, const conv_test_params &p, const TBlob<float>::Ptr &src,
+                      TBlob<float>::Ptr &dst_ref) {
+        const float *weights_data = (const float *) weights->buffer();
+        size_t bias_size = p.out_c;
+        size_t weights_size = weights->size() / sizeof(float) - bias_size;
+        const float *bias_data = weights_data + weights_size;
+        CommonTestUtils::conv_common_params params;
+        for (size_t i = 0; i < p.kernel.size(); i++)
+            params.kernel.insert(i, p.kernel[i]);
+        for (size_t i = 0; i < p.strides.size(); i++)
+            params.stride.insert(i, p.strides[i]);
+        for (size_t i = 0; i < p.pads_begin.size(); i++)
+            params.pads_begin.insert(i, p.pads_begin[i]);
+        for (size_t i = 0; i < p.dilations.size(); i++)
+            params.dilation.insert(i, p.dilations[i]);
+        params.group = p.grp_c;
+        params.out_c = p.out_c;
+        ref_conv_common<>({ src }, *dst_ref.get(), weights_data, weights_size, bias_data, bias_size, params);
+    }
+
+    CNNNetwork getNetwork(const TBlob<uint8_t>::Ptr &weights, const conv_test_params &p) {
+        Core ie;
+        return ie.ReadNetwork(getModel(p), weights);
+    }
+
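+    // Loads the network on the CPU plugin and runs one inference with the pre-allocated src/dst blobs.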
+    virtual void
+    infer(CNNNetwork &network, const conv_test_params &p, TBlob<float>::Ptr &src, TBlob<float>::Ptr &dst) {
+        Blob::Ptr srcPtr = std::shared_ptr<Blob>(src);
+        Blob::Ptr dstPtr = std::shared_ptr<Blob>(dst);
+
+        Core ie;
+        ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU");
+        InferRequest inferRequest = exeNetwork.CreateInferRequest();
+        OutputsDataMap outInfo;
+        outInfo = network.getOutputsInfo();
+        ASSERT_EQ(outInfo.size(), 1);
+        ASSERT_NE(outInfo.begin()->second, nullptr);
+        inferRequest.SetBlob(network.getInputsInfo().begin()->first, srcPtr);
+        inferRequest.SetBlob(outInfo.begin()->first, dstPtr);
+        inferRequest.Infer();
+    }
+
+    void SetUp() override {
+        try {
+            conv_test_params p = ::testing::WithParamInterface<conv_test_params>::GetParam();
+            TBlob<float>::Ptr src, dst, dst_ref;
+            createBlobs(p, src, dst, dst_ref);
+            fill_data(src->data(), src->size());
+            auto weights = fillWeights(p);
+            calculateRef(weights, p, src, dst_ref);
+            CNNNetwork network = getNetwork(weights, p);
+            infer(network, p, src, dst);
+            compare(*dst, *dst_ref);
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+
+    virtual std::string getModel(conv_test_params p) {
+        std::string model;
+        auto in_dims_size = p.in_dims.size();
+        if (in_dims_size == 4) {
+            model = model_t_4D;
+        } else if (in_dims_size == 5) {
+            model = model_t_5D;
+        }
+
+        REPLACE_WITH_NUM(model, "_IW_", p.in_dims[in_dims_size - 1]);
+        REPLACE_WITH_NUM(model, "_IH_", p.in_dims[in_dims_size - 2]);
+        REPLACE_WITH_NUM(model, "_ID_", p.in_dims[in_dims_size - 3]);
+        REPLACE_WITH_NUM(model, "_IC_", p.in_dims[1]);
+        REPLACE_WITH_NUM(model, "_IN_", p.in_dims[0]);
+
+        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_K_", p.kernel);
+        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_KS_", p.strides);
+        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PB_", p.pads_begin);
+        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PE_", p.pads_end);
+        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_DL_", p.dilations);
+
+        auto out_dims_size = p.out_dims.size();
+        REPLACE_WITH_NUM(model, "_GC_", p.grp_c);
+        REPLACE_WITH_NUM(model, "_OC_", p.out_c);
+        REPLACE_WITH_NUM(model, "_OD_", out_dims_size > 2 ?
+                (p.out_dims[out_dims_size - 3] == 0 ?
+                    calculateOutDim(p.in_dims[in_dims_size - 3], p.kernel[Z_AXIS], p.strides[Z_AXIS], p.pads_begin[Z_AXIS]) : p.out_dims[out_dims_size - 3]) :
+                        1lu);
+        REPLACE_WITH_NUM(model, "_OH_", p.out_dims[out_dims_size - 2] == 0 ?
+                calculateOutDim(p.in_dims[in_dims_size - 2], p.kernel[Y_AXIS], p.strides[Y_AXIS], p.pads_begin[Y_AXIS]) : p.out_dims[out_dims_size - 2]);
+        REPLACE_WITH_NUM(model, "_OW_", p.out_dims[out_dims_size - 1] == 0 ?
+                calculateOutDim(p.in_dims[in_dims_size - 1], p.kernel[X_AXIS], p.strides[X_AXIS], p.pads_begin[X_AXIS]) : p.out_dims[out_dims_size - 1]);
+
+        size_t KD = p.kernel.size() > Z_AXIS ? p.kernel[Z_AXIS] : 1lu;
+        size_t w_data_size = (p.kernel[X_AXIS] * p.kernel[Y_AXIS] * KD * p.out_c * p.in_dims[1] / p.grp_c) * sizeof(float);
+        size_t b_data_size = p.out_c * sizeof(float);
+        REPLACE_WITH_NUM(model, "_S1_", w_data_size);
+        REPLACE_WITH_NUM(model, "_S2_", b_data_size);
+        return model;
+    }
+};
+
+class smoke_ConvolutionReshapeTest : public smoke_ConvolutionOnlyTest {
+protected:
+    void SetUp() override {
+        try {
+            conv_test_params p = ::testing::WithParamInterface<conv_test_params>::GetParam();
+            TBlob<float>::Ptr src, dst, dst_ref;
+            auto weights = fillWeights(p);
+            CNNNetwork network = getNetwork(weights, p);
+            infer(network, p, src, dst);
+            updatePaddings(network, p);
+            dst_ref = std::make_shared<TBlob<float>>(dst->getTensorDesc());
+            dst_ref->allocate();
+            calculateRef(weights, p, src, dst_ref);
+            compare(*dst, *dst_ref);
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+
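+    // With auto_pad="same_upper" the actual pads are derived from the layer; copy them back into the
+    // test params so the reference convolution uses the same values.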
+    void updatePaddings(const CNNNetwork &network, conv_test_params& p) {
+        auto found = std::find_if(network.begin(), network.end(), [](const CNNLayer::Ptr& layer) {
+            return layer->type == "Convolution";
+        });
+        ASSERT_NE(found, network.end());
+        auto convLayer = std::dynamic_pointer_cast<ConvolutionLayer>(*found);
+        auto allPad = getPaddings(*convLayer.get());
+        p.pads_begin[X_AXIS] = allPad.begin[X_AXIS];
+        p.pads_begin[Y_AXIS] = allPad.begin[Y_AXIS];
+        if (p.pads_begin.size() > Z_AXIS)
+            p.pads_begin[Z_AXIS] = allPad.begin[Z_AXIS];
+    }
+    void
+    infer(CNNNetwork &network, const conv_test_params &p, TBlob<float>::Ptr &src, TBlob<float>::Ptr &dst) override {
+        Core ie;
+        auto firstInputInfo = *network.getInputsInfo().begin();
+        std::string inputName = firstInputInfo.first;
+        auto firstOutputInfo = *network.getOutputsInfo().begin();
+        std::string outputName = firstOutputInfo.first;
+        auto inputShapes = network.getInputShapes();
+        IE_ASSERT(inputShapes.size() == 1);
+        inputShapes.begin()->second = p.in_dims;
+        ASSERT_NO_THROW(network.reshape(inputShapes));
+
+        ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.device_name);
+        InferRequest request = exeNetwork.CreateInferRequest();
+        Blob::Ptr src_b = request.GetBlob(inputName);
+
+        src = std::dynamic_pointer_cast<TBlob<float>>(src_b);
+        fill_data(src->data(), src->size());
+        request.Infer();
+        Blob::Ptr dst_b = request.GetBlob(outputName);
+        dst = std::dynamic_pointer_cast<TBlob<float>>(dst_b);
+    }
+
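+    // Switches the IR to auto_pad="same_upper" and replaces the explicit pads_begin values with zeros.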
+    std::string getModel(conv_test_params p) override {
+        std::string model = smoke_ConvolutionOnlyTest::getModel(p);
+        REPLACE_WITH_STR(model, "convolution", "convolution auto_pad=\"same_upper\"");
+        std::string pads_pattern = "pads_begin=\"";
+        for (int i = p.pads_begin.size(); i > 0; i--) {
+            pads_pattern += std::to_string(p.pads_begin[i - 1]) + ",";
+        }
+        std::string pads = "pads_begin=\"0,0\"";
+        if (p.pads_begin.size() == 3) {
+            pads = "pads_begin=\"0,0,0\"";
+        }
+        REPLACE_WITH_NUM_VECTOR(model, pads_pattern, pads);
+        return model;
+    }
+};
+
+#define case_1  conv_base_params({{1lu, 9lu, 16lu, 32lu},  {1lu, 1lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}})
+#define case_2  conv_base_params({{1lu, 9lu, 32lu, 16lu},  {2lu, 4lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}})
+#define case_3  conv_base_params({{1lu, 9lu, 32lu, 16lu},  {2lu, 4lu}, {2lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}})
+#define case_4  conv_base_params({{1lu, 3lu, 40lu, 40lu},  {3lu, 3lu}, {1lu, 2lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 20lu, 1lu, {0lu, 0lu}})
+#define case_5  conv_base_params({{1lu, 9lu, 16lu, 32lu},  {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}})
+#define case_6  conv_base_params({{1lu, 3lu, 224lu, 224lu}, {7lu, 7lu}, {2lu, 2lu}, {2lu, 2lu}, {0lu, 0lu}, {1lu, 1lu}, 64lu, 1lu, {112lu, 112lu}})
+#define case_7  conv_base_params({{1lu, 16lu, 40lu, 40lu}, {3lu, 3lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 16lu, 16lu, {0lu, 0lu}})
+#define case_8  conv_base_params({{1lu, 32lu, 16lu, 32lu}, {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {1lu, 1lu}, 32lu, 32lu, {0lu, 0lu}})
+#define case_9  conv_base_params({{1lu, 16lu, 40lu, 40lu}, {3lu, 3lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {9lu, 9lu}, 16lu, 16lu, {0lu, 0lu}})
+#define case_10 conv_base_params({{1lu, 32lu, 16lu, 32lu}, {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {9lu, 9lu}, 32lu, 32lu, {0lu, 0lu}})
+#define case_11 conv_base_params({{1lu, 4lu, 16lu, 32lu},  {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {9lu, 9lu}, 4lu, 4lu, {0lu, 0lu}})
+#define case_12 conv_base_params({{1lu, 3lu, 224lu, 224lu}, {10lu, 10lu}, {1lu, 1lu}, {4lu, 4lu}, {0lu, 0lu}, {1lu, 1lu}, 4lu, 1lu, {224lu, 224lu}})
+#define case_13 conv_base_params({{1lu, 32lu, 1lu, 15000lu}, {11lu, 1lu}, {1lu, 1lu}, {5lu, 0lu}, {0lu, 0lu}, {4lu, 1lu}, 32lu, 1lu, {15000lu, 1lu}})
+
+
+#define case_14  conv_base_params({{1lu, 3lu, 16lu, 32lu, 32lu},  {1lu, 1lu, 1lu}, {1lu, 1lu, 1lu}, {0lu, 0lu, 0lu}, {0lu, 0lu, 0lu}, {1lu, 1lu, 1lu}, 17lu, 1lu, {0lu, 0lu, 0lu}})
+#define case_15  conv_base_params({{1lu, 3lu, 16lu, 32lu, 32lu},  {3lu, 3lu, 3lu}, {2lu, 2lu, 1lu}, {0lu, 0lu, 0lu}, {0lu, 0lu, 0lu}, {1lu, 1lu, 1lu}, 64lu, 1lu, {0lu, 0lu, 0lu}})
+
+// NOTE: auto_pad is always same_upper here; the IR is generated with zero pads, and the pads taken from the params are used for ref_conv after the reshape
+#define case_si_1 conv_base_params({{1lu, 144lu, 75lu, 75lu}, {3lu, 3lu}, {2lu, 2lu}, {1lu, 1lu}, {0lu, 0lu}, {1lu, 1lu}, 144lu, 144lu, {1lu, 1lu}})
+
+TEST_P(smoke_ConvolutionReshapeTest, TestsReshapeConvolution) {
+}
+
+std::string getTestCaseName(testing::TestParamInfo<conv_test_params> obj) {
+    auto in_dims_size = obj.param.in_dims.size();
+    return obj.param.device_name +
+        "_w" + std::to_string(obj.param.in_dims[in_dims_size - 1]) +
+        "_h" + std::to_string(obj.param.in_dims[in_dims_size - 2]) +
+        (obj.param.in_dims.size() > 4 ? "_d" + std::to_string(obj.param.in_dims[in_dims_size - 3]) : "") +
+        "_c" + std::to_string(obj.param.in_dims[1]) +
+        "_kw" + std::to_string(obj.param.kernel[X_AXIS]) +
+        "_kh" + std::to_string(obj.param.kernel[Y_AXIS]) +
+        (obj.param.kernel.size() > Z_AXIS ? "_kd" + std::to_string(obj.param.kernel[Z_AXIS]) : "") +
+        "_sw" + std::to_string(obj.param.strides[X_AXIS]) +
+        "_sh" + std::to_string(obj.param.strides[Y_AXIS]) +
+        (obj.param.strides.size() > Z_AXIS ? "_sd" + std::to_string(obj.param.strides[Z_AXIS]) : "") +
+        "_dilw" + std::to_string(obj.param.dilations[X_AXIS]) +
+        "_dilh" + std::to_string(obj.param.dilations[Y_AXIS]) +
+        (obj.param.dilations.size() > Z_AXIS ? "_dild" + std::to_string(obj.param.dilations[Z_AXIS]) : "") +
+        "_grpc" + std::to_string(obj.param.grp_c);
+}
+
+conv_test_params conv_only_test_cases[] = {
+        conv_test_params("CPU", case_1),
+        conv_test_params("CPU", case_2),
+        conv_test_params("CPU", case_3),
+        conv_test_params("CPU", case_4),
+        conv_test_params("CPU", case_5),
+        conv_test_params("CPU", case_6),
+        conv_test_params("CPU", case_7),
+        conv_test_params("CPU", case_8),
+        conv_test_params("CPU", case_9),
+        conv_test_params("CPU", case_10),
+        conv_test_params("CPU", case_11),
+        conv_test_params("CPU", case_12),
+        conv_test_params("CPU", case_13),
+        conv_test_params("CPU", case_14),
+        conv_test_params("CPU", case_15)
+};
+
+INSTANTIATE_TEST_CASE_P(
+        TestConvolution, smoke_ConvolutionOnlyTest, ::testing::ValuesIn(conv_only_test_cases), getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(
+        TestSameUpperConvolution, smoke_ConvolutionReshapeTest,
+        ::testing::Values(conv_test_params("CPU", case_si_1)),
+        getTestCaseName);
+
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/conv_tests_int8.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/conv_tests_int8.cpp
new file mode 100644 (file)
index 0000000..b4c53ae
--- /dev/null
@@ -0,0 +1,479 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+
+#include <cnn_network_stats_impl.hpp>
+
+#include <string>
+
+#include "network_stats.h"
+#include <format_reader/format_reader_ptr.h>
+#include "common_test_utils/data_utils.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
+struct conv_base_params {
+    struct {
+        size_t w;
+        size_t h;
+        size_t c;
+    } in;
+
+    size_t krn_w;
+    size_t krn_h;
+    size_t str_w;
+    size_t str_h;
+    size_t pad_w;
+    size_t pad_h;
+    size_t dil_w;
+    size_t dil_h;
+
+    size_t out_c;
+    size_t grp_c;
+
+    struct {
+        size_t w;
+        size_t h;
+    } out;
+};
+
+struct conv_test_int8_params : conv_base_params {
+    std::string device_name;
+
+    conv_test_int8_params(std::string name, conv_base_params params) :
+            conv_base_params(params), device_name(name) {}
+};
+
+template <typename data_t>
+void ref_conv_relu(const TBlob<data_t> &src, const data_t *weights, const size_t weightsSize,
+                   TBlob<data_t> &dst, conv_test_int8_params prm) {
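+    // Reference grouped convolution with a fused ReLU.
+    // Weights are laid out as [G][OC/G][IC/G][KH][KW] with the OC bias values appended in the same buffer;
+    // note dil_* is the extra gap between kernel taps, so the effective step is (1 + dil).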
+    size_t KW = prm.krn_w;
+    size_t KH = prm.krn_h;
+    size_t GC = prm.grp_c;
+
+    size_t IW = src.getTensorDesc().getDims()[3];
+    size_t IH = src.getTensorDesc().getDims()[2];
+    size_t IC = src.getTensorDesc().getDims()[1];
+
+    size_t OW = prm.out.w == 0 ? (IW + 2 * prm.pad_w - prm.krn_w) / prm.str_w + 1 : prm.out.w;
+    size_t OH = prm.out.h == 0 ? (IH + 2 * prm.pad_h - prm.krn_h) / prm.str_h + 1 : prm.out.h;
+    size_t OC = prm.out_c;
+
+    const data_t *src_data = src.readOnly();
+    const data_t *weights_data = weights;
+    const data_t *bias_data = weights_data + KW * KH * OC * IC / GC;
+    data_t *dst_data = dst.data();
+
+    IE_ASSERT(KW * KH * OC * IC / GC + OC == weightsSize);
+    IE_ASSERT(OW == dst.getTensorDesc().getDims()[3]);
+    IE_ASSERT(OH == dst.getTensorDesc().getDims()[2]);
+
+    for (uint32_t g = 0; g < GC; g++) {
+        for (uint32_t oc = 0; oc < OC / GC; oc++) {
+            for (uint32_t oh = 0; oh < OH; oh++) {
+                for (uint32_t ow = 0; ow < OW; ow++) {
+                    size_t oidx = g * OC / GC * OH * OW
+                                  + oc * OH * OW + oh * OW + ow;
+                    dst_data[oidx] = bias_data[g * OC / GC + oc];
+
+                    for (size_t ic = 0; ic < IC / GC; ic++) {
+                        for (size_t kh = 0; kh < KH; kh++) {
+                            for (size_t kw = 0; kw < KW; kw++) {
+                                int32_t iw = ow * prm.str_w - prm.pad_w + kw * (1 + prm.dil_w);
+                                int32_t ih = oh * prm.str_h - prm.pad_h + kh * (1 + prm.dil_h);
+                                if (iw < 0 || iw >= (int32_t)IW || ih < 0
+                                    || ih >= (int32_t)IH)
+                                    continue;
+                                size_t iidx = g * IC / GC * IH * IW
+                                              + ic * IH * IW + ih * IW + iw;
+                                size_t widx = g * OC / GC * IC / GC * KH * KW
+                                              + oc * IC / GC * KH * KW
+                                              + ic * KH * KW + kh * KW + kw;
+
+                                dst_data[oidx] += src_data[iidx] * weights_data[widx];
+                            }
+                        }
+                    }
+
+                    // Applying ReLU
+                    if (dst_data[oidx] < 0) dst_data[oidx] = 0;
+
+                }
+            }
+        }
+    }
+}
+
+class smoke_ConvolutionInt8Test: public TestsCommon,
+                                 public WithParamInterface<conv_test_int8_params> {
+
+    std::string model_t = R"V0G0N(
+<Net Name="Convolution_Only" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="data" type="Input" precision="FP32" id="0">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="conv1" id="1" type="Convolution" precision="FP32">
+            <convolution stride-x="_SW_" stride-y="_SH_"
+                         pad-x="_PW_"    pad-y="_PH_"
+                         kernel-x="_KW_" kernel-y="_KH_"
+                         output="_OC_"   group="_GC_"/>
+
+            <weights offset="0" size="_S1_" />
+            <biases offset="_S1_" size="_S2_" />
+
+            <input>
+                <port id="1">
+                    <dim>1</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="2">
+                    <dim>1</dim>
+                    <dim>_OC_</dim>
+                    <dim>_OH_</dim>
+                    <dim>_OW_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer id="2" name="conv1_relu" type="ReLU" precision="FP32">
+            <input>
+                <port id="3">
+                    <dim>1</dim>
+                    <dim>_OC_</dim>
+                    <dim>_OH_</dim>
+                    <dim>_OW_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="4">
+                    <dim>1</dim>
+                    <dim>_OC_</dim>
+                    <dim>_OH_</dim>
+                    <dim>_OW_</dim>
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="1" />
+        <edge from-layer="1" from-port="2" to-layer="2" to-port="3" />
+    </edges>
+</Net>
+)V0G0N";
+
+    std::string getModel(conv_test_int8_params p) {
+        std::string model = model_t;
+
+        REPLACE_WITH_NUM(model, "_IW_", p.in.w);
+        REPLACE_WITH_NUM(model, "_IH_", p.in.h);
+        REPLACE_WITH_NUM(model, "_IC_", p.in.c);
+
+        REPLACE_WITH_NUM(model, "_KW_", p.krn_w);
+        REPLACE_WITH_NUM(model, "_KH_", p.krn_h);
+        REPLACE_WITH_NUM(model, "_SW_", p.str_w);
+        REPLACE_WITH_NUM(model, "_SH_", p.str_h);
+        REPLACE_WITH_NUM(model, "_PW_", p.pad_w);
+        REPLACE_WITH_NUM(model, "_PH_", p.pad_h);
+
+        REPLACE_WITH_NUM(model, "_GC_", p.grp_c);
+        REPLACE_WITH_NUM(model, "_OC_", p.out_c);
+        REPLACE_WITH_NUM(model, "_OH_", p.out.h == 0 ? (p.in.h + 2 * p.pad_h - p.krn_h) / p.str_h + 1 : p.out.h);
+        REPLACE_WITH_NUM(model, "_OW_", p.out.w == 0 ? (p.in.w + 2 * p.pad_w - p.krn_w) / p.str_w + 1 : p.out.w);
+
+        size_t w_data_size = (p.krn_w * p.krn_h * p.out_c * p.in.c / p.grp_c) * sizeof(float);
+        size_t b_data_size = p.out_c * sizeof(float);
+        REPLACE_WITH_NUM(model, "_S1_", w_data_size);
+        REPLACE_WITH_NUM(model, "_S2_", b_data_size);
+        return model;
+    }
+
+protected:
+    const char* DEFAULT_PATH_P = "./lib";
+
+    std::map<std::string, NetworkNodeStatsPtr> collectStatistics(const void *model, size_t size, const InferenceEngine::TBlob<uint8_t>::Ptr &weights, const std::vector<std::string> outputNodes, const std::vector<std::string> images) {
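+        // Runs the FP32 network over the given images with NetworkStatsCollector and returns the
+        // per-node activation statistics that are later fed to setNodesStats for int8 execution.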
+        InferenceEngine::Core ie;
+
+        std::shared_ptr<NetworkStatsCollector> netStats = std::shared_ptr<NetworkStatsCollector>(new NetworkStatsCollector(ie, "CPU"));
+
+        size_t batchSize = images.size();
+
+        std::cout << "Batch size: " << batchSize << std::endl;
+
+        std::map<std::string, NetworkNodeStatsPtr> netNodesStats;
+
+        netStats->ReadNetworkAndSetWeights(model, size, weights, batchSize);
+
+        std::cout << "Inferencing and collecting statistics..." << std::endl;
+        netStats->InferAndCollectStats(images, netNodesStats);
+
+        return netNodesStats;
+    }
+
+    static void compare_NRMSD(InferenceEngine::Blob &res, InferenceEngine::Blob &ref, float max_nrmsd = 0.01f) {
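+        // Normalized RMSD: sqrt(mean((ref - res)^2)) / (max(ref) - min(ref)); the test fails if it exceeds max_nrmsd.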
+        float *res_ptr = res.buffer().as<float*>();
+        size_t res_size = res.size();
+
+        float *ref_ptr = ref.buffer().as<float*>();
+        size_t ref_size = ref.size();
+
+        ASSERT_EQ(res_size, ref_size);
+
+        float sum = 0;
+
+        float mmin = ref_ptr[0], mmax = ref_ptr[0];
+
+        for (size_t i = 0; i < ref_size; i++) {
+            float sqr = (ref_ptr[i] - res_ptr[i]);
+            sqr *= sqr;
+            sum += sqr;
+
+            mmin = (std::min)(mmin, ref_ptr[i]);
+            mmax = (std::max)(mmax, ref_ptr[i]);
+
+            if (i % 10007 == 0) {
+                std::cout << i << ": " << res_ptr[i] << "\t" << ref_ptr[i] << "\t" << "\tdiv: " << ref_ptr[i] / res_ptr[i] << std::endl;
+            }
+
+        }
+        sum /= ref_size;
+
+        sum = pow(sum, 0.5f);
+
+        sum /= mmax - mmin;
+
+        ASSERT_LE(sum, max_nrmsd);
+    }
+
+    virtual void SetUp() {
+        try {
+            conv_test_int8_params p = ::testing::WithParamInterface<conv_test_int8_params>::GetParam();
+            std::string model = getModel(p);
+
+            TBlob<uint8_t> *weights = new TBlob<uint8_t>(TensorDesc(Precision::U8, {(p.krn_w * p.krn_h * p.out_c * p.in.c / p.grp_c + p.out_c)
+                                                                                    * sizeof(float)}, C));
+            weights->allocate();
+
+            //fill_data_sine((float *) weights->buffer(), weights->size() / sizeof(float), 0.00, 0.005, 0.1);
+            CommonTestUtils::fill_data_sine((float *) weights->buffer(), weights->size() / sizeof(float), 1, 4, 0.3);
+            //fill_data_dbgval((float *) weights->buffer(), weights->size() / sizeof(float));
+            //size_t bias_start = p.krn_w * p.krn_h * p.out_c * p.in.c / p.grp_c;
+            //fill_data_const((float *) weights->buffer() + bias_start, p.out_c, 0.00);
+
+            // Set biases to 0
+            /*for (int i = weights->size() / sizeof(float) - C - 1; i < weights->size() / sizeof(float); i++) {
+                ((float *) weights->buffer())[i] = 0;
+            }*/
+
+
+            TBlob<uint8_t>::Ptr weights_ptr = TBlob<uint8_t>::Ptr(weights);
+
+            // Collecting statistics
+
+            // TODO Load nodes stats from file
+            std::string imageFilename = TestDataHelpers::get_data_path() + "/validation_set/224x224/dog.bmp";
+            std::cout << "Using image file: " << imageFilename << std::endl;
+            std::map<std::string, NetworkNodeStatsPtr> netNodesStats = collectStatistics(model.data(), model.length(), weights_ptr, { "conv1" }, { imageFilename });
+
+            Core ie;
+            auto network = ie.ReadNetwork(model, weights_ptr);
+
+            SizeVector dims_dst = {p.out.w == 0 ? (p.in.w + 2 * p.pad_w - p.krn_w) / p.str_w + 1 : p.out.w,
+                                   p.out.h == 0 ? (p.in.h + 2 * p.pad_h - p.krn_h) / p.str_h + 1 : p.out.h,
+                                   p.out_c,
+                                   1};
+            Blob::Ptr dst = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), NCHW));
+            dst->allocate();
+
+            // Setting the statistics data
+
+            CNNNetwork myNetwork = ie.ReadNetwork(model, weights_ptr);
+
+            ICNNNetworkStats* pstats;
+            ((ICNNNetwork&)myNetwork).getStats(&pstats, nullptr);
+            pstats->setNodesStats(netNodesStats);
+
+            SizeVector dims_src = {p.in.w,
+                                   p.in.h,
+                                   p.in.c,
+                                   1};          // 1 is a batch size
+            Blob::Ptr src = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW));
+            src->allocate();
+            fill_data(src->buffer().as<float *>(), src->size());
+
+
+            std::vector<std::string> imageNames = { imageFilename };
+
+            /** Taking information about all topology inputs **/
+            InputsDataMap inputInfo(myNetwork.getInputsInfo());
+
+            if (inputInfo.size() != 1) throw std::logic_error("Test supports topologies only with 1 input");
+            auto inputInfoItem = *inputInfo.begin();
+
+            /** Specifying the precision of input data provided by the user.
+             * This should be called before load of the network to the plugin **/
+            inputInfoItem.second->setPrecision(Precision::FP32);
+            inputInfoItem.second->setLayout(Layout::NCHW);
+
+
+            std::vector<std::shared_ptr<unsigned char>> imagesData;
+            for (auto & i : imageNames) {
+                FormatReader::ReaderPtr reader(i.c_str());
+                if (reader.get() == nullptr) {
+                    std::cout << "Image " + i + " cannot be read!" << std::endl;
+                    continue;
+                }
+                /** Store image data **/
+                SizeVector dims = inputInfoItem.second->getTensorDesc().getDims();
+                std::shared_ptr<unsigned char> data(reader->getData(dims.back(), dims.at(dims.size() - 2)));
+                if (data.get() != nullptr) {
+                    imagesData.push_back(data);
+                }
+            }
+            if (imagesData.empty()) throw std::logic_error("Valid input images were not found!");
+
+            OutputsDataMap outputInfo(myNetwork.getOutputsInfo());
+            for (auto itOut : outputInfo) {
+                itOut.second->setPrecision(Precision::FP32);
+            }
+
+            /** Filling the input tensor with image data: first the B channel, then G and R **/
+            size_t num_channels = src->getTensorDesc().getDims()[1];
+            size_t image_size = src->getTensorDesc().getDims()[2] * src->getTensorDesc().getDims()[3];
+
+            float* data = src->buffer().as<PrecisionTrait<Precision::FP32>::value_type*>();
+
+            /** Iterate over all input images **/
+            for (size_t image_id = 0; image_id < imagesData.size(); ++image_id) {
+                /** Iterate over all pixels in the image (b, g, r) **/
+                for (size_t pid = 0; pid < image_size; pid++) {
+                    /** Iterate over all channels **/
+                    for (size_t ch = 0; ch < num_channels; ++ch) {
+                        /** [image stride + channel stride + pixel offset], in elements **/
+                        data[image_id * image_size * num_channels + ch * image_size + pid] = (float)(imagesData.at(image_id).get()[pid * num_channels + ch]);
+                    }
+                }
+            }
+
+            // Inferring the converted network and comparing the result with the reference
+            ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.device_name);
+            InferRequest inferRequest = exeNetwork.CreateInferRequest();
+            OutputsDataMap outInfo;
+            outInfo = network.getOutputsInfo();
+            ASSERT_EQ(outInfo.size(), 1);
+            ASSERT_NE(outInfo.begin()->second, nullptr);
+            inferRequest.SetBlob(network.getInputsInfo().begin()->first, src);
+            inferRequest.SetBlob(outInfo.begin()->first, dst);
+
+            std::cout << "Inferring int8" << std::endl;
+            inferRequest.Infer();
+
+            // Calculating FP32 reference
+            TBlob<float> dst_ref(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), NCHW));
+            dst_ref.allocate();
+            auto * srcPtr = dynamic_cast<TBlob<float>*>(src.get());
+            ref_conv_relu<float>(*srcPtr, (const float *)weights->buffer(), weights->size() / sizeof(float), dst_ref, p);
+
+            // Comparing the result with the reference
+            compare_NRMSD(*dst, dst_ref, 0.17);
+        } catch (const details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+/*
+    struct {
+        size_t w;
+        size_t h;
+        size_t c;
+    } in;
+
+    size_t krn_w;
+    size_t krn_h;
+    size_t str_w;
+    size_t str_h;
+    size_t pad_w;
+    size_t pad_h;
+    size_t dil_w;
+    size_t dil_h;
+
+    size_t out_c;
+    size_t grp_c;
+
+    struct {
+        size_t w;
+        size_t h;
+    } out;
+*/
+// Output width: Wo = (Wi - F + 2*P) / S + 1 (integer division; the same formula gives the height).
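+// e.g. case_5 below: Wo = (32 - 7 + 2*3) / 2 + 1 = 16.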
+
+#define case_1 conv_base_params({{4, 4, 3}, 1, 1, 1, 1, 0, 0, 0, 0, 3, 1})
+#define case_2 conv_base_params({{16, 32, 3}, 2, 4, 1, 1, 0, 0, 0, 0, 17, 1})
+#define case_3 conv_base_params({{16, 32, 3}, 2, 4, 2, 1, 0, 0, 0, 0, 17, 1})
+#define case_4 conv_base_params({{40, 40, 3}, 3, 3, 1, 2, 0, 0, 0, 0, 20, 1})
+#define case_5 conv_base_params({{32, 16, 3}, 7, 7, 2, 2, 3, 3, 0, 0, 17, 1})
+#define case_6 conv_base_params({{224, 224, 3}, 7, 7, 2, 2, 2, 2, 0, 0, 64, 1, {112, 112}})
+/*#define case_7 conv_base_params({{40, 40, 16}, 3, 3, 1, 1, 0, 0, 0, 0, 16, 16})
+#define case_8 conv_base_params({{32, 16, 32}, 7, 7, 2, 2, 3, 3, 0, 0, 32, 32})*/
+
+// These tests use dilated convolution and don't work yet
+/*#define case_9 conv_base_params({{40, 40, 16}, 3, 3, 1, 1, 0, 0, 8, 8, 16, 16})
+#define case_10 conv_base_params({{32, 16, 32}, 7, 7, 2, 2, 3, 3, 8, 8, 32, 32})
+#define case_11 conv_base_params({{32, 16, 4}, 7, 7, 2, 2, 3, 3, 8, 8, 4, 4})*/
+
+TEST_P(smoke_ConvolutionInt8Test, TestsConvolution) {
+}
+
+std::string getTestCaseName(testing::TestParamInfo<conv_test_int8_params> obj) {
+    return obj.param.device_name +
+        "_w" + std::to_string(obj.param.in.w) +
+        "_h" + std::to_string(obj.param.in.h) +
+        "_c" + std::to_string(obj.param.in.c) +
+        "_krnw" + std::to_string(obj.param.krn_w) +
+        "_krnh" + std::to_string(obj.param.krn_h) +
+        "_strw" + std::to_string(obj.param.str_w) +
+        "_strh" + std::to_string(obj.param.str_h) +
+        "_dilw" + std::to_string(obj.param.dil_w) +
+        "_dilh" + std::to_string(obj.param.dil_h) +
+        "_grpc" + std::to_string(obj.param.grp_c);
+}
+
+conv_test_int8_params conv_int8_test_cases[] = {
+    conv_test_int8_params("CPU", case_1),
+    conv_test_int8_params("CPU", case_2),
+    conv_test_int8_params("CPU", case_3),
+    conv_test_int8_params("CPU", case_4),
+    conv_test_int8_params("CPU", case_5),
+    // conv_test_int8_params("CPU", case_6),
+    //conv_test_int8_params("CPU", case_7),
+    //conv_test_int8_params("CPU", case_8),
+    //conv_test_int8_params("CPU", case_9),
+    //conv_test_int8_params("CPU", case_10),
+    //conv_test_int8_params("CPU", case_11),
+};
+
+INSTANTIATE_TEST_CASE_P(
+        TestConvolution, smoke_ConvolutionInt8Test, ::testing::ValuesIn(conv_int8_test_cases), getTestCaseName);
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/crop_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/crop_tests.cpp
new file mode 100644 (file)
index 0000000..a845eaf
--- /dev/null
@@ -0,0 +1,247 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include "ir_gen_helper.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace single_layer_tests;
+
+struct crop_base_params {
+    std::vector<size_t> in_dims;
+    std::vector<size_t> out_dims;
+    std::vector<size_t> offsets;
+};
+
+#ifdef IN
+#undef IN
+#endif
+
+struct crop_test_params : crop_base_params {
+    std::string device_name;
+
+    crop_test_params(std::string name, crop_base_params params) :
+            crop_base_params(params), device_name(name) {}
+};
+
+template <typename data_t>
+void ref_crop(InferenceEngine::TBlob<data_t> &src, InferenceEngine::TBlob<data_t> &dst, crop_test_params prm) {
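+    // Copies the out_dims-sized window of src that starts at the per-axis offsets into dst
+    // (the depth axis is only meaningful for 5D inputs).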
+    data_t *dst_ptr = dst.data();
+
+    int ndims = prm.in_dims.size();
+
+    size_t OFFSET_N = prm.offsets.at(0);
+    size_t OFFSET_C = prm.offsets.at(1);
+    size_t OFFSET_D = ndims == 5 ? prm.offsets.at(ndims - 3) : 0;
+    size_t OFFSET_H = prm.offsets.at(ndims - 2);
+    size_t OFFSET_W = prm.offsets.at(ndims - 1);
+
+    size_t ON = prm.out_dims[0];
+    size_t OC = prm.out_dims[1];
+    size_t OD = ndims == 5 ? prm.out_dims[ndims - 3] : 1;
+    size_t OH = prm.out_dims[ndims - 2];
+    size_t OW = prm.out_dims[ndims - 1];
+
+    size_t IN = prm.in_dims[0];
+    size_t IC = prm.in_dims[1];
+    size_t ID = ndims == 5 ? prm.in_dims[ndims - 3] : 1;
+    size_t IH = prm.in_dims[ndims - 2];
+    size_t IW = prm.in_dims[ndims - 1];
+
+    auto dst_off = [=](size_t n, size_t c, size_t d, size_t h, size_t w) -> size_t {
+        return (n * OC * OD * OH * OW + c * OD * OH * OW + d * OH * OW + h * OW + w);
+    };
+    auto src_off = [=](size_t n, size_t c, size_t d, size_t h, size_t w) -> size_t {
+        return (n * IC * ID * IH * IW + c * ID * IH * IW + d * IH * IW + h * IW + w);
+    };
+
+    ASSERT_GE(IN - OFFSET_N, ON);
+    ASSERT_GE(IC - OFFSET_C, OC);
+    ASSERT_GE(ID - OFFSET_D, OD);
+    ASSERT_GE(IH - OFFSET_H, OH);
+    ASSERT_GE(IW - OFFSET_W, OW);
+
+    data_t* src_ptr = src.data();
+    for (size_t n = 0; n < ON; ++n) {
+        for (size_t c = 0; c < OC; ++c) {
+            for (size_t d = 0; d < OD; ++d) {
+                for (size_t h = 0; h < OH; ++h) {
+                    for (size_t w = 0; w < OW; ++w) {
+                        dst_ptr[dst_off(n, c, d, h, w)] = src_ptr[src_off(n + OFFSET_N, c + OFFSET_C, d + OFFSET_D,
+                                                                          h + OFFSET_H, w + OFFSET_W)];
+                    }
+                }
+            }
+        }
+    }
+}
+
+class smoke_CropOnlyTest: public TestsCommon,
+                           public WithParamInterface<crop_test_params> {
+    std::string layers_t = R"V0G0N(
+        <layer name="crop" id="1" type="Crop" precision="FP32">
+            <crop-data>
+                <crop axis="0" offset="_OF0_" dim="_OD0_" />
+                <crop axis="1" offset="_OF1_" dim="_OD1_" />
+                <crop axis="2" offset="_OF2_" dim="_OD2_" />
+                <crop axis="3" offset="_OF3_" dim="_OD3_" />
+                <crop axis="4" offset="_OF4_" dim="_OD4_" />
+            </crop-data>
+            <input>
+                <port id="0">
+                    <dim>_ID0_</dim>
+                    <dim>_ID1_</dim>
+                    <dim>_ID2_</dim>
+                    <dim>_ID3_</dim>
+                    <dim>_ID4_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="1">
+                    <dim>_OD0_</dim>
+                    <dim>_OD1_</dim>
+                    <dim>_OD2_</dim>
+                    <dim>_OD3_</dim>
+                    <dim>_OD4_</dim>
+                </port>
+            </output>
+        </layer>
+)V0G0N";
+    
+    std::string edges_t = R"V0G0N(
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+)V0G0N";
+
+    std::string getModel(crop_test_params p) {
+        std::string model = layers_t;
+
+        auto dims_size = p.in_dims.size();
+
+        if (dims_size == 4) {
+            REMOVE_LINE(model, "<crop axis=\"4\" offset=\"_OF4_\" dim=\"_OD4_\" />");
+            REMOVE_LINE(model, "<dim>_ID4_</dim>");
+            REMOVE_LINE(model, "<dim>_OD4_</dim>");
+        }
+
+        REPLACE_WITH_NUM(model, "_ID0_", p.in_dims[0]);
+        REPLACE_WITH_NUM(model, "_ID1_", p.in_dims[1]);
+        REPLACE_WITH_NUM(model, "_ID2_", p.in_dims[2]);
+        REPLACE_WITH_NUM(model, "_ID3_", p.in_dims[3]);
+        if (dims_size == 5)
+            REPLACE_WITH_NUM(model, "_ID4_", p.in_dims[4]);
+
+        REPLACE_WITH_NUM(model, "_OD0_", p.out_dims[0]);
+        REPLACE_WITH_NUM(model, "_OD1_", p.out_dims[1]);
+        REPLACE_WITH_NUM(model, "_OD2_", p.out_dims[2]);
+        REPLACE_WITH_NUM(model, "_OD3_", p.out_dims[3]);
+        if (dims_size == 5)
+            REPLACE_WITH_NUM(model, "_OD4_", p.out_dims[4]);
+
+        REPLACE_WITH_NUM(model, "_OF0_", p.offsets[0]);
+        REPLACE_WITH_NUM(model, "_OF1_", p.offsets[1]);
+        REPLACE_WITH_NUM(model, "_OF2_", p.offsets[2]);
+        REPLACE_WITH_NUM(model, "_OF3_", p.offsets[3]);
+        if (dims_size == 5)
+            REPLACE_WITH_NUM(model, "_OF4_", p.offsets[4]);
+
+        model = IRTemplateGenerator::getIRTemplate("Crop_Only", p.in_dims, "FP32", model, edges_t);
+
+        return model;
+    }
+
+protected:
+    virtual void SetUp() {
+        try {
+            crop_test_params p = ::testing::WithParamInterface<crop_test_params>::GetParam();
+            std::string model = getModel(p);
+            
+            Core ie;
+            CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr());
+
+            InferenceEngine::Layout layout = InferenceEngine::ANY;
+            switch (p.in_dims.size()) {
+                case 4: layout = InferenceEngine::NCHW; break;
+                case 5: layout = InferenceEngine::NCDHW; break;
+            }
+
+            InputsDataMap inputs = network.getInputsInfo();
+            DataPtr inPtr1 = inputs["in1"]->getInputData();
+
+            InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float>(inPtr1->getTensorDesc());
+            src->allocate();
+            fill_data(src->buffer(), src->size());
+
+            TBlob<float>* srcPtr = dynamic_cast<TBlob<float>*>(src.get());
+            BlobMap srcs;
+            srcs.insert(std::pair<std::string, Blob::Ptr>("in1", src));
+
+            OutputsDataMap out = network.getOutputsInfo();
+            BlobMap dstBlobs;
+            std::pair<std::string, DataPtr> item = *out.begin();
+            TBlob<float>::Ptr dst;
+            dst = make_shared_blob<float>(item.second->getTensorDesc());
+            dst->allocate();
+            dstBlobs[item.first] = dst;
+
+            TBlob<float>::Ptr dst_ref;
+            dst_ref = make_shared_blob<float>(item.second->getTensorDesc());
+            dst_ref->allocate();
+
+            ref_crop(*srcPtr, *dst_ref, p);
+
+            ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.device_name);
+            InferRequest inferRequest = exeNetwork.CreateInferRequest();
+            inferRequest.SetInput(srcs);
+            inferRequest.SetOutput(dstBlobs);
+            inferRequest.Infer();
+
+            compare(*dstBlobs.begin()->second, *dst_ref);
+
+        } catch (const details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+#define case_1 crop_base_params({{1, 5, 32, 32}, {1, 2, 23, 23}, {0, 2, 5, 4}})
+#define case_2 crop_base_params({{1, 5, 32, 32}, {1, 5, 5, 5}, {0, 0, 20, 20}})
+#define case_3 crop_base_params({{1, 5, 32, 32}, {1, 5, 32, 10}, {0, 0, 0, 20}})
+#define case_4 crop_base_params({{1, 5, 32, 20}, {1, 5, 30, 10}, {0, 0, 2, 10}})
+#define case_5 crop_base_params({{1, 5, 32, 20, 14}, {1, 5, 30, 10, 8}, {0, 0, 2, 10, 6}})
+#define case_6 crop_base_params({{5, 9, 32, 20, 14}, {2, 5, 30, 10, 8}, {3, 4, 2, 10, 6}})
+
+TEST_P(smoke_CropOnlyTest, TestsCrop) {}
+
+std::string getTestCaseName(testing::TestParamInfo<crop_test_params> obj) {
+    int ndims = obj.param.in_dims.size();
+
+    return obj.param.device_name +
+        "_in" + std::to_string(obj.param.in_dims[0]) +
+        "_ic" + std::to_string(obj.param.in_dims[1]) +
+        "_id" + std::to_string(ndims == 5 ? obj.param.in_dims[ndims - 3] : 1) +
+        "_ih" + std::to_string(obj.param.in_dims[ndims - 2]) +
+        "_iw" + std::to_string(obj.param.in_dims[ndims - 1]) +
+        "_on" + std::to_string(obj.param.out_dims[0]) +
+        "_oc" + std::to_string(obj.param.out_dims[1]) +
+        "_od" + std::to_string(ndims == 5 ? obj.param.out_dims[ndims - 3] : 1) +
+        "_oh" + std::to_string(obj.param.out_dims[ndims - 2]) +
+        "_ow" + std::to_string(obj.param.out_dims[ndims - 1]);
+}
+
+crop_test_params crop_only_test_cases[] = {
+               crop_test_params("CPU", case_1),
+               crop_test_params("CPU", case_2),
+               crop_test_params("CPU", case_3),
+               crop_test_params("CPU", case_4),
+               crop_test_params("CPU", case_5),
+               crop_test_params("CPU", case_6),
+};
+
+INSTANTIATE_TEST_CASE_P(
+        TestsCrop, smoke_CropOnlyTest, ::testing::ValuesIn(crop_only_test_cases), getTestCaseName);
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/detectionout_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/detectionout_tests.cpp
new file mode 100644 (file)
index 0000000..341bf4d
--- /dev/null
@@ -0,0 +1,189 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+
+struct detectionout_test_params {
+    std::string device_name;
+
+    size_t mb;
+
+    struct {
+        size_t c;
+    } in1;
+
+    struct {
+        size_t c;
+    } in2;
+
+    struct {
+        size_t c;
+        size_t h;
+        size_t w;
+    } in3;
+
+    struct {
+        size_t c;
+        size_t h;
+        size_t w;
+    } out;
+};
+
+class smoke_CPUDetectionOutOnlyTest: public TestsCommon,
+                             public WithParamInterface<detectionout_test_params> {
+
+    std::string model_t = R"V0G0N(
+<Net Name="PriorBox_Only" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="input1" type="Input" precision="FP32" id="1">
+            <output>
+                <port id="1">
+                    <dim>1</dim>
+                    <dim>_IC1_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="input2" type="Input" precision="FP32" id="2">
+            <output>
+                <port id="2">
+                    <dim>1</dim>
+                    <dim>_IC2_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="input3" type="Input" precision="FP32" id="3">
+            <output>
+                <port id="3">
+                    <dim>1</dim>
+                    <dim>_IC3_</dim>
+                    <dim>_IH3_</dim>
+                    <dim>_IW3_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="detection_out" type="DetectionOutput" precision="FP32" id="11">
+            <data num_classes="4" share_location="1" background_label_id="0" nms_threshold="0.400000" top_k="400"
+                  output_directory="" output_name_prefix="" output_format="" label_map_file=""
+                  name_size_file="" num_test_image="0" code_type="caffe.PriorBoxParameter.CENTER_SIZE"
+                  variance_encoded_in_target="0" keep_top_k="200" confidence_threshold="0.010000"
+                  visualize="0" visualize_threshold="0.000000" num_orient_classes="8"
+                  interpolate_orientation="1" clip="1" decrease_label_id="1" />
+            <input>
+                <port id="11">
+                    <dim>1</dim>
+                    <dim>_IC1_</dim>
+                </port>
+                <port id="12">
+                    <dim>1</dim>
+                    <dim>_IC2_</dim>
+                </port>
+                <port id="13">
+                    <dim>1</dim>
+                    <dim>_IC3_</dim>
+                    <dim>_IH3_</dim>
+                    <dim>_IW3_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="14">
+                    <dim>1</dim>
+                    <dim>_OC_</dim>
+                    <dim>_OH_</dim>
+                    <dim>_OW_</dim>
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="1" from-port="1" to-layer="11" to-port="11"/>
+        <edge from-layer="2" from-port="2" to-layer="11" to-port="12"/>
+        <edge from-layer="3" from-port="3" to-layer="11" to-port="13"/>
+    </edges>
+
+</Net>
+)V0G0N";
+
+    std::string getModel(detectionout_test_params p) {
+        std::string model = model_t;
+
+        REPLACE_WITH_NUM(model, "_IC1_", p.in1.c);
+        REPLACE_WITH_NUM(model, "_IC2_", p.in2.c);
+
+        REPLACE_WITH_NUM(model, "_IC3_", p.in3.c);
+        REPLACE_WITH_NUM(model, "_IH3_", p.in3.h);
+        REPLACE_WITH_NUM(model, "_IW3_", p.in3.w);
+
+        REPLACE_WITH_NUM(model, "_OC_", p.out.c);
+        REPLACE_WITH_NUM(model, "_OH_", p.out.h);
+        REPLACE_WITH_NUM(model, "_OW_", p.out.w);
+
+        return model;
+    }
+
+protected:
+    virtual void SetUp() {
+
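+        // Smoke test: build the three-input DetectionOutput network and check that inference runs;
+        // the output values themselves are not validated.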
+        try {
+            detectionout_test_params p = ::testing::WithParamInterface<detectionout_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr());
+            network.setBatchSize(p.mb);
+
+            InputsDataMap inputs = network.getInputsInfo();
+
+            DataPtr inputPtr1 = inputs["input1"]->getInputData();
+            DataPtr inputPtr2 = inputs["input2"]->getInputData();
+            DataPtr inputPtr3 = inputs["input3"]->getInputData();
+
+            InferenceEngine::Blob::Ptr input1 = InferenceEngine::make_shared_blob<float>(inputPtr1->getTensorDesc());
+            input1->allocate();
+
+            InferenceEngine::Blob::Ptr input2 = InferenceEngine::make_shared_blob<float>(inputPtr2->getTensorDesc());
+            input2->allocate();
+
+            InferenceEngine::Blob::Ptr input3 = InferenceEngine::make_shared_blob<float>(inputPtr3->getTensorDesc());
+            input3->allocate();
+
+            InferenceEngine::BlobMap inputBlobs;
+            inputBlobs["input1"] = input1;
+            inputBlobs["input2"] = input2;
+            inputBlobs["input3"] = input3;
+
+            OutputsDataMap outputs = network.getOutputsInfo();
+
+            InferenceEngine::TBlob<float>::Ptr output;
+            output = InferenceEngine::make_shared_blob<float>(outputs["detection_out"]->getTensorDesc());
+            output->allocate();
+
+            InferenceEngine::BlobMap outputBlobs;
+            outputBlobs["detection_out"] = output;
+
+            ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU");
+            InferRequest inferRequest = exeNetwork.CreateInferRequest();
+            inferRequest.SetInput(inputBlobs);
+            inferRequest.SetOutput(outputBlobs);
+            inferRequest.Infer();
+
+        } catch (const details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(smoke_CPUDetectionOutOnlyTest, TestsDetectionOut) {}
+
+INSTANTIATE_TEST_CASE_P(
+        TestsDetectionOut, smoke_CPUDetectionOutOnlyTest,
+        ::testing::Values(
+                detectionout_test_params{ "CPU",
+                    10, {147264}, {147264}, {2, 1, 147264}, {1, 200, 7} }));
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/fullycon_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/fullycon_tests.cpp
new file mode 100644 (file)
index 0000000..632559c
--- /dev/null
@@ -0,0 +1,185 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include "ir_gen_helper.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace single_layer_tests;
+
+struct fc_base_params {
+    struct {
+        size_t w;
+        size_t h;
+        size_t c;
+    } in;
+
+    size_t out_c;
+};
+
+struct fc_test_params : fc_base_params {
+    std::string device_name;
+
+    fc_test_params(std::string name, fc_base_params params) :
+            fc_base_params(params), device_name(name) {}
+};
+
+template <typename data_t>
+void ref_innerproduct(const TBlob<data_t> &src, const data_t *weights, const size_t weightsSize,
+                      TBlob<data_t> &dst, fc_test_params prm)
+{
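+    // Reference fully connected layer: dst[oc] = bias[oc] + sum over (ic, h, w) of src * weights[oc][ic][h][w];
+    // the OC bias values follow the weights in the same buffer.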
+    size_t IW = src.getTensorDesc().getDims()[3];
+    size_t IH = src.getTensorDesc().getDims()[2];
+    size_t IC = src.getTensorDesc().getDims()[1];
+
+    size_t OC = prm.out_c;
+
+    const data_t *src_data = src.readOnly();
+    const data_t *weights_data = weights;
+    const data_t *bias_data = weights_data + IW * IH * IC * OC;
+    data_t *dst_data = dst.data();
+
+    IE_ASSERT(IW * IH * IC * OC + OC == weightsSize);
+    IE_ASSERT(OC == dst.getTensorDesc().getDims()[1]);
+
+    for (size_t oc = 0; oc < OC; oc++) {
+        dst_data[oc] = bias_data[oc];
+        for (size_t ic = 0; ic < IC; ic++) {
+            for (size_t kh = 0; kh < IH; kh++) {
+                for (size_t kw = 0; kw < IW; kw++) {
+                    size_t iidx = ic * IH * IW + kh * IW + kw;
+                    size_t widx = oc * IC * IH * IW
+                                    + ic * IH * IW + kh * IW + kw;
+
+                    dst_data[oc] += src_data[iidx] * weights_data[widx];
+                }
+            }
+        }
+    }
+}
+
+class smoke_FullyConnectedOnlyTest: public TestsCommon,
+                              public WithParamInterface<fc_test_params> {
+
+    std::string layers_t = R"V0G0N(
+        <layer name="FullyConnected" id="1" type="InnerProduct" precision="FP32">
+            <fc out-size="_OC_" />
+            <weights offset="0" size="_S1_" />
+            <biases offset="_S1_" size="_S2_" />
+
+            <input>
+                <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="1">
+                    <dim>_IN_</dim>
+                    <dim>_OC_</dim>
+                </port>
+            </output>
+        </layer>
+)V0G0N";
+
+    std::string edges_t = R"V0G0N(
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+)V0G0N";
+
+    std::string getModel(fc_test_params p) {
+        std::string model = layers_t;
+
+        REPLACE_WITH_NUM(model, "_IN_", 1);
+        REPLACE_WITH_NUM(model, "_IW_", p.in.w);
+        REPLACE_WITH_NUM(model, "_IH_", p.in.h);
+        REPLACE_WITH_NUM(model, "_IC_", p.in.c);
+        REPLACE_WITH_NUM(model, "_OC_", p.out_c);
+
+        size_t w_data_size = (p.in.w * p.in.h * p.in.c * p.out_c) * sizeof(float);
+        size_t b_data_size = p.out_c * sizeof(float);
+        REPLACE_WITH_NUM(model, "_S1_", w_data_size);
+        REPLACE_WITH_NUM(model, "_S2_", b_data_size);
+
+        model = IRTemplateGenerator::getIRTemplate("FullyConnected_Only", {1lu, p.in.c, p.in.h, p.in.w}, "FP32", model, edges_t);
+
+        return model;
+    }
+
+protected:
+    virtual void SetUp() {
+
+        try {
+            fc_test_params p = ::testing::WithParamInterface<fc_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            TBlob<uint8_t> *weights = new TBlob<uint8_t>({Precision::U8, {(p.in.w * p.in.h * p.in.c * p.out_c + p.out_c) * sizeof(float)}, Layout::C});
+            weights->allocate();
+            fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
+            TBlob<uint8_t>::Ptr weights_ptr = TBlob<uint8_t>::Ptr(weights);
+            Core ie;
+            CNNNetwork network = ie.ReadNetwork(model, weights_ptr);
+
+            SizeVector dims_src = {1,
+                                   p.in.c,
+                                   p.in.h,
+                                   p.in.w};
+            Blob::Ptr src = make_shared_blob<float>(TensorDesc({ Precision::FP32, dims_src, Layout::NCHW }));
+            src->allocate();
+            fill_data(src->buffer().as<float *>(), src->size());
+
+            SizeVector dims_dst = {1, p.out_c};
+            Blob::Ptr dst = make_shared_blob<float>(TensorDesc({ Precision::FP32, dims_dst, Layout::NC }));
+            dst->allocate();
+
+            TBlob<float> dst_ref({Precision::FP32, dims_dst, Layout::NC});
+            dst_ref.allocate();
+
+            ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.device_name);
+            InferRequest inferRequest = exeNetwork.CreateInferRequest();
+            OutputsDataMap outInfo;
+            outInfo = network.getOutputsInfo();
+            ASSERT_EQ(outInfo.size(), 1);
+            ASSERT_NE(outInfo.begin()->second, nullptr);
+            inferRequest.SetBlob(network.getInputsInfo().begin()->first, src);
+            inferRequest.SetBlob(outInfo.begin()->first, dst);
+            inferRequest.Infer();
+
+            auto * srcPtr = dynamic_cast<TBlob<float>*>(src.get());
+            ref_innerproduct(*srcPtr, weights->readOnly().as<const float *>(), weights->size() / sizeof(float), dst_ref, p);
+            compare(*dst, dst_ref, 0.9f);
+
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
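+// Test cases: {{input W, H, C}, output channels}.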
+#define case_1 fc_base_params({{227, 227, 3}, 96})
+#define case_2 fc_base_params({{227, 227, 4}, 8})
+
+TEST_P(smoke_FullyConnectedOnlyTest, TestsFullyConnected) {}
+
+std::string getTestCaseName(testing::TestParamInfo<fc_test_params> obj) {
+    return obj.param.device_name +
+        "_w" + std::to_string(obj.param.in.w) +
+        "_h" + std::to_string(obj.param.in.h) +
+        "_c" + std::to_string(obj.param.in.c) +
+        "_outc" + std::to_string(obj.param.out_c);
+}
+
+fc_test_params fc_only_test_cases[] = {
+               fc_test_params("CPU", case_1),
+               fc_test_params("CPU", case_2),
+};
+
+INSTANTIATE_TEST_CASE_P(
+        TestsFullyConnected, smoke_FullyConnectedOnlyTest, ::testing::ValuesIn(fc_only_test_cases), getTestCaseName);
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/gather_tree_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/gather_tree_tests.cpp
new file mode 100644 (file)
index 0000000..9ff01d8
--- /dev/null
@@ -0,0 +1,23 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gather_tree_tests.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+    smoke_CPU_TestsGatherTree, GatherTreeTests,
+            ::testing::Values(
+                // Params: in_out_shape, step_idx, parent_idx, max_seq_len, end_token, reference
+                gather_tree_test_params{ {3, 2, 3 }, {1, 2, 3, 2, 3, 4, 4, 5, 6, 5, 6, 7, 7, 8, 9, 8, 9, 10},
+                                          {0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 0, 2, 1, 2, 2, 1, 1},
+                                          {3, 3 }, {11}, {2, 2, 2, 2, 4, 4, 6, 5, 6, 7, 6, 6, 7, 8, 9, 8, 9, 10}, "CPU" },
+                gather_tree_test_params{ {4, 1, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1}, {0, 0, 0, 0, 1, 1, 2, 1, 2, -1, -1, -1},
+                                          {3}, {10}, {2, 2, 2, 6, 5, 6, 7, 8, 9, 10, 10, 10}, "CPU" },
+                gather_tree_test_params{ {4, 1, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10}, {0, 0, 0, 0, 1, 1, 2, 1, 2, 1, 1, 1},
+                                          {4}, {10}, {2, 2, 2, 5, 5, 5, 8, 8, 8, 10, 10, 10}, "CPU" },
+                gather_tree_test_params{ {5, 1, 3}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 10, 3, 2, 10, 10}, {0, 0, 0, 0, 1, 1, 2, 1, 2, 1, 1, 1, 2, 0, 1},
+                                          {5}, {10}, {2, 2, 2, 5, 5, 5, 8, 8, 8, 3, 1, 10, 2, 10, 10}, "CPU" },
+                gather_tree_test_params{ {4, 2, 3}, {1, 2, 3, 2, 3, 4, 4, 5, 6, 5, 6, 7, 7, 8, 9, 8, 9, 10, 0, 0, 0, 11, 12, 0},
+                                          {0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 2, 1, 2, 2, 0, 1, -1, -1, -1, 0, 1, 0},
+                                          {3, 4}, {11}, {2, 2, 2, 2, 3, 2, 6, 5, 6, 7, 5, 7, 7, 8, 9, 8, 9, 8, 11, 11, 11, 11, 12, 0}, "CPU" }
+));
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_batchnorm_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_batchnorm_tests.cpp
new file mode 100644 (file)
index 0000000..986f501
--- /dev/null
@@ -0,0 +1,175 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include "ir_gen_helper.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace single_layer_tests;
+
+struct batchnorm4D_test_params {
+    std::string device_name;
+
+    struct {
+        size_t w;
+        size_t h;
+        size_t c;
+    } in;
+
+    double epsilon;
+};
+
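+// Reference BatchNormalization over NCHW input: each value is normalized per channel,
+// dst = (src - mean[c]) / sqrt(variance[c] + epsilon). No extra scale/shift is applied here.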
+template <typename data_t>
+void ref_batchnorm4D(const TBlob<data_t> &src, const data_t *variance, const data_t *mean,
+                    TBlob<data_t> &dst, batchnorm4D_test_params prm) {
+    size_t IW = src.getTensorDesc().getDims()[3];
+    size_t IH = src.getTensorDesc().getDims()[2];
+    size_t IC = src.getTensorDesc().getDims()[1];
+    size_t MB = src.getTensorDesc().getDims()[0];
+
+    const double eps = prm.epsilon;
+
+    const data_t *src_data = src.readOnly();
+    data_t *dst_data = dst.data();
+
+    for (size_t c = 0; c < IC; ++c) {
+        data_t v_mean = mean[c];
+        data_t v_variance = variance[c];
+        data_t sqrt_variance = static_cast<data_t>(1. / sqrt(v_variance + eps));
+
+        for (size_t n = 0; n < MB; ++n)
+            for (size_t h = 0; h < IH; ++h)
+                for (size_t w = 0; w < IW; ++w) {
+                    size_t idx = n * IC * IH * IW
+                                 + c * IH * IW
+                                 + h * IW + w;
+                    dst_data[idx] = (src_data[idx] - v_mean) * sqrt_variance;
+                }
+    }
+}
+
+class smoke_CPUBatchNorm4DOnlyTest: public TestsCommon,
+                                public WithParamInterface<batchnorm4D_test_params> {
+    std::string layers_t = R"V0G0N(
+        <layer name="batchNorm" id="1" type="BatchNormalization" precision="FP32">
+            <batch_norm_data epsilon="_EPSILON_"/>
+
+            <weights offset="0" size="_S1_" />
+            <biases offset="_S1_" size="_S2_" />
+
+            <input>
+                <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="1">
+                    <dim>_IN_</dim>
+                    <dim>_OC_</dim>
+                    <dim>_OH_</dim>
+                    <dim>_OW_</dim>
+                </port>
+            </output>
+        </layer>
+)V0G0N";
+
+    std::string edges_t = R"V0G0N(
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+)V0G0N";
+    std::string getModel(batchnorm4D_test_params p) {
+        std::string model = layers_t;
+
+        REPLACE_WITH_NUM(model, "_IN_", 1);
+        REPLACE_WITH_NUM(model, "_IW_", p.in.w);
+        REPLACE_WITH_NUM(model, "_IH_", p.in.h);
+        REPLACE_WITH_NUM(model, "_IC_", p.in.c);
+        REPLACE_WITH_NUM(model, "_EPSILON_", p.epsilon);
+
+        REPLACE_WITH_NUM(model, "_OW_", p.in.w);
+        REPLACE_WITH_NUM(model, "_OH_", p.in.h);
+        REPLACE_WITH_NUM(model, "_OC_", p.in.c);
+
+        size_t w_data_size = p.in.c * sizeof(float);
+        size_t b_data_size = p.in.c * sizeof(float);
+        REPLACE_WITH_NUM(model, "_S1_", w_data_size);
+        REPLACE_WITH_NUM(model, "_S2_", b_data_size);
+
+        model = IRTemplateGenerator::getIRTemplate("BatchNorm4D_Only", {1lu, p.in.c, p.in.h, p.in.w}, "FP32", model, edges_t);
+
+        return model;
+    }
+
+protected:
+    virtual void SetUp() {
+        try {
+            batchnorm4D_test_params p = ::testing::WithParamInterface<batchnorm4D_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            TBlob<uint8_t> *weights = new TBlob<uint8_t>(TensorDesc(Precision::U8, {p.in.c * 2 * sizeof(float)}, C));
+            weights->allocate();
+            fill_data(weights->buffer(), weights->size() / sizeof(float));
+            float * data = weights->buffer();
+            for (size_t i = 0; i < weights->size() / sizeof(float); i++) {
+                if (data[i] < 0) {
+                    data[i] *= -1;
+                }
+            }
+
+            TBlob<uint8_t>::Ptr weights_ptr = TBlob<uint8_t>::Ptr(weights);
+
+            Core ie;
+            CNNNetwork network = ie.ReadNetwork(model, weights_ptr);
+
+            SizeVector dims_src = {p.in.w,
+                                   p.in.h,
+                                   p.in.c,
+                                   1};          // 1 is a batch size
+            Blob::Ptr src = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW));
+            src->allocate();
+            fill_data(src->buffer().as<float *>(), src->size());
+
+            Blob::Ptr dst = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW));
+            dst->allocate();
+
+            ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU");
+            InferRequest inferRequest = exeNetwork.CreateInferRequest();
+            OutputsDataMap outInfo;
+            outInfo = network.getOutputsInfo();
+            ASSERT_EQ(outInfo.size(), 1);
+            ASSERT_NE(outInfo.begin()->second, nullptr);
+            inferRequest.SetBlob(network.getInputsInfo().begin()->first, src);
+            inferRequest.SetBlob(outInfo.begin()->first, dst);
+            inferRequest.Infer();
+
+            TBlob<float> dst_ref(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW));
+            dst_ref.allocate();
+
+            auto * srcPtr = dynamic_cast<TBlob<float>*>(src.get());
+            ref_batchnorm4D(*srcPtr, (const float*) weights->buffer(), ((const float*) weights->buffer() + p.in.c), dst_ref, p);
+
+            compare(*dst, dst_ref);
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(smoke_CPUBatchNorm4DOnlyTest, TestsBatchNorm4D) {}
+
+INSTANTIATE_TEST_CASE_P(
+        TestBatchNorm4D, smoke_CPUBatchNorm4DOnlyTest,
+        ::testing::Values(
+                batchnorm4D_test_params{ "CPU",
+                                         {256, 128, 32}, 1e-6}));
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_deconv_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_deconv_tests.cpp
new file mode 100644 (file)
index 0000000..21ebfd2
--- /dev/null
@@ -0,0 +1,231 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <string>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "../common_single_layer_tests/deconv_ref.hpp"
+#include "ir_gen_helper.hpp"
+#include "common_test_utils/common_layers_params.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace single_layer_tests;
+
+struct deconv_test_params {
+    std::string device_name;
+
+    struct {
+        size_t w;
+        size_t h;
+        size_t c;
+    } in;
+
+    size_t krn_w;
+    size_t krn_h;
+    size_t str_w;
+    size_t str_h;
+    size_t pad_w;
+    size_t pad_h;
+
+    size_t out_c;
+
+    bool with_bias;
+};
+
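+// Reference deconvolution: packs the test parameters into conv_common_params and
+// delegates the actual computation to ref_deconv_common from deconv_ref.hpp.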
+template<typename data_t>
+void ref_deconv(const Blob::Ptr &src, const Blob::Ptr &weights, const Blob::Ptr &bias,
+                Blob::Ptr &dst_ref, deconv_test_params p) {
+    const float *weights_data = (const float *) weights->buffer();
+    size_t bias_size = p.out_c;
+    size_t weights_size = weights->size();  // Blob::size() is an element count; the bias lives in a separate blob
+    const float *bias_data = p.with_bias ? (const float *) bias->buffer() : nullptr;
+    CommonTestUtils::conv_common_params params;
+    params.kernel.insert(X_AXIS, p.krn_w);
+    params.kernel.insert(Y_AXIS, p.krn_h);
+    params.stride.insert(X_AXIS, p.str_w);
+    params.stride.insert(Y_AXIS, p.str_h);
+    params.pads_begin.insert(X_AXIS, p.pad_w);
+    params.pads_begin.insert(Y_AXIS, p.pad_h);
+    params.out_c = p.out_c;
+    ref_deconv_common<float>({ src }, *dst_ref.get(), weights_data, weights_size, bias_data, bias_size, params);
+}
+
+class smoke_CPUDeconvolutionOnlyTest : public TestsCommon,
+                                    public WithParamInterface<deconv_test_params> {
+    std::string layers_t = R"V0G0N(
+        <layer name="deconv1" id="1" type="Deconvolution" precision="FP32">
+            <deconvolution
+                kernel="_KH_,_KW_"
+                strides="_SH_,_SW_"
+                pads_begin="_PH_,_PW_"  pads_end="_PH_,_PW_"
+                output="_OC_"/>
+
+            <weights offset="0" size="_S1_" />
+            <biases offset="_OFF2_" size="_S2_" />
+
+            <input>
+                <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="1">
+                    <dim>_IN_</dim>
+                    <dim>_OC_</dim>
+                    <dim>_OH_</dim>
+                    <dim>_OW_</dim>
+                </port>
+            </output>
+        </layer>
+)V0G0N";
+    
+    std::string edges_t = R"V0G0N(
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+)V0G0N";
+
+    std::string getModel(deconv_test_params p) {
+        std::string model = layers_t;
+
+        REPLACE_WITH_NUM(model, "_IN_", 1);
+        REPLACE_WITH_NUM(model, "_IW_", p.in.w);
+        REPLACE_WITH_NUM(model, "_IH_", p.in.h);
+        REPLACE_WITH_NUM(model, "_IC_", p.in.c);
+
+        REPLACE_WITH_NUM(model, "_KW_", p.krn_w);
+        REPLACE_WITH_NUM(model, "_KH_", p.krn_h);
+        REPLACE_WITH_NUM(model, "_SW_", p.str_w);
+        REPLACE_WITH_NUM(model, "_SH_", p.str_h);
+        REPLACE_WITH_NUM(model, "_PW_", p.pad_w);
+        REPLACE_WITH_NUM(model, "_PH_", p.pad_h);
+
+        REPLACE_WITH_NUM(model, "_OC_", p.out_c);
+        REPLACE_WITH_NUM(model, "_OH_", p.str_h * (p.in.h - 1) + p.krn_h - 2 * p.pad_h);
+        REPLACE_WITH_NUM(model, "_OW_", p.str_w * (p.in.w - 1) + p.krn_w - 2 * p.pad_w);
+
+        if (!p.with_bias) REMOVE_LINE(model, "<biases offset=\"_OFF2_\" size=\"_S2_\" />");
+
+        size_t w_data_size = (p.krn_w * p.krn_h * p.out_c * p.in.c) * sizeof(float);
+        size_t b_data_size = p.out_c * sizeof(float);
+        REPLACE_WITH_NUM(model, "_S1_", w_data_size);
+        REPLACE_WITH_NUM(model, "_OFF2_", w_data_size);
+        REPLACE_WITH_NUM(model, "_S2_", b_data_size);
+        
+        model = IRTemplateGenerator::getIRTemplate("Deconvolution_Only", {1lu, p.in.c, p.in.h, p.in.w}, "FP32", model, edges_t);
+
+        return model;
+    }
+
+protected:
+    virtual void SetUp() {
+        try {
+            deconv_test_params p = ::testing::WithParamInterface<deconv_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            std::vector<Blob::Ptr> blob_to_model;
+            Blob::Ptr weights = make_shared_blob<float>(TensorDesc(Precision::FP32,
+                                                        {p.krn_w * p.krn_h * p.out_c * p.in.c}, C));
+            weights->allocate();
+            fill_data(weights->buffer().as<float *>(), weights->size());
+            blob_to_model.push_back(weights);
+
+            Blob::Ptr bias = nullptr;
+            if (p.with_bias) {
+                bias = make_shared_blob<float>(TensorDesc(Precision::FP32,
+                                               {p.out_c}, C));  // one bias value per output channel
+                bias->allocate();
+                fill_data(bias->buffer().as<float *>(), bias->size());
+                blob_to_model.push_back(bias);
+            }
+
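+            // Pack weights (and optional bias) back-to-back into a single U8 blob so the
+            // offsets match _S1_/_OFF2_ in the generated IR.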
+            size_t total_size_in_bytes = 0;
+            for (Blob::Ptr blb : blob_to_model) total_size_in_bytes += blb->byteSize();
+
+            TBlob<uint8_t>::Ptr model_blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, { total_size_in_bytes }, C));
+            model_blob->allocate();
+            uint8_t *model_blob_ptr = model_blob->buffer().as<uint8_t *>();
+            for (Blob::Ptr blb : blob_to_model) {
+                memcpy(model_blob_ptr, blb->buffer().as<uint8_t *>(), blb->byteSize());
+                model_blob_ptr += blb->byteSize();
+            }
+
+            Core ie;
+            CNNNetwork network = ie.ReadNetwork(model, model_blob);
+
+            SizeVector dims_src = {p.in.w, p.in.h, p.in.c, 1};  // 1 is a batch size
+
+            Blob::Ptr src = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW));
+            src->allocate();
+            fill_data(src->buffer().as<float *>(), src->size());
+
+            size_t OW = p.str_w * (p.in.w - 1) + p.krn_w - 2 * p.pad_w;
+            size_t OH = p.str_h * (p.in.h - 1) + p.krn_h - 2 * p.pad_h;
+
+            SizeVector dims_dst = {OW, OH, p.out_c, 1};
+
+            Blob::Ptr dst = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), NCHW));
+            dst->allocate();
+            fill_data(dst->buffer().as<float *>(), dst->size());
+
+            ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU");
+            InferRequest inferRequest = exeNetwork.CreateInferRequest();
+            OutputsDataMap outInfo;
+            outInfo = network.getOutputsInfo();
+            ASSERT_EQ(outInfo.size(), 1);
+            ASSERT_NE(outInfo.begin()->second, nullptr);
+            inferRequest.SetBlob(network.getInputsInfo().begin()->first, src);
+            inferRequest.SetBlob(outInfo.begin()->first, dst);
+            inferRequest.Infer();
+
+            Blob::Ptr dst_ref = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), NCHW));
+            dst_ref->allocate();
+
+            ref_deconv<float>(src, weights, bias, dst_ref, p);
+
+            compare(*dst.get(), *dst_ref.get());
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(smoke_CPUDeconvolutionOnlyTest, TestsDeconvolution) {}
+
+INSTANTIATE_TEST_CASE_P(
+        TestDeconvolution, smoke_CPUDeconvolutionOnlyTest,
+        ::testing::Values(
+                deconv_test_params{"CPU",
+                                   {3, 3, 3},
+                                   3, 3, 1, 1, 0, 0, 2, true},
+                deconv_test_params{"CPU",
+                                   {3, 3, 3},
+                                   4, 3, 1, 1, 0, 0, 2, true},
+                deconv_test_params{"CPU",
+                                   {3, 3, 3},
+                                   4, 3, 1, 2, 0, 0, 2, true},
+                deconv_test_params{"CPU",
+                                   {4, 4, 3},
+                                   3, 3, 1, 2, 0, 0, 2, true}, // jit impl should work
+                deconv_test_params{"CPU",
+                                   {4, 4, 3},
+                                   3, 3, 1, 2, 0, 0, 2, false}, // jit impl should work
+                deconv_test_params{"CPU",
+                                   {3, 3, 3},
+                                   3, 3, 1, 1, 0, 0, 2, false},
+                deconv_test_params{"CPU",
+                                   {3, 3, 3},
+                                   4, 3, 1, 1, 0, 0, 2, false},
+                deconv_test_params{"CPU",
+                                   {3, 3, 3},
+                                   4, 3, 1, 2, 0, 0, 2, false}));
+
+
+/*** TBD ***/
+
+
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_logistic_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_logistic_tests.cpp
new file mode 100644 (file)
index 0000000..b1f8173
--- /dev/null
@@ -0,0 +1,139 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include "ir_gen_helper.hpp"
+
+#include <math.h>
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace single_layer_tests;
+
+struct logistic_test_params {
+    std::string device_name;
+
+    struct {
+        size_t w;
+        size_t h;
+        size_t c;
+    } in;
+};
+
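+// Logistic (sigmoid) activation: e^s / (e^s + 1), which equals 1 / (1 + e^-s).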
+template <typename T>
+T logistic_fwd(T s) {
+    T v = ::expf((float)(s));
+    return v / (v + 1);
+}
+
+template<typename data_t>
+void ref_logistic(const TBlob<data_t> &src, TBlob<data_t> &dst, logistic_test_params prm) {
+    data_t *dst_data = dst.data();
+
+    const data_t *src_data = src.readOnly();
+
+    for (size_t i = 0; i < src.size(); i++) {
+        dst_data[i] = logistic_fwd(src_data[i]);
+    }
+}
+
+class smoke_CPULogisticOnlyTest : public TestsCommon,
+                               public WithParamInterface<logistic_test_params> {
+
+    std::string layers_t = R"V0G0N(
+        <layer name="logistic" id="1" type="Logistic" precision="FP32">
+            <input>
+                <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="1">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+)V0G0N";
+
+    std::string edges_t = R"V0G0N(
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+)V0G0N";
+
+    std::string getModel(logistic_test_params p) {
+        std::string model = layers_t;
+
+        REPLACE_WITH_NUM(model, "_IN_", 1);
+        REPLACE_WITH_NUM(model, "_IW_", p.in.w);
+        REPLACE_WITH_NUM(model, "_IH_", p.in.h);
+        REPLACE_WITH_NUM(model, "_IC_", p.in.c);
+        model = IRTemplateGenerator::getIRTemplate("Logistic_Only", {1lu, p.in.c, p.in.h, p.in.w}, "FP32", model, edges_t);
+        return model;
+    }
+
+ protected:
+    virtual void SetUp() {
+
+        try {
+            logistic_test_params p = ::testing::WithParamInterface<logistic_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork network;
+            ASSERT_NO_THROW(network = ie.ReadNetwork(model, Blob::CPtr()));
+
+            SizeVector dims_src = {p.in.w,
+                                   p.in.h,
+                                   p.in.c,
+                                   1};
+
+            Blob::Ptr src = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW));
+            src->allocate();
+            fill_data(src->buffer().as<float *>(), src->size());
+
+            SizeVector dims_dst = dims_src;
+
+            Blob::Ptr dst = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), NCHW));
+            dst->allocate();
+
+            TBlob<float> dst_ref(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), NCHW));
+            dst_ref.allocate();
+
+            auto * srcPtr = dynamic_cast<TBlob<float>*>(src.get());
+            ref_logistic(*srcPtr, dst_ref, p);
+
+            ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU");
+            InferRequest inferRequest = exeNetwork.CreateInferRequest();
+            inferRequest.SetBlob(network.getInputsInfo().begin()->first, src);
+            inferRequest.SetBlob(network.getOutputsInfo().begin()->first, dst);
+            inferRequest.Infer();
+
+            compare(*dst, dst_ref);
+
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(smoke_CPULogisticOnlyTest, TestsLogistic) {}
+
+INSTANTIATE_TEST_CASE_P(
+    TestLogistic, smoke_CPULogisticOnlyTest,
+    ::testing::Values(
+        logistic_test_params{"CPU",
+                            {13, 13, 8}}
+    )
+);
+
+/*** TBD ***/
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_power_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_power_tests.cpp
new file mode 100644 (file)
index 0000000..45e82e7
--- /dev/null
@@ -0,0 +1,152 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+#include <cmath>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include "ir_gen_helper.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace single_layer_tests;
+
+struct power_test_params {
+    std::string device_name;
+
+    struct {
+        size_t w;
+        size_t h;
+        size_t c;
+    } in;
+
+    float power;
+    float scale;
+    float shift;
+};
+
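+// Reference Power layer: dst[i] = (shift + scale * src[i]) ^ power.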
+template <typename data_t>
+void ref_power(const TBlob<data_t> &src, TBlob<data_t> &dst, power_test_params prm) {
+
+    data_t *dst_data = dst.data();
+    const data_t *src_data = src.readOnly();
+
+    const double scale = prm.scale;
+    const double power = prm.power;
+    const double shift = prm.shift;
+
+    for (size_t i = 0; i < src.size(); i++) {
+        dst_data[i] = (float)std::pow(shift + src_data[i] * scale, power);
+    }
+}
+
+class smoke_CPUPowerOnlyTest: public TestsCommon,
+                           public WithParamInterface<power_test_params> {
+    std::string layers_t = R"V0G0N(
+        <layer name="power" id="1" type="Power" precision="FP32">
+            <power_data power="_POWER_" scale="_SCALE_" shift="_SHIFT_"/>
+            <input>
+                <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+)V0G0N";
+    
+    std::string edges_t = R"V0G0N(
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+)V0G0N";
+
+    std::string getModel(power_test_params p) {
+        std::string model = layers_t;
+
+        REPLACE_WITH_NUM(model, "_IN_", 1);
+        REPLACE_WITH_NUM(model, "_IW_", p.in.w);
+        REPLACE_WITH_NUM(model, "_IH_", p.in.h);
+        REPLACE_WITH_NUM(model, "_IC_", p.in.c);
+        REPLACE_WITH_NUM(model, "_POWER_", p.power);
+        REPLACE_WITH_NUM(model, "_SCALE_", p.scale);
+        REPLACE_WITH_NUM(model, "_SHIFT_", p.shift);
+
+        model = IRTemplateGenerator::getIRTemplate("Power_Only", {1lu, p.in.c, p.in.h, p.in.w}, "FP32", model, edges_t);
+
+        return model;
+    }
+
+protected:
+    virtual void SetUp() {
+
+        try {
+            power_test_params p = ::testing::WithParamInterface<power_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr());
+
+            SizeVector dims_src = {p.in.w,
+                                   p.in.h,
+                                   p.in.c,
+                                   1};
+
+            Blob::Ptr src = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW));
+            src->allocate();
+            fill_data(src->buffer().as<float *>(), src->size());
+
+            SizeVector dims_dst = dims_src;
+
+            Blob::Ptr dst = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), NCHW));
+            dst->allocate();
+
+            TBlob<float> dst_ref(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), NCHW));
+            dst_ref.allocate();
+
+            auto * srcPtr = dynamic_cast<TBlob<float>*>(src.get());
+            ref_power(*srcPtr, dst_ref, p);
+
+            ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU");
+            InferRequest inferRequest = exeNetwork.CreateInferRequest();
+            OutputsDataMap outInfo;
+            outInfo = network.getOutputsInfo();
+            ASSERT_EQ(outInfo.size(), 1);
+            ASSERT_NE(outInfo.begin()->second, nullptr);
+            inferRequest.SetBlob(network.getInputsInfo().begin()->first, src);
+            inferRequest.SetBlob(outInfo.begin()->first, dst);
+            inferRequest.Infer();
+
+            compare(*dst, dst_ref);
+
+        } catch (const details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(smoke_CPUPowerOnlyTest, TestsPower) {}
+
+INSTANTIATE_TEST_CASE_P(
+        TestPower, smoke_CPUPowerOnlyTest,
+        ::testing::Values(
+                power_test_params{ "CPU",
+                    {13, 13, 3}, 1, 2, 0.5f },
+                power_test_params{ "CPU",
+                    {23, 23, 1}, 3, 8, 2 },
+                power_test_params{ "CPU",
+                    {23, 23, 8}, 8, 2, 1 }));
+
+/*** TBD ***/
+
+
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_roipooling_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_roipooling_tests.cpp
new file mode 100644 (file)
index 0000000..ff987e4
--- /dev/null
@@ -0,0 +1,101 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include <ie_core.hpp>
+#include "ir_gen_helper.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace single_layer_tests;
+
+struct roipooling_test_params {
+    std::string device_name;
+
+    struct {
+        size_t w;
+        size_t h;
+        size_t c;
+    } in;
+
+    size_t pooled_h;
+    size_t pooled_w;
+    float spatial_scale;
+};
+
+template <typename data_t>
+void ref_roipool(const TBlob<data_t> &src, TBlob<data_t> &dst, roipooling_test_params prm)
+{
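+    // No reference computation: this test only checks that the generated IR is read
+    // successfully (see SetUp below); outputs are not compared.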
+}
+
+class MKLDNNROIPoolingOnlyTest: public TestsCommon,
+                             public WithParamInterface<roipooling_test_params> {
+    std::string layers_t = R"V0G0N(
+        <layer name="roi_pool" type="ROIPooling" precision="FP32" id="1">
+            <data pooled_h="_POOLED_H_" pooled_w="_POOLED_W_" spatial_scale="_SPATIAL_SCALE_"/>
+            <input>
+                <port id="10">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+                <port id="11">
+                    <dim>300</dim>
+                    <dim>5</dim>
+                </port>
+            </input>
+            <output>
+                <port id="12">
+                    <dim>300</dim>
+                    <dim>256</dim>
+                    <dim>6</dim>
+                    <dim>6</dim>
+                </port>
+            </output>
+        </layer>
+)V0G0N";
+    
+    std::string edges_t = R"V0G0N(
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="10"/>
+)V0G0N";
+
+    std::string getModel(roipooling_test_params p) {
+        std::string model = layers_t;
+
+        REPLACE_WITH_NUM(model, "_IN_", 1);
+        REPLACE_WITH_NUM(model, "_IW_", p.in.w);
+        REPLACE_WITH_NUM(model, "_IH_", p.in.h);
+        REPLACE_WITH_NUM(model, "_IC_", p.in.c);
+
+        REPLACE_WITH_NUM(model, "_POOLED_H_", p.pooled_h);
+        REPLACE_WITH_NUM(model, "_POOLED_W_", p.pooled_w);
+        REPLACE_WITH_NUM(model, "_SPATIAL_SCALE_", p.spatial_scale);
+
+        model = IRTemplateGenerator::getIRTemplate("ROIPooling_Only", {1lu, p.in.c, p.in.h, p.in.w}, "FP32", model, edges_t);
+
+        return model;
+    }
+
+protected:
+    virtual void SetUp() {
+
+        try {
+            roipooling_test_params p = ::testing::WithParamInterface<roipooling_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            InferenceEngine::Core ie;
+            ASSERT_NO_THROW(ie.ReadNetwork(model, Blob::CPtr()));
+
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(MKLDNNROIPoolingOnlyTest, nightly_TestsROIPooling) {}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_scaleshift_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_scaleshift_tests.cpp
new file mode 100644 (file)
index 0000000..1e9f95f
--- /dev/null
@@ -0,0 +1,170 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include "ir_gen_helper.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace single_layer_tests;
+
+struct scaleshift_test_params {
+    std::string device_name;
+
+    struct {
+        size_t w;
+        size_t h;
+        size_t c;
+    } in;
+
+    int broadcast;
+};
+
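+// Reference ScaleShift: per-channel affine transform dst = src * weights[c] + biases[c].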
+template <typename data_t>
+void ref_scaleshift(const TBlob<data_t> &src, const data_t *weights, const size_t weightsSize,
+              TBlob<data_t> &dst, scaleshift_test_params prm) {
+
+    size_t IW = src.getTensorDesc().getDims()[3];
+    size_t IH = src.getTensorDesc().getDims()[2];
+    size_t IC = src.getTensorDesc().getDims()[1];
+    size_t MB = src.getTensorDesc().getDims()[0];
+
+    const data_t *src_data = src.readOnly();
+    const data_t *weights_data = weights;
+    const data_t *bias_data = weights_data + IC;
+    data_t *dst_data = dst.data();
+
+    for (size_t mb = 0; mb < MB; mb++) {
+        for (size_t c = 0; c < IC; c++) {
+            for (size_t h = 0; h < IH; h++) {
+                for (size_t w = 0; w < IW; w++) {
+                    size_t idx = mb * IC * IH * IW
+                        + c * IH * IW
+                        + h * IW + w;
+
+                    size_t widx = c;
+                    size_t bidx = c;
+
+                    dst_data[idx] = src_data[idx] * weights_data[widx] + bias_data[bidx];
+                }
+            }
+        }
+    }
+}
+
+class smoke_CPUScaleShiftOnlyTest: public TestsCommon,
+                           public WithParamInterface<scaleshift_test_params> {
+    std::string layers_t = R"V0G0N(
+        <layer name="scaleshift" id="1" type="ScaleShift" precision="FP32">
+            <data broadcast="_BROADCAST_"/>
+
+            <weights offset="0" size="_S1_" />
+            <biases offset="_S1_" size="_S2_" />
+
+            <input>
+                <port id="1">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="2">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+)V0G0N";
+    
+    std::string edges_t = R"V0G0N(
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
+)V0G0N";
+
+    std::string getModel(scaleshift_test_params p) {
+        std::string model = layers_t;
+
+        REPLACE_WITH_NUM(model, "_IN_", 1);
+        REPLACE_WITH_NUM(model, "_IW_", p.in.w);
+        REPLACE_WITH_NUM(model, "_IH_", p.in.h);
+        REPLACE_WITH_NUM(model, "_IC_", p.in.c);
+        REPLACE_WITH_NUM(model, "_BROADCAST_", p.broadcast);
+
+        size_t w_data_size = p.in.c * sizeof(float);
+        size_t b_data_size = p.in.c * sizeof(float);
+        REPLACE_WITH_NUM(model, "_S1_", w_data_size);
+        REPLACE_WITH_NUM(model, "_S2_", b_data_size);
+
+        model = IRTemplateGenerator::getIRTemplate("ScaleShift_Only", {1lu, p.in.c, p.in.h, p.in.w}, "FP32", model, edges_t);
+
+        return model;
+    }
+
+protected:
+    virtual void SetUp() {
+
+        try {
+            scaleshift_test_params p = ::testing::WithParamInterface<scaleshift_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            TBlob<uint8_t> *weights = new TBlob<uint8_t>(TensorDesc(Precision::U8, { p.in.c * 2 * sizeof(float) }, C));
+            weights->allocate();
+            fill_data( weights->data().as<float*>(), weights->size() / sizeof(float));
+
+            TBlob<uint8_t>::Ptr weights_ptr = TBlob<uint8_t>::Ptr(weights);
+
+            Core ie;
+            CNNNetwork network = ie.ReadNetwork(model, weights_ptr);
+
+            SizeVector dims_src = {p.in.w,
+                                   p.in.h,
+                                   p.in.c,
+                                   1};          // 1 is a batch size
+            Blob::Ptr src = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW));
+            src->allocate();
+            fill_data(src->buffer().as<float *>(), src->size());
+
+            Blob::Ptr dst = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW));
+            dst->allocate();
+
+            ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU");
+            InferRequest inferRequest = exeNetwork.CreateInferRequest();
+            OutputsDataMap outInfo;
+            outInfo = network.getOutputsInfo();
+            ASSERT_EQ(outInfo.size(), 1);
+            ASSERT_NE(outInfo.begin()->second, nullptr);
+            inferRequest.SetBlob(network.getInputsInfo().begin()->first, src);
+            inferRequest.SetBlob(outInfo.begin()->first, dst);
+            inferRequest.Infer();
+
+
+            TBlob<float> dst_ref(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW));
+            dst_ref.allocate();
+
+            auto * srcPtr = dynamic_cast<TBlob<float>*>(src.get());
+            ref_scaleshift(*srcPtr, weights->readOnly().as<const float*>(), weights->size() / sizeof(float), dst_ref, p);
+
+            compare(*dst, dst_ref);
+
+        } catch (const details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(smoke_CPUScaleShiftOnlyTest, TestsScaleShift) {}
+
+INSTANTIATE_TEST_CASE_P(
+        TestScaleShift, smoke_CPUScaleShiftOnlyTest,
+        ::testing::Values(
+                scaleshift_test_params{ "CPU",
+                                  {256, 128, 32}, 0}));
+
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_simplernms_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/mkldnn_simplernms_tests.cpp
new file mode 100644 (file)
index 0000000..dbe4dfe
--- /dev/null
@@ -0,0 +1,151 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include "ir_gen_helper.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace single_layer_tests;
+
+struct simplernms_test_params {
+    std::string device_name;
+
+    struct {
+        size_t w;
+        size_t h;
+        size_t c;
+    } in;
+
+    float cls_threshold;
+    size_t max_num_proposals;
+    float iou_threshold;
+    size_t min_bbox_size;
+    size_t feat_stride;
+    size_t pre_nms_topn;
+    size_t post_nms_topn;
+    float scale1;
+    float scale2;
+    float scale3;
+};
+
+template <typename data_t>
+void ref_simplernms(const TBlob<data_t> &src, TBlob<data_t> &dst, simplernms_test_params prm)
+{
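+    // No reference computation: the test below only runs inference and does not
+    // compare outputs numerically.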
+}
+
+class MKLDNNSimplerNMSOnlyTest: public TestsCommon,
+                             public WithParamInterface<simplernms_test_params> {
+
+    std::string layers_t = R"V0G0N(
+        <layer name="power" id="1" type="Power" precision="FP32">
+            <power_data power="1" scale="1" shift="0"/>
+            <input>
+                <port id="0"/>
+            </input>
+            <output>
+                <port id="1"/>
+            </output>
+        </layer>
+        <layer name="proposal" type="SimplerNMS" precision="FP32" id="2">
+            <data cls_threshold="_CLS_THR_" max_num_proposals="_MAX_NUM_"
+                iou_threshold="_IOU_THR_" min_bbox_size="_MIN_BB_SIZE_" feat_stride="_FEAT_STRIDE_"
+                pre_nms_topn="_PRE_NMS_TOPN_" post_nms_topn="_POST_NMS_TOPN_"
+                scale="_SCALE1_,_SCALE2_,_SCALE3_"/>
+            <input>
+                <port id="2">
+                    <dim>18</dim>
+                    <dim>39</dim>
+                    <dim>64</dim>
+                </port>
+                <port id="3">
+                    <dim>18</dim>
+                    <dim>39</dim>
+                    <dim>64</dim>
+                </port>
+            </input>
+            <output>
+                <port id="4">
+                    <dim>300</dim>
+                    <dim>5</dim>
+                </port>
+            </output>
+        </layer>
+)V0G0N";
+    
+    std::string edges_t = R"V0G0N(
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+        <edge from-layer="1" from-port="1" to-layer="2" to-port="2"/>
+        <edge from-layer="1" from-port="1" to-layer="2" to-port="3"/>
+)V0G0N";
+
+    std::string getModel(simplernms_test_params p) {
+        std::string model = layers_t;
+
+        REPLACE_WITH_NUM(model, "_IN_", p.in.w);
+        REPLACE_WITH_NUM(model, "_IW_", p.in.w);
+        REPLACE_WITH_NUM(model, "_IH_", p.in.h);
+        REPLACE_WITH_NUM(model, "_IC_", p.in.c);
+
+        REPLACE_WITH_NUM(model, "_CLS_THR_", p.cls_threshold);
+        REPLACE_WITH_NUM(model, "_MAX_NUM_", p.max_num_proposals);
+        REPLACE_WITH_NUM(model, "_IOU_THR_", p.iou_threshold);
+        REPLACE_WITH_NUM(model, "_MIN_BB_SIZE_", p.min_bbox_size);
+        REPLACE_WITH_NUM(model, "_FEAT_STRIDE_", p.feat_stride);
+        REPLACE_WITH_NUM(model, "_PRE_NMS_TOPN_", p.pre_nms_topn);
+        REPLACE_WITH_NUM(model, "_POST_NMS_TOPN_", p.post_nms_topn);
+        REPLACE_WITH_NUM(model, "_SCALE1_", p.scale1);
+        REPLACE_WITH_NUM(model, "_SCALE2_", p.scale2);
+        REPLACE_WITH_NUM(model, "_SCALE3_", p.scale3);
+
+        model = IRTemplateGenerator::getIRTemplate("SimplerNMS_Only", {1lu, p.in.c, p.in.h, p.in.w}, "FP32", model, edges_t);
+
+        return model;
+    }
+
+protected:
+    virtual void SetUp() {
+
+        try {
+            simplernms_test_params p = ::testing::WithParamInterface<simplernms_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr());
+
+            SizeVector dims_src = {p.in.w,
+                                   p.in.h,
+                                   p.in.c,
+                                   1};
+
+            Blob::Ptr src = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW));
+            src->allocate();
+            fill_data(src->buffer().as<float *>(), src->size());
+
+            SizeVector dims_dst = {300, 5, 1};
+
+            Blob::Ptr dst = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), NCHW));
+            dst->allocate();
+
+            ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU");
+            InferRequest inferRequest = exeNetwork.CreateInferRequest();
+            OutputsDataMap outInfo;
+            outInfo = network.getOutputsInfo();
+            ASSERT_EQ(outInfo.size(), 1);
+            ASSERT_NE(outInfo.begin()->second, nullptr);
+            inferRequest.SetBlob(network.getInputsInfo().begin()->first, src);
+            inferRequest.SetBlob(outInfo.begin()->first, dst);
+            inferRequest.Infer();
+
+        } catch (const details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(MKLDNNSimplerNMSOnlyTest, nightly_TestSimplerNMS) {}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/network_stats.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/network_stats.cpp
new file mode 100644 (file)
index 0000000..35d7557
--- /dev/null
@@ -0,0 +1,396 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <cfloat>
+#include <fstream>
+#include <limits>
+#include <memory>
+
+#include <pugixml.hpp>
+
+#include <format_reader_ptr.h>
+
+#include "network_stats.h"
+#include <samples/slog.hpp>
+
+using namespace InferenceEngine;
+
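+// Small helpers for per-tensor statistics: min/max, average, and absolute maximum.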
+class DataStats {
+public:
+    template <typename T>
+    static void GetDataMinMax(const T* data, size_t count, T& min, T& max);
+
+    template <typename T>
+    static void GetDataAverage(const T* data, size_t count, T& ave);
+
+    template <typename T>
+    static void GetDataAbsMax(const T* data, size_t count, T& max);
+
+    template <typename T>
+    static T GetAbsMax(T min, T max);
+};
+
+template <typename T>
+void DataStats::GetDataMinMax(const T* data, size_t count, T& min, T& max) {
+    for (size_t i = 0; i < count; i++) {
+        T val = data[i];
+
+        if (min > val) {
+            min = val;
+        }
+
+        if (max < val) {
+            max = val;
+        }
+    }
+}
+
+template <typename T>
+void DataStats::GetDataAbsMax(const T* data, size_t count, T& max) {
+    T min = FLT_MAX;
+
+    GetDataMinMax(data, count, min, max);
+
+    max = GetAbsMax(min, max);
+}
+
+template void DataStats::GetDataMinMax<float>(const float* data, size_t count, float& min, float& max);
+template void DataStats::GetDataMinMax<uint8_t>(const uint8_t* data, size_t count, uint8_t& min, uint8_t& max);
+
+template void DataStats::GetDataAbsMax<float>(const float* data, size_t count, float& max);
+
+template <typename T>
+void DataStats::GetDataAverage(const T* data, size_t count, T& ave) {
+    ave = 0;
+
+    for (size_t i = 0; i < count; i++) {
+        ave += data[i];
+    }
+
+    ave /= count;
+}
+
+template void DataStats::GetDataAverage<float>(const float* data, size_t count, float& ave);
+
+template <typename T>
+T DataStats::GetAbsMax(T min, T max) {
+    if (min < 0) {
+        min *= -1;
+    }
+
+    if (max < 0) {
+        max *= -1;
+    }
+
+    return (max > min) ? max : min;
+}
+
+template float DataStats::GetAbsMax<float>(float min, float max);
+
+
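+// Inserts an identity ScaleShift layer (weights taken from 'scale', zero biases) between
+// the given input port of 'beforeLayer' and its producer, rewiring the data edges so the
+// new layer's output feeds 'beforeLayer'.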
+CNNLayerPtr NetworkStatsCollector::addScaleShiftBeforeLayer(std::string name, CNNLayer::Ptr beforeLayer, size_t port, std::vector<float> scale) {
+    if (beforeLayer->insData.size() < port) {
+        THROW_IE_EXCEPTION << "cannot find appropraite port for addScaleShiftBeforeLayer";
+    }
+
+    DataPtr pData = beforeLayer->insData[port].lock();
+    LayerParams params;
+    params.name = name;
+    params.precision = Precision::FP32;
+    params.type = "ScaleShift";
+    CNNLayerPtr lptr = std::make_shared<ScaleShiftLayer>(params);
+    ScaleShiftLayer *pScaleShift = dynamic_cast<ScaleShiftLayer *>(lptr.get());
+
+    IE_ASSERT(4 == pData->getDims().size());
+    std::size_t num_chanels = pData->getDims().at(1);
+    SizeVector wdims({ num_chanels });
+
+    if (scale.size() == 1) {
+        scale.resize(wdims[0]);
+        for (int i = 1; i < wdims[0]; i++) {
+            scale[i] = scale[0];
+        }
+    }
+
+    if (scale.size() != num_chanels) {
+        THROW_IE_EXCEPTION << "Failed to add scaleshift before " << beforeLayer->name << " due to scales and layer output dims incossitency";
+    }
+
+    Blob::Ptr weights = nullptr;
+    weights = make_shared_blob<float>({Precision::FP32, wdims, Layout::C});
+    weights->allocate();
+    float *buffer = weights->buffer().as<float *>();
+    for (size_t i = 0; i < num_chanels; i++) {
+        buffer[i] = scale[i];
+    }
+    pScaleShift->_weights = weights;
+
+
+    SizeVector bdims({ num_chanels });
+    Blob::Ptr biases = nullptr;
+    biases = make_shared_blob<float>({Precision::FP32, bdims, Layout::C});
+    biases->allocate();
+    buffer = biases->buffer().as<float *>();
+    for (size_t i = 0; i < num_chanels; i++) {
+        buffer[i] = 0.f;
+    }
+    pScaleShift->_biases = biases;
+
+    Data *edge2 = new Data(*pData.get());
+    DataPtr newEdge(edge2);
+    lptr->insData.push_back(pData);
+    lptr->outData.push_back(newEdge);
+    newEdge->setName(/*"EdgeAfter_" +*/ params.name);
+    newEdge->getCreatorLayer() = lptr;
+    newEdge->getInputTo().clear();
+    newEdge->getInputTo()[beforeLayer->name] = beforeLayer;
+
+    pData->getInputTo().erase(beforeLayer->name);
+    pData->getInputTo()[params.name] = lptr;
+
+    for (size_t i = 0; i < beforeLayer->insData.size(); i++) {
+        DataPtr d = beforeLayer->insData[i].lock();
+        if (d == pData) {
+            beforeLayer->insData[i] = newEdge;
+            break;
+        }
+    }
+    return lptr;
+}
+
+NetworkStatsCollector::NetworkStatsCollector(const InferenceEngine::Core & ie, const std::string & deviceName) :
+    _ie(ie), _deviceName(deviceName) {
+}
+
+NetworkStatsCollector::~NetworkStatsCollector() {
+}
+
+void NetworkStatsCollector::ReadNetworkAndSetWeights(const void *model, size_t size, const InferenceEngine::TBlob<uint8_t>::Ptr &weights, size_t batch) {
+    /** Reading network model **/
+    _network = _ie.ReadNetwork((const char*)model, weights);
+    _network.setBatchSize(batch);
+}
+
+std::string FileNameNoExt(const std::string& filePath) {
+    auto pos = filePath.rfind('.');
+
+    if (pos == std::string::npos) {
+        return filePath;
+    }
+
+    return filePath.substr(0, pos);
+}
+
+void NetworkStatsCollector::LoadNetwork(const std::string& modelPath, size_t batch) {
+    /** Reading network model **/
+    _network = _ie.ReadNetwork(modelPath);
+    _network.setBatchSize(batch);
+}
+
+void NetworkStatsCollector::InferAndCollectStats(const std::vector<std::string>& images,
+                                                 std::map<std::string, NetworkNodeStatsPtr>& netNodesStats) {
+    slog::info << "Collecting statistics for layers:" << slog::endl;
+
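+    // Insert identity ScaleShift layers right after each network input: their outputs can
+    // be registered as network outputs, so statistics get collected for the inputs too
+    // (the "scaleshifted_input:" prefix is mapped back to the input name below).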
+    std::vector<CNNLayerPtr> layersAfterInputs;
+
+    std::string hackPrefix = "scaleshifted_input:";
+
+    std::map<std::string, std::string> inputsFromLayers;
+    for (auto&& layer : _network) {
+        if (layer->insData.size() > 0) {
+            std::string inName = layer->input()->getName();
+            for (auto&& input : _network.getInputsInfo()) {
+                if (inName == input.first) {
+                    layersAfterInputs.push_back(layer);
+                    inputsFromLayers[hackPrefix + layer->name] = inName;
+                }
+            }
+        }
+    }
+
+    for (auto&& layer : layersAfterInputs) {
+        std::string firstInputName = hackPrefix + layer->name;
+        auto scaleShiftLayer = addScaleShiftBeforeLayer(firstInputName, layer, 0, { 1.f });
+        ((ICNNNetwork&)_network).addLayer(scaleShiftLayer);
+    }
+
+    // Adding output to every layer
+    for (auto&& layer : _network) {
+        slog::info << "\t" << layer->name << slog::endl;
+
+        std::string layerType = _network.getLayerByName(layer->name.c_str())->type;
+        if (/*layerType != "Split" &&*/ layerType != "Input") {
+            _network.addOutput(layer->name);
+        }
+    }
+
+    NetworkNodeStatsPtr nodeStats;
+
+    const size_t batchSize = _network.getBatchSize();
+
+    std::vector<std::string> imageNames;
+
+    size_t rounded = images.size() - images.size() % batchSize;
+
+    auto executable_network = _ie.LoadNetwork(_network, _deviceName);
+
+    std::map<std::string, std::vector<float>> min_outputs, max_outputs;
+
+    for (size_t i = 0; i < rounded; i += batchSize) {
+        slog::info << "Inferring image " << i+1 << " of " << rounded << slog::endl;
+
+        imageNames.clear();
+
+        for (size_t img = 0; img < batchSize; img++) {
+            imageNames.push_back(images[i + img]);
+        }
+
+
+        /** Taking information about all topology inputs **/
+        InputsDataMap inputInfo(_network.getInputsInfo());
+
+        if (inputInfo.size() != 1) throw std::logic_error("Sample supports topologies only with 1 input");
+        auto inputInfoItem = *inputInfo.begin();
+
+        /** Specifying the precision of input data provided by the user.
+         * This should be called before loading the network to the device **/
+        inputInfoItem.second->setPrecision(Precision::FP32);
+        inputInfoItem.second->setLayout(Layout::NCHW);
+
+        std::vector<std::shared_ptr<unsigned char>> imagesData;
+        for (auto & i : imageNames) {
+            FormatReader::ReaderPtr reader(i.c_str());
+            if (reader.get() == nullptr) {
+                slog::warn << "Image " + i + " cannot be read!" << slog::endl;
+                continue;
+            }
+            /** Store image data **/
+            auto data_dims = inputInfoItem.second->getTensorDesc().getDims();
+            std::shared_ptr<unsigned char> data(reader->getData(data_dims.back(), data_dims.at(data_dims.size() - 2)));
+            if (data.get() != nullptr) {
+                imagesData.push_back(data);
+            }
+        }
+        if (imagesData.empty()) throw std::logic_error("Valid input images were not found!");
+
+        OutputsDataMap outputInfo(_network.getOutputsInfo());
+        for (auto itOut : outputInfo) {
+            itOut.second->setPrecision(Precision::FP32);
+        }
+
+        auto infer_request = executable_network.CreateInferRequest();
+
+        // -------------------------------Set input data----------------------------------------------------
+        /** Iterate over all the input blobs **/
+
+        /** Creating input blob **/
+        Blob::Ptr input = infer_request.GetBlob(inputInfoItem.first);
+        if (!input) {
+            throw std::logic_error("Invalid input blob " + inputInfoItem.first + " pointer");
+        }
+
+        /** Filling input tensor with images. First b channel, then g and r channels **/
+        auto input_dims = input->getTensorDesc().getDims();
+        size_t num_channels = input_dims.at(1);
+        size_t image_size = input_dims.at(input_dims.size() - 2) * input_dims.back();
+
+        auto data = input->buffer().as<PrecisionTrait<Precision::FP32>::value_type*>();
+
+        /** Iterate over all input images **/
+        for (size_t image_id = 0; image_id < imagesData.size(); ++image_id) {
+            /** Iterate over all pixels in the image (b,g,r) **/
+            for (size_t pid = 0; pid < image_size; pid++) {
+                /** Iterate over all channels: convert interleaved BGR to planar **/
+                for (size_t ch = 0; ch < num_channels; ++ch) {
+                    /** [image stride + channel stride + pixel id], all in elements **/
+                    data[image_id * image_size * num_channels + ch * image_size + pid] = imagesData.at(image_id).get()[pid * num_channels + ch];
+                }
+            }
+        }
+
+        infer_request.Infer();
+
+
+        for (auto itOut : outputInfo) {
+            auto outBlob = infer_request.GetBlob(itOut.first);
+
+            std::string outName = itOut.first;
+            if (inputsFromLayers.find(itOut.first) != inputsFromLayers.end()) {
+                outName = inputsFromLayers[itOut.first];
+            }
+
+            size_t N, C, statCount;
+            auto output_dims = outBlob->getTensorDesc().getDims();
+            if (output_dims.size() == 4 && outBlob->getTensorDesc().getLayout() == Layout::NCHW) {
+                N = output_dims[0];
+                C = output_dims[1];
+                statCount = C;
+            } else if (output_dims.size() == 2 && outBlob->getTensorDesc().getLayout() == Layout::NC) {
+                N = output_dims[0];
+                C = output_dims[1];
+                statCount = 1;
+            } else {
+                slog::warn << "Only NCHW and NC layouts are supported. Skipping layer \"" << outName << "\"" << slog::endl;
+                continue;
+            }
+
+
+            if (netNodesStats.find(outName) == netNodesStats.end()) {
+                nodeStats = NetworkNodeStatsPtr(new NetworkNodeStats(statCount));
+
+                netNodesStats[outName] = nodeStats;
+            } else {
+                nodeStats = netNodesStats[outName];
+            }
+
+            // Counting min/max outputs per channel
+            for (size_t n = 0; n < N; n++) {
+                if (output_dims.size() == 4) {
+                    size_t _HW = output_dims.back() * output_dims.at(output_dims.size() - 2);
+                    for (size_t c = 0; c < C; c++) {
+                        if (outBlob->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP32) {
+                            float* ptr = &outBlob->buffer().as<float*>()[(n * C + c) * _HW];
+
+                            float min = nodeStats->_minOutputs[c];
+                            float max = nodeStats->_maxOutputs[c];
+                            DataStats::GetDataMinMax<float>(ptr, _HW, min, max);
+                            nodeStats->_minOutputs[c] = min;
+                            nodeStats->_maxOutputs[c] = max;
+                        } else if (outBlob->getTensorDesc().getPrecision() == InferenceEngine::Precision::U8) {
+                            uint8_t* ptr = &outBlob->buffer().as<uint8_t*>()[(n * C + c) * _HW];
+
+                            uint8_t min = nodeStats->_minOutputs[c];
+                            uint8_t max = nodeStats->_maxOutputs[c];
+                            DataStats::GetDataMinMax<uint8_t>(ptr, _HW, min, max);
+                            nodeStats->_minOutputs[c] = min;
+                            nodeStats->_maxOutputs[c] = max;
+                        } else {
+                            throw std::logic_error(std::string("Unsupported precision: ") + outBlob->getTensorDesc().getPrecision().name());
+                        }
+                    }
+                } else if (output_dims.size() == 2) {
+                    if (outBlob->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP32) {
+                        float* ptr = &outBlob->buffer().as<float*>()[n * C];
+
+                        float min = nodeStats->_minOutputs[0];
+                        float max = nodeStats->_maxOutputs[0];
+                        DataStats::GetDataMinMax<float>(ptr, C, min, max);
+                        nodeStats->_minOutputs[0] = min;
+                        nodeStats->_maxOutputs[0] = max;
+                    } else if (outBlob->getTensorDesc().getPrecision() == InferenceEngine::Precision::U8) {
+                        uint8_t* ptr = &outBlob->buffer().as<uint8_t*>()[n * C];
+
+                        uint8_t min = nodeStats->_minOutputs[0];
+                        uint8_t max = nodeStats->_maxOutputs[0];
+                        DataStats::GetDataMinMax<uint8_t>(ptr, C, min, max);
+                        nodeStats->_minOutputs[0] = min;
+                        nodeStats->_maxOutputs[0] = max;
+                    } else {
+                        throw std::logic_error(std::string("Unsupported precision: ") + outBlob->getTensorDesc().getPrecision().name());
+                    }
+                }
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/network_stats.h b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/network_stats.h
new file mode 100644 (file)
index 0000000..03a91f8
--- /dev/null
@@ -0,0 +1,44 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <string>
+#include <vector>
+#include <map>
+
+#include <ie_core.hpp>
+#include <ie_icnn_network_stats.hpp>
+
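+// Collects per-layer activation statistics: every non-Input layer is added as a
+// network output, inference is run over a set of images, and per-channel min/max
+// values of each output blob are accumulated into NetworkNodeStats entries.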
+class NetworkStatsCollector {
+public:
+    NetworkStatsCollector(const InferenceEngine::Core & ie, const std::string & deviceName);
+    ~NetworkStatsCollector();
+
+public:
+    void ReadNetworkAndSetWeights(const void *model, size_t size, const InferenceEngine::TBlob<uint8_t>::Ptr &weights, size_t batch);
+    void LoadNetwork(const std::string& modelPath, size_t batch);
+
+    void InferAndCollectStats(const std::vector<std::string>& images,
+                              std::map<std::string, InferenceEngine::NetworkNodeStatsPtr>& netNodesStats);
+
+/*    void InferAndCollectHistogram(const std::vector<std::string>& images,
+                              const std::vector<std::string>& layerNames,
+                              std::map<std::string, InferenceEngine::NetworkNodeStatsPtr>& netNodesStats);
+
+    void InferAndFindOptimalThreshold(const std::vector<std::string>& images,
+                                  const std::vector<std::string>& layerNames,
+                                  std::map<std::string, InferenceEngine::NetworkNodeStatsPtr>& netNodesStats);
+
+    void CalculateThreshold(std::map<std::string, InferenceEngine::NetworkNodeStatsPtr>& netNodesStats);*/
+
+    void CalculatePotentialMax(const float* weights, const InferenceEngine::SizeVector& weightDism, float& max);
+    static InferenceEngine::CNNLayerPtr addScaleShiftBeforeLayer(std::string name, InferenceEngine::CNNLayer::Ptr beforeLayer,
+            size_t port, std::vector<float> scale);
+
+private:
+    InferenceEngine::Core _ie;
+    InferenceEngine::CNNNetwork _network;
+    std::string _deviceName;
+};
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/norm_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/norm_tests.cpp
new file mode 100644 (file)
index 0000000..d1568c7
--- /dev/null
@@ -0,0 +1,182 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include "ir_gen_helper.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace single_layer_tests;
+
+
+struct norm_base_params {
+    struct {
+        size_t w;
+        size_t h;
+        size_t c;
+    } in;
+
+    size_t local_size;
+    float alpha;
+    float beta;
+    size_t k;
+
+};
+
+struct norm_test_params : norm_base_params {
+    std::string device_name;
+
+    norm_test_params(std::string name, norm_base_params params) :
+            norm_base_params(params), device_name(name) {}
+};
+
+
+template <typename data_t>
+void ref_norm(const TBlob<data_t> &src, TBlob<data_t> &dst, norm_test_params prm)
+{
+    size_t IW = prm.in.w;
+    size_t IH = prm.in.h;
+    size_t IC = prm.in.c;
+
+    const data_t *src_data = src.readOnly();
+    data_t *dst_data = dst.data();
+
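+    // Across-channel LRN reference: dst = src * (k + alpha * sum(src^2) / local_size)^(-beta),
+    // where the sum runs over a window of local_size channels centered at c.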
+    for (uint32_t c = 0; c < IC; c++) {
+        for (uint32_t h = 0; h < IH; h++) {
+            for (uint32_t w = 0; w < IW; w++) {
+                uint32_t oidx = c * IH * IW + h * IW + w;
+
+                uint32_t sz = prm.local_size;
+                int32_t c_start = c - sz / 2;
+                int32_t c_end = c_start + sz;
+                if (c_start < 0) c_start = 0;
+                if (c_end > (int32_t)IC) c_end = IC;
+                data_t sum = 0.0;
+                for (int32_t c1 = c_start; c1 < c_end; c1++) {
+                    uint32_t idx = c1 * IH * IW + h * IW + w;
+                    data_t s = src_data[idx];
+                    sum += s * s;
+                }
+
+                data_t norm_coef = powf(prm.k + prm.alpha * sum / sz, -prm.beta);
+                dst_data[oidx] = norm_coef * src_data[oidx];
+            }
+        }
+    }
+}
+
+class smoke_NormOnlyTest: public TestsCommon,
+                    public WithParamInterface<norm_test_params> {
+    std::string layers_t = R"V0G0N(
+        <layer name="norm" id="1" type="LRN" precision="FP32">
+            <lrn local_size="_LS_" alpha="_A__" beta="_B__" k="_K__" region="ACROSS" />
+
+            <input>
+                <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="1">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+)V0G0N";
+    
+    std::string edges_t = R"V0G0N(
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+)V0G0N";
+
+    std::string getModel(norm_test_params p) {
+        std::string model = layers_t;
+
+        REPLACE_WITH_NUM(model, "_IN_", 1);
+        REPLACE_WITH_NUM(model, "_IW_", p.in.w);
+        REPLACE_WITH_NUM(model, "_IH_", p.in.h);
+        REPLACE_WITH_NUM(model, "_IC_", p.in.c);
+
+        REPLACE_WITH_NUM(model, "_LS_", p.local_size);
+        REPLACE_WITH_NUM(model, "_A__", p.alpha);
+        REPLACE_WITH_NUM(model, "_B__", p.beta);
+        REPLACE_WITH_NUM(model, "_K__", p.k);
+
+        model = IRTemplateGenerator::getIRTemplate("FullyConnected_Only", {1lu, p.in.c, p.in.h, p.in.w}, "FP32", model, edges_t);
+
+        return model;
+    }
+
+protected:
+    virtual void SetUp() {
+
+        try {
+            norm_test_params p = ::testing::WithParamInterface<norm_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr());
+
+            SizeVector dims_src = {1,
+                                   p.in.c,
+                                   p.in.h,
+                                   p.in.w};
+            Blob::Ptr src = make_shared_blob<float>(TensorDesc({ Precision::FP32, dims_src, Layout::NCHW }));
+            src->allocate();
+            fill_data(src->buffer().as<float *>(), src->size());
+
+            SizeVector dims_dst = dims_src;
+            Blob::Ptr dst = make_shared_blob<float>(TensorDesc({ Precision::FP32, dims_dst, Layout::NCHW }));
+            dst->allocate();
+
+            TBlob<float> dst_ref({Precision::FP32, dims_dst, Layout::NCHW});
+            dst_ref.allocate();
+
+            ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.device_name);
+            InferRequest inferRequest = exeNetwork.CreateInferRequest();
+            OutputsDataMap outInfo;
+            outInfo = network.getOutputsInfo();
+            ASSERT_EQ(outInfo.size(), 1);
+            ASSERT_NE(outInfo.begin()->second, nullptr);
+            inferRequest.SetBlob(network.getInputsInfo().begin()->first, src);
+            inferRequest.SetBlob(outInfo.begin()->first, dst);
+            inferRequest.Infer();
+
+            auto * srcPtr = dynamic_cast<TBlob<float>*>(src.get());
+            ASSERT_NE(srcPtr, nullptr) << "Cannot cast blob to TBlob<float>";
+            ref_norm(*srcPtr, dst_ref, p);
+            compare(*dst, dst_ref);
+
+        } catch (const details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+#define case_1 norm_base_params({{228, 228, 3}, 5, 0.0001f, 0.75f, 1})
+
+TEST_P(smoke_NormOnlyTest, TestsNorm) {}
+
+std::string getTestCaseName(testing::TestParamInfo<norm_test_params> obj) {
+    return obj.param.device_name +
+        "_w" + std::to_string(obj.param.in.w) +
+        "_h" + std::to_string(obj.param.in.h) +
+        "_c" + std::to_string(obj.param.in.c);
+}
+
+norm_test_params norm_only_test_cases[] = {
+               norm_test_params("CPU", case_1),
+};
+
+INSTANTIATE_TEST_CASE_P(
+        TestsNorm, smoke_NormOnlyTest, ::testing::ValuesIn(norm_only_test_cases), getTestCaseName);
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/pooling_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/pooling_tests.cpp
new file mode 100644 (file)
index 0000000..14b69b3
--- /dev/null
@@ -0,0 +1,213 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include "ie_core.hpp"
+#include "../common_single_layer_tests/pool_ref.hpp"
+#include "common_test_utils/common_layers_params.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+
+struct pooling_base_params {
+    struct { size_t n, c, h, w; } in;
+    struct { size_t h, w; } out;
+
+    size_t krn_h;
+    size_t krn_w;
+    size_t str_h;
+    size_t str_w;
+    size_t pad_h;
+    size_t pad_w;
+
+    bool avg;
+    bool exclude_pad;
+};
+
+struct pooling_test_params : pooling_base_params {
+    std::string device_name;
+
+    pooling_test_params(std::string name, pooling_base_params params) :
+            pooling_base_params(params), device_name(name) {}
+};
+
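+// Thin adapter: packs the test parameters into pool_common_params and delegates to
+// the shared reference pooling implementation (pads_end is left at its default, so
+// symmetric padding is assumed).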
+template <typename data_t>
+void ref_pool(const Blob::Ptr &src, Blob::Ptr &dst, pooling_test_params p)
+{
+    CommonTestUtils::pool_common_params params;
+    params.kernel.insert(X_AXIS, p.krn_w);
+    params.kernel.insert(Y_AXIS, p.krn_h);
+    params.stride.insert(X_AXIS, p.str_w);
+    params.stride.insert(Y_AXIS, p.str_h);
+    params.pads_begin.insert(X_AXIS, p.pad_w);
+    params.pads_begin.insert(Y_AXIS, p.pad_h);
+    params.exclude_pad = p.exclude_pad;
+    params.avg = p.avg;
+    ref_pool_common<float>({ src }, *dst.get(), params);
+}
+
+class smoke_CPU_PoolingOnlyTest: public TestsCommon,
+                       public WithParamInterface<pooling_test_params> {
+
+    std::string model_t = R"V0G0N(
+<net name="Pooling_Only" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer id="1" name="input" precision="FP32" type="Input">
+            <output>
+                <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer id="2" name="pool" type="Pooling" precision="FP32">
+
+            <data
+                exclude-pad="_EXCL_PAD_"
+                pool-method="_ALG_"
+                kernel-x="_KW_" kernel-y="_KH_"
+                pad-x="_PW_" pad-y="_PH_"
+                stride-x="_SW_" stride-y="_SH_"  />
+
+            <input>
+                <port id="1">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="2">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_OH_</dim>
+                    <dim>_OW_</dim>
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string getModel(pooling_test_params p) {
+        std::string model = model_t;
+
+        REPLACE_WITH_NUM(model, "_IN_", p.in.n);
+        REPLACE_WITH_NUM(model, "_IC_", p.in.c);
+        REPLACE_WITH_NUM(model, "_IH_", p.in.h);
+        REPLACE_WITH_NUM(model, "_IW_", p.in.w);
+
+        REPLACE_WITH_NUM(model, "_KH_", p.krn_h);
+        REPLACE_WITH_NUM(model, "_KW_", p.krn_w);
+        REPLACE_WITH_NUM(model, "_SH_", p.str_h);
+        REPLACE_WITH_NUM(model, "_SW_", p.str_w);
+        REPLACE_WITH_NUM(model, "_PH_", p.pad_h);
+        REPLACE_WITH_NUM(model, "_PW_", p.pad_w);
+
+        REPLACE_WITH_NUM(model, "_OH_", p.out.h);
+        REPLACE_WITH_NUM(model, "_OW_", p.out.w);
+
+        REPLACE_WITH_STR(model, "_ALG_", p.avg ? "avg":"max");
+        REPLACE_WITH_STR(model, "_EXCL_PAD_", p.exclude_pad ? "true":"false");
+
+        return model;
+    }
+
+protected:
+    virtual void SetUp() {
+
+        try {
+            pooling_test_params p = ::testing::WithParamInterface<pooling_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr());
+
+            SizeVector dims_src = {p.in.w, p.in.h, p.in.c, p.in.n};
+            Blob::Ptr src = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), NCHW));
+            src->allocate();
+            fill_data(src->buffer().as<float *>(), src->size());
+
+            SizeVector dims_dst = {p.out.w, p.out.h, p.in.c, p.in.n};
+            Blob::Ptr dst = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), NCHW));
+            dst->allocate();
+
+            Blob::Ptr dst_ref = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), NCHW));
+            dst_ref->allocate();
+
+            ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.device_name);
+            InferRequest inferRequest = exeNetwork.CreateInferRequest();
+            OutputsDataMap outInfo;
+            outInfo = network.getOutputsInfo();
+            ASSERT_EQ(outInfo.size(), 1);
+            ASSERT_NE(outInfo.begin()->second, nullptr);
+            inferRequest.SetBlob(network.getInputsInfo().begin()->first, src);
+            inferRequest.SetBlob(outInfo.begin()->first, dst);
+            inferRequest.Infer();
+
+            ref_pool<float>(src, dst_ref, p);
+            compare(*dst.get(), *dst_ref.get());
+
+        } catch (const details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+#define case_0 pooling_base_params({{1, 3, 228, 228}, {114, 114}, 2, 2, 2, 2, 0, 0})
+#define case_1 pooling_base_params({{1, 3, 228, 228}, {113, 114}, 4, 2, 2, 2, 0, 0})
+#define case_2 pooling_base_params({{1, 3, 228, 228}, {113, 227}, 4, 2, 2, 1, 0, 0})
+#define case_3 pooling_base_params({{1, 3, 224, 224}, {224, 224}, 3, 3, 1, 1, 1, 1, false, false})
+#define case_4 pooling_base_params({{1, 3, 224, 224}, {224, 224}, 3, 3, 1, 1, 1, 1, true, false})
+#define case_5 pooling_base_params({{1, 3, 224, 224}, {224, 224}, 3, 3, 1, 1, 1, 1, true, true})
+
+#define case_6 pooling_base_params({{1, 3, 224, 224}, {112, 112}, 3, 3, 2, 2, 1, 1, false, false})
+#define case_7 pooling_base_params({{1, 3, 224, 224}, {112, 112}, 3, 3, 2, 2, 1, 1, true, false})
+#define case_8 pooling_base_params({{1, 3, 224, 224}, {112, 112}, 3, 3, 2, 2, 1, 1, true, true})
+
+#define case_9  pooling_base_params({{1, 3, 224, 224}, {113, 113}, 3, 3, 2, 2, 1, 1, false, false})
+#define case_10 pooling_base_params({{1, 3, 224, 224}, {113, 113}, 3, 3, 2, 2, 1, 1, true, false})
+#define case_11 pooling_base_params({{1, 3, 224, 224}, {113, 113}, 3, 3, 2, 2, 1, 1, true, true})
+
+
+TEST_P(smoke_CPU_PoolingOnlyTest, TestsPooling) {}
+
+std::string getTestCaseName(testing::TestParamInfo<pooling_test_params> obj) {
+    return obj.param.device_name +
+        "_w" + std::to_string(obj.param.in.w) +
+        "_h" + std::to_string(obj.param.in.h) +
+        "_c" + std::to_string(obj.param.in.c) +
+        "_krnw" + std::to_string(obj.param.krn_w) +
+        "_krnh" + std::to_string(obj.param.krn_h) +
+        "_strw" + std::to_string(obj.param.str_w) +
+        "_strh" + std::to_string(obj.param.str_h);
+}
+
+pooling_test_params pooling_only_test_cases[] = {
+        pooling_test_params("CPU", case_0),
+        pooling_test_params("CPU", case_1),
+        pooling_test_params("CPU", case_2),
+        pooling_test_params("CPU", case_3),
+        pooling_test_params("CPU", case_4),
+        pooling_test_params("CPU", case_5),
+        pooling_test_params("CPU", case_6),
+        pooling_test_params("CPU", case_7),
+        pooling_test_params("CPU", case_8),
+        pooling_test_params("CPU", case_9),
+        pooling_test_params("CPU", case_10),
+        pooling_test_params("CPU", case_11),
+};
+
+INSTANTIATE_TEST_CASE_P(
+        TestsPooling, smoke_CPU_PoolingOnlyTest, ::testing::ValuesIn(pooling_only_test_cases));
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/priorbox_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/priorbox_tests.cpp
new file mode 100644 (file)
index 0000000..b02069c
--- /dev/null
@@ -0,0 +1,369 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+
+struct priorbox_test_params {
+    std::string device_name;
+
+    size_t mb;
+
+    struct {
+        size_t c;
+        size_t h;
+        size_t w;
+    } in1;
+
+    struct {
+        size_t c;
+        size_t h;
+        size_t w;
+    } in2;
+
+    struct {
+        size_t c;
+        size_t h;
+        size_t w;
+    } out;
+
+    int offset;
+    int stride;
+    int min_size;
+    int max_size;
+    bool flip;
+    bool clip;
+};
+
+class smoke_CPUPriorBoxOnlyTest: public TestsCommon,
+                             public WithParamInterface<priorbox_test_params> {
+
+    std::string model_t = R"V0G0N(
+<Net Name="PriorBox_Only" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="input1" type="Input" precision="FP32" id="0">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>_IC1_</dim>
+                    <dim>_IH1_</dim>
+                    <dim>_IW1_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="input2" type="Input" precision="FP32" id="1">
+            <output>
+                <port id="1">
+                    <dim>1</dim>
+                    <dim>_IC2_</dim>
+                    <dim>_IH2_</dim>
+                    <dim>_IW2_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="prior" type="PriorBox" precision="FP32" id="2">
+            <data min_size="4.000000" max_size="9.000000" flip="1" clip="1" offset="0" step="0" aspect_ratio="" variance=""/>
+            <input>
+                <port id="2">
+                    <dim>1</dim>
+                    <dim>_IC1_</dim>
+                    <dim>_IH1_</dim>
+                    <dim>_IW1_</dim>
+                </port>
+                <port id="3">
+                    <dim>1</dim>
+                    <dim>_IC2_</dim>
+                    <dim>_IH2_</dim>
+                    <dim>_IW2_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="4">
+                    <dim>1</dim>
+                    <dim>_OC_</dim>
+                    <dim>_OH_</dim>
+                    <dim>_OW_</dim>
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="2" to-port="2"/>
+        <edge from-layer="1" from-port="1" to-layer="2" to-port="3"/>
+    </edges>
+
+</Net>
+)V0G0N";
+
+    std::string getModel(priorbox_test_params p) {
+        std::string model = model_t;
+
+        REPLACE_WITH_NUM(model, "_IW1_", p.in1.w);
+        REPLACE_WITH_NUM(model, "_IH1_", p.in1.h);
+        REPLACE_WITH_NUM(model, "_IC1_", p.in1.c);
+
+        REPLACE_WITH_NUM(model, "_IW2_", p.in2.w);
+        REPLACE_WITH_NUM(model, "_IH2_", p.in2.h);
+        REPLACE_WITH_NUM(model, "_IC2_", p.in2.c);
+
+        REPLACE_WITH_NUM(model, "_OW_", p.out.w);
+        REPLACE_WITH_NUM(model, "_OH_", p.out.h);
+        REPLACE_WITH_NUM(model, "_OC_", p.out.c);
+
+        return model;
+    }
+
+protected:
+    virtual void SetUp() {
+
+        try {
+            priorbox_test_params p = ::testing::WithParamInterface<priorbox_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr());
+            network.setBatchSize(p.mb);
+
+            InputsDataMap inputs = network.getInputsInfo();
+
+            DataPtr inputPtr1 = inputs["input1"]->getInputData();
+            DataPtr inputPtr2 = inputs["input2"]->getInputData();
+
+            InferenceEngine::Blob::Ptr input1 = InferenceEngine::make_shared_blob<float>(inputPtr1->getTensorDesc());
+            input1->allocate();
+
+            InferenceEngine::Blob::Ptr input2 = InferenceEngine::make_shared_blob<float>(inputPtr2->getTensorDesc());
+            input2->allocate();
+
+            InferenceEngine::BlobMap inputBlobs;
+            inputBlobs["input1"] = input1;
+            inputBlobs["input2"] = input2;
+
+            OutputsDataMap outputs = network.getOutputsInfo();
+
+            InferenceEngine::TBlob<float>::Ptr output;
+            output = InferenceEngine::make_shared_blob<float>(outputs["prior"]->getTensorDesc());
+            output->allocate();
+
+            InferenceEngine::BlobMap outputBlobs;
+            outputBlobs["prior"] = output;
+
+            ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU");
+            InferRequest inferRequest = exeNetwork.CreateInferRequest();
+            inferRequest.SetInput(inputBlobs);
+            inferRequest.SetOutput(outputBlobs);
+            inferRequest.Infer();
+
+            // Check results
+
+            const TBlob<float>::Ptr outputArray = std::dynamic_pointer_cast<TBlob<float>>(output);
+            float* dst_ptr = outputArray->data();
+
+            const float eps = 1e-6;
+
+            // pick a few generated priors and compare against the expected number.
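+            // With min_size = 4 on a 100x100 image and a 10x10 feature map (step = 10),
+            // the first cell is centered at (5, 5): the min-size box is
+            // [5-2, 5-2, 5+2, 5+2] / 100 = (0.03, 0.03, 0.07, 0.07), and the second
+            // prior uses sqrt(min_size * max_size) = 6, giving (0.02, 0.02, 0.08, 0.08).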
+            // first prior
+            EXPECT_NEAR(dst_ptr[0], 0.03, eps);
+            EXPECT_NEAR(dst_ptr[1], 0.03, eps);
+            EXPECT_NEAR(dst_ptr[2], 0.07, eps);
+            EXPECT_NEAR(dst_ptr[3], 0.07, eps);
+            // second prior
+            EXPECT_NEAR(dst_ptr[4], 0.02, eps);
+            EXPECT_NEAR(dst_ptr[5], 0.02, eps);
+            EXPECT_NEAR(dst_ptr[6], 0.08, eps);
+            EXPECT_NEAR(dst_ptr[7], 0.08, eps);
+            // prior in the 5-th row and 5-th col
+            EXPECT_NEAR(dst_ptr[4*10*2*4+4*2*4], 0.43, eps);
+            EXPECT_NEAR(dst_ptr[4*10*2*4+4*2*4+1], 0.43, eps);
+            EXPECT_NEAR(dst_ptr[4*10*2*4+4*2*4+2], 0.47, eps);
+            EXPECT_NEAR(dst_ptr[4*10*2*4+4*2*4+3], 0.47, eps);
+
+            // check variance
+            dst_ptr += p.out.h * p.out.w;
+            for (int d = 0; d < p.out.h * p.out.w; ++d) {
+                EXPECT_NEAR(dst_ptr[d], 0.1, eps);
+            }
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(smoke_CPUPriorBoxOnlyTest, TestsPriorBox) {}
+
+INSTANTIATE_TEST_CASE_P(
+        TestsPriorBox, smoke_CPUPriorBoxOnlyTest,
+        ::testing::Values(
+                priorbox_test_params{ "CPU",
+                    10, {10, 10, 10}, {3, 100, 100}, {2, 1, 800}, 0, 0, 4, 9, true, true }));
+
+
+class smoke_CPUPriorBoxDensityTest : public TestsCommon,
+    public WithParamInterface<priorbox_test_params> {
+
+    std::string model_t = R"V0G0N(
+<Net Name="PriorBox_Only" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="input1" type="Input" precision="FP32" id="0">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>_IC1_</dim>
+                    <dim>_IH1_</dim>
+                    <dim>_IW1_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="input2" type="Input" precision="FP32" id="1">
+            <output>
+                <port id="1">
+                    <dim>1</dim>
+                    <dim>_IC2_</dim>
+                    <dim>_IH2_</dim>
+                    <dim>_IW2_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="prior" type="PriorBox" precision="FP32" id="2">
+            <data fixed_size="4.000000" density="1.000000" flip="1" clip="1" offset="0" step="0" aspect_ratio="1.0" variance=""/>
+            <input>
+                <port id="2">
+                    <dim>1</dim>
+                    <dim>_IC1_</dim>
+                    <dim>_IH1_</dim>
+                    <dim>_IW1_</dim>
+                </port>
+                <port id="3">
+                    <dim>1</dim>
+                    <dim>_IC2_</dim>
+                    <dim>_IH2_</dim>
+                    <dim>_IW2_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="4">
+                    <dim>1</dim>
+                    <dim>_OC_</dim>
+                    <dim>_OH_</dim>
+                    <dim>_OW_</dim>
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="2" to-port="2"/>
+        <edge from-layer="1" from-port="1" to-layer="2" to-port="3"/>
+    </edges>
+
+</Net>
+)V0G0N";
+
+    std::string getModel(priorbox_test_params p) {
+        std::string model = model_t;
+
+        REPLACE_WITH_NUM(model, "_IW1_", p.in1.w);
+        REPLACE_WITH_NUM(model, "_IH1_", p.in1.h);
+        REPLACE_WITH_NUM(model, "_IC1_", p.in1.c);
+
+        REPLACE_WITH_NUM(model, "_IW2_", p.in2.w);
+        REPLACE_WITH_NUM(model, "_IH2_", p.in2.h);
+        REPLACE_WITH_NUM(model, "_IC2_", p.in2.c);
+
+        REPLACE_WITH_NUM(model, "_OW_", p.out.w);
+        REPLACE_WITH_NUM(model, "_OH_", p.out.h);
+        REPLACE_WITH_NUM(model, "_OC_", p.out.c);
+
+        return model;
+    }
+
+protected:
+    virtual void SetUp() {
+
+        try {
+            priorbox_test_params p = ::testing::WithParamInterface<priorbox_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr());
+            network.setBatchSize(p.mb);
+
+            InputsDataMap inputs = network.getInputsInfo();
+
+            DataPtr inputPtr1 = inputs["input1"]->getInputData();
+            DataPtr inputPtr2 = inputs["input2"]->getInputData();
+
+            InferenceEngine::Blob::Ptr input1 = InferenceEngine::make_shared_blob<float>(inputPtr1->getTensorDesc());
+            input1->allocate();
+
+            InferenceEngine::Blob::Ptr input2 = InferenceEngine::make_shared_blob<float>(inputPtr2->getTensorDesc());
+            input2->allocate();
+
+            InferenceEngine::BlobMap inputBlobs;
+            inputBlobs["input1"] = input1;
+            inputBlobs["input2"] = input2;
+
+            OutputsDataMap outputs = network.getOutputsInfo();
+
+            InferenceEngine::TBlob<float>::Ptr output;
+            output = InferenceEngine::make_shared_blob<float>(outputs["prior"]->getTensorDesc());
+            output->allocate();
+
+            InferenceEngine::BlobMap outputBlobs;
+            outputBlobs["prior"] = output;
+
+            ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU");
+            InferRequest inferRequest = exeNetwork.CreateInferRequest();
+            inferRequest.SetInput(inputBlobs);
+            inferRequest.SetOutput(outputBlobs);
+            inferRequest.Infer();
+
+            // Check results
+
+            const TBlob<float>::Ptr outputArray = std::dynamic_pointer_cast<TBlob<float>>(output);
+            float* dst_ptr = outputArray->data();
+
+            // pick a few generated priors and compare against the expected number.
+            // first prior
+            EXPECT_NEAR(dst_ptr[0], 0.03, 1e-6);
+            EXPECT_NEAR(dst_ptr[1], 0.03, 1e-6);
+            EXPECT_NEAR(dst_ptr[2], 0.07, 1e-6);
+            EXPECT_NEAR(dst_ptr[3], 0.07, 1e-6);
+            // second prior
+            EXPECT_NEAR(dst_ptr[4], 0.03, 0.1);
+            EXPECT_NEAR(dst_ptr[5], 0.03, 0.1);
+            EXPECT_NEAR(dst_ptr[6], 0.17, 0.1);
+            EXPECT_NEAR(dst_ptr[7], 0.03, 0.1);
+            // prior in the 5-th row and 5-th col
+            EXPECT_NEAR(dst_ptr[4 * 10 * 2 * 4 + 4 * 2 * 4], 0.83, 0.1);
+            EXPECT_NEAR(dst_ptr[4 * 10 * 2 * 4 + 4 * 2 * 4 + 1], 0.83, 0.1);
+            EXPECT_NEAR(dst_ptr[4 * 10 * 2 * 4 + 4 * 2 * 4 + 2], 0.84, 0.1);
+            EXPECT_NEAR(dst_ptr[4 * 10 * 2 * 4 + 4 * 2 * 4 + 3], 0.84, 0.1);
+
+            // check variance
+            dst_ptr += p.out.h * p.out.w;
+            for (int d = 0; d < p.out.h * p.out.w; ++d) {
+                EXPECT_NEAR(dst_ptr[d], 0.1, 1e-6);
+            }
+        }
+        catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(smoke_CPUPriorBoxDensityTest, TestsPriorBoxDensity) {}
+
+INSTANTIATE_TEST_CASE_P(
+    TestsPriorBoxDensity, smoke_CPUPriorBoxDensityTest,
+    ::testing::Values(
+        priorbox_test_params{ "CPU",
+        10,{ 10, 10, 10 },{ 3, 100, 100 },{ 2, 1, 400 }, 0, 0, 4, 9, true, true }));
+
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/region_yolo_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/region_yolo_tests.cpp
new file mode 100644 (file)
index 0000000..1d3a0a0
--- /dev/null
@@ -0,0 +1,235 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "single_layer_common.hpp"
+#include "tests_common.hpp"
+#include "common_test_utils/data_utils.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+
+struct region_yolo_test_params {
+    std::vector<size_t> src_dims;
+    std::vector<size_t> dst_dims;
+    int classes;
+    int coords;
+    int num;
+    int do_softmax;
+    std::vector<int> mask;
+};
+
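+// Returns the flat offset of feature `entry` for the given anchor and spatial
+// location, assuming the NCHW YOLO output layout where each anchor owns a
+// contiguous block of (coords + classes + 1) channel planes of H*W elements.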
+static inline int entry_index(int width, int height, int coords, int classes, int outputs, int batch, int location,
+                       int entry) {
+    int n = location / (width * height);
+    int loc = location % (width * height);
+    return batch * outputs + n * width * height * (coords + classes + 1) +
+           entry * width * height + loc;
+}
+
+static inline float logistic_activate(float x) {
+    return 1.f / (1.f + expf(-x));
+}
+
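+// Channel-wise softmax over C for every spatial position, with the usual
+// max-subtraction trick for numerical stability.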
+static inline
+void softmax_generic(const float *src_data, float *dst_data, int B, int C, int H, int W) {
+    for (int b = 0; b < B; b++) {
+        for (int i = 0; i < H * W; i++) {
+            float max = src_data[b * C * H * W + i];
+            for (int c = 0; c < C; c++) {
+                float val = src_data[b * C * H * W + c * H * W + i];
+                if (val > max) max = val;
+            }
+
+            float expSum = 0;
+            for (int c = 0; c < C; c++) {
+                dst_data[b * C * H * W + c * H * W + i] = exp(src_data[b * C * H * W + c * H * W + i] - max);
+                expSum += dst_data[b * C * H * W + c * H * W + i];
+            }
+
+            for (int c = 0; c < C; c++) {
+                dst_data[b * C * H * W + c * H * W + i] = dst_data[b * C * H * W + c * H * W + i] / expSum;
+            }
+        }
+    }
+}
+
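+// Reference RegionYolo: copies the input, applies the logistic function to the x/y
+// box offsets and the objectness score (plus the class scores in the YOLOv3 case),
+// and, for the YOLOv2-style Region layer (do_softmax != 0), normalizes the class
+// scores with a softmax instead.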
+static void ref_region_yolo(InferenceEngine::TBlob<float> &src, InferenceEngine::TBlob<float> &dst, region_yolo_test_params p) {
+    float* src_data = src.data();
+    float* dst_data = dst.data();
+
+    int mask_size = static_cast<int>(p.mask.size());
+
+    int IW = (src.getTensorDesc().getDims().size() > 3) ? src.getTensorDesc().getDims()[3] : 1;
+    int IH = (src.getTensorDesc().getDims().size() > 2) ? src.getTensorDesc().getDims()[2] : 1;
+    int IC = (src.getTensorDesc().getDims().size() > 1) ? src.getTensorDesc().getDims()[1] : 1;
+    int B = (src.getTensorDesc().getDims().size() > 0) ? src.getTensorDesc().getDims()[0] : 1;
+
+    for (size_t i = 0; i < src.size(); i++) {
+        dst_data[i] = src_data[i];
+    }
+
+    int end_index = 0;
+    int num_ = 0;
+    if (p.do_softmax) {
+        // Region layer (Yolo v2)
+        end_index = IW * IH;
+        num_ = p.num;
+    } else {
+        // Yolo layer (Yolo v3)
+        end_index = IW * IH * (p.classes + 1);
+        num_ = mask_size;
+    }
+    int inputs_size = IH * IW * num_ * (p.classes + p.coords + 1);
+
+    for (int b = 0; b < B; b++) {
+        for (int n = 0; n < num_; n++) {
+            int index = entry_index(IW, IH, p.coords, p.classes, inputs_size, b, n * IW * IH, 0);
+            for (int i = index; i < index + 2 * IW * IH; i++) {
+                dst_data[i] = logistic_activate(dst_data[i]);
+            }
+
+            index = entry_index(IW, IH, p.coords, p.classes, inputs_size, b, n * IW * IH, p.coords);
+            for (int i = index; i < index + end_index; i++) {
+                dst_data[i] = logistic_activate(dst_data[i]);
+            }
+        }
+    }
+
+    if (p.do_softmax) {
+        int index = entry_index(IW, IH, p.coords, p.classes, inputs_size, 0, 0, p.coords + 1);
+        int batch_offset = inputs_size / p.num;
+        for (int b = 0; b < B * p.num; b++)
+            softmax_generic(src_data + index + b * batch_offset, dst_data + index + b * batch_offset, 1, p.classes,
+                            IH, IW);
+    }
+}
+
+class smoke_CPU_RegionYoloOnlyTest: public TestsCommon, public WithParamInterface<region_yolo_test_params> {
+    std::string model_t = R"V0G0N(
+<net name="RegionYoloOnly" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer id="0" name="input" type="Input" precision="FP32" >
+            <output>
+                <port id="0">__SRC_DIMS__
+                </port>
+            </output>
+        </layer>
+        <layer id="1" name="region_yolo" type="RegionYolo" precision="FP32">
+            <data classes="_CLASSES_" coords="_COORDS_" do_softmax="_DO_SOFTMAX_" mask="_MASK_" num="_NUM_"/>
+            <input>
+                <port id="0">__SRC_DIMS__
+                </port>
+            </input>
+            <output>
+                <port id="1">__DST_DIMS__
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+    </edges>
+</net>
+)V0G0N";
+
+protected:
+    std::string getModel(region_yolo_test_params p) {
+        std::string model = model_t;
+
+
+        std::string src_dims;
+        for (auto &dim : p.src_dims) {
+            src_dims += "\n                    <dim>";
+            src_dims += std::to_string(dim) + "</dim>";
+        }
+        REPLACE_WITH_STR(model, "__SRC_DIMS__", src_dims);
+
+        std::string dst_dims;
+        for (auto &dim : p.dst_dims) {
+            dst_dims += "\n                    <dim>";
+            dst_dims += std::to_string(dim) + "</dim>";
+        }
+        REPLACE_WITH_STR(model, "__DST_DIMS__", dst_dims);
+
+        std::string mask;
+        for (auto &n : p.mask) {
+            mask += std::to_string(n) + ",";
+        }
+        mask.pop_back();
+        REPLACE_WITH_STR(model, "_MASK_", mask);
+
+
+        REPLACE_WITH_STR(model, "_CLASSES_", std::to_string(p.classes));
+        REPLACE_WITH_STR(model, "_COORDS_", std::to_string(p.coords));
+        REPLACE_WITH_STR(model, "_DO_SOFTMAX_", std::to_string(p.do_softmax));
+        REPLACE_WITH_STR(model, "_NUM_", std::to_string(p.num));
+
+
+        return model;
+    }
+
+    virtual void SetUp() {
+        try {
+            region_yolo_test_params p = ::testing::WithParamInterface<region_yolo_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
+
+            Blob::Ptr src = make_shared_blob<float>({Precision::FP32, p.src_dims, Layout::ANY});
+            src->allocate();
+
+            TBlob<float>* srcPtr = dynamic_cast<TBlob<float>*>(src.get());
+
+            if (srcPtr == nullptr)
+                FAIL() << "Cannot cast blob to TBlob<float>.";
+            CommonTestUtils::fill_data_sine(src->buffer(), src->size(), 10, 30, 1);
+
+            BlobMap srcs;
+            srcs.insert(std::pair<std::string, Blob::Ptr>("input", src));
+
+            OutputsDataMap out;
+            out = net.getOutputsInfo();
+            BlobMap outputBlobs;
+
+            std::pair<std::string, DataPtr> item = *out.begin();
+
+            TBlob<float>::Ptr output;
+            output = make_shared_blob<float>(item.second->getTensorDesc());
+            output->allocate();
+            outputBlobs[item.first] = output;
+
+            InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
+            dst_ref.allocate();
+
+            ref_region_yolo(*srcPtr, dst_ref, p);
+
+            ExecutableNetwork exeNetwork = ie.LoadNetwork(net, "CPU");
+            InferRequest inferRequest = exeNetwork.CreateInferRequest();
+            inferRequest.SetInput(srcs);
+            inferRequest.SetOutput(outputBlobs);
+            inferRequest.Infer();
+
+            compare(*outputBlobs.begin()->second, dst_ref);
+
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(smoke_CPU_RegionYoloOnlyTest, TestsRegionYolo) {}
+
+INSTANTIATE_TEST_CASE_P(
+        TestsRegionYolo, smoke_CPU_RegionYoloOnlyTest,
+        ::testing::Values(
+                region_yolo_test_params{{1, 255, 52, 52}, {1, 255, 52, 52}, 80, 4, 9, 0, {0, 1, 2}},
+                region_yolo_test_params{{1, 255, 26, 26}, {1, 255, 26, 26}, 80, 4, 9, 0, {3, 4, 5}},
+                region_yolo_test_params{{1, 255, 13, 13}, {1, 255, 13, 13}, 80, 4, 9, 0, {6, 7, 8}},
+                region_yolo_test_params{{1, 125, 13, 13}, {1, 21125}, 20, 4, 5, 1, {0, 1, 2}}
+        ));
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/snippet_test/multi_out_test.cpp b/inference-engine/tests_deprecated/functional/mkldnn/snippet_test/multi_out_test.cpp
new file mode 100644 (file)
index 0000000..2038bdc
--- /dev/null
@@ -0,0 +1,125 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include "common_test_utils/xml_net_builder/xml_net_builder.hpp"
+#include "tests_common.hpp"
+#include "precision_utils.h"
+#include <ie_core.hpp>
+
+using namespace InferenceEngine;
+using std::string;
+using std::pair;
+using std::map;
+using std::vector;
+
+const static size_t _H = 16;
+const static size_t _W = 16;
+const static size_t _C = 1;
+const static size_t _B = 2;
+
+const static SizeVector dims    {_B, _C, _H, _W};
+
+class MultiOutConnectNet : CommonTestUtils::V2NetBuilder {
+    std::string model;
+    TBlob<uint8_t>::Ptr weightsPtr;
+
+public:
+    MultiOutConnectNet(): CommonTestUtils::V2NetBuilder(buildNetworkWithOneInput(
+            "MultiOutNet", {_B, 3*_C, _H, _W}, "FP32")) {
+        weightsPtr = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, SizeVector{0}, Layout::C));
+        weightsPtr->allocate();
+
+        /**
+         *        [in]
+         *          |
+         *     [__split__]
+         *      |   |   |
+         * [out1]   |  [out2]
+         *          |_______
+         *          |       |
+         *      [power1] [power2]
+         *          |       |
+         *       [out3]  [out4]
+         */
+        addLayer("Split", "FP32", nullptr,
+                 { {{_B, 3*_C, _H, _W}},
+                   {dims, dims, dims}});
+
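+        // Power computes (scale * x + shift) ^ power; with scale = -1, shift = 0 and
+        // power = 1 each branch simply negates its input, which the test checks below.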
+        map<string, string> pow_params = { {"scale", "-1"}, {"shift", "0"}, {"power", "1"} };
+        addLayer("Power", "FP32", &pow_params,
+                 { {dims}, {dims} });
+
+        addLayer("Power", "FP32", &pow_params,
+                 { {dims}, {dims} });
+
+        vector<pair<string, string>> edges = {
+                {"0,0", "1,1"},
+                {"1,3", "2,5"},
+                {"1,3", "3,7"}
+        };
+        model = finish(&edges);
+    }
+
+    CNNNetwork net(Core & ie) {
+        return ie.ReadNetwork(model, weightsPtr);
+    }
+};
+
+using test_param = std::tuple<string>;
+
+class smoke_MultiOutConnectTest : public ::testing::TestWithParam<test_param> {
+protected:
+    string device_name;
+    MultiOutConnectNet topology;
+
+    void SetUp() override {
+        device_name = std::get<0>(GetParam());
+    }
+};
+
+static void fill_with(Blob::Ptr &blob, std::vector<float> vals) {
+    float* ptr = blob->buffer().as<float*>();
+    const size_t size = blob->size();
+    const size_t fill_size = vals.size();
+
+    for (int i = 0; i < size; i++)
+        ptr[i] = vals[i%fill_size];
+}
+
+static bool check_with(Blob::Ptr &blob, std::vector<float> vals) {
+    float* ptr = blob->buffer().as<float*>();
+    const size_t size = blob->size();
+    const size_t fill_size = vals.size();
+
+    bool res = true;
+    for (int i = 0; i < size; i++)
+        if (ptr[i] != vals[i%fill_size])
+            res = false;
+    return res;
+}
+
+TEST_P(smoke_MultiOutConnectTest, canLoad) {
+    Core ie;
+    CNNNetwork net = topology.net(ie);
+
+    auto execNet = ie.LoadNetwork(net, device_name);
+    auto req = execNet.CreateInferRequest();
+
+    auto input = req.GetBlob("Input0");
+    fill_with(input, {1,2,3,4});
+
+    req.Infer();
+
+    auto output1 = req.GetBlob("Power2");
+    auto output2 = req.GetBlob("Power3");
+    ASSERT_TRUE(check_with(output1, {-1,-2,-3,-4}));
+    ASSERT_TRUE(check_with(output2, {-1,-2,-3,-4}));
+}
+
+#define PLUGIN_CASE(_plugin, _test) \
+    INSTANTIATE_TEST_CASE_P(_plugin##_run, _test, ::testing::Values(#_plugin) )
+
+PLUGIN_CASE(CPU, smoke_MultiOutConnectTest);
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/snippet_test/tripple_test.cpp b/inference-engine/tests_deprecated/functional/mkldnn/snippet_test/tripple_test.cpp
new file mode 100644 (file)
index 0000000..6d6f53b
--- /dev/null
@@ -0,0 +1,118 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include "common_test_utils/xml_net_builder/xml_net_builder.hpp"
+#include "tests_common.hpp"
+#include "precision_utils.h"
+#include <ie_core.hpp>
+
+using namespace InferenceEngine;
+using std::string;
+using std::pair;
+using std::map;
+using std::vector;
+
+const static size_t _H = 16;
+const static size_t _W = 16;
+const static size_t _C = 1;
+const static size_t _B = 2;
+
+const static SizeVector dims {_B, _C, _H, _W};
+
+class TripleConnectNet : CommonTestUtils::V2NetBuilder {
+    std::string model;
+    TBlob<uint8_t>::Ptr weightsPtr;
+
+public:
+    TripleConnectNet(): CommonTestUtils::V2NetBuilder(buildNetworkWithOneInput(
+            "Triple_Net", {_B, _C, _H, _W}, "FP32")) {
+        weightsPtr = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, SizeVector{0}, Layout::C));
+        weightsPtr->allocate();
+
+        /**
+         *        [in]
+         *       ___|___
+         *      |   |   |
+         *     [0] [1] [2]
+         *    [__Concat___]
+         *          |
+         *        [out]
+         */
+        map<string, string> concat_params = {};
+        addLayer("Concat", "FP32",
+                 &concat_params,
+                 {  // input dims
+                    {dims, dims, dims},
+                    // output dims
+                    {{_B, 3*_C, _H, _W}}
+                 });
+
+        vector<pair<string, string>> edges = {
+                {"0,0", "1,1"},
+                {"0,0", "1,2"},
+                {"0,0", "1,3"}
+        };
+        model = finish(&edges);
+    }
+
+    CNNNetwork net(Core & ie) {
+        return ie.ReadNetwork(model, weightsPtr);
+    }
+};
+
+using test_param = std::tuple<string>;
+
+class smoke_TripleConnectTest : public ::testing::TestWithParam<test_param> {
+protected:
+    string device_name;
+    TripleConnectNet topology;
+
+    void SetUp() override {
+        device_name = std::get<0>(GetParam());
+    }
+};
+
+static void fill_with(Blob::Ptr &blob, std::vector<float> vals) {
+    float* ptr = blob->buffer().as<float*>();
+    const size_t size = blob->size();
+    const size_t fill_size = vals.size();
+
+    for (int i = 0; i < size; i++)
+        ptr[i] = vals[i%fill_size];
+}
+
+static bool check_with(Blob::Ptr &blob, std::vector<float> vals) {
+    float* ptr = blob->buffer().as<float*>();
+    const size_t size = blob->size();
+    const size_t fill_size = vals.size();
+
+    bool res = true;
+    for (int i = 0; i < size; i++)
+        if (ptr[i] != vals[i%fill_size])
+            res = false;
+    return res;
+}
+
+TEST_P(smoke_TripleConnectTest, canLoad) {
+    Core ie;
+    CNNNetwork net = topology.net(ie);
+
+    auto execNet = ie.LoadNetwork(net, device_name);
+    auto req = execNet.CreateInferRequest();
+
+    auto input = req.GetBlob("Input0");
+    fill_with(input, {1,2,3,4});
+
+    req.Infer();
+
+    auto output = req.GetBlob("Concat1");
+    ASSERT_TRUE(check_with(output, {1,2,3,4}));
+}
+
+#define PLUGIN_CASE(_plugin, _test) \
+    INSTANTIATE_TEST_CASE_P(_plugin##_run, _test, ::testing::Values(#_plugin) )
+
+PLUGIN_CASE(CPU, smoke_TripleConnectTest);
diff --git a/inference-engine/tests_deprecated/functional/mkldnn/test_model_repo.cpp b/inference-engine/tests_deprecated/functional/mkldnn/test_model_repo.cpp
new file mode 100644 (file)
index 0000000..1cb3045
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "test_model_repo.hpp"
+
+std::string get_model_repo() {
+    return "models:";
+}
+
+const char* TestDataHelpers::getModelPathNonFatal() noexcept {
+    return TestDataHelpers::getModelPathNonFatalDefault();
+}
+
+std::string TestDataHelpers::get_data_path() {
+    return TestDataHelpers::get_data_path_default();
+}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/CMakeLists.txt b/inference-engine/tests_deprecated/functional/shared_tests/CMakeLists.txt
new file mode 100644 (file)
index 0000000..20ea186
--- /dev/null
@@ -0,0 +1,69 @@
+# Copyright (C) 2016-2020 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+set(TARGET_NAME IESharedTests)
+
+disable_deprecated_warnings()
+
+list(APPEND SHARED_LIBRARIES
+        ${NGRAPH_LIBRARIES}
+        ie_tests
+        ngraphFunctions
+        inference_engine_ir_readers
+        )
+
+file(GLOB SHARED_TESTS_SRC
+        ${CMAKE_CURRENT_SOURCE_DIR}/common_single_layer_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/lstm/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/network_tests/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/graph_tools/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/ie_class/*.hpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/transformations/*.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/transformations/*.hpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/transformations/common/*.cpp
+        )
+
+add_library(${TARGET_NAME} STATIC ${SHARED_TESTS_SRC})
+add_dependencies(${TARGET_NAME} inference_engine_preproc mock_engine)
+
+if(ENABLE_MKL_DNN)
+    add_dependencies(${TARGET_NAME} MKLDNNPlugin)
+    target_compile_definitions(${TARGET_NAME} PUBLIC ENABLE_MKL_DNN)
+endif()
+
+# Find OpenCV components if exist
+find_package(OpenCV COMPONENTS imgcodecs imgproc QUIET)
+if(OpenCV_FOUND)
+    target_compile_definitions(${TARGET_NAME} PUBLIC USE_OPENCV)
+else()
+    message(WARNING "No suitable OpenCV version detected, pre-processing tests are skipped in " ${TARGET_NAME})
+endif()
+
+target_include_directories(${TARGET_NAME} PUBLIC
+        ${CMAKE_CURRENT_SOURCE_DIR}/network_tests
+        ${CMAKE_CURRENT_SOURCE_DIR}/io_blob_tests
+        ${CMAKE_CURRENT_SOURCE_DIR}/input_tests
+        ${CMAKE_CURRENT_SOURCE_DIR}/inference_engine_regression_tests
+        ${CMAKE_CURRENT_SOURCE_DIR}/lstm
+        ${CMAKE_CURRENT_SOURCE_DIR}/common_single_layer_tests
+        ${CMAKE_CURRENT_SOURCE_DIR}/single_layer_tests
+        ${CMAKE_CURRENT_SOURCE_DIR}/ie_class
+        ${CMAKE_CURRENT_SOURCE_DIR}/graph_tools
+        ${CMAKE_CURRENT_SOURCE_DIR}/transformations
+        $<TARGET_PROPERTY:inference_engine_plugin_api,INTERFACE_INCLUDE_DIRECTORIES>
+)
+
+if(OpenCV_FOUND)
+    target_include_directories(${TARGET_NAME} PUBLIC ${OpenCV_INCLUDE_DIRS})
+    list(APPEND SHARED_LIBRARIES ${OpenCV_LIBS})
+endif()
+
+target_link_libraries(${TARGET_NAME} PUBLIC ${SHARED_LIBRARIES})
+
+add_dependencies(${TARGET_NAME} HeteroPlugin)
+
+# developer package
+
+ie_developer_export_targets(${TARGET_NAME})
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/conv_ref.cpp b/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/conv_ref.cpp
new file mode 100644 (file)
index 0000000..94ec6ba
--- /dev/null
@@ -0,0 +1,283 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <ie_layers.h>
+#include <precision_utils.h>
+#include "conv_ref.hpp"
+#include "common_test_utils/common_layers_params.hpp"
+
+using namespace InferenceEngine;
+
+
+void Convolution_parseParams(InferenceEngine::CNNLayer* layer) {
+    auto convLayer = dynamic_cast<InferenceEngine::ConvolutionLayer*>(layer);
+    if (!convLayer) {
+        THROW_IE_EXCEPTION << "Layer is not instance of ConvolutionLayer class";
+    }
+    convLayer->_out_depth = convLayer->GetParamAsUInt("output");
+
+    convLayer->_kernel.clear();
+    convLayer->_stride.clear();
+    convLayer->_padding.clear();
+    convLayer->_pads_end.clear();
+    convLayer->_dilation.clear();
+
+    std::vector<unsigned int> kernels = convLayer->GetParamAsUInts("kernel", {});
+    if (kernels.empty()) {
+        // IR_v == 2
+        convLayer->_kernel.insert(InferenceEngine::X_AXIS, convLayer->GetParamAsUInt("kernel-x"));
+        convLayer->_kernel.insert(InferenceEngine::Y_AXIS, convLayer->GetParamAsUInt("kernel-y"));
+
+        convLayer->_stride.insert(InferenceEngine::X_AXIS, convLayer->GetParamAsUInt("stride-x", 1u));
+        convLayer->_stride.insert(InferenceEngine::Y_AXIS, convLayer->GetParamAsUInt("stride-y", 1u));
+        // TODO: consider throwing here instead of silently patching the IR
+        if (0 == convLayer->_stride[InferenceEngine::X_AXIS]) {
+            convLayer->_stride[InferenceEngine::X_AXIS] = 1u;
+            printf("Warning! in layer %s: stride-x is 0, setting it to 1\n", convLayer->name.c_str());
+        }
+        if (0 == convLayer->_stride[InferenceEngine::Y_AXIS]) {
+            convLayer->_stride[InferenceEngine::Y_AXIS] = 1u;
+            printf("Warning! in layer %s: stride-y is 0, setting it to 1\n", convLayer->name.c_str());
+        }
+
+        convLayer->_padding.insert(InferenceEngine::X_AXIS, convLayer->GetParamAsUInt("pad-x", 0u));
+        convLayer->_padding.insert(InferenceEngine::Y_AXIS, convLayer->GetParamAsUInt("pad-y", 0u));
+
+        convLayer->_pads_end.insert(InferenceEngine::X_AXIS, convLayer->GetParamAsUInt("pad-r", convLayer->_padding[InferenceEngine::X_AXIS]));
+        convLayer->_pads_end.insert(InferenceEngine::Y_AXIS, convLayer->GetParamAsUInt("pad-b", convLayer->_padding[InferenceEngine::Y_AXIS]));
+
+        convLayer->_dilation.insert(InferenceEngine::X_AXIS, convLayer->GetParamAsUInt("dilation-x", 1u));
+        convLayer->_dilation.insert(InferenceEngine::Y_AXIS, convLayer->GetParamAsUInt("dilation-y", 1u));
+    } else {
+        // IR_v > 2
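+        // The IR lists spatial dims ending with W (e.g. "...,H,W"); the loops below
+        // reverse that order so index 0 maps to X_AXIS, index 1 to Y_AXIS, and so on.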
+        for (int i = 1; i <= kernels.size(); i++) {
+            convLayer->_kernel.insert(i - 1, kernels[kernels.size() - i]);
+        }
+
+        std::vector<unsigned int> default_0(convLayer->_kernel.size(), 0u);
+        std::vector<unsigned int> default_1(convLayer->_kernel.size(), 1u);
+
+        std::vector<unsigned int> strides = convLayer->GetParamAsUInts("strides", default_1);
+        for (int i = 1; i <= strides.size(); i++) {
+            if (strides[strides.size() - i] == 0) {
+                THROW_IE_EXCEPTION << "Stride could not be 0.\nIn layer " << convLayer->name;
+            }
+            convLayer->_stride.insert(i - 1, strides[strides.size() - i]);
+        }
+
+        std::vector<unsigned int> pads_begin = convLayer->GetParamAsUInts("pads_begin", default_0);
+        for (int i = 1; i <= pads_begin.size(); i++) {
+            convLayer->_padding.insert(i - 1, pads_begin[pads_begin.size() - i]);
+        }
+
+        std::vector<unsigned int> pads_end = convLayer->GetParamAsUInts("pads_end", pads_begin);
+        for (int i = 1; i <= pads_end.size(); i++) {
+            convLayer->_pads_end.insert(i - 1, pads_end[pads_end.size() - i]);
+        }
+
+        std::vector<unsigned int> dilations = convLayer->GetParamAsUInts("dilations", default_1);
+        for (int i = 1; i <= dilations.size(); i++) {
+            convLayer->_dilation.insert(i - 1, dilations[dilations.size() - i]);
+        }
+    }
+
+    convLayer->_auto_pad = convLayer->GetParamAsString("auto_pad", "");
+    convLayer->_group = convLayer->GetParamAsUInt("group", 1u);
+}
+
+template<typename wei_data_t, typename bias_data_t>
+void ref_conv_common(const std::vector<InferenceEngine::Blob::Ptr> srcs,
+                     Blob& dst,
+                     const wei_data_t* weights_data,
+                     size_t weights_size,
+                     const bias_data_t* bias_data,
+                     size_t bias_size,
+                     const CommonTestUtils::conv_common_params& prm) {
+    if (srcs[0]->getTensorDesc().getLayout() != Layout::NCHW &&
+            srcs[0]->getTensorDesc().getLayout() != Layout::NCDHW)
+        THROW_IE_EXCEPTION << "Reference FP32 convolution supports NCHW and NCDHW layouts only";
+    size_t KW = prm.kernel[X_AXIS];
+    size_t KH = prm.kernel[Y_AXIS];
+    size_t KD = prm.kernel.size() > Z_AXIS ? prm.kernel[Z_AXIS] : 1lu;
+
+    size_t SW = prm.stride[X_AXIS];
+    size_t SH = prm.stride[Y_AXIS];
+    size_t SD = prm.stride.size() > Z_AXIS ? prm.stride[Z_AXIS] : 0lu;
+
+    size_t DW = prm.dilation[X_AXIS];
+    size_t DH = prm.dilation[Y_AXIS];
+    size_t DD = prm.dilation.size() > Z_AXIS ? prm.dilation[Z_AXIS] : 0lu;
+
+    size_t PW = prm.pads_begin[X_AXIS];
+    size_t PH = prm.pads_begin[Y_AXIS];
+    size_t PD = prm.pads_begin.size() > Z_AXIS ? prm.pads_begin[Z_AXIS] : 0lu;
+
+    size_t GC = prm.group;
+
+    auto src_dims = srcs[0]->getTensorDesc().getDims();
+    size_t IC = src_dims[1];
+    size_t ID = (src_dims.size() == 5lu) ? src_dims[2] : 1lu;
+    size_t IH = src_dims.at(src_dims.size() - 2);
+    size_t IW = src_dims.back();
+
+    auto dst_dims = dst.getTensorDesc().getDims();
+    size_t OW = dst_dims.back();
+    size_t OH = dst_dims.at(dst_dims.size() - 2);
+    size_t OD = (dst_dims.size() == 5lu) ? dst_dims[2] : 1lu;
+    size_t OC = prm.out_c;
+
+    const auto src_buffer = srcs[0]->cbuffer();
+    auto* dst_data = dst.buffer().as<float*>();
+    Precision src_precision = srcs[0]->getTensorDesc().getPrecision();
+
+    IE_ASSERT(KW * KH * KD * OC * IC / GC == weights_size);
+    IE_ASSERT(OC == bias_size);
+
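+    // Direct (naive) convolution: each output element accumulates
+    //     dst[g,oc,od,oh,ow] += src[g,ic,id,ih,iw] * w[g,oc,ic,kd,kh,kw]
+    // over its group's input channels and the kernel window; taps that fall
+    // into the padding area are skipped. Note there is no batch loop, so a
+    // single-batch input is assumed.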
+    for (uint32_t g = 0; g < GC; g++) {
+        for (uint32_t oc = 0; oc < OC / GC; oc++) {
+            for (uint32_t od = 0; od < OD; od++) {
+                for (uint32_t oh = 0; oh < OH; oh++) {
+                    for (uint32_t ow = 0; ow < OW; ow++) {
+                        size_t oidx = g * OC / GC * OD * OH * OW
+                                      + oc * OD * OH * OW
+                                      + od * OH * OW
+                                      + oh * OW
+                                      + ow;
+                        // Initialize the accumulator with the bias (or zero); otherwise
+                        // the += below would accumulate into uninitialized memory.
+                        dst_data[oidx] = bias_data ? bias_data[g * OC / GC + oc] : 0.0f;
+
+                        for (size_t ic = 0; ic < IC / GC; ic++) {
+                            for (size_t kd = 0; kd < KD; kd++) {
+                                for (size_t kh = 0; kh < KH; kh++) {
+                                    for (size_t kw = 0; kw < KW; kw++) {
+                                        int32_t iw = ow * SW - PW + kw * DW;
+                                        int32_t ih = oh * SH - PH + kh * DH;
+                                        int32_t id = od * SD - PD + kd * DD;
+                                        if (iw < 0 || iw >= (int32_t) IW ||
+                                            ih < 0 || ih >= (int32_t) IH ||
+                                            id < 0 || id >= (int32_t) ID)
+                                            continue;
+                                        size_t iidx = g * IC / GC * ID * IH * IW
+                                                      + ic * ID * IH * IW
+                                                      + id * IH * IW
+                                                      + ih * IW
+                                                      + iw;
+                                        size_t widx = g * OC / GC * IC / GC * KD * KH * KW
+                                                      + oc * IC / GC * KD * KH * KW
+                                                      + ic * KD * KH * KW
+                                                      + kd * KH * KW
+                                                      + kh * KW
+                                                      + kw;
+
+                                        if (src_precision == Precision::U8) {
+                                            dst_data[oidx] += (src_buffer.as<const uint8_t*>())[iidx] * weights_data[widx];
+                                        } else if (src_precision == Precision::I8) {
+                                            dst_data[oidx] += (src_buffer.as<const int8_t*>())[iidx] * weights_data[widx];
+                                        } else {
+                                            dst_data[oidx] += (src_buffer.as<const float*>())[iidx] * weights_data[widx];
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+template void ref_conv_common(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob& dst, const float* weights_data,
+                              size_t, const float* bias_data, size_t, const CommonTestUtils::conv_common_params& prm);
+template void ref_conv_common(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob& dst, const int8_t* weights_data,
+                              size_t, const int32_t* bias_data, size_t, const CommonTestUtils::conv_common_params& prm);
+
+template<>
+void ref_conv_common(const std::vector<InferenceEngine::Blob::Ptr> srcs,
+                     Blob& dst,
+                     const ie_fp16* weights_data,
+                     size_t /*weights_size*/,
+                     const ie_fp16* bias_data,
+                     size_t /*bias_size*/,
+                     const CommonTestUtils::conv_common_params& prm) {
+    const auto* src_data = srcs[0]->cbuffer().as<const ie_fp16*>();
+    auto* dst_data = dst.buffer().as<ie_fp16*>();
+    IE_ASSERT(src_data != nullptr);
+    IE_ASSERT(dst_data != nullptr);
+
+    size_t KH = prm.kernel[Y_AXIS];
+    size_t KW = prm.kernel[X_AXIS];
+
+    size_t SH = prm.stride[Y_AXIS];
+    size_t SW = prm.stride[X_AXIS];
+
+    size_t DH = prm.dilation[Y_AXIS];
+    size_t DW = prm.dilation[X_AXIS];
+
+    size_t PH = prm.pads_begin[Y_AXIS];
+    size_t PW = prm.pads_begin[X_AXIS];
+
+    size_t GC = prm.group;
+
+    int32_t IW = 0;
+    int32_t IH = 0;
+    int32_t IC = 0;
+    int32_t I_N = 0;
+    int32_t OW = 0;
+    int32_t OH = 0;
+    int32_t OC = 0;
+    int32_t ON = 0;
+    CommonTestUtils::get_common_dims(*srcs[0], IW, IH, IC, I_N);
+    CommonTestUtils::get_common_dims(dst, OW, OH, OC, ON);
+    IE_ASSERT(I_N == ON);
+    size_t src_channels = IC / GC;
+    size_t dst_channels = OC / GC;
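+    // Note: unlike the FP32 path above, this FP16 reference indexes blobs
+    // channel-minor (c + x * C + y * C * W), i.e. an interleaved layout.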
+    for (size_t n = 0; n < ON; ++n) {
+        size_t oShift = n * OC * OH * OW;
+        size_t iShift = n * IC * IH * IW;
+        for (size_t g = 0; g < GC; ++g) {
+            for (size_t oc = 0; oc < dst_channels; ++oc) {
+                size_t dst_channel = (g * dst_channels + oc);
+                for (size_t oh = 0; oh < OH; oh++) {
+                    for (size_t ow = 0; ow < OW; ow++) {
+                        size_t oidx = dst_channel + ow * OC + oh * OC * OW + oShift;
+                        IE_ASSERT(oidx < dst.size());
+                        float val = 0.0f;
+                        if (bias_data)
+                            val = PrecisionUtils::f16tof32(bias_data[dst_channel]);
+
+                        for (size_t ic = 0; ic < src_channels; ++ic) {
+                            size_t src_channel = (g * src_channels + ic);
+
+                            for (size_t ky = 0; ky < KH; ++ky) {
+                                for (size_t kx = 0; kx < KW; ++kx) {
+
+                                    int32_t iw = ow * SW - PW + kx * DW;
+                                    int32_t ih = oh * SH - PH + ky * DH;
+
+                                    if (iw < 0 || iw >= (int32_t) IW || ih < 0 || ih >= (int32_t) IH) {
+                                        continue;
+                                    }
+
+                                    size_t iidx = src_channel + iw * IC + ih * IC * IW + iShift;
+                                    IE_ASSERT(iidx < srcs[0]->size());
+
+                                    size_t widx = (ky * KW + kx) + ic * KH * KW +
+                                                  dst_channel * src_channels * KW * KH;
+
+                                    IE_ASSERT(widx < KH * KW * (IC / GC) * OC);
+
+                                    val += PrecisionUtils::f16tof32(src_data[iidx]) *
+                                           PrecisionUtils::f16tof32(weights_data[widx]);
+                                }
+                            }
+                        }
+
+                        dst_data[oidx] = PrecisionUtils::f32tof16(val);
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/conv_ref.hpp b/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/conv_ref.hpp
new file mode 100644 (file)
index 0000000..6e40bda
--- /dev/null
@@ -0,0 +1,58 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <ie_layers.h>
+#include <ie_layers_property.hpp>
+#include <ie_blob.h>
+#include <precision_utils.h>
+#include <ie_layers_internal.hpp>
+#include "common_test_utils/common_layers_params.hpp"
+
+template<typename wei_data_t, typename bias_data_t>
+void ref_conv_common(const std::vector<InferenceEngine::Blob::Ptr> srcs,
+                     InferenceEngine::Blob& dst,
+                     const wei_data_t* weights_data,
+                     size_t weights_size,
+                     const bias_data_t* bias_data,
+                     size_t bias_size,
+                     const CommonTestUtils::conv_common_params& prm);
+
+void Convolution_parseParams(InferenceEngine::CNNLayer* layer);
+
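+// Builds a temporary ConvolutionLayer from an IR-style string map (e.g.
+// {"kernel": "3,3", "strides": "1,1", "output": "16"} -- illustrative values),
+// resolves the paddings, and forwards everything to ref_conv_common.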
+template<typename wei_data_t, typename bias_data_t>
+void common_ref_convolution_wrap(const std::vector<InferenceEngine::Blob::Ptr> srcs,
+                                 InferenceEngine::Blob::Ptr& dst,
+                                 const wei_data_t* weights_data,
+                                 size_t weights_size,
+                                 const bias_data_t* bias_data,
+                                 size_t bias_size,
+                                 const std::map<std::string, std::string>& params_map) {
+    InferenceEngine::LayerParams lp{};
+    InferenceEngine::ConvolutionLayer convLayer(lp);
+    auto data = std::make_shared<InferenceEngine::Data>("insData", srcs[0]->getTensorDesc());
+    convLayer.params = params_map;
+    convLayer.insData.push_back(data);
+    Convolution_parseParams(&convLayer);
+
+    CommonTestUtils::conv_common_params params;
+    params.kernel = convLayer._kernel;
+    auto allPad = InferenceEngine::getPaddings(convLayer);
+    params.pads_begin = allPad.begin;
+    params.pads_end = allPad.end;
+    params.stride = convLayer._stride;
+    params.dilation = convLayer._dilation;
+    params.out_c = convLayer._out_depth;
+    params.group = convLayer._group;
+
+    ref_conv_common<>(srcs,
+                      *dst.get(),
+                      weights_data,
+                      weights_size,
+                      bias_data,
+                      bias_size,
+                      params);
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/deconv_ref.cpp b/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/deconv_ref.cpp
new file mode 100644 (file)
index 0000000..9517121
--- /dev/null
@@ -0,0 +1,187 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <ie_layers.h>
+#include <precision_utils.h>
+#include <gtest/gtest.h>
+#include "deconv_ref.hpp"
+#include "common_test_utils/common_layers_params.hpp"
+
+using namespace InferenceEngine;
+
+template<>
+void ref_deconv_common(const std::vector<InferenceEngine::Blob::Ptr> srcs,
+                       Blob &dst,
+                       const float *weights_data,
+                       size_t weights_size,
+                       const float *bias_data,
+                       size_t bias_size,
+                       const CommonTestUtils::conv_common_params &prm) {
+    if (srcs[0]->getTensorDesc().getLayout() != Layout::NCHW)
+        THROW_IE_EXCEPTION << "Reference FP32 convolution supports NCHW layout only";
+
+    size_t KH = prm.kernel[Y_AXIS];
+    size_t KW = prm.kernel[X_AXIS];
+
+    size_t SH = prm.stride[Y_AXIS];
+    size_t SW = prm.stride[X_AXIS];
+
+    size_t PH = prm.pads_begin[Y_AXIS];
+    size_t PW = prm.pads_begin[X_AXIS];
+
+    auto src_dims = srcs[0]->getTensorDesc().getDims();
+    size_t IW = src_dims.back();
+    size_t IH = src_dims.at(src_dims.size() - 2);
+    size_t IC = src_dims.at(1);
+    size_t MB = src_dims.at(0);
+
+    size_t OC = prm.out_c;
+
+    auto dst_dims = dst.getTensorDesc().getDims();
+    size_t OW = dst_dims.back();
+    size_t OH = dst_dims.at(dst_dims.size() - 2);
+
+    const auto *src_data = srcs[0]->cbuffer().as<const float *>();
+    auto *dst_data = dst.buffer().as<float *>();
+
+    for (int mb = 0; mb < MB; ++mb) {
+        for (int oc = 0; oc < OC; ++oc) {
+            for (int oh = 0; oh < OH; ++oh) {
+                for (int ow = 0; ow < OW; ++ow) {
+                    size_t didx = mb * OC * OH * OW
+                                  + oc * OH * OW + oh * OW + ow;
+
+                    dst_data[didx] = float(0);
+                    if (bias_data) dst_data[didx] += bias_data[oc];
+
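+                    // Deconvolution computed as a gather: output (oh, ow) sums every
+                    // input pixel ((oh + PH - kh) / SH, (ow + PW - kw) / SW) for which
+                    // the division is exact and the pixel lies inside the input.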
+                    for (int ic = 0; ic < IC; ic++) {
+                        for (int kh = 0; kh < KH; kh++) {
+                            for (int kw = 0; kw < KW; kw++) {
+                                if (ow + PW < kw || oh + PH < kh)
+                                    continue;
+
+                                size_t iw = ow - kw + PW;
+                                size_t ih = oh - kh + PH;
+
+                                if (iw % SW != 0 || ih % SH != 0)
+                                    continue;
+
+                                iw /= SW;
+                                ih /= SH;
+
+                                if (ih < IH && iw < IW) {
+                                    size_t sidx = mb * IC * IH * IW
+                                                  + ic * IH * IW + ih * IW
+                                                  + iw;
+
+                                    size_t widx = ic * OC * KH * KW
+                                                  + oc * KH * KW + kh * KW
+                                                  + kw;
+
+                                    dst_data[didx] += src_data[sidx] * weights_data[widx];
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+template<>
+void ref_deconv_common(const std::vector<InferenceEngine::Blob::Ptr> srcs,
+                       Blob &dst,
+                       const ie_fp16 *weights_data,
+                       size_t /*weights_size*/,
+                       const ie_fp16 *bias_data,
+                       size_t /*bias_size*/,
+                       const CommonTestUtils::conv_common_params &prm) {
+    const auto *src_data = srcs[0]->cbuffer().as<const ie_fp16 *>();
+    auto *dst_data = dst.buffer().as<ie_fp16 *>();
+    IE_ASSERT(src_data != nullptr);
+    IE_ASSERT(dst_data != nullptr);
+
+    size_t KH = prm.kernel[Y_AXIS];
+    size_t KW = prm.kernel[X_AXIS];
+
+    size_t SH = prm.stride[Y_AXIS];
+    size_t SW = prm.stride[X_AXIS];
+
+    size_t PH = prm.pads_begin[Y_AXIS];
+    size_t PW = prm.pads_begin[X_AXIS];
+
+    auto src_dims = srcs[0]->getTensorDesc().getDims();
+    size_t IW = src_dims.back();
+    size_t IH = src_dims.at(src_dims.size() - 2);
+    size_t IC = src_dims.at(1);
+    size_t IB = src_dims.at(0);
+
+    auto dst_dims = dst.getTensorDesc().getDims();
+    size_t OW = dst_dims.back();
+    size_t OH = dst_dims.at(dst_dims.size() - 2);
+    size_t OC = dst_dims.at(1);
+    size_t OB = dst_dims.at(0);
+
+    size_t GC = prm.group;
+
+    size_t src_channels = IC / GC;
+    size_t dst_channels = OC / GC;
+
+    size_t ib_size = srcs[0]->size() / IB;
+    size_t ob_size = dst.size() / OB;
+
+    for (size_t ob = 0; ob < OB; ++ob) {
+        for (size_t g = 0; g < GC; ++g) {
+            for (size_t oc = 0; oc < dst_channels; ++oc) {
+                size_t dst_channel = (g * dst_channels + oc);
+                for (size_t oy = 0; oy < OH; oy++) {
+                    for (size_t ox = 0; ox < OW; ox++) {
+                        size_t oidx = ob * ob_size + dst_channel + ox * OC + oy * OC * OW;
+                        ASSERT_LT(oidx, dst.size());
+                        float val = bias_data != nullptr ? PrecisionUtils::f16tof32(bias_data[dst_channel]) : 0;
+
+                        for (size_t ic = 0; ic < src_channels; ++ic) {
+                            size_t src_channel = (g * src_channels + ic);
+
+                            for (size_t ky = 0; ky < KH; ++ky) {
+                                for (size_t kx = 0; kx < KW; ++kx) {
+                                    if (ox + PW < kx || oy + PH < ky)
+                                        continue;
+
+                                    int32_t ix = ox - kx + PW;
+                                    int32_t iy = oy - ky + PH;
+
+                                    if (ix % SW != 0 || iy % SH != 0)
+                                        continue;
+
+                                    ix /= SW;
+                                    iy /= SH;
+
+                                    if (iy < IH && ix < IW) {
+                                        size_t iidx = ob * ib_size + src_channel + ix * IC + iy * IC * IW;
+
+                                        ASSERT_LT(iidx, srcs[0]->size());
+
+                                        size_t widx = ic * OC * KH * KW
+                                                    + dst_channel * KH * KW
+                                                    + ky * KW
+                                                    + kx;
+
+                                        ASSERT_LT(widx, KW * KH * (IC / GC) * OC);
+
+                                        val += PrecisionUtils::f16tof32(src_data[iidx]) *
+                                            PrecisionUtils::f16tof32(weights_data[widx]);
+                                    }
+                                }
+                            }
+                        }
+
+                        dst_data[oidx] = PrecisionUtils::f32tof16(val);
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/deconv_ref.hpp b/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/deconv_ref.hpp
new file mode 100644 (file)
index 0000000..5209fe8
--- /dev/null
@@ -0,0 +1,57 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <ie_layers.h>
+#include <ie_layers_property.hpp>
+#include <ie_blob.h>
+#include <precision_utils.h>
+#include <ie_layers_internal.hpp>
+#include "common_test_utils/common_layers_params.hpp"
+
+template<typename data_t>
+void ref_deconv_common(const std::vector<InferenceEngine::Blob::Ptr> srcs,
+                       InferenceEngine::Blob& dst,
+                       const data_t* weights_data,
+                       size_t weights_size,
+                       const data_t* bias_data,
+                       size_t bias_size,
+                       const CommonTestUtils::conv_common_params& prm);
+
+void Convolution_parseParams(InferenceEngine::CNNLayer* layer);
+
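+// Deconvolution reuses Convolution_parseParams: both layer types share the
+// same kernel/stride/pad/dilation attributes in the IR.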
+template<typename data_t>
+void common_ref_deconvolution_wrap(const std::vector<InferenceEngine::Blob::Ptr> srcs,
+                                 InferenceEngine::Blob::Ptr& dst,
+                                 const data_t* weights_data,
+                                 size_t weights_size,
+                                 const data_t* bias_data,
+                                 size_t bias_size,
+                                 const std::map<std::string, std::string>& params_map) {
+    InferenceEngine::LayerParams lp{};
+    InferenceEngine::ConvolutionLayer deconvLayer(lp);
+    auto data = std::make_shared<InferenceEngine::Data>("insData", srcs[0]->getTensorDesc());
+    deconvLayer.params = params_map;
+    deconvLayer.insData.push_back(data);
+    Convolution_parseParams(&deconvLayer);
+
+    CommonTestUtils::conv_common_params params;
+    params.kernel = deconvLayer._kernel;
+    auto allPad = InferenceEngine::getPaddings(deconvLayer);
+    params.pads_begin = allPad.begin;
+    params.pads_end = allPad.end;
+    params.stride = deconvLayer._stride;
+    params.dilation = deconvLayer._dilation;
+    params.out_c = deconvLayer._out_depth;
+    params.group = deconvLayer._group;
+
+    ref_deconv_common<data_t>(srcs,
+                            *dst.get(),
+                            weights_data,
+                            weights_size,
+                            bias_data,
+                            bias_size,
+                            params);
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/def_conv_ref.cpp b/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/def_conv_ref.cpp
new file mode 100644 (file)
index 0000000..d86fabc
--- /dev/null
@@ -0,0 +1,272 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <ie_layers.h>
+#include <precision_utils.h>
+#include <math.h>
+#include <ie_parallel.hpp>
+#include "def_conv_ref.hpp"
+#include "common_test_utils/common_layers_params.hpp"
+
+using namespace InferenceEngine;
+
+void Convolution_parseParams(InferenceEngine::CNNLayer* layer);
+
+void DeformableConvolution_parseParams(InferenceEngine::CNNLayer* layer) {
+    auto deformable_conv_layer = dynamic_cast<InferenceEngine::DeformableConvolutionLayer*>(layer);
+    if (!deformable_conv_layer) {
+        THROW_IE_EXCEPTION << "Layer is not instance of DeformableConvolutionLayer class";
+    }
+    deformable_conv_layer->_deformable_group = deformable_conv_layer->GetParamAsUInt("deformable_group", 1u);
+    Convolution_parseParams(layer);
+}
+
+template<>
+void ref_def_conv_common(const std::vector<InferenceEngine::Blob::Ptr> srcs,
+                         Blob& dst,
+                         const float* weights_data,
+                         size_t weights_size,
+                         const float* bias_data,
+                         size_t bias_size,
+                         const CommonTestUtils::def_conv_common_params& prm) {
+    if (srcs[0]->getTensorDesc().getLayout() != Layout::NCHW &&
+        srcs[0]->getTensorDesc().getLayout() != Layout::NCDHW)
+        THROW_IE_EXCEPTION << "Reference FP32 deformable convolution supports NCHW and NCDHW layouts only";
+    size_t KW = prm.kernel[X_AXIS];
+    size_t KH = prm.kernel[Y_AXIS];
+    size_t KD = prm.kernel.size() > Z_AXIS ? prm.kernel[Z_AXIS] : 1lu;
+
+    size_t SW = prm.stride[X_AXIS];
+    size_t SH = prm.stride[Y_AXIS];
+    size_t SD = prm.stride.size() > Z_AXIS ? prm.stride[Z_AXIS] : 0lu;
+
+    size_t DW = prm.dilation[X_AXIS];
+    size_t DH = prm.dilation[Y_AXIS];
+    size_t DD = prm.dilation.size() > Z_AXIS ? prm.dilation[Z_AXIS] : 0lu;
+
+    size_t PW = prm.pads_begin[X_AXIS];
+    size_t PH = prm.pads_begin[Y_AXIS];
+    size_t PD = prm.pads_begin.size() > Z_AXIS ? prm.pads_begin[Z_AXIS] : 0lu;
+
+    size_t GC = prm.group;
+
+    auto src_dims = srcs[0]->getTensorDesc().getDims();
+    size_t MB = src_dims[0];
+    size_t IC = src_dims[1];
+    size_t ID = (src_dims.size() == 5lu) ? src_dims[2] : 1lu;
+    size_t IH = src_dims.at(src_dims.size() - 2);
+    size_t IW = src_dims.back();
+
+    auto dst_dims = dst.getTensorDesc().getDims();
+    size_t OW = dst_dims.back();
+    size_t OH = dst_dims.at(dst_dims.size() - 2);
+    size_t OD = (dst_dims.size() == 5lu) ? dst_dims[2] : 1lu;
+    size_t OC = prm.out_c;
+
+    size_t DG = prm.deformable_group;
+
+    const auto* src_data = srcs[0]->cbuffer().as<const float*>();
+    const auto* trans_data = srcs[1]->cbuffer().as<const float*>();
+    auto* dst_data = dst.buffer().as<float*>();
+
+    IE_ASSERT(KW * KH * KD * OC * IC / GC == weights_size);
+    IE_ASSERT(OC == bias_size);
+
+    const int channel_per_deformable_group = IC / DG;
+
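+    // srcs[1] holds the learned offsets: per deformable group, 2 * KH * KW
+    // planes of size OH * OW with the (y, x) displacement of every kernel tap.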
+    parallel_for5d(MB, GC, OC / GC, OD, OH, [&](size_t mb, size_t g, size_t oc, size_t od, size_t oh) {
+        for (size_t ow = 0; ow < OW; ow++) {
+            size_t oidx = mb * OC * OD * OH * OW
+                          + g * OC / GC * OD * OH * OW
+                          + oc * OD * OH * OW
+                          + od * OH * OW
+                          + oh * OW
+                          + ow;
+            // Initialize the accumulator with the bias (or zero).
+            dst_data[oidx] = bias_data ? bias_data[g * OC / GC + oc] : 0.0f;
+
+            for (size_t ic = 0; ic < IC / GC; ic++) {
+                const int deformable_group_idx = ic / channel_per_deformable_group;
+                const int trans_offset = mb * DG * 2 * KH * KW * OH * OW
+                                         + deformable_group_idx * 2 * KH * KW * OH * OW;
+
+                for (size_t kd = 0; kd < KD; kd++) {
+                    for (size_t kh = 0; kh < KH; kh++) {
+                        for (size_t kw = 0; kw < KW; kw++) {
+                            int32_t iw = ow * SW - PW + kw * DW;
+                            int32_t ih = oh * SH - PH + kh * DH;
+                            int32_t id = od * SD - PD + kd * DD;
+                            const int trans_y_idx = ((2 * (kh * KW + kw)) * OH + oh) * OW + ow;
+                            float transformed_y = ih + trans_data[trans_offset + trans_y_idx];
+
+                            const int trans_x_idx = ((2 * (kh * KW + kw) + 1) * OH + oh) * OW + ow;
+                            float transformed_x = iw + trans_data[trans_offset + trans_x_idx];
+
+                            if (transformed_x < 0 || transformed_x >= (int32_t) IW ||
+                                transformed_y < 0 || transformed_y >= (int32_t) IH ||
+                                id < 0 || id >= (int32_t) ID)
+                                continue;
+
+                            auto get_data_index = [&](int h, int w) -> int {
+                                return mb * IC * ID * IH * IW
+                                       + g * IC / GC * ID * IH * IW
+                                       + ic * ID * IH * IW
+                                       + id * IH * IW
+                                       + h * IW
+                                       + w;
+                            };
+
+                            size_t widx = g * OC / GC * IC / GC * KD * KH * KW
+                                          + oc * IC / GC * KD * KH * KW
+                                          + ic * KD * KH * KW
+                                          + kd * KH * KW
+                                          + kh * KW
+                                          + kw;
+
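+                            // Bilinearly interpolate the input at the fractional
+                            // sampling point (transformed_x, transformed_y).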
+                            const int top_y_index = floor(transformed_y);
+                            const int bottom_y_index = fmin(ceil(transformed_y), IH - 1);
+                            const int left_x_index = floor(transformed_x);
+                            const int right_x_index = fmin(ceil(transformed_x), IW - 1);
+
+                            const float top_left = src_data[get_data_index(top_y_index, left_x_index)];
+                            const float top_right = src_data[get_data_index(top_y_index,
+                                                                            right_x_index)];
+                            const float bottom_left = src_data[get_data_index(bottom_y_index,
+                                                                              left_x_index)];
+                            const float bottom_right = src_data[get_data_index(bottom_y_index,
+                                                                               right_x_index)];
+
+                            const float top =
+                                    top_left + (top_right - top_left) * (transformed_x - left_x_index);
+                            const float bottom = bottom_left + (bottom_right - bottom_left) *
+                                                               (transformed_x - left_x_index);
+
+                            float val = top + (bottom - top) * (transformed_y - top_y_index);
+
+                            dst_data[oidx] += val * weights_data[widx];
+                        }
+                    }
+                }
+            }
+        }
+    });
+}
+
+template<>
+void ref_def_conv_common(const std::vector<InferenceEngine::Blob::Ptr> srcs,
+                         Blob& dst,
+                         const ie_fp16* weights_data,
+                         size_t /*weights_size*/,
+                         const ie_fp16* bias_data,
+                         size_t /*bias_size*/,
+                         const CommonTestUtils::def_conv_common_params& prm) {
+    if (srcs[0]->getTensorDesc().getLayout() != Layout::NCHW &&
+        srcs[0]->getTensorDesc().getLayout() != Layout::NCDHW)
+        THROW_IE_EXCEPTION << "Reference FP16 deformable convolution supports NCHW and NCDHW layouts only";
+    size_t KW = prm.kernel[X_AXIS];
+    size_t KH = prm.kernel[Y_AXIS];
+    size_t KD = prm.kernel.size() > Z_AXIS ? prm.kernel[Z_AXIS] : 1lu;
+
+    size_t SW = prm.stride[X_AXIS];
+    size_t SH = prm.stride[Y_AXIS];
+    size_t SD = prm.stride.size() > Z_AXIS ? prm.stride[Z_AXIS] : 0lu;
+
+    size_t DW = prm.dilation[X_AXIS];
+    size_t DH = prm.dilation[Y_AXIS];
+    size_t DD = prm.dilation.size() > Z_AXIS ? prm.dilation[Z_AXIS] : 0lu;
+
+    size_t PW = prm.pads_begin[X_AXIS];
+    size_t PH = prm.pads_begin[Y_AXIS];
+    size_t PD = prm.pads_begin.size() > Z_AXIS ? prm.pads_begin[Z_AXIS] : 0lu;
+
+    size_t GC = prm.group;
+
+    auto src_dims = srcs[0]->getTensorDesc().getDims();
+    // Read dims as NCHW/NCDHW, matching the layout check above and the FP32 path.
+    size_t IC = src_dims[1];
+    size_t ID = src_dims.size() == 5lu ? src_dims[2] : 1lu;
+    size_t IH = src_dims.at(src_dims.size() - 2);
+    size_t IW = src_dims.back();
+
+    auto dst_dims = dst.getTensorDesc().getDims();
+    size_t OW = dst_dims.back();
+    size_t OH = dst_dims.at(dst_dims.size() - 2);
+    size_t OD = dst_dims.size() == 5lu ? dst_dims[2] : 1lu;
+    size_t OC = prm.out_c;
+
+    size_t DG = prm.deformable_group;
+
+    const auto* src_data = srcs[0]->cbuffer().as<const ie_fp16 *>();
+    const auto* trans_data = srcs[1]->cbuffer().as<const ie_fp16 *>();
+    auto* dst_data = dst.buffer().as<ie_fp16 *>();
+    const int channel_per_deformable_group = IC / prm.deformable_group;
+
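+    // Unlike the FP32 path above, this FP16 reference has no batch loop and
+    // therefore assumes a single-batch input.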
+    parallel_for4d(GC, OC / GC, OD, OH, [&](size_t g, size_t oc, size_t od, size_t oh) {
+        for (uint32_t ow = 0; ow < OW; ow++) {
+            size_t oidx = g * OC / GC * OD * OH * OW
+                          + oc * OD * OH * OW
+                          + od * OH * OW
+                          + oh * OW
+                          + ow;
+            // Initialize the accumulator with the bias (or zero).
+            dst_data[oidx] = bias_data ? bias_data[g * OC / GC + oc] : (ie_fp16) 0;
+
+            for (size_t ic = 0; ic < IC / GC; ic++) {
+                const int deformable_group_idx = ic / channel_per_deformable_group;
+                const int trans_offset = deformable_group_idx * 2 * KH * KW * OH * OW;
+
+                for (size_t kd = 0; kd < KD; kd++) {
+                    for (size_t kh = 0; kh < KH; kh++) {
+                        for (size_t kw = 0; kw < KW; kw++) {
+                            int32_t iw = ow * SW - PW + kw * DW;
+                            int32_t ih = oh * SH - PH + kh * DH;
+                            int32_t id = od * SD - PD + kd * DD;
+                            const int trans_y_idx = ((2 * (kh * KW + kw)) * OH + oh) * OW + ow;
+                            float transformed_y = ih + PrecisionUtils::f16tof32(trans_data[trans_offset + trans_y_idx]);
+
+                            const int trans_x_idx = ((2 * (kh * KW + kw) + 1) * OH + oh) * OW + ow;
+                            float transformed_x = iw + PrecisionUtils::f16tof32(trans_data[trans_offset + trans_x_idx]);
+
+                            if (transformed_x < 0 || transformed_x >= (int32_t) IW ||
+                                transformed_y < 0 || transformed_y >= (int32_t) IH ||
+                                id < 0 || id >= (int32_t) ID)
+                                continue;
+
+                            auto get_data_index = [&](int h, int w) -> int {
+                                return g * IC / GC * ID * IH * IW
+                                       + ic * ID * IH * IW
+                                       + id * IH * IW
+                                       + h * IW
+                                       + w;
+                            };
+
+                            size_t widx = g * OC / GC * IC / GC * KD * KH * KW
+                                          + oc * IC / GC * KD * KH * KW
+                                          + ic * KD * KH * KW
+                                          + kd * KH * KW
+                                          + kh * KW
+                                          + kw;
+
+                            const int top_y_index    = floor(transformed_y);
+                            const int bottom_y_index = fmin(ceil(transformed_y), IH - 1);
+                            const int left_x_index   = floor(transformed_x);
+                            const int right_x_index  = fmin(ceil(transformed_x), IW - 1);
+
+                            const float top_left = PrecisionUtils::f16tof32(src_data[get_data_index(top_y_index, left_x_index)]);
+                            const float top_right = PrecisionUtils::f16tof32(src_data[get_data_index(top_y_index, right_x_index)]);
+                            const float bottom_left = PrecisionUtils::f16tof32(src_data[get_data_index(bottom_y_index, left_x_index)]);
+                            const float bottom_right = PrecisionUtils::f16tof32(src_data[get_data_index(bottom_y_index, right_x_index)]);
+
+                            const float top = top_left + (top_right - top_left) * (transformed_x - left_x_index);
+                            const float bottom = bottom_left + (bottom_right - bottom_left) * (transformed_x - left_x_index);
+
+                            float val = top + (bottom - top) * (transformed_y - top_y_index);
+
+                            // Accumulate in FP32; adding raw FP16 bit patterns with
+                            // += would not be arithmetically meaningful.
+                            dst_data[oidx] = PrecisionUtils::f32tof16(
+                                    PrecisionUtils::f16tof32(dst_data[oidx]) +
+                                    val * PrecisionUtils::f16tof32(weights_data[widx]));
+                        }
+                    }
+                }
+            }
+        }
+    });
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/def_conv_ref.hpp b/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/def_conv_ref.hpp
new file mode 100644 (file)
index 0000000..d8ffa29
--- /dev/null
@@ -0,0 +1,57 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <ie_layers.h>
+#include <ie_layers_property.hpp>
+#include <ie_blob.h>
+#include <precision_utils.h>
+#include <ie_layers_internal.hpp>
+#include "common_test_utils/common_layers_params.hpp"
+
+template<typename data_t>
+void ref_def_conv_common(const std::vector<InferenceEngine::Blob::Ptr> srcs,
+                     InferenceEngine::Blob& dst,
+                     const data_t* weights_data,
+                     size_t weights_size,
+                     const data_t* bias_data,
+                     size_t bias_size,
+                     const CommonTestUtils::def_conv_common_params& prm);
+
+void DeformableConvolution_parseParams(InferenceEngine::CNNLayer* layer);
+
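+// Same wrapper pattern as common_ref_convolution_wrap, but the extra
+// "deformable_group" attribute is parsed via DeformableConvolution_parseParams.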
+template<typename data_t>
+void common_ref_def_convolution_wrap(const std::vector<InferenceEngine::Blob::Ptr> srcs,
+                                 InferenceEngine::Blob::Ptr& dst,
+                                 const data_t* weights_data,
+                                 size_t weights_size,
+                                 const data_t* bias_data,
+                                 size_t bias_size,
+                                 const std::map<std::string, std::string>& params_map) {
+    InferenceEngine::LayerParams lp{};
+    InferenceEngine::ConvolutionLayer convLayer(lp);
+    auto data = std::make_shared<InferenceEngine::Data>("insData", srcs[0]->getTensorDesc());
+    convLayer.params = params_map;
+    convLayer.insData.push_back(data);
+    DeformableConvolution_parseParams(&convLayer);
+
+    CommonTestUtils::conv_common_params params;
+    params.kernel = convLayer._kernel;
+    auto allPad = InferenceEngine::getPaddings(convLayer);
+    params.pads_begin = allPad.begin;
+    params.pads_end = allPad.end;
+    params.stride = convLayer._stride;
+    params.dilation = convLayer._dilation;
+    params.out_c = convLayer._out_depth;
+    params.group = convLayer._group;
+
+    ref_def_conv_common<data_t>(srcs,
+                            *dst.get(),
+                            weights_data,
+                            weights_size,
+                            bias_data,
+                            bias_size,
+                            params);
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/pool_ref.cpp b/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/pool_ref.cpp
new file mode 100644 (file)
index 0000000..6df22c5
--- /dev/null
@@ -0,0 +1,267 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <algorithm>  // std::max, std::min
+#include <cfloat>     // FLT_MAX
+#include <cstdio>     // printf in Pool_parseParams
+
+#include <ie_layers.h>
+#include <precision_utils.h>
+#include "common_test_utils/common_layers_params.hpp"
+#include "pool_ref.hpp"
+
+using namespace InferenceEngine;
+
+void Pool_parseParams(InferenceEngine::CNNLayer* layer) {
+    auto poolLayer = dynamic_cast<InferenceEngine::PoolingLayer*>(layer);
+    if (!poolLayer) {
+        THROW_IE_EXCEPTION << "Layer is not instance of PoolingLayer class";
+    }
+
+    poolLayer->_kernel.clear();
+    poolLayer->_stride.clear();
+    poolLayer->_padding.clear();
+    poolLayer->_pads_end.clear();
+
+    poolLayer->_auto_pad = poolLayer->GetParamAsString("auto_pad", "");
+
+    std::vector<unsigned int> kernels = poolLayer->GetParamAsUInts("kernel", {});
+    if (kernels.empty()) {
+        int kernel_x = poolLayer->GetParamAsInt("kernel-x", -1);
+        /** Pooling as custom layer */
+        if (kernel_x == -1) {
+            try {
+                unsigned int kernel_size = poolLayer->GetParamAsUInt("kernel_size");
+                unsigned int kernel_w = poolLayer->GetParamAsUInt("kernel_w", 0u);
+                unsigned int kernel_h = poolLayer->GetParamAsUInt("kernel_h", 0u);
+                poolLayer->_kernel.insert(InferenceEngine::X_AXIS, kernel_w == 0u ? kernel_size : kernel_w);
+                poolLayer->_kernel.insert(InferenceEngine::Y_AXIS, kernel_h == 0u ? kernel_size : kernel_h);
+
+                unsigned int stride = poolLayer->GetParamAsUInt("stride", 1u);
+                unsigned int stride_w = poolLayer->GetParamAsUInt("stride_w", 0u);
+                unsigned int stride_h = poolLayer->GetParamAsUInt("stride_h", 0u);
+                poolLayer->_stride.insert(InferenceEngine::X_AXIS, stride_w == 0u ? stride : stride_w);
+                poolLayer->_stride.insert(InferenceEngine::Y_AXIS, stride_h == 0u ? stride : stride_h);
+
+                unsigned int pad = poolLayer->GetParamAsUInt("pad", 0u);
+                unsigned int pad_w = poolLayer->GetParamAsUInt("pad_w", 0u);
+                unsigned int pad_h = poolLayer->GetParamAsUInt("pad_h", 0u);
+
+                poolLayer->_padding.insert(InferenceEngine::X_AXIS, pad_w == 0u ? pad : pad_w);
+                poolLayer->_padding.insert(InferenceEngine::Y_AXIS, pad_h == 0u ? pad : pad_h);
+
+                poolLayer->_pads_end.insert(InferenceEngine::X_AXIS, 0u);
+                poolLayer->_pads_end.insert(InferenceEngine::Y_AXIS, 0u);
+            } catch (...) {
+            }
+
+            std::string alg = poolLayer->GetParamAsString("pool", "caffe.PoolingParameter.MAX");
+            poolLayer->_type = alg == "caffe.PoolingParameter.MAX" ? InferenceEngine::PoolingLayer::MAX : InferenceEngine::PoolingLayer::AVG;
+        } else  /** Default behavior */ {
+            poolLayer->_kernel.insert(InferenceEngine::X_AXIS, poolLayer->GetParamAsUInt("kernel-x"));
+            poolLayer->_kernel.insert(InferenceEngine::Y_AXIS, poolLayer->GetParamAsUInt("kernel-y"));
+
+            poolLayer->_stride.insert(InferenceEngine::X_AXIS, poolLayer->GetParamAsUInt("stride-x", 1u));
+            poolLayer->_stride.insert(InferenceEngine::Y_AXIS, poolLayer->GetParamAsUInt("stride-y", 1u));
+            // TODO: consider throwing an exception instead of silently patching the IR
+            if (0 == poolLayer->_stride[InferenceEngine::X_AXIS]) {
+                poolLayer->_stride[InferenceEngine::X_AXIS] = 1u;
+                printf("Warning! In layer %s: stride x is 0, setting it to 1\n", poolLayer->name.c_str());
+            }
+            if (0 == poolLayer->_stride[InferenceEngine::Y_AXIS]) {
+                poolLayer->_stride[InferenceEngine::Y_AXIS] = 1u;
+                printf("Warning! In layer %s: stride y is 0, setting it to 1\n", poolLayer->name.c_str());
+            }
+
+            poolLayer->_padding.insert(InferenceEngine::X_AXIS, poolLayer->GetParamAsUInt("pad-x", 0u));
+            poolLayer->_padding.insert(InferenceEngine::Y_AXIS, poolLayer->GetParamAsUInt("pad-y", 0u));
+
+            poolLayer->_pads_end.insert(InferenceEngine::X_AXIS, poolLayer->GetParamAsUInt("pad-r", poolLayer->_padding[InferenceEngine::X_AXIS]));
+            poolLayer->_pads_end.insert(InferenceEngine::Y_AXIS, poolLayer->GetParamAsUInt("pad-b", poolLayer->_padding[InferenceEngine::Y_AXIS]));
+
+            // TODO: support all pooling methods, not only max/avg
+            poolLayer->_exclude_pad = poolLayer->GetParamAsBool("exclude-pad", false);
+            std::string alg = poolLayer->GetParamAsString("pool-method", "max");
+            poolLayer->_type = alg == "avg" ? InferenceEngine::PoolingLayer::AVG : InferenceEngine::PoolingLayer::MAX;
+            if (alg != "max" && alg != "avg") {
+                THROW_IE_EXCEPTION << "Layer has incorrect pool-type!";
+            }
+        }
+    } else {
+        for (int i = 1; i <= kernels.size(); i++) {
+            poolLayer->_kernel.insert(i - 1, kernels[kernels.size() - i]);
+        }
+
+        std::vector<unsigned int> default_0(poolLayer->_kernel.size(), 0u);
+        std::vector<unsigned int> default_1(poolLayer->_kernel.size(), 1u);
+
+        std::vector<unsigned int> strides = poolLayer->GetParamAsUInts("strides", default_1);
+        for (int i = 1; i <= strides.size(); i++) {
+            if (strides[strides.size() - i] == 0) {
+                THROW_IE_EXCEPTION << "Stride could not be 0.\nIn layer " << poolLayer->name;
+            }
+            poolLayer->_stride.insert(i - 1, strides[strides.size() - i]);
+        }
+
+        std::vector<unsigned int> pads_begin = poolLayer->GetParamAsUInts("pads_begin", default_0);
+        for (int i = 1; i <= pads_begin.size(); i++) {
+            poolLayer->_padding.insert(i - 1, pads_begin[pads_begin.size() - i]);
+        }
+
+        std::vector<unsigned int> pads_end = poolLayer->GetParamAsUInts("pads_end", pads_begin);
+        for (int i = 1; i <= pads_end.size(); i++) {
+            poolLayer->_pads_end.insert(i - 1, pads_end[pads_end.size() - i]);
+        }
+
+        poolLayer->_exclude_pad = poolLayer->GetParamAsBool("exclude-pad", false);
+        std::string alg = poolLayer->GetParamAsString("pool-method", "max");
+        poolLayer->_type = alg == "avg" ? InferenceEngine::PoolingLayer::AVG : InferenceEngine::PoolingLayer::MAX;
+        if (alg != "max" && alg != "avg") {
+            THROW_IE_EXCEPTION << "Layer has incorrect pad-type!";
+        }
+    }
+    // TODO: check that all required attributes are present and that no extraneous parameters are set.
+}
+
+template<>
+void ref_pool_common<float>(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst,
+        const CommonTestUtils::pool_common_params &p) {
+    if (srcs[0]->getTensorDesc().getLayout() != Layout::NCHW)
+        THROW_IE_EXCEPTION << "Reference FP32 convolution supports NCHW layout only";
+    size_t KW = p.kernel[X_AXIS];
+    size_t KH = p.kernel[Y_AXIS];
+
+    size_t SH = p.stride[Y_AXIS];
+    size_t SW = p.stride[X_AXIS];
+
+    int PH = p.pads_begin[Y_AXIS];
+    int PW = p.pads_begin[X_AXIS];
+
+    int32_t IW, IH, IC, OW, OH, OC;
+
+    CommonTestUtils::get_common_dims(*srcs[0], IW, IH, IC);
+    CommonTestUtils::get_common_dims(dst, OW, OH, OC);
+
+    const auto *src_data = srcs[0]->cbuffer().as<const float *>();
+    auto *dst_data = dst.buffer().as<float *>();
+
+    IE_ASSERT(Layout::NCHW == dst.getTensorDesc().getLayout());
+    IE_ASSERT(4 == dst.getTensorDesc().getDims().size());
+    IE_ASSERT(OC == dst.getTensorDesc().getDims()[1]);
+
+    for (size_t c = 0; c < OC; c++) {
+        for (size_t oh = 0; oh < OH; oh++) {
+            for (size_t ow = 0; ow < OW; ow++) {
+                size_t oidx = c * OH * OW + oh * OW + ow;
+                float out_ref = p.avg ? float(0) : -FLT_MAX;
+
+                for (uint32_t kh = 0; kh < KH; kh++) {
+                    for (uint32_t kw = 0; kw < KW; kw++) {
+                        int32_t iw = ow * SW - PW + kw;
+                        int32_t ih = oh * SH - PH + kh;
+                        if (iw < 0 || iw >= IW || ih < 0
+                            || ih >= IH)
+                            continue;
+                        uint32_t iidx = c * IH * IW + ih * IW + iw;
+
+                        float d = src_data[iidx];
+                        out_ref = p.avg ? out_ref + d : std::max(out_ref, d);
+                    }
+                }
+
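+                // For average pooling, divide by the window clipped to the image
+                // when exclude-pad is set, otherwise by the window clipped to the
+                // padded extent (IW + PW, IH + PH).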
+                if (p.avg) {
+                    int w_beg = ow * SW - PW;
+                    int w_end = w_beg + KW;
+                    int h_beg = oh * SH - PH;
+                    int h_end = h_beg + KH;
+
+                    w_beg = p.exclude_pad ? std::max<int>(w_beg, 0) : std::max<int>(w_beg, -PW);
+                    h_beg = p.exclude_pad ? std::max<int>(h_beg, 0) : std::max<int>(h_beg, -PH);
+
+                    w_end = p.exclude_pad ? std::min<int>(w_end, IW) : std::min<int>(w_end, IW + PW);
+                    h_end = p.exclude_pad ? std::min<int>(h_end, IH) : std::min<int>(h_end, IH + PH);
+
+                    out_ref /= (h_end - h_beg) * (w_end - w_beg);
+                }
+
+                dst_data[oidx] = out_ref;
+            }
+        }
+    }
+}
+
+template<>
+void ref_pool_common<ie_fp16>(const std::vector<InferenceEngine::Blob::Ptr> srcs,
+                              Blob &dst,
+                              const CommonTestUtils::pool_common_params &p) {
+    const auto *src_data = srcs[0]->cbuffer().as<const ie_fp16 *>();
+    auto *dst_data = dst.buffer().as<ie_fp16 *>();
+    ASSERT_NE(src_data, nullptr);
+    ASSERT_NE(dst_data, nullptr);
+
+    int32_t IW = 0;
+    int32_t IH = 0;
+    int32_t IC = 0;
+    int32_t I_N = 0;
+    int32_t OW = 0;
+    int32_t OH = 0;
+    int32_t OC = 0;
+    int32_t ON = 0;
+    // from myriad_tests
+    auto get_dims = [](const InferenceEngine::Blob &blob,
+                       int32_t &dimx,
+                       int32_t &dimy,
+                       int32_t &dimz,
+                       int32_t &dimn) {
+        auto dims = blob.getTensorDesc().getDims();
+        auto dims_size = dims.size();
+        dimn = (dims_size >= 4) ? dims[dims_size - 4] : 1;
+        dimz = (dims_size >= 3) ? dims[dims_size - 3] : 1;
+        dimy = (dims_size >= 2) ? dims[dims_size - 2] : 0;
+        dimx = (dims_size >= 1) ? dims[dims_size - 1] : 0;
+    };
+    get_dims(*srcs[0], IW, IH, IC, I_N);
+    get_dims(dst, OW, OH, OC, ON);
+    ASSERT_EQ(IC, OC);
+    ASSERT_EQ(I_N, ON);
+
+    /* to align with Caffe */
+    for (int32_t n = 0; n < ON; n++) {
+        for (int32_t c = 0; c < OC; c++) {
+            for (int32_t oh = 0; oh < OH; oh++) {
+                for (int32_t ow = 0; ow < OW; ow++) {
+                    size_t oidx = c + OC * (ow + OW * (oh + OH * n));
+                    float out_ref = 0.0f;
+                    bool is_initialized = false;
+                    size_t count = 0;
+                    for (uint32_t kh = 0; kh < p.kernel[Y_AXIS]; kh++) {
+                        for (uint32_t kw = 0; kw < p.kernel[X_AXIS]; kw++) {
+                            int32_t iw = ow * p.stride[X_AXIS] - p.pads_begin[X_AXIS] + kw;
+                            int32_t ih = oh * p.stride[Y_AXIS] - p.pads_begin[Y_AXIS] + kh;
+                            if (iw < 0 || iw >= IW || ih < 0 || ih >= IH)
+                                continue;
+                            size_t iidx = c + IC * (iw + IW * (ih + IH * n));
+                            float d = PrecisionUtils::f16tof32(src_data[iidx]);
+                            if (p.avg) {
+                                out_ref += d;
+                                count++;
+                            } else {
+                                if (!is_initialized) {
+                                    out_ref = d;
+                                    is_initialized = true;
+                                } else {
+                                    if (out_ref < d)
+                                        out_ref = d;
+                                }
+                            }
+                        }
+                    }
+                    if (p.avg) {
+                        if ((p.pads_begin[X_AXIS] || p.pads_begin[Y_AXIS]) && !p.exclude_pad) {
+                            out_ref /= (p.kernel[Y_AXIS] * p.kernel[X_AXIS]);
+                        } else
+                            out_ref /= count;
+                    }
+                    dst_data[oidx] = PrecisionUtils::f32tof16(out_ref);
+                }
+            }
+        }
+    }
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/pool_ref.hpp b/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/pool_ref.hpp
new file mode 100644 (file)
index 0000000..e263777
--- /dev/null
@@ -0,0 +1,39 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <ie_blob.h>
+#include <single_layer_common.hpp>
+#include <ie_layers_internal.hpp>
+#include "common_test_utils/common_layers_params.hpp"
+
+template<typename data_t>
+void ref_pool_common(const std::vector<InferenceEngine::Blob::Ptr> srcs,
+        InferenceEngine::Blob &dst,
+        const CommonTestUtils::pool_common_params &p);
+
+void Pool_parseParams(InferenceEngine::CNNLayer* layer);
+
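+// Builds a temporary PoolingLayer from an IR-style string map, resolves the
+// paddings, and forwards the resulting parameters to ref_pool_common.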
+template<typename data_t>
+void common_ref_pool_wrap(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob::Ptr &dst,
+                          const std::map<std::string, std::string> &params_map) {
+    InferenceEngine::LayerParams lp{};
+    InferenceEngine::PoolingLayer poolLayer(lp);
+    auto data = std::make_shared<InferenceEngine::Data>("insData", srcs[0]->getTensorDesc());
+    poolLayer.params = params_map;
+    poolLayer.insData.push_back(data);
+    Pool_parseParams(&poolLayer);
+
+    CommonTestUtils::pool_common_params params;
+    params.kernel = poolLayer._kernel;
+    auto allPad = InferenceEngine::getPaddings(poolLayer);
+    params.pads_begin = allPad.begin;
+    params.pads_end = allPad.end;
+    params.stride = poolLayer._stride;
+    params.avg = poolLayer._type == InferenceEngine::PoolingLayer::PoolType::AVG;
+    params.exclude_pad = poolLayer._exclude_pad;
+
+    ref_pool_common<data_t>(srcs, *dst.get(), params);
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/single_layer_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/single_layer_tests.hpp
new file mode 100644 (file)
index 0000000..df5db8f
--- /dev/null
@@ -0,0 +1,498 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <gtest/gtest.h>
+
+#include <tests_common.hpp>
+#include <ie_format_parser.h>
+#include <ie_layers_internal.hpp>
+#include <ie_layers_internal.hpp>
+#include <functional_test_utils/plugin_cache.hpp>
+
+#include "conv_ref.hpp"
+#include "deconv_ref.hpp"
+#include "def_conv_ref.hpp"
+#include "pool_ref.hpp"
+#include "common_test_utils/common_layers_params.hpp"
+
+using namespace InferenceEngine;
+
+struct PluginDependentParam {
+    std::string deviceName;
+    InferenceEngine::Layout layout;
+    InferenceEngine::Precision precision;
+    float tolerance;
+};
+
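+// Base class for per-layer test helpers: each subclass supplies the IR
+// parameter map, weight/bias sizing, and precision-specific reference
+// implementations for one layer type.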
+class LayerTestHelper {
+protected:
+    std::string type;
+public:
+    using Ptr = std::shared_ptr<LayerTestHelper>;
+    explicit LayerTestHelper(const std::string &_type) : type(_type) {}
+
+    virtual ~LayerTestHelper() = default;
+    LayerTestHelper() = default;
+
+    virtual void updatePaddingValues(const InferenceEngine::CNNNetwork &network) = 0;
+
+    virtual std::map<std::string, std::string> getMapParams() const = 0;
+
+    virtual size_t getWeightByteSize(size_t elementSize, size_t numChannels) const = 0;
+
+    virtual size_t getBiasByteSize(size_t elementSize) const = 0;
+
+    std::string getType() const { return type; }
+
+    virtual void ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs,
+                          InferenceEngine::Blob &dst,
+                          const float *weights_data,
+                          size_t weights_size,
+                          const float *bias_data,
+                          size_t bias_size) const = 0;
+
+    virtual void ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs,
+                          InferenceEngine::Blob &dst,
+                          const InferenceEngine::ie_fp16 *weights_data,
+                          size_t weights_size,
+                          const InferenceEngine::ie_fp16 *bias_data,
+                          size_t bias_size) const = 0;
+
+    InferenceEngine::Blob::Ptr getRefBlob(size_t weightSize, size_t biasSize,
+                                          const InferenceEngine::TBlob<uint8_t>::Ptr &weights,
+                                          const std::vector<InferenceEngine::Blob::Ptr> srcs,
+                                          const InferenceEngine::TensorDesc &dstTensorDesc,
+                                          const InferenceEngine::Precision &precision) const;
+
+    static std::string propertyToString(const InferenceEngine::PropertyVector<unsigned int> &propertyVector);
+};
+
+class ConvolutionTestHelper : public LayerTestHelper {
+protected:
+    CommonTestUtils::conv_common_params convParams;
+public:
+    explicit ConvolutionTestHelper(const CommonTestUtils::conv_common_params &_convParams);
+
+    void updatePaddingValues(const InferenceEngine::CNNNetwork &network) override;
+
+    std::map<std::string, std::string> getMapParams() const override;
+
+    size_t getWeightByteSize(size_t elementSize, size_t numChannels) const override;
+
+    size_t getBiasByteSize(size_t elementSize) const override;
+
+    void ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const float *weights_data,
+                  size_t weights_size, const float *bias_data, size_t bias_size) const override;
+
+    void
+    ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const InferenceEngine::ie_fp16 *weights_data,
+             size_t weights_size, const InferenceEngine::ie_fp16 *bias_data, size_t bias_size) const override;
+};
+
+class DeconvolutionTestHelper : public ConvolutionTestHelper {
+public:
+    explicit DeconvolutionTestHelper(const CommonTestUtils::conv_common_params &_convParams);
+
+    void ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const float *weights_data,
+                  size_t weights_size, const float *bias_data, size_t bias_size) const override;
+
+    void
+    ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const InferenceEngine::ie_fp16 *weights_data,
+             size_t weights_size, const InferenceEngine::ie_fp16 *bias_data, size_t bias_size) const override;
+};
+
+class DeformableConvolutionTestHelper : public ConvolutionTestHelper {
+protected:
+    CommonTestUtils::def_conv_common_params defConvParams;
+public:
+    explicit DeformableConvolutionTestHelper(const CommonTestUtils::conv_common_params &_convParams, const int deformable_group);
+
+    void updatePaddingValues(const InferenceEngine::CNNNetwork &network) override;
+
+    std::map<std::string, std::string> getMapParams() const override;
+
+    void ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const float *weights_data,
+                  size_t weights_size, const float *bias_data, size_t bias_size) const override;
+
+    void
+    ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const InferenceEngine::ie_fp16 *weights_data,
+             size_t weights_size, const InferenceEngine::ie_fp16 *bias_data, size_t bias_size) const override;
+};
+
+class PoolingTestHelper : public LayerTestHelper {
+protected:
+    CommonTestUtils::pool_common_params poolParams;
+public:
+    explicit PoolingTestHelper(const CommonTestUtils::pool_common_params &_poolParams);
+
+    void ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const float *weights_data,
+                  size_t weights_size, const float *bias_data, size_t bias_size) const override;
+
+    void
+    ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const InferenceEngine::ie_fp16 *weights_data,
+             size_t weights_size, const InferenceEngine::ie_fp16 *bias_data, size_t bias_size) const override;
+
+    std::map<std::string, std::string> getMapParams() const override;
+
+    void updatePaddingValues(const InferenceEngine::CNNNetwork &network) override;
+
+    size_t getWeightByteSize(size_t elementSize, size_t numChannels) const override;
+
+    size_t getBiasByteSize(size_t elementSize) const override;
+};
+
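+// PRETTY_PARAM wraps each tuple element in a named type so that gtest can print
+// readable parameter values for each test instance.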
+PRETTY_PARAM(InitialShapes, CommonTestUtils::InOutShapes)
+
+PRETTY_PARAM(NewShapes, CommonTestUtils::InOutShapes)
+
+PRETTY_PARAM(ConvParams, CommonTestUtils::conv_common_params)
+
+PRETTY_PARAM(PluginParams, PluginDependentParam)
+
+PRETTY_PARAM(Helper, LayerTestHelper::Ptr)
+
+Blob::Ptr LayerTestHelper::getRefBlob(size_t weightSize, size_t biasSize,
+                                      const TBlob<uint8_t>::Ptr &weights,
+                                      const std::vector<InferenceEngine::Blob::Ptr> srcs,
+                                      const TensorDesc &dstTensorDesc,
+                                      const Precision &precision) const {
+    Blob::Ptr dst_ref;
+    if (precision == Precision::FP32) {
+        dst_ref = make_shared_blob<float>(dstTensorDesc);
+        dst_ref->allocate();
+        const auto *weights_data = weights->buffer().as<const float *>();
+        ref_fp32(srcs, *dst_ref.get(), weights_data, weightSize, weights_data + weightSize, biasSize);
+    } else {
+        dst_ref = make_shared_blob<ie_fp16>(dstTensorDesc);
+        dst_ref->allocate();
+        const auto *weights_data = weights->buffer().as<const ie_fp16 *>();
+        ref_fp16(srcs, *dst_ref.get(), weights_data, weightSize, weights_data + weightSize, biasSize);
+    }
+    return dst_ref;
+}
+
+std::string LayerTestHelper::propertyToString(const PropertyVector<unsigned int> &propertyVector) {
+    if (!propertyVector.size()) return "";
+    std::string result = std::to_string(propertyVector[0]);
+    for (size_t i = 1; i < propertyVector.size(); i++) {
+        result += "," + std::to_string(propertyVector[i]);
+    }
+    return result;
+}
+
+ConvolutionTestHelper::ConvolutionTestHelper(const CommonTestUtils::conv_common_params &_convParams) : LayerTestHelper("Convolution"), convParams(_convParams) {}
+
+void ConvolutionTestHelper::updatePaddingValues(const CNNNetwork &network) {
+    auto found = std::find_if(network.begin(), network.end(), [this](const CNNLayer::Ptr &layer) {
+        return layer->type == type;
+    });
+    ASSERT_NE(found, network.end());
+
+    auto castedLayer = std::dynamic_pointer_cast<ConvolutionLayer>(*found);
+    auto allPad = getPaddings(*castedLayer.get());
+    convParams.pads_end = allPad.end;
+    convParams.pads_begin = allPad.begin;
+}
+
+std::map<std::string, std::string> ConvolutionTestHelper::getMapParams() const {
+    std::map<std::string, std::string> params;
+    if (!convParams.auto_pad.empty()) {
+        params["auto_pad"] = convParams.auto_pad;
+    }
+    params["group"] = std::to_string(convParams.group);
+    params["output"] = std::to_string(convParams.out_c);
+
+    params["kernel"] = propertyToString(convParams.kernel);
+    params["strides"] = propertyToString(convParams.stride);
+    params["pads_begin"] = propertyToString(convParams.pads_begin);
+    params["pads_end"] = propertyToString(convParams.pads_end);
+    params["dilations"] = propertyToString(convParams.dilation);
+    return params;
+}
+
+size_t ConvolutionTestHelper::getWeightByteSize(size_t elementSize, size_t numChannels) const {
+    return (convParams.kernel[X_AXIS] * convParams.kernel[Y_AXIS] * convParams.out_c * numChannels * elementSize)
+           / convParams.group;
+}
+
+size_t ConvolutionTestHelper::getBiasByteSize(size_t elementSize) const { return convParams.out_c * elementSize; }
+
+void
+ConvolutionTestHelper::ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst, const float *weights_data,
+                                size_t weights_size, const float *bias_data, size_t bias_size) const {
+    ref_conv_common<>(srcs, dst, weights_data, weights_size, bias_data, bias_size, convParams);
+}
+
+void ConvolutionTestHelper::ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst,
+                                     const ie_fp16 *weights_data, size_t weights_size,
+                                     const ie_fp16 *bias_data, size_t bias_size) const {
+    ref_conv_common<>(srcs, dst, weights_data, weights_size, bias_data, bias_size, convParams);
+}
+
+DeconvolutionTestHelper::DeconvolutionTestHelper(const CommonTestUtils::conv_common_params &_convParams) : ConvolutionTestHelper(
+        _convParams) {
+    type = "Deconvolution";
+}
+
+void
+DeconvolutionTestHelper::ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst,
+                                  const float *weights_data,
+                                  size_t weights_size, const float *bias_data, size_t bias_size) const {
+    ref_deconv_common<float>(srcs, dst, weights_data, weights_size, bias_data, bias_size, convParams);
+}
+
+void DeconvolutionTestHelper::ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst,
+                                       const ie_fp16 *weights_data, size_t weights_size,
+                                       const ie_fp16 *bias_data, size_t bias_size) const {
+    ref_deconv_common<ie_fp16>(srcs, dst, weights_data, weights_size, bias_data, bias_size, convParams);
+}
+
+
+DeformableConvolutionTestHelper::DeformableConvolutionTestHelper(const CommonTestUtils::conv_common_params &_convParams,
+                                                                 const int deformable_group) :
+        ConvolutionTestHelper(_convParams), defConvParams(convParams) {
+    defConvParams.deformable_group = deformable_group;
+    type = "DeformableConvolution";
+}
+
+void DeformableConvolutionTestHelper::ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst,
+                                  const float *weights_data,
+                                  size_t weights_size, const float *bias_data, size_t bias_size) const {
+    ref_def_conv_common<float>(srcs, dst, weights_data, weights_size, bias_data, bias_size, defConvParams);
+}
+
+void DeformableConvolutionTestHelper::ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst,
+                                       const ie_fp16 *weights_data, size_t weights_size,
+                                       const ie_fp16 *bias_data, size_t bias_size) const {
+    ref_def_conv_common<ie_fp16>(srcs, dst, weights_data, weights_size, bias_data, bias_size, defConvParams);
+}
+
+void DeformableConvolutionTestHelper::updatePaddingValues(const CNNNetwork &network) {
+    auto found = std::find_if(network.begin(), network.end(), [this](const CNNLayer::Ptr &layer) {
+        return layer->type == type;
+    });
+    ASSERT_NE(found, network.end());
+
+    auto castedLayer = std::dynamic_pointer_cast<ConvolutionLayer>(*found);
+    auto allPad = getPaddings(*castedLayer.get());
+    defConvParams.pads_end = allPad.end;
+    defConvParams.pads_begin = allPad.begin;
+}
+
+std::map<std::string, std::string> DeformableConvolutionTestHelper::getMapParams() const {
+    std::map<std::string, std::string> params;
+    if (!defConvParams.auto_pad.empty()) {
+        params["auto_pad"] = defConvParams.auto_pad;
+    }
+    params["group"] = std::to_string(defConvParams.group);
+    params["output"] = std::to_string(defConvParams.out_c);
+    params["deformable_group"] = std::to_string(defConvParams.deformable_group);
+
+    params["kernel"] = propertyToString(defConvParams.kernel);
+    params["strides"] = propertyToString(defConvParams.stride);
+    params["pads_begin"] = propertyToString(defConvParams.pads_begin);
+    params["pads_end"] = propertyToString(defConvParams.pads_end);
+    params["dilations"] = propertyToString(defConvParams.dilation);
+    return params;
+}
+
+PoolingTestHelper::PoolingTestHelper(const CommonTestUtils::pool_common_params &_poolParams) : LayerTestHelper("Pooling"),
+                                                                              poolParams(_poolParams) {
+}
+
+std::map<std::string, std::string> PoolingTestHelper::getMapParams() const {
+    std::map<std::string, std::string> params;
+    if (!poolParams.auto_pad.empty()) {
+        params["auto_pad"] = poolParams.auto_pad;
+    }
+    params["kernel"] = propertyToString(poolParams.kernel);
+    params["strides"] = propertyToString(poolParams.stride);
+    auto padStr = propertyToString(poolParams.pads_begin);
+    if (!padStr.empty()) params["pads_begin"] = padStr;
+    padStr = propertyToString(poolParams.pads_end);
+    if (!padStr.empty()) params["pads_end"] = padStr;
+    params["exclude-pad"] = poolParams.exclude_pad ? "true" : "false";
+    params["pool-method"] = poolParams.avg ? "avg" : "max";
+    return params;
+}
+
+void
+PoolingTestHelper::ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst,
+                            const float *weights_data, size_t weights_size,
+                            const float *bias_data, size_t bias_size) const {
+    ref_pool_common<float>(srcs, dst, poolParams);
+}
+
+void PoolingTestHelper::ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst,
+                                 const ie_fp16 *weights_data, size_t weights_size,
+                                 const ie_fp16 *bias_data, size_t bias_size) const {
+    ref_pool_common<ie_fp16>(srcs, dst, poolParams);
+}
+
+void PoolingTestHelper::updatePaddingValues(const InferenceEngine::CNNNetwork &network) {
+    auto found = std::find_if(network.begin(), network.end(), [this](const CNNLayer::Ptr &layer) {
+        return layer->type == type;
+    });
+    ASSERT_NE(found, network.end());
+
+    auto castedLayer = std::dynamic_pointer_cast<PoolingLayer>(*found);
+    auto allPad = getPaddings(*castedLayer.get());
+    poolParams.pads_end = allPad.end;
+    poolParams.pads_begin = allPad.begin;
+}
+
+size_t PoolingTestHelper::getWeightByteSize(size_t elementSize, size_t numChannels) const {
+    return 0;
+}
+
+size_t PoolingTestHelper::getBiasByteSize(size_t elementSize) const {
+    return 0;
+}
+
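+// Parameterized over initial/new shapes, plugin settings, and a layer helper:
+// builds a single-layer network, reshapes it, infers on the target plugin, and
+// compares the result against the helper's reference implementation.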
+class CommonSingleLayerTest
+        : public testing::WithParamInterface<std::tuple<InitialShapes, NewShapes, PluginParams, Helper>>,
+          public ::testing::Test {
+protected:
+    void SetUp() override {
+        auto params = GetParam();
+        initialShapes = std::get<0>(params);
+        newShapes = std::get<1>(params);
+        pluginParams = std::get<2>(params);
+        layerHelper = std::get<3>(params);
+        PluginCache::get().reset();
+    }
+
+    ICNNNetwork::InputShapes
+    setInputShapes(CNNNetwork &network, const std::vector<SizeVector> &dims) {
+        auto inputShapes = network.getInputShapes();
+        int i = 0;
+        IE_ASSERT(inputShapes.size() == dims.size());
+        for (auto &pair : inputShapes) {
+            pair.second = dims[i++];
+        }
+        return inputShapes;
+    }
+
+    TBlob<uint8_t>::Ptr createWeights(size_t elementSize, size_t weightByteSize, size_t biasByteSize) const {
+        TBlob<uint8_t>::Ptr weights = make_shared_blob<uint8_t>({Precision::U8, {weightByteSize + biasByteSize}, Layout::C});
+        weights->allocate();
+        BufferWrapper wrappedWeights(weights, this->pluginParams.precision);
+        fill_data_common(wrappedWeights, weights->size() / elementSize);
+        return weights;
+    }
+
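+    // Note: the FormatParser is allocated with new here; ownership is assumed
+    // to be taken by buildSingleLayerNetworkCommon.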
+    template<int Version = 3>
+    static details::CNNNetworkImplPtr
+    buildSingleLayerNetwork(const std::string &layerType,
+                            const CommonTestUtils::InOutShapes &inOutShapes,
+                            std::map<std::string, std::string> *params,
+                            const std::string &layerDataName = "data",
+                            const Precision &precision = Precision::FP32,
+                            size_t weightsSize = 0,
+                            size_t biasesSize = 0,
+                            const TBlob<uint8_t>::Ptr &weights = nullptr) {
+        auto *parser = new details::FormatParser(Version);
+        return buildSingleLayerNetworkCommon<Version>(parser, layerType, inOutShapes, params, layerDataName, precision,
+                                                      weightsSize, biasesSize, weights);
+    }
+
+protected:
+    CommonTestUtils::InOutShapes initialShapes;
+    CommonTestUtils::InOutShapes newShapes;
+    PluginDependentParam pluginParams;
+    LayerTestHelper::Ptr layerHelper;
+
+    InputInfo::Ptr inputData;
+    std::string inputName;
+    InputInfo::Ptr transData;
+    std::string transName;
+    DataPtr outputData;
+    std::string outputName;
+};
+
+TEST_P(CommonSingleLayerTest, inferAfterReshape) {
+    Core ie;
+
+    auto params = layerHelper->getMapParams();
+    size_t elementSize = Precision(pluginParams.precision).size();
+    ASSERT_EQ(initialShapes.inDims[0][1], newShapes.inDims[0][1]);
+    size_t numChannels = initialShapes.inDims[0][1];
+    size_t weightByteSize = layerHelper->getWeightByteSize(elementSize, numChannels);
+    size_t biasByteSize = layerHelper->getBiasByteSize(elementSize);
+
+    auto weights = createWeights(elementSize, weightByteSize, biasByteSize);
+
+    auto networkImplPtr = buildSingleLayerNetwork<3>(layerHelper->getType(), initialShapes, &params, "data",
+                                                     pluginParams.precision, weightByteSize, biasByteSize, weights);
+
+    CNNNetwork network(networkImplPtr);
+    std::tie(inputName, inputData) = (*network.getInputsInfo().begin());
+    inputData->setPrecision(pluginParams.precision);
+    inputData->setLayout(pluginParams.layout);
+    std::tie(outputName, outputData) = (*network.getOutputsInfo().begin());
+    outputData->setPrecision(pluginParams.precision);
+    outputData->setLayout(pluginParams.layout);
+
+    if (layerHelper->getType() == "DeformableConvolution") {
+        std::tie(transName, transData) = (*network.getInputsInfo().find("Input1"));
+        transData->setPrecision(pluginParams.precision);
+        transData->setLayout(pluginParams.layout);
+    }
+
+    auto inputShapes = setInputShapes(network, newShapes.inDims);
+
+    network.reshape(inputShapes);
+    layerHelper->updatePaddingValues(network);
+
+    auto exeNetwork = ie.LoadNetwork(network, pluginParams.deviceName);
+    auto request = exeNetwork.CreateInferRequest();
+    auto src = request.GetBlob(inputName);
+    GenRandomDataCommon(src);
+
+    size_t weights_size = weightByteSize / elementSize;
+    size_t biases_size = biasByteSize / elementSize;
+
+    if (layerHelper->getType() == "DeformableConvolution") {
+        auto trans = request.GetBlob(transName);
+        GenRandomDataCommon(trans);
+
+        request.Infer();
+        auto dst = request.GetBlob(outputName);
+
+        Blob::Ptr dst_ref = layerHelper->getRefBlob(weights_size, biases_size, weights, { src, trans },
+                                                    dst->getTensorDesc(), pluginParams.precision);
+        CompareCommonAbsolute(dst, dst_ref, pluginParams.tolerance);
+    } else {
+        request.Infer();
+        auto dst = request.GetBlob(outputName);
+
+        Blob::Ptr dst_ref = layerHelper->getRefBlob(weights_size, biases_size, weights, { src },
+                                                    dst->getTensorDesc(), pluginParams.precision);
+
+        CompareCommonAbsolute(dst, dst_ref, pluginParams.tolerance);
+    }
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/graph_tools/graph_tools_functional_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/graph_tools/graph_tools_functional_tests.hpp
new file mode 100644 (file)
index 0000000..861c21e
--- /dev/null
@@ -0,0 +1,37 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <details/ie_cnn_network_tools.h>
+#include <cpp/ie_cnn_network.h>
+#include <memory>
+#include <test_model_path.hpp>
+
+using namespace testing;
+using namespace InferenceEngine::details;
+using namespace InferenceEngine;
+using namespace std;
+
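+// checkSort verifies that a layer sequence is topologically sorted: every input
+// of a layer must be produced by a layer that appears earlier in the sequence.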
+class GraphToolsFncTest : public ::testing::Test {
+public:
+    template <typename T>
+    static void checkSort(const T &sorted) {
+        for (int i = 0; i < sorted.size(); i++) {
+            // check that all inputs were already visited:
+            for (auto &inputs : sorted[i]->insData) {
+                auto inputName = inputs.lock()->getCreatorLayer().lock()->name;
+
+                bool bFound = false;
+                for (int j = 0; j < i; j++) {
+                    if (sorted[j]->name == inputName) {
+                        bFound = true;
+                        break;
+                    }
+                }
+                ASSERT_TRUE(bFound) << "order is not correct, layer " << sorted[i]->name << " has missed input: "
+                                    << inputName;
+            }
+        }
+    }
+};
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/ie_class/ie_class.hpp b/inference-engine/tests_deprecated/functional/shared_tests/ie_class/ie_class.hpp
new file mode 100644 (file)
index 0000000..c19573f
--- /dev/null
@@ -0,0 +1,1411 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <gtest/gtest.h>
+#include <details/ie_cnn_network_tools.h>
+#include <ie_core.hpp>
+#include <ie_plugin_config.hpp>
+#include <tests_common.hpp>
+#include <memory>
+#include <fstream>
+#include <test_model_path.hpp>
+#include <hetero/hetero_plugin_config.hpp>
+#include <graph_tools.hpp>
+#include <functional_test_utils/plugin_cache.hpp>
+#include <multi-device/multi_device_config.hpp>
+
+#include <ngraph/function.hpp>
+#include <ngraph/op/subtract.hpp>
+
+#include "common_test_utils/file_utils.hpp"
+#include "common_test_utils/unicode_utils.hpp"
+#include "ngraph_functions/subgraph_builders.hpp"
+
+#ifdef ENABLE_UNICODE_PATH_SUPPORT
+#include <iostream>
+#define GTEST_COUT std::cerr << "[          ] [ INFO ] "
+#include <codecvt>
+#endif
+
+using namespace testing;
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+using namespace InferenceEngine::PluginConfigParams;
+
+class IEClassBasicTest : public TestsCommon {
+public:
+    void SetUp() override {
+        // To close loaded devices.
+        PluginCache::get().reset();
+    }
+};
+
+class IEClassBasicTestP : public IEClassBasicTest, public WithParamInterface<std::pair<std::string, std::string> > {
+protected:
+    std::string pluginName;
+    std::string deviceName;
+public:
+    void SetUp() override {
+        IEClassBasicTest::SetUp();
+
+        pluginName = GetParam().first + IE_BUILD_POSTFIX;
+        deviceName = GetParam().second;
+    }
+};
+
+class IEClassNetworkTest : public IEClassBasicTest {
+public:
+    void SetUp() override {
+        IEClassBasicTest::SetUp();
+
+        // Generic network: a split-conv-concat subgraph
+        {
+            std::shared_ptr<ngraph::Function> fnPtr = ngraph::builder::subgraph::makeSplitConvConcat();
+            ASSERT_NO_THROW(actualNetwork = CNNNetwork(fnPtr));
+        }
+
+        // Quite simple network
+        {
+            std::shared_ptr<ngraph::Function> fnPtr = ngraph::builder::subgraph::makeSingleConv();
+            fnPtr->set_friendly_name("simpleNetwork");
+            ASSERT_NO_THROW(simpleNetwork = CNNNetwork(fnPtr));
+        }
+
+        // multi-input subtract network
+        {
+            auto fnPtr = ngraph::builder::subgraph::make2InputSubtract();
+            irv10Network = InferenceEngine::CNNNetwork(fnPtr);
+        }
+    }
+
+    void setHeteroNetworkAffinity(const std::string& target) {
+        InferenceEngine::InputsDataMap networkInputs = actualNetwork.getInputsInfo();
+
+        CNNLayerPtr layer;
+        for (auto input : networkInputs) {
+            InputInfo::Ptr q = input.second;
+            DataPtr p = q->getInputData();
+            layer = p->getInputTo().begin()->second;
+        }
+
+        std::map<std::string, std::string> deviceMapping = {
+            {"Convololution_4",   target},
+            {"Convololution_7",   "CPU"},
+            {"Relu_5",   "CPU"},
+            {"Relu_8",   target},
+            {"Concat_9", "CPU"}
+        };
+
+        CNNNetDFS(layer, [&](const CNNLayerPtr &layer) {
+            auto it = deviceMapping.find(layer->name);
+            if (it != deviceMapping.end()) {
+                layer->affinity = it->second;
+            } else {
+                layer->affinity = "CPU";
+            }
+        });
+    }
+
+    CNNNetwork actualNetwork;
+    CNNNetwork simpleNetwork;
+    CNNNetwork irv10Network;
+
+};
+
+class IEClassNetworkTestP : public IEClassNetworkTest, public WithParamInterface<std::string> {
+protected:
+    std::string deviceName;
+public:
+    void SetUp() override {
+        IEClassNetworkTest::SetUp();
+
+        deviceName = GetParam();
+    }
+};
+
+//
+// Create and register plugins
+//
+
+TEST_F(IEClassBasicTest, smoke_createDefault) {
+    ASSERT_NO_THROW(Core ie);
+}
+
+TEST_P(IEClassBasicTestP, registerExistingPluginThrows) {
+    Core ie;
+    ASSERT_THROW(ie.RegisterPlugin(pluginName, deviceName), InferenceEngineException);
+}
+
+TEST_P(IEClassBasicTestP, registerNewPluginNoThrows) {
+    Core ie;
+    ASSERT_NO_THROW(ie.RegisterPlugin(pluginName, "NEW_DEVICE_NAME"));
+    ASSERT_NO_THROW(ie.GetMetric("NEW_DEVICE_NAME", METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
+}
+
+TEST_F(IEClassBasicTest, smoke_registerExistingPluginFileThrows) {
+    Core ie;
+    ASSERT_THROW(ie.RegisterPlugins("nonExistPlugins.xml"), InferenceEngineException);
+}
+
+TEST_F(IEClassBasicTest, smoke_createNonExistingConfigThrows) {
+    ASSERT_THROW(Core ie("nonExistPlugins.xml"), InferenceEngineException);
+}
+
+#if defined(__linux__) && !defined(__APPLE__)
+
+TEST_F(IEClassBasicTest, smoke_createMockEngineConfigNoThrows) {
+    ASSERT_NO_THROW(Core ie(TestDataHelpers::get_data_path() + "/ie_class/mock_engine_valid.xml"));
+}
+
+TEST_F(IEClassBasicTest, smoke_createMockEngineConfigThrows) {
+    ASSERT_THROW(Core ie(TestDataHelpers::get_data_path() + "/ie_class/mock_engine.xml"), InferenceEngineException);
+}
+
+#endif
+
+#ifdef ENABLE_UNICODE_PATH_SUPPORT
+
+TEST_P(IEClassBasicTestP, smoke_registerPluginsXMLUnicodePath) {
+    std::string pluginXML = TestDataHelpers::get_data_path() + "/ie_class/mock_engine_valid.xml";
+
+    for (std::size_t testIndex = 0; testIndex < CommonTestUtils::test_unicode_postfix_vector.size(); testIndex++) {
+        std::wstring postfix  = L"_" + CommonTestUtils::test_unicode_postfix_vector[testIndex];
+        std::wstring pluginsXmlW = CommonTestUtils::addUnicodePostfixToPath(pluginXML, postfix);
+
+        try {
+            bool is_copy_successful = CommonTestUtils::copyFile(pluginXML, pluginsXmlW);
+            if (!is_copy_successful) {
+                FAIL() << "Unable to copy from '" << pluginXML << "' to '" << wStringtoMBCSstringChar(pluginsXmlW) << "'";
+            }
+
+            GTEST_COUT << "Test " << testIndex << std::endl;
+
+            Core ie;
+            GTEST_COUT << "Core created " << testIndex << std::endl;
+            ASSERT_NO_THROW(ie.RegisterPlugins(wStringtoMBCSstringChar(pluginsXmlW)));
+            CommonTestUtils::removeFile(pluginsXmlW);
+
+            ASSERT_NO_THROW(ie.GetVersions(deviceName));
+            GTEST_COUT << "Plugin created " << testIndex << std::endl;
+
+            ASSERT_NO_THROW(ie.RegisterPlugin(pluginName, "TEST_DEVICE"));
+            ASSERT_NO_THROW(ie.GetVersions("TEST_DEVICE"));
+            GTEST_COUT << "Plugin registered and created " << testIndex << std::endl;
+
+            GTEST_COUT << "OK" << std::endl;
+        }
+        catch (const InferenceEngine::details::InferenceEngineException &e_next) {
+            CommonTestUtils::removeFile(pluginsXmlW);
+            FAIL() << e_next.what();
+        }
+    }
+}
+
+#endif  // ENABLE_UNICODE_PATH_SUPPORT
+
+//
+// GetVersions()
+//
+
+TEST_P(IEClassBasicTestP, getVersionsByExactDeviceNoThrow) {
+    Core ie;
+    ASSERT_NO_THROW(ie.GetVersions(deviceName + ".0"));
+}
+
+TEST_P(IEClassBasicTestP, getVersionsByDeviceClassNoThrow) {
+    Core ie;
+    ASSERT_NO_THROW(ie.GetVersions(deviceName));
+}
+
+TEST_P(IEClassBasicTestP, getVersionsNonEmpty) {
+    Core ie;
+    ASSERT_EQ(2, ie.GetVersions("HETERO:" + deviceName).size());
+}
+
+//
+// UnregisterPlugin
+//
+
+TEST_P(IEClassBasicTestP, unregisterExistingPluginNoThrow) {
+    Core ie;
+    // device instance is not created yet
+    ASSERT_THROW(ie.UnregisterPlugin(deviceName), InferenceEngineException);
+
+    // make the first call to IE which created device instance
+    ie.GetVersions(deviceName);
+    // now, we can unregister device
+    ASSERT_NO_THROW(ie.UnregisterPlugin(deviceName));
+}
+
+TEST_P(IEClassBasicTestP, accessToUnregisteredPluginThrows) {
+    Core ie;
+    ASSERT_THROW(ie.UnregisterPlugin(deviceName), InferenceEngineException);
+    ASSERT_NO_THROW(ie.GetVersions(deviceName));
+    ASSERT_NO_THROW(ie.UnregisterPlugin(deviceName));
+    ASSERT_NO_THROW(ie.SetConfig({ }, deviceName));
+    ASSERT_NO_THROW(ie.GetVersions(deviceName));
+    ASSERT_NO_THROW(ie.UnregisterPlugin(deviceName));
+}
+
+TEST_F(IEClassBasicTest, smoke_unregisterNonExistingPluginThrows) {
+    Core ie;
+    ASSERT_THROW(ie.UnregisterPlugin("unkown_device"), InferenceEngineException);
+}
+
+//
+// SetConfig
+//
+
+TEST_P(IEClassBasicTestP, SetConfigAllThrows) {
+    Core ie;
+    ASSERT_NO_THROW(ie.SetConfig({ { "unsupported_key", "4" } }));
+    ASSERT_ANY_THROW(ie.GetVersions(deviceName));
+}
+
+TEST_P(IEClassBasicTestP, SetConfigForUnRegisteredDeviceThrows) {
+    Core ie;
+    ASSERT_THROW(ie.SetConfig({ { "unsupported_key", "4" } }, "unregistered_device"), InferenceEngineException);
+}
+
+TEST_P(IEClassBasicTestP, SetConfigNoThrow) {
+    Core ie;
+    ASSERT_NO_THROW(ie.SetConfig({ { KEY_PERF_COUNT, YES } }, deviceName));
+}
+
+TEST_P(IEClassBasicTestP, SetConfigAllNoThrow) {
+    Core ie;
+    ASSERT_NO_THROW(ie.SetConfig({ { KEY_PERF_COUNT, YES } }));
+    ASSERT_NO_THROW(ie.GetVersions(deviceName));
+}
+
+TEST_F(IEClassBasicTest, smoke_SetConfigHeteroThrows) {
+    Core ie;
+    ASSERT_NO_THROW(ie.SetConfig({ { KEY_PERF_COUNT, YES } }, "HETERO"));
+}
+
+TEST_P(IEClassBasicTestP, SetConfigHeteroTargetFallbackThrows) {
+    Core ie;
+    ASSERT_NO_THROW(ie.SetConfig({ { "TARGET_FALLBACK", deviceName } }, "HETERO"));
+}
+
+TEST_F(IEClassBasicTest, smoke_SetConfigHeteroNoThrow) {
+    Core ie;
+    bool value = false;
+
+    ASSERT_NO_THROW(ie.SetConfig({ { HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), YES } }, "HETERO"));
+    ASSERT_NO_THROW(value = ie.GetConfig("HETERO", HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)).as<bool>());
+    ASSERT_TRUE(value);
+
+    ASSERT_NO_THROW(ie.SetConfig({ { HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), NO } }, "HETERO"));
+    ASSERT_NO_THROW(value = ie.GetConfig("HETERO", HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)).as<bool>());
+    ASSERT_FALSE(value);
+}
+
+//
+// LogCallBack
+//
+
+TEST_F(IEClassBasicTest, smoke_LogCallBackNoThrow) {
+    Core ie;
+
+    IE_SUPPRESS_DEPRECATED_START
+    class ConsoleErrorListener : public IErrorListener {
+        void onError(const char *msg) noexcept override {
+            std::clog << "Plugin message: " << msg << std::endl;
+        }
+    };
+
+    ConsoleErrorListener listener;
+
+    ASSERT_NO_THROW(ie.SetLogCallback(listener));
+    IE_SUPPRESS_DEPRECATED_END
+}
+
+//
+// ImportNetwork
+//
+
+TEST_P(IEClassBasicTestP, ImportNetworkThrows) {
+    Core ie;
+
+    if (deviceName == "CPU" || deviceName == "FPGA") {
+        ASSERT_THROW(ie.ImportNetwork("model", deviceName), InferenceEngineException);
+    }
+}
+
+TEST_F(IEClassBasicTest, smoke_ImportNetworkHeteroThrows) {
+    Core ie;
+
+    ASSERT_THROW(ie.ImportNetwork("model", "HETERO"), InferenceEngineException);
+}
+
+TEST_F(IEClassBasicTest, smoke_ImportNetworkMultiThrows) {
+    Core ie;
+
+    ASSERT_THROW(ie.ImportNetwork("model", "MULTI"), InferenceEngineException);
+}
+
+TEST_P(IEClassBasicTestP, ImportNetworkWithNullContextThrows) {
+    Core ie;
+    RemoteContext::Ptr context = nullptr;
+    std::istringstream stream("None");
+    ASSERT_THROW(ie.ImportNetwork(stream, context, {}), InferenceEngineException);
+}
+
+//
+// LoadNetwork
+//
+
+TEST_P(IEClassNetworkTestP, LoadNetworkActualNoThrow) {
+    Core ie;
+    ASSERT_NO_THROW(ie.LoadNetwork(actualNetwork,  deviceName));
+}
+
+TEST_P(IEClassNetworkTestP, LoadNetworkActualHeteroDeviceNoThrow) {
+    Core ie;
+    ASSERT_NO_THROW(ie.LoadNetwork(actualNetwork, "HETERO:" + deviceName ));
+}
+
+TEST_P(IEClassNetworkTestP, LoadNetworkActualHeteroDevice2NoThrow) {
+    Core ie;
+    ASSERT_NO_THROW(ie.LoadNetwork(actualNetwork, "HETERO", { { "TARGET_FALLBACK", deviceName } }));
+}
+
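+// Runs the expression and skips the test if it throws an InferenceEngineException
+// whose message contains [NOT_IMPLEMENTED]; any other exception fails the test.
+// Typical use: SKIP_IF_NOT_IMPLEMENTED(executableNetwork.Export(strm));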
+#define SKIP_IF_NOT_IMPLEMENTED(...) do {                                       \
+    try {                                                                       \
+        __VA_ARGS__;                                                            \
+    } catch (const InferenceEngine::details::InferenceEngineException &ieException) {   \
+        auto notImplementedExceptionIsThrown =                                  \
+            std::string::npos != std::string{ieException.what()}                \
+            .find(std::string{"[NOT_IMPLEMENTED] "});                           \
+        if (notImplementedExceptionIsThrown) {                                  \
+            GTEST_SKIP();                                                       \
+        } else {                                                                \
+            FAIL() << "thrown from expression: " # __VA_ARGS__ << std::endl     \
+            << "what: " << ieException.what();                                  \
+        }                                                                       \
+    }                                                                           \
+} while(0)
+
+//
+// ImportExportNetwork
+//
+
+using IEClassImportExportTestP = IEClassNetworkTestP;
+
+TEST_P(IEClassImportExportTestP, smoke_ImportNetworkNoThrowIfNoDeviceName) {
+    Core ie;
+    std::stringstream strm;
+    ExecutableNetwork executableNetwork;
+    ASSERT_NO_THROW(executableNetwork = ie.LoadNetwork(actualNetwork, deviceName));
+    SKIP_IF_NOT_IMPLEMENTED(executableNetwork.Export(strm));
+    if (!strm.str().empty() && deviceName.find("FPGA") != std::string::npos) {
+        SKIP_IF_NOT_IMPLEMENTED(executableNetwork = ie.ImportNetwork(strm));
+    }
+    if (nullptr != static_cast<IExecutableNetwork::Ptr&>(executableNetwork)) {
+        ASSERT_NO_THROW(executableNetwork.CreateInferRequest());
+    }
+}
+
+TEST_P(IEClassImportExportTestP, smoke_ImportNetworkNoThrowWithDeviceName) {
+    Core ie;
+    std::stringstream strm;
+    ExecutableNetwork executableNetwork;
+    ASSERT_NO_THROW(executableNetwork = ie.LoadNetwork(actualNetwork, deviceName));
+    SKIP_IF_NOT_IMPLEMENTED(executableNetwork.Export(strm));
+    SKIP_IF_NOT_IMPLEMENTED(executableNetwork = ie.ImportNetwork(strm, deviceName));
+    if (nullptr != static_cast<IExecutableNetwork::Ptr&>(executableNetwork)) {
+        ASSERT_NO_THROW(executableNetwork.CreateInferRequest());
+    }
+}
+
+TEST_P(IEClassImportExportTestP, smoke_ExportUsingFileNameImportFromStreamNoThrowWithDeviceName) {
+    Core ie;
+    ExecutableNetwork executableNetwork;
+    std::string fileName{"ExportedNetwork"};
+    {
+        ASSERT_NO_THROW(executableNetwork = ie.LoadNetwork(simpleNetwork, deviceName));
+        SKIP_IF_NOT_IMPLEMENTED(executableNetwork.Export(fileName));
+    }
+    if (CommonTestUtils::fileExists(fileName)) {
+        {
+            std::ifstream strm(fileName, std::ios_base::binary);
+            SKIP_IF_NOT_IMPLEMENTED(executableNetwork = ie.ImportNetwork(strm, deviceName));
+        }
+        ASSERT_EQ(0, remove(fileName.c_str()));
+    }
+    if (nullptr != static_cast<IExecutableNetwork::Ptr&>(executableNetwork)) {
+        ASSERT_NO_THROW(executableNetwork.CreateInferRequest());
+    }
+}
+
+//
+// QueryNetwork
+//
+
+TEST_P(IEClassNetworkTestP, QueryNetworkActualThrows) {
+    Core ie;
+    ASSERT_NO_THROW(ie.QueryNetwork(actualNetwork, "HETERO:" + deviceName));
+}
+
+TEST_P(IEClassNetworkTestP, QueryNetworkActualNoThrow) {
+    Core ie;
+    ASSERT_NO_THROW(ie.QueryNetwork(actualNetwork, deviceName));
+}
+
+TEST_P(IEClassNetworkTestP, QueryNetworkHeteroActualNoThrow) {
+    Core ie;
+    QueryNetworkResult res;
+    ASSERT_NO_THROW(res = ie.QueryNetwork(actualNetwork, "HETERO", { { "TARGET_FALLBACK", deviceName } }));
+    ASSERT_LT(0, res.supportedLayersMap.size());
+}
+
+TEST_P(IEClassNetworkTestP, QueryNetworkMultiThrows) {
+    Core ie;
+    ASSERT_THROW(ie.QueryNetwork(actualNetwork, "MULTI"), InferenceEngineException);
+}
+
+//
+// IE Class GetMetric / GetConfig
+//
+
+class IEClassGetMetricTest : public TestsCommon, public WithParamInterface<std::string> {
+protected:
+    std::string deviceName;
+
+public:
+    void SetUp() override {
+        // To close loaded devices.
+        PluginCache::get().reset();
+
+        deviceName = GetParam();
+    }
+};
+
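+// Asserts that metricName is listed in the device's SUPPORTED_METRICS metric.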
+#define ASSERT_METRIC_SUPPORTED(metricName)                              \
+    {                                                                    \
+        std::vector<std::string> metrics =                               \
+            ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_METRICS));     \
+        auto it = std::find(metrics.begin(), metrics.end(), metricName); \
+        ASSERT_NE(metrics.end(), it);                                    \
+    }
+
+TEST_F(IEClassBasicTest, smoke_GetMetricSupportedMetricsHeteroNoThrow) {
+    Core ie;
+    Parameter p;
+    std::string deviceName = "HETERO";
+
+    ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_METRICS)));
+    std::vector<std::string> t = p;
+
+    std::cout << "Supported HETERO metrics: " << std::endl;
+    for (auto && str : t) {
+        std::cout << str << std::endl;
+    }
+
+    ASSERT_METRIC_SUPPORTED(METRIC_KEY(SUPPORTED_METRICS));
+}
+
+TEST_F(IEClassBasicTest, smoke_GetMetricSupportedConfigKeysHeteroNoThrow) {
+    Core ie;
+    Parameter p;
+    std::string deviceName = "HETERO";
+
+    ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
+    std::vector<std::string> t = p;
+
+    std::cout << "Supported HETERO config keys: " << std::endl;
+    for (auto && str : t) {
+        std::cout << str << std::endl;
+    }
+
+    ASSERT_METRIC_SUPPORTED(METRIC_KEY(SUPPORTED_CONFIG_KEYS));
+}
+
+TEST_F(IEClassBasicTest, smoke_GetMetricSupportedConfigKeysHeteroThrows) {
+    Core ie;
+
+    ASSERT_THROW(ie.GetMetric("HETERO:CPU", METRIC_KEY(SUPPORTED_CONFIG_KEYS)), InferenceEngineException);
+}
+
+using IEClassGetMetricTest_SUPPORTED_METRICS = IEClassGetMetricTest;
+TEST_P(IEClassGetMetricTest_SUPPORTED_METRICS, GetMetricAndPrintNoThrow) {
+    Core ie;
+    Parameter p;
+
+    ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_METRICS)));
+    std::vector<std::string> t = p;
+
+    std::cout << "Supported metrics: " << std::endl;
+    for (auto && str : t) {
+        std::cout << str << std::endl;
+    }
+
+    ASSERT_METRIC_SUPPORTED(METRIC_KEY(SUPPORTED_METRICS));
+}
+
+using IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS = IEClassGetMetricTest;
+TEST_P(IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMetricAndPrintNoThrow) {
+    Core ie;
+    Parameter p;
+
+    ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
+    std::vector<std::string> t = p;
+
+    std::cout << "Supported config values: " << std::endl;
+    for (auto && str : t) {
+        std::cout << str << std::endl;
+    }
+
+    ASSERT_METRIC_SUPPORTED(METRIC_KEY(SUPPORTED_CONFIG_KEYS));
+}
+
+using IEClassGetMetricTest_AVAILABLE_DEVICES = IEClassGetMetricTest;
+TEST_P(IEClassGetMetricTest_AVAILABLE_DEVICES, GetMetricAndPrintNoThrow) {
+    Core ie;
+    Parameter p;
+
+    ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)));
+    std::vector<std::string> t = p;
+
+    std::cout << "Available devices: " << std::endl;
+    for (auto && str : t) {
+        std::cout << str << std::endl;
+    }
+
+    ASSERT_METRIC_SUPPORTED(METRIC_KEY(AVAILABLE_DEVICES));
+}
+
+using IEClassGetMetricTest_FULL_DEVICE_NAME = IEClassGetMetricTest;
+TEST_P(IEClassGetMetricTest_FULL_DEVICE_NAME, GetMetricAndPrintNoThrow) {
+    Core ie;
+    Parameter p;
+
+    ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(FULL_DEVICE_NAME)));
+    std::string t = p;
+    std::cout << "Full device name: " << std::endl << t << std::endl;
+
+    ASSERT_METRIC_SUPPORTED(METRIC_KEY(FULL_DEVICE_NAME));
+}
+
+using IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES = IEClassGetMetricTest;
+TEST_P(IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES, GetMetricAndPrintNoThrow) {
+    Core ie;
+    Parameter p;
+
+    ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(OPTIMIZATION_CAPABILITIES)));
+    std::vector<std::string> t = p;
+
+    std::cout << "Optimization capabilities: " << std::endl;
+    for (auto && str : t) {
+        std::cout << str << std::endl;
+    }
+
+    ASSERT_METRIC_SUPPORTED(METRIC_KEY(OPTIMIZATION_CAPABILITIES));
+}
+
+using IEClassGetMetricTest_NUMBER_OF_WAITING_INFER_REQUESTS = IEClassGetMetricTest;
+TEST_P(IEClassGetMetricTest_NUMBER_OF_WAITING_INFER_REQUESTS, GetMetricAndPrintNoThrow) {
+    Core ie;
+    Parameter p;
+
+    ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(NUMBER_OF_WAITING_INFER_REQUESTS)));
+    unsigned int t = p;
+
+    std::cout << "Number of waiting infer requests: " << std::endl << t << std::endl;
+
+    ASSERT_METRIC_SUPPORTED(METRIC_KEY(NUMBER_OF_WAITING_INFER_REQUESTS));
+}
+
+using IEClassGetMetricTest_NUMBER_OF_EXEC_INFER_REQUESTS = IEClassGetMetricTest;
+TEST_P(IEClassGetMetricTest_NUMBER_OF_EXEC_INFER_REQUESTS, GetMetricAndPrintNoThrow) {
+    Core ie;
+    Parameter p;
+
+    ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(NUMBER_OF_EXEC_INFER_REQUESTS)));
+    unsigned int t = p;
+
+    std::cout << "Number of executing infer requests: " << std::endl << t << std::endl;
+
+    ASSERT_METRIC_SUPPORTED(METRIC_KEY(NUMBER_OF_EXEC_INFER_REQUESTS));
+}
+
+using IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS = IEClassGetMetricTest;
+TEST_P(IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS, GetMetricAndPrintNoThrow) {
+    Core ie;
+    Parameter p;
+
+    ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS)));
+    std::tuple<unsigned int, unsigned int, unsigned int> t = p;
+
+    unsigned int start = std::get<0>(t);
+    unsigned int end = std::get<1>(t);
+    unsigned int step = std::get<2>(t);
+
+    std::cout << "Range for async infer requests: " << std::endl;
+    std::cout << start << std::endl;
+    std::cout << end << std::endl;
+    std::cout << step << std::endl;
+    std::cout << std::endl;
+
+    ASSERT_LE(start, end);
+    ASSERT_GE(step, 1);
+    ASSERT_METRIC_SUPPORTED(METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS));
+}
+
+using IEClassGetMetricTest_RANGE_FOR_STREAMS = IEClassGetMetricTest;
+TEST_P(IEClassGetMetricTest_RANGE_FOR_STREAMS, GetMetricAndPrintNoThrow) {
+    Core ie;
+    Parameter p;
+
+    ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(RANGE_FOR_STREAMS)));
+    std::tuple<unsigned int, unsigned int> t = p;
+
+    unsigned int start = std::get<0>(t);
+    unsigned int end = std::get<1>(t);
+
+    std::cout << "Range for streams: " << std::endl;
+    std::cout << start << std::endl;
+    std::cout << end << std::endl;
+    std::cout << std::endl;
+
+    ASSERT_LE(start, end);
+    ASSERT_METRIC_SUPPORTED(METRIC_KEY(RANGE_FOR_STREAMS));
+}
+
+using IEClassGetMetricTest_ThrowUnsupported = IEClassGetMetricTest;
+TEST_P(IEClassGetMetricTest_ThrowUnsupported, GetMetricThrow) {
+    Core ie;
+    Parameter p;
+
+    ASSERT_THROW(p = ie.GetMetric(deviceName, "unsupported_metric"), InferenceEngineException);
+}
+
+using IEClassGetConfigTest = IEClassGetMetricTest;
+TEST_P(IEClassGetConfigTest, GetConfigNoThrow) {
+    Core ie;
+    Parameter p;
+
+    ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
+    std::vector<std::string> configValues = p;
+
+    for (auto && confKey : configValues) {
+        Parameter defaultValue;
+        ASSERT_NO_THROW(defaultValue = ie.GetConfig(deviceName, confKey));
+        ASSERT_FALSE(defaultValue.empty());
+    }
+}
+
+TEST_P(IEClassGetConfigTest, GetConfigHeteroNoThrow) {
+    Core ie;
+    Parameter p;
+
+    ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
+    std::vector<std::string> configValues = p;
+
+    for (auto && confKey : configValues) {
+        ASSERT_NO_THROW(ie.GetConfig(deviceName, confKey));
+    }
+}
+
+using IEClassGetConfigTest_ThrowUnsupported = IEClassGetMetricTest;
+TEST_P(IEClassGetConfigTest_ThrowUnsupported, GetConfigHeteroThrow) {
+    Core ie;
+    Parameter p;
+
+    ASSERT_THROW(p = ie.GetConfig("HETERO", "unsupported_config"), InferenceEngineException);
+}
+
+TEST_P(IEClassGetConfigTest_ThrowUnsupported, GetConfigHeteroWithDeviceThrow) {
+    Core ie;
+    Parameter p;
+
+    ASSERT_THROW(p = ie.GetConfig("HETERO:" + deviceName, HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)), InferenceEngineException);
+}
+
+TEST_P(IEClassGetConfigTest_ThrowUnsupported, GetConfigThrow) {
+    Core ie;
+    Parameter p;
+
+    ASSERT_THROW(p = ie.GetConfig(deviceName, "unsupported_config"), InferenceEngineException);
+}
+
+using IEClassGetAvailableDevices = IEClassGetMetricTest;
+TEST_P(IEClassGetAvailableDevices, GetAvailableDevicesNoThrow) {
+    Core ie;
+    std::vector<std::string> devices;
+
+    ASSERT_NO_THROW(devices = ie.GetAvailableDevices());
+
+    bool deviceFound = false;
+    std::cout << "Available devices: " << std::endl;
+    for (auto && device : devices) {
+        if (device.find(deviceName) != std::string::npos) {
+            deviceFound = true;
+        }
+
+        std::cout << device << " ";
+    }
+    std::cout << std::endl;
+
+    ASSERT_TRUE(deviceFound);
+}
+
+//
+// ExecutableNetwork GetMetric / GetConfig
+//
+
+class IEClassExecutableNetworkGetMetricTest : public IEClassNetworkTest, public WithParamInterface<std::string> {
+protected:
+    std::string deviceName;
+
+public:
+    void SetUp() override {
+        IEClassNetworkTest::SetUp();
+        deviceName = GetParam();
+    }
+};
+
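+// Asserts that metricName is listed in the executable network's SUPPORTED_METRICS.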
+#define ASSERT_EXEC_METRIC_SUPPORTED(metricName)                         \
+    {                                                                    \
+        std::vector<std::string> metrics =                               \
+            exeNetwork.GetMetric(METRIC_KEY(SUPPORTED_METRICS));         \
+        auto it = std::find(metrics.begin(), metrics.end(), metricName); \
+        ASSERT_NE(metrics.end(), it);                                    \
+    }
+
+using IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS = IEClassExecutableNetworkGetMetricTest;
+TEST_P(IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMetricNoThrow) {
+    Core ie;
+    Parameter p;
+
+    ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName);
+
+    ASSERT_NO_THROW(p = exeNetwork.GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
+    std::vector<std::string> configValues = p;
+
+    std::cout << "Supported config keys: " << std::endl;
+    for (auto && conf : configValues) {
+        std::cout << conf << std::endl;
+        ASSERT_LT(0, conf.size());
+    }
+    ASSERT_LE(0, configValues.size());
+    ASSERT_EXEC_METRIC_SUPPORTED(METRIC_KEY(SUPPORTED_CONFIG_KEYS));
+}
+
+using IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS = IEClassExecutableNetworkGetMetricTest;
+TEST_P(IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS, GetMetricNoThrow) {
+    Core ie;
+    Parameter p;
+
+    ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName);
+
+    ASSERT_NO_THROW(p = exeNetwork.GetMetric(METRIC_KEY(SUPPORTED_METRICS)));
+    std::vector<std::string> metricValues = p;
+
+    std::cout << "Supported metric keys: " << std::endl;
+    for (auto && conf : metricValues) {
+        std::cout << conf << std::endl;
+        ASSERT_LT(0, conf.size());
+    }
+    ASSERT_LT(0, metricValues.size());
+    ASSERT_EXEC_METRIC_SUPPORTED(METRIC_KEY(SUPPORTED_METRICS));
+}
+
+using IEClassExecutableNetworkGetMetricTest_NETWORK_NAME = IEClassExecutableNetworkGetMetricTest;
+TEST_P(IEClassExecutableNetworkGetMetricTest_NETWORK_NAME, GetMetricNoThrow) {
+    Core ie;
+    Parameter p;
+
+    ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName);
+
+    ASSERT_NO_THROW(p = exeNetwork.GetMetric(EXEC_NETWORK_METRIC_KEY(NETWORK_NAME)));
+    std::string networkname = p;
+
+    std::cout << "Exe network name: " << std::endl << networkname << std::endl;
+    ASSERT_EQ("simpleNetwork", networkname);
+    ASSERT_EXEC_METRIC_SUPPORTED(EXEC_NETWORK_METRIC_KEY(NETWORK_NAME));
+}
+
+using IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS = IEClassExecutableNetworkGetMetricTest;
+TEST_P(IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS, GetMetricNoThrow) {
+    Core ie;
+    Parameter p;
+
+    ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName);
+
+    ASSERT_NO_THROW(p = exeNetwork.GetMetric(EXEC_NETWORK_METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)));
+    unsigned int value = p;
+
+    std::cout << "Optimal number of Inference Requests: " << value << std::endl;
+    ASSERT_GE(value, 1u);
+    ASSERT_EXEC_METRIC_SUPPORTED(EXEC_NETWORK_METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS));
+}
+
+using IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported = IEClassExecutableNetworkGetMetricTest;
+TEST_P(IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported, GetMetricThrow) {
+    Core ie;
+    Parameter p;
+
+    ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName);
+
+    ASSERT_THROW(p = exeNetwork.GetMetric("unsupported_metric"), InferenceEngineException);
+}
+
+using IEClassExecutableNetworkGetConfigTest = IEClassExecutableNetworkGetMetricTest;
+TEST_P(IEClassExecutableNetworkGetConfigTest, GetConfigNoThrow) {
+    Core ie;
+    Parameter p;
+
+    ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName);
+
+    ASSERT_NO_THROW(p = exeNetwork.GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
+    std::vector<std::string> configValues = p;
+
+    for (auto && confKey : configValues) {
+        Parameter defaultValue;
+        ASSERT_NO_THROW(defaultValue = ie.GetConfig(deviceName, confKey));
+        ASSERT_FALSE(defaultValue.empty());
+    }
+}
+
+TEST_P(IEClassExecutableNetworkGetConfigTest, GetConfigThrows) {
+    Core ie;
+    Parameter p;
+
+    ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName);
+
+    ASSERT_THROW(p = exeNetwork.GetConfig("unsupported_config"), InferenceEngineException);
+}
+
+using IEClassExecutableNetworkSetConfigTest = IEClassExecutableNetworkGetMetricTest;
+TEST_P(IEClassExecutableNetworkSetConfigTest, SetConfigThrows) {
+    Core ie;
+    Parameter p;
+
+    ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName);
+
+    ASSERT_THROW(exeNetwork.SetConfig({ { "unsupported_config", "some_value" } }), InferenceEngineException);
+}
+
+TEST_P(IEClassExecutableNetworkGetConfigTest, GetConfigNoEmptyNoThrow) {
+    Core ie;
+    Parameter p;
+
+    ASSERT_NO_THROW(p = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
+    std::vector<std::string> devConfigValues = p;
+
+    ExecutableNetwork exeNetwork = ie.LoadNetwork(simpleNetwork, deviceName);
+
+    ASSERT_NO_THROW(p = exeNetwork.GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
+    std::vector<std::string> execConfigValues = p;
+
+    /*
+    for (auto && configKey : devConfigValues) {
+        ASSERT_NE(execConfigValues.end(), std::find(execConfigValues.begin(), execConfigValues.end(), configKey));
+
+        Parameter configValue;
+        ASSERT_NO_THROW(Parameter configValue = exeNetwork.GetConfig(configKey));
+    }
+    */
+}
+
+//
+// Hetero Executable network case
+//
+
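+// Compares metrics and configs of a network loaded on "HETERO:<device>,CPU"
+// against the same network loaded directly on the device.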
+class IEClassHeteroExecutableNetworkGetMetricTest : public IEClassNetworkTest, public WithParamInterface<std::string> {
+protected:
+    std::string deviceName;
+    std::string heteroDeviceName;
+
+public:
+    void SetUp() override {
+        IEClassNetworkTest::SetUp();
+        deviceName = GetParam();
+        heteroDeviceName = "HETERO:" + deviceName + ",CPU";
+    }
+};
+
+using IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS = IEClassHeteroExecutableNetworkGetMetricTest;
+TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMetricNoThrow) {
+    Core ie;
+    Parameter pHetero, pDevice;
+
+    ExecutableNetwork heteroExeNetwork = ie.LoadNetwork(actualNetwork, heteroDeviceName);
+    ExecutableNetwork deviceExeNetwork = ie.LoadNetwork(actualNetwork, deviceName);
+
+    ASSERT_NO_THROW(pHetero = heteroExeNetwork.GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
+    ASSERT_NO_THROW(pDevice = deviceExeNetwork.GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
+    std::vector<std::string> heteroConfigValues = pHetero, deviceConfigValues = pDevice;
+
+    std::cout << "Supported config keys: " << std::endl;
+    for (auto && conf : heteroConfigValues) {
+        std::cout << conf << std::endl;
+        ASSERT_LT(0, conf.size());
+    }
+    ASSERT_LE(0, heteroConfigValues.size());
+
+    // check that all device config values are present in hetero case
+    for (auto && deviceConf : deviceConfigValues) {
+        auto it = std::find(heteroConfigValues.begin(), heteroConfigValues.end(), deviceConf);
+        ASSERT_TRUE(it != heteroConfigValues.end());
+
+        Parameter heteroConfigValue = heteroExeNetwork.GetConfig(deviceConf);
+        Parameter deviceConfigValue = deviceExeNetwork.GetConfig(deviceConf);
+
+        // HETERO returns EXCLUSIVE_ASYNC_REQUESTS as a boolean value
+        if (CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS) != deviceConf)
+            ASSERT_EQ(deviceConfigValue, heteroConfigValue);
+    }
+}
+
+using IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS = IEClassHeteroExecutableNetworkGetMetricTest;
+TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS, GetMetricNoThrow) {
+    Core ie;
+    Parameter pHetero, pDevice;
+
+    ExecutableNetwork heteroExeNetwork = ie.LoadNetwork(actualNetwork, heteroDeviceName);
+    ExecutableNetwork deviceExeNetwork = ie.LoadNetwork(actualNetwork, deviceName);
+
+    ASSERT_NO_THROW(pHetero = heteroExeNetwork.GetMetric(METRIC_KEY(SUPPORTED_METRICS)));
+    ASSERT_NO_THROW(pDevice = deviceExeNetwork.GetMetric(METRIC_KEY(SUPPORTED_METRICS)));
+    std::vector<std::string> heteroMetricValues = pHetero, deviceMetricValues = pDevice;
+
+    std::cout << "Supported metric keys: " << std::endl;
+    for (auto && conf : heteroMetricValues) {
+        std::cout << conf << std::endl;
+        ASSERT_LT(0, conf.size());
+    }
+    ASSERT_LT(0, heteroMetricValues.size());
+
+    const std::vector<std::string> heteroSpecificMetrics = {
+        METRIC_KEY(SUPPORTED_METRICS),
+        METRIC_KEY(SUPPORTED_CONFIG_KEYS)
+    };
+
+    // check that all device metric values are present in hetero case
+    for (auto && deviceMetricName : deviceMetricValues) {
+        auto it = std::find(heteroMetricValues.begin(), heteroMetricValues.end(), deviceMetricName);
+        ASSERT_TRUE(it != heteroMetricValues.end());
+
+        Parameter heteroMetricValue = heteroExeNetwork.GetMetric(deviceMetricName);
+        Parameter deviceMetricValue = deviceExeNetwork.GetMetric(deviceMetricName);
+
+        if (std::find(heteroSpecificMetrics.begin(), heteroSpecificMetrics.end(), deviceMetricName) ==
+            heteroSpecificMetrics.end())
+            ASSERT_TRUE(heteroMetricValue == deviceMetricValue);
+    }
+}
+
+using IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME = IEClassHeteroExecutableNetworkGetMetricTest;
+TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME, GetMetricNoThrow) {
+    Core ie;
+    Parameter p;
+
+    ExecutableNetwork exeNetwork = ie.LoadNetwork(actualNetwork, heteroDeviceName);
+
+    ASSERT_NO_THROW(p = exeNetwork.GetMetric(EXEC_NETWORK_METRIC_KEY(NETWORK_NAME)));
+    std::string networkName = p;
+
+    std::cout << "Exe network name: " << std::endl << networkName << std::endl;
+}
+
+using IEClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK = IEClassHeteroExecutableNetworkGetMetricTest;
+TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK, GetMetricNoThrow) {
+    Core ie;
+    Parameter p;
+
+    setHeteroNetworkAffinity(deviceName);
+
+    ExecutableNetwork exeNetwork = ie.LoadNetwork(actualNetwork, heteroDeviceName);
+
+    ASSERT_NO_THROW(p = exeNetwork.GetConfig("TARGET_FALLBACK"));
+    std::string targets = p;
+    auto expectedTargets = deviceName + ",CPU";
+
+    std::cout << "Exe network fallback targets: " << targets << std::endl;
+    ASSERT_EQ(expectedTargets, targets);
+}
+
+//
+// QueryNetwork with HETERO on a particular device
+//
+
+namespace {
+
+bool supportsDeviceID(Core & ie, const std::string & deviceName) {
+    auto supportedConfigKeys = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)).as<std::vector<std::string>>();
+    return supportedConfigKeys.end() != std::find(std::begin(supportedConfigKeys),
+                                                  std::end(supportedConfigKeys),
+                                                  CONFIG_KEY(DEVICE_ID));
+}
+
+bool supportsAvailableDevices(Core & ie, const std::string & deviceName) {
+    auto supportedMetricKeys = ie.GetMetric(deviceName, METRIC_KEY(SUPPORTED_METRICS)).as<std::vector<std::string>>();
+    return supportedMetricKeys.end() != std::find(std::begin(supportedMetricKeys),
+                                                  std::end(supportedMetricKeys),
+                                                  METRIC_KEY(AVAILABLE_DEVICES));
+}
+
+}  // anonymous namespace
+
+class IEClassQueryNetworkTest : public IEClassNetworkTest, public WithParamInterface<std::string> {
+protected:
+    std::string deviceName;
+public:
+    void SetUp() override {
+        IEClassNetworkTest::SetUp();
+        deviceName = GetParam();
+    }
+};
+
+TEST_P(IEClassQueryNetworkTest, QueryNetworkHETEROWithDeviceIDNoThrow) {
+    Core ie;
+
+    if (supportsDeviceID(ie, deviceName)) {
+        auto deviceIDs = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as<std::vector<std::string>>();
+        if (deviceIDs.empty())
+            GTEST_SKIP();
+        ASSERT_NO_THROW(ie.QueryNetwork(actualNetwork, "HETERO",
+            { { "TARGET_FALLBACK", deviceName + "." + deviceIDs[0] + ",CPU" }}));
+    } else {
+        GTEST_SKIP();
+    }
+}
+
+TEST_P(IEClassQueryNetworkTest, QueryNetworkWithDeviceID) {
+    Core ie;
+
+    if (supportsDeviceID(ie, deviceName)) {
+        ASSERT_NO_THROW(ie.QueryNetwork(simpleNetwork, deviceName + ".0"));
+    } else {
+        GTEST_SKIP();
+    }
+}
+
+TEST_P(IEClassQueryNetworkTest, QueryNetworkWithBigDeviceIDThrows) {
+    Core ie;
+
+    if (supportsDeviceID(ie, deviceName)) {
+        ASSERT_THROW(ie.QueryNetwork(actualNetwork, deviceName + ".110"), InferenceEngineException);
+    } else {
+        GTEST_SKIP();
+    }
+}
+
+TEST_P(IEClassQueryNetworkTest, QueryNetworkWithInvalidDeviceIDThrows) {
+    Core ie;
+
+    if (supportsDeviceID(ie, deviceName)) {
+        ASSERT_THROW(ie.QueryNetwork(actualNetwork, deviceName + ".l0"), InferenceEngineException);
+    } else {
+        GTEST_SKIP();
+    }
+}
+
+TEST_P(IEClassQueryNetworkTest, QueryNetworkHETEROWithBigDeviceIDThrows) {
+    Core ie;
+
+    if (supportsDeviceID(ie, deviceName)) {
+        ASSERT_THROW(ie.QueryNetwork(actualNetwork, "HETERO",
+                                     { { "TARGET_FALLBACK", deviceName + ".100,CPU" }}), InferenceEngineException);
+    } else {
+        GTEST_SKIP();
+    }
+}
+
+//
+// LoadNetwork with HETERO on a particular device
+//
+
+using IEClassLoadNetworkTest = IEClassQueryNetworkTest;
+
+TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROWithDeviceIDNoThrow) {
+    Core ie;
+
+    if (supportsDeviceID(ie, deviceName)) {
+        auto deviceIDs = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as<std::vector<std::string>>();
+        if (deviceIDs.empty())
+            GTEST_SKIP();
+        std::string heteroDevice = "HETERO:" + deviceName + "." + deviceIDs[0] + ",CPU";
+        ASSERT_NO_THROW(ie.LoadNetwork(actualNetwork, heteroDevice));
+    } else {
+        GTEST_SKIP();
+    }
+}
+
+TEST_P(IEClassLoadNetworkTest, LoadNetworkWithDeviceIDNoThrow) {
+    Core ie;
+
+    if (supportsDeviceID(ie, deviceName)) {
+        auto deviceIDs = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as<std::vector<std::string>>();
+        if (deviceIDs.empty())
+            GTEST_SKIP();
+        ASSERT_NO_THROW(ie.LoadNetwork(simpleNetwork, deviceName + "." + deviceIDs[0]));
+    } else {
+        GTEST_SKIP();
+    }
+}
+
+TEST_P(IEClassLoadNetworkTest, LoadNetworkWithBigDeviceIDThrows) {
+    Core ie;
+
+    if (supportsDeviceID(ie, deviceName)) {
+        ASSERT_THROW(ie.LoadNetwork(actualNetwork, deviceName + ".10"), InferenceEngineException);
+    } else {
+        GTEST_SKIP();
+    }
+}
+
+TEST_P(IEClassLoadNetworkTest, LoadNetworkWithInvalidDeviceIDThrows) {
+    Core ie;
+
+    if (supportsDeviceID(ie, deviceName)) {
+        ASSERT_THROW(ie.LoadNetwork(actualNetwork, deviceName + ".l0"), InferenceEngineException);
+    } else {
+        GTEST_SKIP();
+    }
+}
+
+TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROWithBigDeviceIDThrows) {
+    Core ie;
+
+    if (supportsDeviceID(ie, deviceName)) {
+        ASSERT_THROW(ie.LoadNetwork(actualNetwork, "HETERO",
+                                     { { "TARGET_FALLBACK", deviceName + ".100,CPU" } }), InferenceEngineException);
+    } else {
+        GTEST_SKIP();
+    }
+}
+
+TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROAndDeviceIDThrows) {
+    Core ie;
+
+    if (supportsDeviceID(ie, deviceName)) {
+        ASSERT_THROW(ie.LoadNetwork(actualNetwork, "HETERO",
+                                     { { "TARGET_FALLBACK", deviceName + ",CPU" }, {CONFIG_KEY(DEVICE_ID), "110"}}), InferenceEngineException);
+    } else {
+        GTEST_SKIP();
+    }
+}
+
+//
+// LoadNetwork with HETERO / MULTI combinations on a particular device
+//
+
+TEST_P(IEClassLoadNetworkTest, LoadNetworkHETEROwithMULTINoThrow) {
+    Core ie;
+
+    if (supportsDeviceID(ie, deviceName) && supportsAvailableDevices(ie, deviceName)) {
+        std::string devices;
+        auto availableDevices = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as<std::vector<std::string>>();
+        for (auto&& device : availableDevices) {
+            devices += deviceName + '.' + device;
+            if (&device != &(availableDevices.back())) {
+                devices += ',';
+            }
+        }
+        ASSERT_NO_THROW(ie.LoadNetwork(actualNetwork, "HETERO", {
+                {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices},
+                { "TARGET_FALLBACK", "MULTI,CPU" }}));
+    } else {
+        GTEST_SKIP();
+    }
+}
+
+TEST_P(IEClassLoadNetworkTest, LoadNetworkMULTIwithHETERONoThrow) {
+    Core ie;
+
+    if (supportsDeviceID(ie, deviceName) && supportsAvailableDevices(ie, deviceName)) {
+        std::string devices;
+        auto availableDevices = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as<std::vector<std::string>>();
+        for (auto&& device : availableDevices) {
+            devices += "HETERO." + device;
+            if (&device != &(availableDevices.back())) {
+                devices += ',';
+            }
+        }
+        ASSERT_NO_THROW(ie.LoadNetwork(actualNetwork, "MULTI", {
+                {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices},
+                { "TARGET_FALLBACK", deviceName + ",CPU" }}));
+    } else {
+        GTEST_SKIP();
+    }
+}
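+
+// The HETERO/MULTI tests above and below each build the same comma-separated device
+// priority string by hand. A hypothetical helper like the sketch below could factor
+// that loop out; makePriorityList is an illustrative name and is not called anywhere
+// in this file.
+static std::string makePriorityList(const std::vector<std::string>& availableDevices,
+                                    const std::string& prefix) {
+    std::string devices;
+    for (auto&& device : availableDevices) {
+        devices += prefix + device;                  // e.g. prefix "GPU." + id "0" -> "GPU.0"
+        if (&device != &availableDevices.back()) {
+            devices += ',';                          // no separator after the last entry
+        }
+    }
+    return devices;
+}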
+
+//
+// QueryNetwork with HETERO / MULTI combinations on a particular device
+//
+
+TEST_P(IEClassLoadNetworkTest, QueryNetworkHETEROwithMULTINoThrowv7) {
+    Core ie;
+
+    if (supportsDeviceID(ie, deviceName) && supportsAvailableDevices(ie, deviceName)) {
+        std::string devices;
+        auto availableDevices = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as<std::vector<std::string>>();
+        for (auto&& device : availableDevices) {
+            devices += deviceName + '.' + device;
+            if (&device != &(availableDevices.back())) {
+                devices += ',';
+            }
+        }
+
+        QueryNetworkResult result;
+        ASSERT_NO_THROW(result = ie.QueryNetwork(actualNetwork, "HETERO", {
+                {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices},
+                { "TARGET_FALLBACK", "MULTI,CPU" }}));
+
+        for (auto && layer : result.supportedLayersMap) {
+            IE_SUPPRESS_DEPRECATED_START
+            EXPECT_NO_THROW(actualNetwork.getLayerByName(layer.first.c_str()));
+            IE_SUPPRESS_DEPRECATED_END
+        }
+    } else {
+        GTEST_SKIP();
+    }
+}
+
+TEST_P(IEClassLoadNetworkTest, QueryNetworkMULTIwithHETERONoThrowv7) {
+    Core ie;
+
+    if (supportsDeviceID(ie, deviceName) && supportsAvailableDevices(ie, deviceName)) {
+        std::string devices;
+        auto availableDevices = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as<std::vector<std::string>>();
+        for (auto&& device : availableDevices) {
+            devices += "HETERO." + device;
+            if (&device != &(availableDevices.back())) {
+                devices += ',';
+            }
+        }
+
+        QueryNetworkResult result;
+        ASSERT_NO_THROW(result = ie.QueryNetwork(actualNetwork, "MULTI", {
+                {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices},
+                { "TARGET_FALLBACK", deviceName + ",CPU" }}));
+
+        for (auto && layer : result.supportedLayersMap) {
+            IE_SUPPRESS_DEPRECATED_START
+            EXPECT_NO_THROW(actualNetwork.getLayerByName(layer.first.c_str()));
+            IE_SUPPRESS_DEPRECATED_END
+        }
+    } else {
+        GTEST_SKIP();
+    }
+}
+
+TEST_P(IEClassLoadNetworkTest, QueryNetworkHETEROwithMULTINoThrowv10) {
+    Core ie;
+
+    if (supportsDeviceID(ie, deviceName) && supportsAvailableDevices(ie, deviceName)) {
+        std::string devices;
+        auto availableDevices = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as<std::vector<std::string>>();
+        for (auto&& device : availableDevices) {
+            devices += deviceName + '.' + device;
+            if (&device != &(availableDevices.back())) {
+                devices += ',';
+            }
+        }
+
+        QueryNetworkResult result;
+        ASSERT_NO_THROW(result = ie.QueryNetwork(irv10Network, "HETERO", {
+                {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices},
+                { "TARGET_FALLBACK", "MULTI,CPU" }}));
+
+        for (auto && layer : result.supportedLayersMap) {
+            IE_SUPPRESS_DEPRECATED_START
+            EXPECT_NO_THROW(irv10Network.getLayerByName(layer.first.c_str()));
+            IE_SUPPRESS_DEPRECATED_END
+        }
+    } else {
+        GTEST_SKIP();
+    }
+}
+
+TEST_P(IEClassLoadNetworkTest, DISABLED_QueryNetworkMULTIwithHETERONoThrowv10) {
+    Core ie;
+
+    if (supportsDeviceID(ie, deviceName) && supportsAvailableDevices(ie, deviceName)) {
+        std::string devices;
+        auto availableDevices = ie.GetMetric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as<std::vector<std::string>>();
+        for (auto&& device : availableDevices) {
+            devices += "HETERO." + device;
+            if (&device != &(availableDevices.back())) {
+                devices += ',';
+            }
+        }
+
+        // TODO: remove once HETERO and MULTI support v10
+        irv10Network.getLayerByName("param0");
+
+        std::vector<std::string> names;
+        if (auto ngraphFunction = irv10Network.getFunction()) {
+            for (auto && op : irv10Network.getFunction()->get_ops()) {
+                names.push_back(op->get_friendly_name());
+            }
+        } else {
+            IE_SUPPRESS_DEPRECATED_START
+            auto i = irv10Network.begin();
+            while (i != irv10Network.end()) {
+                CNNLayerPtr layer = *i;
+                names.push_back(layer->name);
+                ++i;
+            }
+            IE_SUPPRESS_DEPRECATED_END
+        }
+
+        QueryNetworkResult result;
+        ASSERT_NO_THROW(result = ie.QueryNetwork(irv10Network, "MULTI", {
+                {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices},
+                { "TARGET_FALLBACK", deviceName + ",CPU" }}));
+
+        // check that all supported layers are in network
+        for (auto && layer : result.supportedLayersMap) {
+            EXPECT_NE(std::end(names), std::find(names.begin(), names.end(), layer.first));
+        }
+
+        // check that network layers are supported
+        for (auto && name : names) {
+            bool layerIsFound = result.supportedLayersMap.end() !=
+                std::find_if(result.supportedLayersMap.begin(), result.supportedLayersMap.end(),
+                    [&](const std::pair<std::string, std::string> & p) {
+                        return name == p.first;
+                    });
+            EXPECT_TRUE(layerIsFound);
+        }
+    } else {
+        GTEST_SKIP();
+    }
+}
+
+using IEClassLoadNetworkAfterCoreRecreateTest = IEClassLoadNetworkTest;
+
+TEST_P(IEClassLoadNetworkAfterCoreRecreateTest, LoadAfterRecreateCoresAndPlugins) {
+    {
+        Core ie;
+        auto versions = ie.GetVersions("MULTI:" + deviceName + ",CPU");
+        ASSERT_EQ(3, versions.size());
+    }
+    std::map<std::string, std::string> config;
+    if (deviceName == CommonTestUtils::DEVICE_CPU) {
+        config.insert({"CPU_THREADS_NUM", "3"});
+    }
+    ASSERT_NO_THROW({
+        Core ie;
+        std::string name = actualNetwork.getInputsInfo().begin()->first;
+        actualNetwork.getInputsInfo().at(name)->setPrecision(Precision::U8);
+        auto executableNetwork = ie.LoadNetwork(actualNetwork, deviceName, config);
+    });
+}
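+
+// A minimal sketch (not part of the original tests) of why the assertion above expects
+// exactly three entries for "MULTI:<device>,CPU": GetVersions reports one entry per
+// plugin involved, i.e. MULTI itself, the actual device, and CPU. printVersionsSketch
+// is an illustrative name and is not called anywhere in this file.
+static void printVersionsSketch(const std::string& deviceName) {
+    Core ie;
+    for (auto&& kv : ie.GetVersions("MULTI:" + deviceName + ",CPU")) {
+        std::cout << kv.first << " : " << kv.second.description << std::endl;
+    }
+}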
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/inference_engine_regression_tests/common_dyn_batch_regression.hpp b/inference-engine/tests_deprecated/functional/shared_tests/inference_engine_regression_tests/common_dyn_batch_regression.hpp
new file mode 100644 (file)
index 0000000..3e4dac9
--- /dev/null
@@ -0,0 +1,84 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <regression_tests.hpp>
+#include <string>
+#include <ngraph_functions/subgraph_builders.hpp>
+#include <functional_test_utils/blob_utils.hpp>
+
+using namespace ::testing;
+using namespace InferenceEngine;
+
+class CommonDynBatchFuncTestParams {
+public:
+    std::string deviceName;
+    double nearValue;
+    int batch_limit;
+    int cur_batch;
+
+    CommonDynBatchFuncTestParams(const std::string& _deviceName,
+                                 int blimit,
+                                 int batch,
+                                 double _nearValue = 0.01):
+            deviceName(_deviceName),
+            nearValue(_nearValue),
+            batch_limit(blimit),
+            cur_batch(batch)
+    {}
+};
+
+template <Precision::ePrecision P>
+class TestNoRegressionDynBatch : public Regression::RegressionTests, public WithParamInterface<CommonDynBatchFuncTestParams> {
+    std::string getDeviceName() const override {
+        return GetParam().deviceName;
+    }
+
+public:
+    double getNearValue() {
+        return GetParam().nearValue;
+    }
+    int get_batch_limit() {
+        return GetParam().batch_limit;
+    }
+    int get_cur_batch() {
+        return GetParam().cur_batch;
+    }
+};
+
+using TestNoRegressionDynBatchFP32 = TestNoRegressionDynBatch<Precision::FP32>;
+
+TEST_P(TestNoRegressionDynBatchFP32, dynBatch) {
+    int bl = get_batch_limit();
+    int bsz = get_cur_batch();
+    auto fnPtr = ngraph::builder::subgraph::makeSingleConv({static_cast<size_t>(bl), 4, 20, 20});
+
+    CNNNetwork net(fnPtr);
+    auto ieCore = PluginCache::get().ie();
+    InferenceEngine::ExecutableNetwork exeNet = ieCore->LoadNetwork(net, GetParam().deviceName, {{PluginConfigParams::KEY_DYN_BATCH_ENABLED,
+                                                                                           PluginConfigParams::YES}});
+    InferenceEngine::InferRequest inferRequest = exeNet.CreateInferRequest();
+
+    auto blob = FuncTestUtils::createAndFillBlob(net.getInputsInfo().begin()->second->getTensorDesc());
+
+    inferRequest.SetBatch(bsz);
+    inferRequest.SetBlob(net.getInputsInfo().begin()->first, blob);
+    inferRequest.Infer();
+    auto *outRawData = inferRequest.GetBlob(net.getOutputsInfo().begin()->first)->cbuffer().as<float *>();
+
+    auto refOutData = ngraph::helpers::inferFnWithInterp<ngraph::element::Type_t::f32>(fnPtr,
+                                                                                       {blob->cbuffer().as<float *>()});
+
+    auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32);
+    std::vector<size_t> inShapeLimited{size_t(bsz), 4, 20, 20};
+    size_t outElementsCount = std::accumulate(begin(inShapeLimited), end(inShapeLimited), size_t(1), std::multiplies<size_t>());
+    FuncTestUtils::compareRawBuffers(outRawData, *refOutData[0], outElementsCount, outElementsCount, thr);
+    if (GetParam().deviceName.find(CommonTestUtils::DEVICE_GPU) != std::string::npos) {
+        PluginCache::get().reset();
+    }
+}
+
+std::string getTestCaseName(TestParamInfo<CommonDynBatchFuncTestParams> obj) {
+    return obj.param.deviceName + "_" + std::to_string(obj.param.batch_limit)
+        + "_" + std::to_string(obj.param.cur_batch);
+}
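+
+// A hypothetical instantiation of the fixture above (real plugin test binaries provide
+// their own); the device name and batch values here are illustrative only:
+//
+// INSTANTIATE_TEST_CASE_P(smoke_DynBatch, TestNoRegressionDynBatchFP32,
+//                         ::testing::Values(CommonDynBatchFuncTestParams("CPU", 4, 2)),
+//                         getTestCaseName);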
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/input_tests/parser_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/input_tests/parser_tests.hpp
new file mode 100644 (file)
index 0000000..d1594ad
--- /dev/null
@@ -0,0 +1,175 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <tests_common.hpp>
+#include <utility>
+#include <cctype>
+#include <ie_core.hpp>
+
+#include "common_test_utils/xml_net_builder/xml_net_builder.hpp"
+
+struct layer_params {
+    layer_params(std::string type, std::vector<size_t> in, std::vector<size_t> out,
+                 int weights, int biases, std::map<std::string, std::string> params)
+            : type(std::move(type)), in(std::move(in)), out(std::move(out)), params(std::move(params)),
+              weights(weights), biases(biases) {}
+
+    std::string type;
+    std::vector<size_t> in;
+    std::vector<size_t> out;
+    int weights;
+    int biases;
+    std::map<std::string, std::string> params;
+};
+
+struct ir_test_params : layer_params {
+    ir_test_params(std::string name, std::string precision, layer_params param)
+            : layer_params(std::move(param)), device_name(std::move(name)), precision(std::move(precision)) {}
+
+    std::string device_name;
+    std::string precision;
+};
+
+std::map<std::string, std::vector<std::string>> smokeTests{};
+
+std::string getTestName(testing::TestParamInfo<ir_test_params> obj) {
+    std::string name = obj.param.device_name + "__" + obj.param.precision + "__" + obj.param.type + "__";
+
+    bool isSmoke{ false };
+    if (smokeTests.find(obj.param.device_name) == smokeTests.end()) {
+        smokeTests.insert(std::make_pair(obj.param.device_name, std::vector<std::string>{ obj.param.type }));
+        isSmoke = true;
+    } else {
+        auto& typeVector = smokeTests.at(obj.param.device_name);
+        bool flag = (std::find(typeVector.begin(), typeVector.end(), obj.param.type) == typeVector.end());
+        if (flag) {
+            typeVector.push_back(obj.param.type);
+            isSmoke = true;
+        }
+    }
+
+    // the first case seen for a given (device, layer type) pair is treated as a smoke test
+    if (isSmoke)
+        name = "smoke_" + name;
+
+    for (size_t i = 0; i < obj.param.in.size(); i++) {
+        if (i)
+            name += "_";
+        name += std::to_string(obj.param.in[i]);
+    }
+    name += "__";
+    for (size_t i = 0; i < obj.param.out.size(); i++) {
+        if (i)
+            name += "_";
+        name += std::to_string(obj.param.out[i]);
+    }
+    name += "__";
+    if (obj.param.weights < 0)
+        name += "n";
+    name += std::to_string(abs(obj.param.weights));
+    name += "__";
+    if (obj.param.biases < 0)
+        name += "n";
+    name += std::to_string(abs(obj.param.biases));
+    name += "__";
+
+    std::string param;
+    for (const auto& it : obj.param.params) {
+        if (!param.empty())
+            param += "__";
+        std::string key = it.first;
+        std::string value = it.second;
+        for (char &i : key) {
+            if (!isalnum(i))
+                i = '_';
+        }
+        for (char &i : value) {
+            if (!isalnum(i))
+                i = '_';
+        }
+        param += key + "___" + value;
+    }
+    name += param;
+    return name;
+}
+
+class IncorrectIRTests: public TestsCommon,
+                        public testing::WithParamInterface<ir_test_params> {
+protected:
+    InferenceEngine::TBlob<uint8_t>::Ptr GetNetworkWeights(const layer_params &p) {
+        size_t weightsSize = (abs(p.weights) + abs(p.biases)) * sizeof(float);
+        if (weightsSize == 0)
+            return nullptr;
+        InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({
+                InferenceEngine::Precision::U8, { weightsSize }, InferenceEngine::Layout::C});
+        weights->allocate();
+        fill_data(weights->buffer().as<float*>(),
+                  weights->size() / sizeof(float));
+        InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
+
+        return weights_ptr;
+    }
+};
+
+TEST_F(IncorrectIRTests, smoke_loadIRWithIncorrectInput) {
+    std::map<std::string, std::string> params = {{"negative_slope", "0"}};
+
+    std::string model = CommonTestUtils::V2NetBuilder::buildNetworkWithOneInput("ReLU_WithInput_Only", {1, 3, 4, 4}, "FP32")
+            .addLayer("ReLU", "FP32", &params, {{{1, 3, 2, 2}}, {{1, 3, 4, 4}}})
+            .finish(false);
+
+    InferenceEngine::Core ie;
+    ASSERT_THROW(ie.ReadNetwork(model, InferenceEngine::Blob::CPtr()),
+        InferenceEngine::details::InferenceEngineException);
+}
+
+TEST_P(IncorrectIRTests, loadIncorrectLayer) {
+    auto param = GetParam();
+
+    std::string model = CommonTestUtils::V2NetBuilder::buildNetworkWithOneInput(param.type + "_Only", param.in, param.precision)
+            .addLayer(param.type, param.precision, &param.params, {{param.in}, {param.out}}, param.weights, param.biases)
+            .finish(false);
+
+    try {
+        InferenceEngine::Core ie;
+        auto network = ie.ReadNetwork(model, GetNetworkWeights(param));
+        auto exec = ie.LoadNetwork(network, param.device_name);
+    } catch(...) {
+        return;
+    }
+    FAIL() << "Topology was loaded successfully.";
+}
+
+// Convolution
+#define negative_conv_kernel_x_case layer_params("Convolution", {1, 3, 224, 224}, {1, 64, 112, 112}, 64*3*7*7, 64, {{"kernel-x", "-7"}, {"kernel-y", "7"}, {"stride-x", "2"}, {"stride-y", "2"}, {"pad-x", "2"}, {"pad-y", "2"}, {"dilation-x", "0"}, {"dilation-y", "0"}, {"output", "64"}, {"group", "1"}})
+#define negative_conv_kernel_y_case layer_params("Convolution", {1, 3, 224, 224}, {1, 64, 112, 112}, 64*3*7*7, 64, {{"kernel-x", "7"}, {"kernel-y", "-7"}, {"stride-x", "2"}, {"stride-y", "2"}, {"pad-x", "2"}, {"pad-y", "2"}, {"dilation-x", "0"}, {"dilation-y", "0"}, {"output", "64"}, {"group", "1"}})
+#define negative_conv_stride_x_case layer_params("Convolution", {1, 3, 224, 224}, {1, 64, 112, 112}, 64*3*7*7, 64, {{"kernel-x", "7"}, {"kernel-y", "7"}, {"stride-x", "-2"}, {"stride-y", "2"}, {"pad-x", "2"}, {"pad-y", "2"}, {"dilation-x", "0"}, {"dilation-y", "0"}, {"output", "64"}, {"group", "1"}})
+#define negative_conv_weights_case layer_params("Convolution", {1, 3, 224, 224}, {1, 64, 112, 112}, -64*3*7*7, 64, {{"kernel-x", "7"}, {"kernel-y", "7"}, {"stride-x", "2"}, {"stride-y", "2"}, {"pad-x", "2"}, {"pad-y", "2"}, {"dilation-x", "0"}, {"dilation-y", "0"}, {"output", "64"}, {"group", "1"}})
+#define negative_conv_biases_case layer_params("Convolution", {1, 3, 224, 224}, {1, 64, 112, 112}, 64*3*7*7, -64, {{"kernel-x", "7"}, {"kernel-y", "7"}, {"stride-x", "2"}, {"stride-y", "2"}, {"pad-x", "2"}, {"pad-y", "2"}, {"dilation-x", "0"}, {"dilation-y", "0"}, {"output", "64"}, {"group", "1"}})
+
+// Fully connected
+#define negative_fc_out_size_case layer_params("InnerProduct", {1, 3, 224, 224}, {1, 64, 112, 112}, 224*224*3*1000, 1000, {{"out-size", "-1000"}})
+#define negative_fc_weights_case layer_params("InnerProduct", {1, 3, 224, 224}, {1, 64, 112, 112}, -224*224*3*1000, 1000, {{"out-size", "1000"}})
+#define negative_fc_biases_case layer_params("InnerProduct", {1, 3, 224, 224}, {1, 64, 112, 112}, 224*224*3*1000, -1000, {{"out-size", "1000"}})
+
+// Deconvolution
+#define negative_deconv_kernel_x_case layer_params("Deconvolution", {1, 64, 224, 224}, {1, 3, 112, 112}, 64*3*7*7, 64, {{"kernel-x", "-7"}, {"kernel-y", "7"}, {"stride-x", "2"}, {"stride-y", "2"}, {"pad-x", "2"}, {"pad-y", "2"}, {"dilation-x", "0"}, {"dilation-y", "0"}, {"output", "64"}, {"group", "1"}})
+#define negative_deconv_kernel_y_case layer_params("Deconvolution", {1, 64, 224, 224}, {1, 3, 112, 112}, 64*3*7*7, 64, {{"kernel-x", "7"}, {"kernel-y", "-7"}, {"stride-x", "2"}, {"stride-y", "2"}, {"pad-x", "2"}, {"pad-y", "2"}, {"dilation-x", "0"}, {"dilation-y", "0"}, {"output", "64"}, {"group", "1"}})
+#define negative_deconv_stride_x_case layer_params("Deconvolution", {1, 64, 224, 224}, {1, 3, 112, 112}, 64*3*7*7, 64, {{"kernel-x", "7"}, {"kernel-y", "7"}, {"stride-x", "-2"}, {"stride-y", "2"}, {"pad-x", "2"}, {"pad-y", "2"}, {"dilation-x", "0"}, {"dilation-y", "0"}, {"output", "64"}, {"group", "1"}})
+#define negative_deconv_weights_case layer_params("Deconvolution", {1, 64, 224, 224}, {1, 3, 112, 112}, -64*3*7*7, 64, {{"kernel-x", "7"}, {"kernel-y", "7"}, {"stride-x", "2"}, {"stride-y", "2"}, {"pad-x", "2"}, {"pad-y", "2"}, {"dilation-x", "0"}, {"dilation-y", "0"}, {"output", "64"}, {"group", "1"}})
+#define negative_deconv_biases_case layer_params("Deconvolution", {1, 64, 224, 224}, {1, 3, 112, 112}, 64*3*7*7, -64, {{"kernel-x", "7"}, {"kernel-y", "7"}, {"stride-x", "2"}, {"stride-y", "2"}, {"pad-x", "2"}, {"pad-y", "2"}, {"dilation-x", "0"}, {"dilation-y", "0"}, {"output", "64"}, {"group", "1"}})
+
+// Pooling
+#define negative_pool_kernel_x_case layer_params("Pooling", {1, 3, 224, 224}, {1, 3, 112, 112}, 0, 0, {{"kernel-x", "-2"}, {"kernel-y", "2"}, {"stride-x", "2"}, {"stride-y", "2"}, {"pad-x", "0"}, {"pad-y", "0"}, {"rounding-type", "ceil"}, {"pool-method", "max"}})
+#define negative_pool_kernel_y_case layer_params("Pooling", {1, 3, 224, 224}, {1, 3, 112, 112}, 0, 0, {{"kernel-x", "2"}, {"kernel-y", "-2"}, {"stride-x", "2"}, {"stride-y", "2"}, {"pad-x", "0"}, {"pad-y", "0"}, {"rounding-type", "ceil"}, {"pool-method", "max"}})
+#define negative_pool_stride_x_case layer_params("Pooling", {1, 3, 224, 224}, {1, 3, 112, 112}, 0, 0, {{"kernel-x", "2"}, {"kernel-y", "2"}, {"stride-x", "-2"}, {"stride-y", "2"}, {"pad-x", "0"}, {"pad-y", "0"}, {"rounding-type", "ceil"}, {"pool-method", "max"}})
+#define incorrect_pool_type_case layer_params("Pooling", {1, 3, 224, 224}, {1, 3, 112, 112}, 0, 0, {{"kernel-x", "2"}, {"kernel-y", "2"}, {"stride-x", "2"}, {"stride-y", "2"}, {"pad-x", "0"}, {"pad-y", "0"}, {"rounding-type", "ceil"}, {"pool-method", "unknown"}})
+
+// Norm
+#define negative_norm_local_size_case layer_params("Norm", {1, 3, 224, 224}, {1, 3, 224, 224}, 0, 0, {{"alpha", "9.9999997e-05"}, {"beta", "0.75"}, {"local_size", "-5"}, {"region", "across"}, {"k", "1"}})
+#define negative_norm_k_case layer_params("Norm", {1, 3, 224, 224}, {1, 3, 224, 224}, 0, 0, {{"alpha", "9.9999997e-05"}, {"beta", "0.75"}, {"local_size", "5"}, {"region", "across"}, {"k", "-2"}})
+
+
+// TODO: Add Concat and split tests
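+
+// A hypothetical example (illustrative only, using the CPU device) of how a plugin test
+// binary might wire the macros above into the parameterized fixture together with the
+// getTestName printer:
+//
+// INSTANTIATE_TEST_CASE_P(IncorrectIR, IncorrectIRTests,
+//         ::testing::Values(
+//                 ir_test_params("CPU", "FP32", negative_conv_kernel_x_case),
+//                 ir_test_params("CPU", "FP32", negative_pool_kernel_x_case)),
+//         getTestName);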
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/io_blob_tests/cropResize_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/io_blob_tests/cropResize_tests.hpp
new file mode 100644 (file)
index 0000000..8731f6c
--- /dev/null
@@ -0,0 +1,1175 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#ifdef USE_OPENCV
+#include <gtest/gtest.h>
+#include <opencv2/opencv.hpp>
+#include <ie_compound_blob.h>
+#include <precision_utils.h>
+#include <ie_precision.hpp>
+#include <ie_plugin_config.hpp>
+
+#include "tests_common.hpp"
+#include "tests_common_func.hpp"
+#include "format_reader_ptr.h"
+#include "single_layer_common.hpp"
+
+#include "functional_test_utils/plugin_cache.hpp"
+
+#include "ie_preprocess_data.hpp"
+
+#include <map>
+#include <functional_test_utils/precision_utils.hpp>
+#include <ngraph/partial_shape.hpp>
+#include <ngraph_functions/builders.hpp>
+#include <functional_test_utils/blob_utils.hpp>
+
+#include "ie_parallel.hpp"
+#include "details/ie_exception.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+
+template <Precision::ePrecision PRC>
+Blob::Ptr img2Blob(const std::vector<cv::Mat>& imgs, Layout layout) {
+    using data_t = typename PrecisionTrait<PRC>::value_type;
+
+    if (imgs.empty()) {
+        THROW_IE_EXCEPTION << "No images to create blob from";
+    }
+
+    // get image value in correct format
+    static const auto img_value = [] (const cv::Mat& img, size_t h, size_t w, size_t c) -> data_t {
+        switch (img.type())
+        {
+            case CV_8UC1: return img.at<uchar>(h, w);
+            case CV_8UC2: return img.at<cv::Vec2b>(h, w)[c];
+            case CV_8UC3: return img.at<cv::Vec3b>(h, w)[c];
+            case CV_8UC4: return img.at<cv::Vec4b>(h, w)[c];
+            case CV_32FC3: return img.at<cv::Vec3f>(h, w)[c];
+            case CV_32FC4: return img.at<cv::Vec4f>(h, w)[c];
+            default:
+                THROW_IE_EXCEPTION << "Image type is not recognized";
+        }
+    };
+
+    size_t channels = imgs[0].channels();
+    size_t height = imgs[0].size().height;
+    size_t width = imgs[0].size().width;
+
+    SizeVector dims = {imgs.size(), channels, height, width};
+    Blob::Ptr resultBlob = make_shared_blob<data_t>(TensorDesc(PRC, dims, layout));
+    resultBlob->allocate();
+
+    data_t* blobData = resultBlob->buffer().as<data_t*>();
+
+    for (size_t i = 0; i < imgs.size(); ++i) {
+        auto& img = imgs[i];
+        auto batch_offset = i * channels * height * width;
+
+        switch (layout) {
+            case Layout::NCHW: {
+                for (size_t c = 0; c < channels; c++) {
+                    for (size_t h = 0; h < height; h++) {
+                        for (size_t w = 0; w < width; w++) {
+                            blobData[batch_offset + c * width * height + h * width + w] =
+                                img_value(img, h, w, c);
+                        }
+                    }
+                }
+            }
+            break;
+            case Layout::NHWC: {
+                for (size_t h = 0; h < height; h++) {
+                    for (size_t w = 0; w < width; w++) {
+                        for (size_t c = 0; c < channels; c++) {
+                            blobData[batch_offset + h * width * channels + w * channels + c] =
+                                img_value(img, h, w, c);
+                        }
+                    }
+                }
+            }
+            break;
+            default:
+                THROW_IE_EXCEPTION << "Inconsistent input layout for image processing: " << layout;
+        }
+    }
+    return resultBlob;
+}
+
+template <Precision::ePrecision PRC>
+Blob::Ptr img2Blob(cv::Mat &img, Layout layout) {
+    return img2Blob<PRC>(std::vector<cv::Mat>({img}), layout);
+}
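+
+// Usage sketch (illustrative values): a single 300x300 BGR cv::Mat becomes a
+// 1x3x300x300 FP32 blob in NCHW layout:
+//
+// cv::Mat img(300, 300, CV_8UC3, cv::Scalar(0, 0, 255));
+// Blob::Ptr blob = img2Blob<Precision::FP32>(img, Layout::NCHW);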
+
+// base class with common functionality for test fixtures
+template<typename Params>
+class Base : public TestsCommon, public WithParamInterface<Params> {
+protected:
+    std::string _device;
+    Precision _netPrc = Precision(Precision::UNSPECIFIED);
+    TBlob<uint8_t>::Ptr _weights;
+    SizeVector _netDims;
+    Precision _inputPrecision = Precision(Precision::UNSPECIFIED);
+    float _threshold = 0.f;
+    Layout _inputLayout = Layout::ANY;
+    ResizeAlgorithm _resAlg = ResizeAlgorithm::NO_RESIZE;
+    ColorFormat _colorFormat = ColorFormat::RAW;
+    ROI _cropRoi = {};
+    bool _isAsync = false;
+    constexpr static const int _maxRepeat = 2;
+
+    std::map<std::string, std::string> device_config;
+
+    std::shared_ptr<InferenceEngine::Core> ie;
+
+    void SetUp() override {
+        TestsCommon::SetUp();
+
+        ie = PluginCache::get().ie();
+    }
+
+    void TearDown() override {
+        if (_device.find(CommonTestUtils::DEVICE_GPU) != std::string::npos) {
+            PluginCache::get().reset();
+        }
+    }
+
+public:
+    std::shared_ptr<ngraph::Function> createSubgraph(const SizeVector &dims, InferenceEngine::Precision prc = InferenceEngine::Precision::FP32) {
+        ngraph::element::Type type = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(prc);
+
+        auto param0 = std::make_shared<ngraph::op::Parameter>(type, ngraph::PartialShape{dims});
+        auto relu1 = std::make_shared<ngraph::opset1::Relu>(param0);
+
+        ngraph::ParameterVector params = {param0};
+        ngraph::ResultVector results = {std::make_shared<ngraph::op::Result>(relu1)};
+
+        auto fn_ptr = std::make_shared<ngraph::Function>(results, params);
+        return fn_ptr;
+    }
+
+    cv::ColorConversionCodes toCvtColorType(ColorFormat fmt) {
+        // Note: OpenCV matrices are always in BGR format by default
+        switch (fmt) {
+            case ColorFormat::BGRX: return cv::COLOR_BGR2BGRA;
+            case ColorFormat::RGBX: return cv::COLOR_BGR2RGBA;
+            case ColorFormat::RGB: return cv::COLOR_BGR2RGB;
+            default: THROW_IE_EXCEPTION << "Color format " << fmt << " not found";
+        }
+        return cv::COLOR_COLORCVT_MAX;
+    }
+
+    void auxDownscale(cv::Mat& img, cv::InterpolationFlags interpolation, int bonus_divider = 1) {
+        cv::resize(img, img, cv::Size(_netDims[3] / 2, _netDims[2] / 2) / bonus_divider, 0, 0,
+            interpolation);
+    }
+
+    InferenceEngine::ROI auxDownscaledRoi() {
+        const auto make_even = [] (size_t v) { return v % 2 != 0 ? v + 1 : v; };
+        // NCHW dims: _netDims[2] is height, _netDims[3] is width
+        auto w = make_even(_netDims[3]), h = make_even(_netDims[2]);
+        if (w % 4 != 0 || h % 4 != 0) {
+            return {0, 0, 0, w, h};
+        } else {
+            return {0, 0, 0, w / 2, h / 2};
+        }
+    }
+};
+
+// base resize parameters used by test fixtures
+template<typename ImageParam, typename LayoutParam = Layout>
+using resize_params = std::tuple<
+    std::string,                          // Plugin name
+    std::tuple<
+            Precision,                    // Network precision
+            SizeVector,                   // Net input sizes
+            std::pair<Precision, float>,  // Input data precision and threshold
+            LayoutParam,                  // Input data layout
+            ResizeAlgorithm,              // Resize algorithm kind
+            ColorFormat,                  // Input color format kind
+            ROI,                          // Cropped ROI coordinates
+            bool                          // Infer modes: true = Async, false = Sync
+    >
+>;
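+
+// A concrete value of this tuple might look as follows (a sketch; device, sizes, and
+// threshold are illustrative):
+//
+// resize_params<std::string> p{"CPU",
+//     std::make_tuple(Precision::FP32,                     // network precision
+//                     SizeVector{1, 3, 200, 200},           // net input dims (NCHW)
+//                     std::make_pair(Precision::U8, 1.f),   // input precision and threshold
+//                     Layout::NHWC, RESIZE_BILINEAR, ColorFormat::BGR,
+//                     ROI{0, 50, 50, 100, 100},             // cropped ROI: id, x, y, w, h
+//                     false)};                              // sync inference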
+
+namespace {
+// division operator for ROI
+InferenceEngine::ROI operator/(const InferenceEngine::ROI& roi, size_t divider) {
+    return InferenceEngine::ROI{
+        roi.id,
+        roi.posX / divider,
+        roi.posY / divider,
+        roi.sizeX / divider,
+        roi.sizeY / divider
+    };
+}
+}  // anonymous namespace
+
+InferenceEngine::ROI getRandomROI(const cv::Size& picSize)
+{
+    ROI rect;
+
+    rect.posX  = static_cast<size_t>((picSize.width  * 0.75) * ((double)std::rand() / (double)RAND_MAX));
+    rect.posY  = static_cast<size_t>((picSize.height * 0.75) * ((double)std::rand() / (double)RAND_MAX));
+    rect.sizeX = static_cast<size_t>((picSize.width  / 4) * ((double)std::rand() / (double)RAND_MAX));
+    rect.sizeY = static_cast<size_t>((picSize.height / 4) * ((double)std::rand() / (double)RAND_MAX));
+
+    // According to the initScratchLinear function, the picture width should be >= 2
+    if (rect.sizeX < 2)
+        rect.sizeX = picSize.width / 5;  // 20% of picture width to fit exactly in the last 25% of the picture
+
+    if (rect.sizeY < 2)
+        rect.sizeY = rect.sizeX;
+
+    if (rect.posX + rect.sizeX > picSize.width)
+        rect.sizeX = picSize.width - rect.posX;
+
+    if (rect.posY + rect.sizeY > picSize.height)
+        rect.sizeY = picSize.height - rect.posY;
+
+    return rect;
+}
+
+class RandomROITest: public Base<resize_params<std::string, Layout>>
+{
+protected:
+    void SetUp() override {
+        Base<resize_params<std::string, Layout>>::SetUp();
+        _device = std::get<0>(GetParam());
+        std::pair<Precision, float> _inPrcThresh;
+        std::tie(
+                _netPrc,
+                _netDims,
+                _inPrcThresh,
+                _inputLayout,
+                _resAlg,
+                _colorFormat,
+                _cropRoi,
+                _isAsync
+        ) = std::get<1>(GetParam());
+
+        if (((_colorFormat == BGRX) || (_colorFormat == RGBX)) && (_inputLayout != NHWC))
+        {
+            THROW_IE_EXCEPTION << "The color format with the layout aren't compatible.";
+        }
+
+        _inputPrecision = _inPrcThresh.first;
+        _threshold = _inPrcThresh.second;
+
+        if (_device == "HETERO")
+            device_config["TARGET_FALLBACK"] = "GPU,CPU";
+    }
+};
+
+TEST_P(RandomROITest, PreprocRandomROITest)
+{
+    auto fn_ptr = createSubgraph(_netDims);
+    CNNNetwork net(fn_ptr);
+
+    net.getInputsInfo().begin()->second->setPrecision(_inputPrecision);
+    net.getInputsInfo().begin()->second->setLayout(_inputLayout);
+    net.getInputsInfo().begin()->second->getPreProcess().setResizeAlgorithm(_resAlg);
+    net.getInputsInfo().begin()->second->getPreProcess().setColorFormat(_colorFormat);
+
+    auto execNet = ie->LoadNetwork(net, _device, device_config);
+    auto req = execNet.CreateInferRequest();
+
+    Blob::Ptr blob;
+    Blob::Ptr yBlob;
+    Blob::Ptr uvBlob;
+
+    auto w = 200;
+    auto h = 200;
+    switch (_colorFormat)
+    {
+        case BGR:
+            switch (_inputPrecision)
+            {
+                case Precision::U8:
+                {
+                    cv::Mat mat(h, w, CV_8UC3);
+                    cv::randu(mat, cv::Scalar(0, 0, 0), cv::Scalar(255,255, 255));
+                    blob = img2Blob<Precision::U8>(mat, _inputLayout);
+                    break;
+                }
+                case Precision::FP16:
+                {
+                    cv::Mat mat(h, w, CV_16FC3);
+                    cv::randu(mat, cv::Scalar(0, 0, 0), cv::Scalar(255,255, 255));
+                    blob = img2Blob<Precision::FP16>(mat, _inputLayout);
+                    break;
+                }
+                case Precision::FP32:
+                {
+                    cv::Mat mat(h, w, CV_32FC3);
+                    cv::randu(mat, cv::Scalar(0, 0, 0), cv::Scalar(255,255, 255));
+                    blob = img2Blob<Precision::FP32>(mat, _inputLayout);
+                    break;
+                }
+                default:
+                    break;
+            }
+            break;
+        case RGB:
+            switch (_inputPrecision)
+            {
+                case Precision::U8:
+                {
+                    cv::Mat mat(h, w, CV_8UC3);
+                    cv::randu(mat, cv::Scalar(0, 0, 0), cv::Scalar(255,255, 255));
+                    cv::cvtColor(mat, mat, toCvtColorType(_colorFormat));
+                    blob = img2Blob<Precision::U8>(mat, _inputLayout);
+                    break;
+                }
+                case Precision::FP16:
+                {
+                    cv::Mat mat(h, w, CV_16FC3);
+                    cv::randu(mat, cv::Scalar(0, 0, 0), cv::Scalar(255,255, 255));
+                    cv::cvtColor(mat, mat, toCvtColorType(_colorFormat));
+                    blob = img2Blob<Precision::FP16>(mat, _inputLayout);
+                    break;
+                }
+                case Precision::FP32:
+                {
+                    cv::Mat mat(h, w, CV_32FC3);
+                    cv::randu(mat, cv::Scalar(0, 0, 0), cv::Scalar(255,255, 255));
+                    cv::cvtColor(mat, mat, toCvtColorType(_colorFormat));
+                    blob = img2Blob<Precision::FP32>(mat, _inputLayout);
+                    break;
+                }
+                default:
+                    break;
+            }
+
+            break;
+        case BGRX:
+        case RGBX:
+            switch (_inputPrecision)
+            {
+                case Precision::U8:
+                {
+                    cv::Mat mat(h, w, CV_8UC4);
+                    cv::randu(mat, cv::Scalar(0, 0, 0), cv::Scalar(255,255, 255));
+                    cv::cvtColor(mat, mat, toCvtColorType(_colorFormat));
+                    blob = img2Blob<Precision::U8>(mat, _inputLayout);
+                    break;
+                }
+                case Precision::FP16:
+                {
+                    cv::Mat mat(h, w, CV_16FC4);
+                    cv::randu(mat, cv::Scalar(0, 0, 0), cv::Scalar(255,255, 255));
+                    cv::cvtColor(mat, mat, toCvtColorType(_colorFormat));
+                    blob = img2Blob<Precision::FP16>(mat, _inputLayout);
+                    break;
+                }
+                case Precision::FP32:
+                {
+                    cv::Mat mat(h, w, CV_32FC4);
+                    cv::randu(mat, cv::Scalar(0, 0, 0), cv::Scalar(255,255, 255));
+                    cv::cvtColor(mat, mat, toCvtColorType(_colorFormat));
+                    blob = img2Blob<Precision::FP32>(mat, _inputLayout);
+                    break;
+                }
+                default:
+                    break;
+            }
+
+            break;
+        case NV12:
+        {
+            cv::Mat yPlane(h, w, CV_MAKE_TYPE(CV_8U, 1));
+            cv::Mat uvPlane(h/2, w/2, CV_MAKE_TYPE(CV_8U, 2));
+
+            cv::randn(yPlane, cv::Scalar::all(127), cv::Scalar::all(40.f));
+            cv::randu(uvPlane, cv::Scalar::all(0), cv::Scalar::all(255));
+
+            yBlob = img2Blob<Precision::U8>(yPlane, _inputLayout);
+            uvBlob = img2Blob<Precision::U8>(uvPlane, _inputLayout);
+
+            break;
+        }
+        default:
+            break;
+    }
+
+
+    for (int i = 0; i <= 10; ++i)
+    {
+        ROI roi = getRandomROI(cv::Size(w, h));
+        Blob::Ptr cropBlob;
+
+        if (_colorFormat == NV12)
+        {
+            // NV12 stores UV subsampled by 2 in each dimension, so the Y-plane ROI must be even
+            roi.sizeX += roi.sizeX % 2;
+            roi.sizeY += roi.sizeY % 2;
+
+            auto roiUV = roi / 2;
+
+            auto cropYBlob = make_shared_blob(yBlob, roi);
+            auto cropUvBlob = make_shared_blob(uvBlob, roiUV);
+
+            cropBlob = make_shared_blob<NV12Blob>(cropYBlob, cropUvBlob);
+        }
+        else
+        {
+            cropBlob = make_shared_blob(blob, roi);
+        }
+
+        req.SetBlob(net.getInputsInfo().begin()->first, cropBlob);
+
+        if (_isAsync)
+        {
+            req.StartAsync();
+            req.Wait(IInferRequest::WaitMode::RESULT_READY);
+        }
+        else
+        {
+            req.Infer();
+        }
+    }
+}
+
+template<typename ImageParam, typename LayoutParam = Layout>
+class ResizeBase : public Base<resize_params<ImageParam, LayoutParam>> {
+protected:
+    bool _doColorConversion = false;
+};
+
+class CropResizeTest : public ResizeBase<std::string> {
+protected:
+    cv::Mat _img;
+
+    void SetUp() override {
+        ResizeBase<std::string>::SetUp();
+        _device = std::get<0>(GetParam());
+        std::pair<Precision, float> inPrcThresh;
+        std::tie(
+                _netPrc,
+                _netDims,
+                inPrcThresh,
+                _inputLayout,
+                _resAlg,
+                _colorFormat,
+                _cropRoi,
+                _isAsync
+        ) = std::get<1>(GetParam());
+
+        _img = cv::Mat(300, 300, CV_8UC3);
+        cv::randu(_img, cv::Scalar(0, 0, 0), cv::Scalar(255,255, 255));
+
+        _inputPrecision = inPrcThresh.first;
+        _threshold = inPrcThresh.second;
+
+        _doColorConversion = _colorFormat != ColorFormat::RAW && _colorFormat != ColorFormat::BGR;
+
+        if (_device == "HETERO")
+            device_config["TARGET_FALLBACK"] = "GPU,CPU";
+    }
+
+    void prepareInputAndReferenceImage(Blob::Ptr& inputBlob, Blob::Ptr& refBlob) {
+        // we use an image resized by OpenCV as a reference value.
+        cv::InterpolationFlags cv_interpolation = (_resAlg == RESIZE_BILINEAR) ? cv::INTER_LINEAR : cv::INTER_AREA;
+
+        // when no resize is performed (input size == output size), resizedImg is a shallow copy of
+        // _img in case of Precision::U8 and therefore it is color converted the same way as _img!
+        // doing explicit cloning prevents this
+        cv::Mat resizedImg = _img.clone();
+
+        switch (_inputPrecision) {
+            case Precision::FP32: {
+                cv::Mat resizedImg_;
+                _img.convertTo(resizedImg_, CV_32FC3);
+                cv::resize(resizedImg_, resizedImg, cv::Size(_netDims[3], _netDims[2]), 0, 0, cv_interpolation);
+
+                if (_doColorConversion) {
+                    cv::cvtColor(_img, _img, toCvtColorType(_colorFormat));
+                }
+                inputBlob = img2Blob<Precision::FP32>(_img, _inputLayout);
+            }
+                break;
+            case Precision::U8: {
+                cv::resize(_img, resizedImg, cv::Size(_netDims[3], _netDims[2]), 0, 0, cv_interpolation);
+
+                if (_doColorConversion) {
+                    cv::cvtColor(_img, _img, toCvtColorType(_colorFormat));
+                }
+                inputBlob = img2Blob<Precision::U8>(_img, _inputLayout);
+            }
+                break;
+            default:
+                THROW_IE_EXCEPTION << "Can't resize data of inconsistent precision: " << _inputPrecision;
+        }
+
+        refBlob = img2Blob<Precision::FP32>(resizedImg, Layout::NCHW);
+    }
+};
+
+TEST_P(CropResizeTest, resizeTest) {
+    auto fn_ptr = createSubgraph(_netDims);
+    CNNNetwork net(fn_ptr);
+
+    net.getInputsInfo().begin()->second->setPrecision(_inputPrecision);
+    net.getInputsInfo().begin()->second->setLayout(_inputLayout);
+    net.getInputsInfo().begin()->second->getPreProcess().setResizeAlgorithm(_resAlg);
+    net.getInputsInfo().begin()->second->getPreProcess().setColorFormat(_colorFormat);
+
+    auto execNet = ie->LoadNetwork(net, _device, device_config);
+    auto req = execNet.CreateInferRequest();
+
+    Blob::Ptr inputBlob;
+    Blob::Ptr refBlob;
+
+    prepareInputAndReferenceImage(inputBlob, refBlob);
+
+    req.SetBlob(net.getInputsInfo().begin()->first, inputBlob);
+
+    if (_isAsync) {
+        req.StartAsync();
+        req.Wait(IInferRequest::WaitMode::RESULT_READY);
+    } else {
+        req.Infer();
+    }
+
+    Blob::Ptr outputBlob = req.GetBlob(net.getOutputsInfo().begin()->first);
+
+    if (refBlob->size() != outputBlob->size()) {
+        THROW_IE_EXCEPTION << "reference and output blobs have different sizes!";
+    }
+
+    compare(*outputBlob, *refBlob, _threshold);
+}
+
+TEST_P(CropResizeTest, resizeAfterLoadTest) {
+    auto fn_ptr = createSubgraph(_netDims);
+    CNNNetwork net(fn_ptr);
+
+    net.getInputsInfo().begin()->second->setPrecision(_inputPrecision);
+    net.getInputsInfo().begin()->second->setLayout(_inputLayout);
+
+    auto execNet = ie->LoadNetwork(net, _device, device_config);
+    auto req = execNet.CreateInferRequest();
+
+    Blob::Ptr inputBlob;
+    Blob::Ptr refBlob;
+
+    prepareInputAndReferenceImage(inputBlob, refBlob);
+
+    PreProcessInfo info;
+    info.setResizeAlgorithm(_resAlg);
+    info.setColorFormat(_colorFormat);
+    req.SetBlob(net.getInputsInfo().begin()->first, inputBlob, info);
+
+    if (_isAsync) {
+        req.StartAsync();
+        req.Wait(IInferRequest::WaitMode::RESULT_READY);
+    } else {
+        req.Infer();
+    }
+
+    Blob::Ptr outputBlob = req.GetBlob(net.getOutputsInfo().begin()->first);
+
+    if (refBlob->size() != outputBlob->size()) {
+        THROW_IE_EXCEPTION << "reference and output blobs have different sizes!";
+    }
+
+    compare(*outputBlob, *refBlob, _threshold);
+}
+
+TEST_P(CropResizeTest, cropRoiTest) {
+    auto fn_ptr = createSubgraph(_netDims);
+    CNNNetwork net(fn_ptr);
+
+    net.getInputsInfo().begin()->second->setPrecision(_inputPrecision);
+    net.getInputsInfo().begin()->second->setLayout(_inputLayout);
+    net.getInputsInfo().begin()->second->getPreProcess().setResizeAlgorithm(_resAlg);
+    net.getInputsInfo().begin()->second->getPreProcess().setColorFormat(_colorFormat);
+
+    auto execNet = ie->LoadNetwork(net, _device, device_config);
+    auto req = execNet.CreateInferRequest();
+
+    Blob::Ptr inputBlob;
+    Blob::Ptr cropRoiBlob;
+    Blob::Ptr refBlob;
+
+    // we use an image resized by OpenCV as a reference value.
+    cv::InterpolationFlags cv_interpolation = (_resAlg == RESIZE_BILINEAR) ? cv::INTER_LINEAR : cv::INTER_AREA;
+
+    cv::Rect location;
+    location.x = _cropRoi.posX;
+    location.y = _cropRoi.posY;
+    location.width = _cropRoi.sizeX;
+    location.height = _cropRoi.sizeY;
+
+    auto clippedRect = location & cv::Rect(0, 0, _img.size().width, _img.size().height);
+    cv::Mat imgRoi = _img(clippedRect);
+
+    // when no resize is performed (input size == output size), resizedImg is a shallow copy of
+    // _img in case of Precision::U8 and therefore it is color converted the same way as _img!
+    // doing explicit cloning prevents this
+    cv::Mat resizedImg = _img.clone();
+
+    switch (_inputPrecision) {
+        case Precision::FP32: {
+            cv::Mat resizedImg_;
+            imgRoi.convertTo(resizedImg_, CV_32FC3);
+            cv::resize(resizedImg_, resizedImg, cv::Size(_netDims[3], _netDims[2]), 0, 0, cv_interpolation);
+
+            if (_doColorConversion) {
+                cv::cvtColor(_img, _img, toCvtColorType(_colorFormat));
+            }
+
+            inputBlob = img2Blob<Precision::FP32>(_img, _inputLayout);
+        }
+        break;
+        case Precision::U8: {
+            cv::resize(imgRoi, resizedImg, cv::Size(_netDims[3], _netDims[2]), 0, 0, cv_interpolation);
+
+            if (_doColorConversion) {
+                cv::cvtColor(_img, _img, toCvtColorType(_colorFormat));
+            }
+
+            inputBlob = img2Blob<Precision::U8>(_img, _inputLayout);
+        }
+        break;
+        default:
+            THROW_IE_EXCEPTION << "Can't resize data of inconsistent precision: " << _inputPrecision;
+    }
+    refBlob = img2Blob<Precision::FP32>(resizedImg, Layout::NCHW);
+
+    cropRoiBlob = make_shared_blob(inputBlob, _cropRoi);
+    ASSERT_EQ(_inputPrecision, cropRoiBlob->getTensorDesc().getPrecision());
+
+    req.SetBlob(net.getInputsInfo().begin()->first, cropRoiBlob);
+
+    if (_isAsync) {
+        req.StartAsync();
+        req.Wait(IInferRequest::WaitMode::RESULT_READY);
+    } else {
+        req.Infer();
+    }
+
+    Blob::Ptr outputBlob = req.GetBlob(net.getOutputsInfo().begin()->first);
+
+    if (refBlob->size() != outputBlob->size()) {
+        THROW_IE_EXCEPTION << "reference and output blobs have different sizes!";
+    }
+
+    compare(*outputBlob, *refBlob, _threshold);
+}
+
+class BatchResizeTest : public ResizeBase<std::vector<std::string>> {
+protected:
+    std::vector<cv::Mat> _imgs;
+
+    void SetUp() override {
+        ResizeBase<std::vector<std::string>>::SetUp();
+
+        _device = std::get<0>(GetParam());
+        std::pair<Precision, float> inPrcThresh;
+        std::tie(
+                _netPrc,
+                _netDims,
+                inPrcThresh,
+                _inputLayout,
+                _resAlg,
+                _colorFormat,
+                _cropRoi,
+                _isAsync
+        ) = std::get<1>(GetParam());
+        auto batch_size = _netDims[0];
+
+        _imgs.reserve(batch_size);
+        int h = 200, w = 200;
+        for (size_t i = 0; i < batch_size; ++i) {
+            cv::Mat img(h, w, CV_8UC3);
+            cv::randu(img, cv::Scalar(0, 0, 0), cv::Scalar(255,255, 255));
+            _imgs.push_back(img);
+        }
+
+        _inputPrecision = inPrcThresh.first;
+        _threshold = inPrcThresh.second;
+
+        _doColorConversion = _colorFormat != ColorFormat::RAW && _colorFormat != ColorFormat::BGR;
+
+        if (_device == "HETERO")
+            device_config["TARGET_FALLBACK"] = "GPU,CPU";
+    }
+};
+
+TEST_P(BatchResizeTest, batchTest) {
+    auto fn_ptr = createSubgraph(_netDims);
+    CNNNetwork net(fn_ptr);
+
+    net.getInputsInfo().begin()->second->setPrecision(_inputPrecision);
+    net.getInputsInfo().begin()->second->setLayout(_inputLayout);
+    net.getInputsInfo().begin()->second->getPreProcess().setResizeAlgorithm(_resAlg);
+    net.getInputsInfo().begin()->second->getPreProcess().setColorFormat(_colorFormat);
+
+    auto execNet = ie->LoadNetwork(net, _device, device_config);
+    auto req = execNet.CreateInferRequest();
+
+    Blob::Ptr inputBlob;
+    Blob::Ptr refBlob;
+
+    // we use an image resized by OpenCV as a reference value.
+    cv::InterpolationFlags cv_interpolation = (_resAlg == RESIZE_BILINEAR) ? cv::INTER_LINEAR : cv::INTER_AREA;
+
+    std::vector<cv::Mat> resizedImgs(_imgs.size());
+    for (size_t i = 0; i < _imgs.size(); ++i) {
+        switch (_inputPrecision) {
+            case Precision::FP32: {
+                cv::Mat resizedImg_;
+                _imgs[i].convertTo(resizedImg_, CV_32FC3);
+                cv::resize(resizedImg_, resizedImgs[i], cv::Size(_netDims[3], _netDims[2]),
+                    0, 0, cv_interpolation);
+            }
+            break;
+            case Precision::U8: {
+                cv::resize(_imgs[i], resizedImgs[i], cv::Size(_netDims[3], _netDims[2]),
+                    0, 0, cv_interpolation);
+            }
+            break;
+            default:
+                THROW_IE_EXCEPTION  << "Can't resize data of inconsistent precision: "
+                                    << _inputPrecision;
+        }
+    }
+
+    if (_doColorConversion) {
+        for (auto& img : _imgs) {
+                cv::cvtColor(img, img, toCvtColorType(_colorFormat));
+        }
+    }
+
+    // set inputBlob to the whole batch
+    switch (_inputPrecision) {
+        case Precision::FP32: {
+            inputBlob = img2Blob<Precision::FP32>(_imgs, _inputLayout);
+        }
+        break;
+        case Precision::U8: {
+            inputBlob = img2Blob<Precision::U8>(_imgs, _inputLayout);
+        }
+        break;
+        default:
+            THROW_IE_EXCEPTION  << "Can't resize data of inconsistent precision: "
+                                << _inputPrecision;
+    }
+
+    refBlob = img2Blob<Precision::FP32>(resizedImgs, Layout::NCHW);
+
+    req.SetBlob(net.getInputsInfo().begin()->first, inputBlob);
+
+    if (_isAsync) {
+        req.StartAsync();
+        req.Wait(IInferRequest::WaitMode::RESULT_READY);
+    } else {
+        req.Infer();
+    }
+
+    Blob::Ptr outputBlob = req.GetBlob(net.getOutputsInfo().begin()->first);
+
+    if (refBlob->size() != outputBlob->size()) {
+        THROW_IE_EXCEPTION << "reference and output blobs have different sizes!";
+    }
+
+    compare(*outputBlob, *refBlob, _threshold);
+}
+
+// Separate test fixture due to limited support of dynamic batching in plugins.
+class DynamicBatchResizeTest : public BatchResizeTest {
+protected:
+    const size_t _max_batch_size = 10;
+};
+
+TEST_P(DynamicBatchResizeTest, dynamicBatchTest) {
+    // Use N = 1 for model generation (IR + weights), set the maximum batch size
+    // (a constant) on the CNNNetwork, and pass batch_size from _netDims to the
+    // InferRequest as the dynamic batch value. Overall, this covers the usual case
+    // where the provided IR does not depend on a specific batch size, while during
+    // inference the batch size varies with the input data available.
+    auto batch_size = _netDims[0];
+    _netDims[0] = 1;
+
+    auto fn_ptr = createSubgraph(_netDims);
+    CNNNetwork net(fn_ptr);
+
+    net.getInputsInfo().begin()->second->setPrecision(_inputPrecision);
+    net.getInputsInfo().begin()->second->setLayout(_inputLayout);
+    net.getInputsInfo().begin()->second->getPreProcess().setResizeAlgorithm(_resAlg);
+    net.getInputsInfo().begin()->second->getPreProcess().setColorFormat(_colorFormat);
+
+    // enable dynamic batching and prepare for setting max batch limit.
+
+    device_config[PluginConfigParams::KEY_DYN_BATCH_ENABLED] = PluginConfigParams::YES;
+    net.setBatchSize(_max_batch_size);
+
+    auto execNet = ie->LoadNetwork(net, _device, device_config);
+    auto req = execNet.CreateInferRequest();
+
+    Blob::Ptr inputBlob;
+    Blob::Ptr outputBlob;
+    Blob::Ptr refBlob;
+
+    // we use an image resized by OpenCV as a reference value.
+    cv::InterpolationFlags cv_interpolation = (_resAlg == RESIZE_BILINEAR)
+                                              ? cv::INTER_LINEAR : cv::INTER_AREA;
+
+    for (int i = 0; i < _maxRepeat; ++i) {
+
+    _imgs.clear();
+    for (size_t j = 0; j < batch_size; ++j) {
+        cv::Mat img(200, 200, CV_8UC3);
+        cv::randu(img, cv::Scalar(0, 0, 0), cv::Scalar(255, 255, 255));
+        _imgs.emplace_back(img);
+        if (i != 0) auxDownscale(_imgs.back(), cv_interpolation);
+    }
+
+    // Fill the input images outside of the current batch with random values;
+    // these must not appear in the output blob after inference.
+    auto diff = _max_batch_size - batch_size;
+    for (; diff > 0; --diff) {
+        cv::Mat random(_imgs[0].size(), _imgs[0].type());
+        cv::randn(random, cv::Scalar::all(127), cv::Scalar::all(40.f));
+        _imgs.emplace_back(std::move(random));
+    }
+
+    // Use identity matrices to initialize the output blob and OpenCV-resized images
+    // to initialize the reference blob. Initializing the output with known values
+    // makes it possible to consistently check the part that lies outside the
+    // current batch size.
+    std::vector<cv::Mat> identityImgs(_imgs);
+    std::vector<cv::Mat> resizedImgs(_imgs);
+    for (size_t i = 0; i < batch_size; ++i) {
+        switch (_inputPrecision) {
+            case Precision::FP32: {
+                cv::Mat resizedImg_;
+                _imgs[i].convertTo(resizedImg_, CV_32FC3);
+                cv::resize(resizedImg_, resizedImgs[i], cv::Size(_netDims[3], _netDims[2]),
+                    0, 0, cv_interpolation);
+                identityImgs[i] = cv::Mat::eye(cv::Size(_netDims[3], _netDims[2]),
+                    resizedImg_.type());
+            }
+            break;
+            case Precision::U8: {
+                cv::resize(_imgs[i], resizedImgs[i], cv::Size(_netDims[3], _netDims[2]),
+                    0, 0, cv_interpolation);
+                identityImgs[i] = cv::Mat::eye(cv::Size(_netDims[3], _netDims[2]), _imgs[i].type());
+            }
+            break;
+            default:
+                THROW_IE_EXCEPTION  << "Can't resize data of inconsistent precision: "
+                                    << _inputPrecision;
+        }
+    }
+
+    // Update images that are outside of the current batch: these remain unchanged
+    // after inference, so resized == identity. If they differ, something modified
+    // them (in this test, most likely preprocessing), which must not happen.
+    for (size_t i = batch_size; i < _imgs.size(); ++i) {
+        resizedImgs[i] = cv::Mat::eye(cv::Size(_netDims[3], _netDims[2]), resizedImgs[0].type());
+        identityImgs[i] = cv::Mat::eye(cv::Size(_netDims[3], _netDims[2]), resizedImgs[0].type());
+    }
+
+    if (_doColorConversion) {
+        for (auto& img : _imgs) {
+                cv::cvtColor(img, img, toCvtColorType(_colorFormat));
+        }
+    }
+
+    // set inputBlob to the whole batch
+    switch (_inputPrecision) {
+        case Precision::FP32: {
+            inputBlob = img2Blob<Precision::FP32>(_imgs, _inputLayout);
+        }
+        break;
+        case Precision::U8: {
+            inputBlob = img2Blob<Precision::U8>(_imgs, _inputLayout);
+        }
+        break;
+        default:
+            THROW_IE_EXCEPTION  << "Can't resize data of inconsistent precision: "
+                                << _inputPrecision;
+    }
+
+    outputBlob = img2Blob<Precision::FP32>(identityImgs, Layout::NCHW);
+    refBlob = img2Blob<Precision::FP32>(resizedImgs, Layout::NCHW);
+
+    req.SetBlob(net.getInputsInfo().begin()->first, inputBlob);
+    req.SetBlob(net.getOutputsInfo().begin()->first, outputBlob);
+
+    // Note: the order of SetBlob and SetBatch matters! Preprocessing is initialized
+    // at the time of SetBlob. If SetBatch is called before SetBlob, it may have no
+    // effect on preprocessing because no preprocessing instances exist yet.
+    req.SetBatch(batch_size);
+    if (_isAsync) {
+        req.StartAsync();
+        req.Wait(IInferRequest::WaitMode::RESULT_READY);
+    } else {
+        req.Infer();
+    }
+
+    if (refBlob->size() != outputBlob->size()) {
+        THROW_IE_EXCEPTION << "reference and output blobs have different sizes!";
+    }
+
+    compare(*outputBlob, *refBlob, _threshold);
+
+    }
+}
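+
+// For quick reference, a minimal sketch of the dynamic-batch call sequence the
+// test above exercises. The helper name and arguments are illustrative only; it
+// assumes the same headers and using-declarations as the rest of this file.
+inline void dynamicBatchSketch(Core& core, CNNNetwork& network,
+                               const std::string& device,
+                               size_t maxBatch, size_t curBatch,
+                               const Blob::Ptr& input) {
+    std::map<std::string, std::string> cfg;
+    cfg[PluginConfigParams::KEY_DYN_BATCH_ENABLED] = PluginConfigParams::YES;
+    network.setBatchSize(maxBatch);  // bake the upper batch limit into the network
+    auto execNet = core.LoadNetwork(network, device, cfg);
+    auto req = execNet.CreateInferRequest();
+    req.SetBlob(network.getInputsInfo().begin()->first, input);  // sets up preprocessing
+    req.SetBatch(static_cast<int>(curBatch));  // must follow SetBlob (see the note above)
+    req.Infer();
+}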
+
+class ReorderTest : public ResizeBase<std::string, std::pair<Layout, Layout>> {
+protected:
+    cv::Mat _img;
+    Layout _networkLayout = Layout::ANY;
+
+    void SetUp() override {
+        ResizeBase<std::string, std::pair<Layout, Layout>>::SetUp();
+
+        _device = std::get<0>(GetParam());
+        std::string imgFile;
+        std::pair<Precision, float> inPrcThresh;
+        std::pair<Layout, Layout> layouts;
+        std::tie(
+                _netPrc,
+                _netDims,
+                inPrcThresh,
+                layouts,
+                _resAlg,
+                _colorFormat,
+                _cropRoi,
+                _isAsync
+        ) = std::get<1>(GetParam());
+
+        _img = cv::Mat(300, 300, CV_8UC3);
+        cv::randu(_img, cv::Scalar(0, 0, 0), cv::Scalar(255, 255, 255));
+
+        _inputPrecision = inPrcThresh.first;
+        _threshold = inPrcThresh.second;
+
+        _doColorConversion = _colorFormat != ColorFormat::RAW && _colorFormat != ColorFormat::BGR;
+
+        std::tie(_inputLayout, _networkLayout) = layouts;
+
+        if (_device == "HETERO")
+            device_config["TARGET_FALLBACK"] = "GPU,CPU";
+    }
+};
+
+TEST_P(ReorderTest, reorderTest) {
+    auto fn_ptr = createSubgraph(_netDims);
+    CNNNetwork net(fn_ptr);
+
+    net.getInputsInfo().begin()->second->setPrecision(_inputPrecision);
+    net.getInputsInfo().begin()->second->setLayout(_networkLayout);
+    net.getInputsInfo().begin()->second->getPreProcess().setResizeAlgorithm(_resAlg);
+    net.getInputsInfo().begin()->second->getPreProcess().setColorFormat(_colorFormat);
+
+    auto execNet = ie->LoadNetwork(net, _device, device_config);
+    auto req = execNet.CreateInferRequest();
+
+    Blob::Ptr inputBlob;
+    Blob::Ptr refBlob;
+
+    for (int i = 0; i < _maxRepeat; ++i) {
+
+    switch (_inputPrecision) {
+        case Precision::FP32: {
+            inputBlob = img2Blob<Precision::FP32>(_img, _inputLayout);
+        }
+        break;
+        case Precision::U8: {
+            inputBlob = img2Blob<Precision::U8>(_img, _inputLayout);
+        }
+        break;
+        default:
+            THROW_IE_EXCEPTION << "Can't resize data of inconsistent precision: " << _inputPrecision;
+    }
+
+    refBlob = img2Blob<Precision::FP32>(_img, Layout::NCHW);
+
+    req.SetBlob(net.getInputsInfo().begin()->first, inputBlob);
+
+    if (_isAsync) {
+        req.StartAsync();
+        req.Wait(IInferRequest::WaitMode::RESULT_READY);
+    } else {
+        req.Infer();
+    }
+
+    Blob::Ptr outputBlob = req.GetBlob(net.getOutputsInfo().begin()->first);
+
+    if (refBlob->size() != outputBlob->size()) {
+        THROW_IE_EXCEPTION << "reference and output blobs have different sizes!";
+    }
+
+    compare(*outputBlob, *refBlob, _threshold);
+
+    }
+}
+
+using nv12_test_params = std::tuple<
+    std::string,                          // Plugin name
+    std::tuple<
+            Precision,                    // Network precision
+            cv::Size,                     // Input image size
+            SizeVector,                   // Net input sizes
+            std::pair<Precision, float>,  // Input data precision and threshold
+            ResizeAlgorithm,              // Resize algorithm kind
+            ColorFormat,                  // Input color format kind
+            ROI,                          // Cropped ROI coordinates
+            bool                          // Infer modes: true = Async, false = Sync
+    >
+>;
+
+class NV12ColorConvertTest : public Base<nv12_test_params> {
+protected:
+    cv::Size _inputSize;  // Input image size
+    InferenceEngine::ROI _yRoi;
+    InferenceEngine::ROI _uvRoi;
+
+    void SetUp() override {
+        Base<nv12_test_params>::SetUp();
+
+        _device = std::get<0>(GetParam());
+        std::pair<Precision, float> inPrcThresh;
+        std::tie(
+                _netPrc,
+                _inputSize,
+                _netDims,
+                inPrcThresh,
+                _resAlg,
+                _colorFormat,
+                _cropRoi,
+                _isAsync
+        ) = std::get<1>(GetParam());
+
+        _inputPrecision = inPrcThresh.first;
+        _threshold = inPrcThresh.second;
+
+        _inputLayout = Layout::NCHW;
+
+        if (_inputPrecision != Precision::U8) {
+            THROW_IE_EXCEPTION << "Specified input precision != Precision::U8";
+        }
+
+        _yRoi = _cropRoi;
+        _uvRoi = _cropRoi / 2;
+
+        if (_device == "HETERO")
+            device_config["TARGET_FALLBACK"] = "GPU,CPU";
+    }
+};
+
+TEST_P(NV12ColorConvertTest, NV12Test) {
+    auto fn_ptr = createSubgraph(_netDims);
+    CNNNetwork net(fn_ptr);
+
+    net.getInputsInfo().begin()->second->setPrecision(_inputPrecision);
+    net.getInputsInfo().begin()->second->setLayout(_inputLayout);
+    net.getInputsInfo().begin()->second->getPreProcess().setResizeAlgorithm(_resAlg);
+    net.getInputsInfo().begin()->second->getPreProcess().setColorFormat(_colorFormat);
+
+    auto execNet = ie->LoadNetwork(net, _device, device_config);
+    auto req = execNet.CreateInferRequest();
+
+    // we use an image resized by openCV as a reference value.
+    cv::InterpolationFlags cv_interpolation = (_resAlg == RESIZE_BILINEAR) ? cv::INTER_LINEAR : cv::INTER_AREA;
+
+    for (int i = 0; i < _maxRepeat; ++i) {
+
+    auto yRoi = _yRoi;
+    auto uvRoi = _uvRoi;
+
+    if (i != 0) {
+        yRoi = auxDownscaledRoi();
+        uvRoi = yRoi / 2;
+    }
+
+    cv::Mat yPlane(_inputSize, CV_MAKE_TYPE(CV_8U, 1)),
+            uvPlane(cv::Size(_inputSize.width/2, _inputSize.height/2), CV_MAKE_TYPE(CV_8U, 2));
+
+    cv::randn(yPlane, cv::Scalar::all(127), cv::Scalar::all(40.f));
+    cv::randu(uvPlane, cv::Scalar::all(0), cv::Scalar::all(255));
+
+    auto toRect = [] (const InferenceEngine::ROI& roi) {
+        cv::Rect location;
+        location.x = roi.posX;
+        location.y = roi.posY;
+        location.width = roi.sizeX;
+        location.height = roi.sizeY;
+        return location;
+    };
+    // Note: two ROIs are used in case the Y ROI has a non-even offset at the
+    //       beginning, e.g. { .x = 25, ... }: the UV ROI then gets x = 12. This is
+    //       critical when a ROI is applied to a BGR image (converted from NV12), so
+    //       the ROIs are applied before conversion to always comply with the IE code.
+    cv::Rect yLocation = toRect(yRoi);
+    cv::Rect uvLocation = toRect(uvRoi);
+    auto yPlaneCropped = yPlane(yLocation);
+    auto uvPlaneCropped = uvPlane(uvLocation);
+
+    cv::Mat refImg;  // converted and resized image
+    cv::cvtColorTwoPlane(yPlaneCropped, uvPlaneCropped, refImg, cv::COLOR_YUV2BGR_NV12);
+    cv::resize(refImg, refImg, cv::Size(_netDims[3], _netDims[2]), 0, 0, cv_interpolation);
+    auto refBlob = img2Blob<Precision::FP32>(refImg, Layout::NCHW);
+
+    // Note: the Y and UV blobs holding the original data must stay alive until the
+    //       end of the execution: ROI blobs do not own the data.
+    auto yBlob = img2Blob<Precision::U8>(yPlane, NHWC);
+    auto uvBlob = img2Blob<Precision::U8>(uvPlane, NHWC);
+    auto croppedYBlob = make_shared_blob(yBlob, yRoi);
+    auto croppedUvBlob = make_shared_blob(uvBlob, uvRoi);
+    auto inputBlob = make_shared_blob<NV12Blob>(croppedYBlob, croppedUvBlob);
+
+    req.SetBlob(net.getInputsInfo().begin()->first, inputBlob);
+
+    if (_isAsync) {
+        req.StartAsync();
+        req.Wait(IInferRequest::WaitMode::RESULT_READY);
+    } else {
+        req.Infer();
+    }
+
+    Blob::Ptr outputBlob = req.GetBlob(net.getOutputsInfo().begin()->first);
+
+    if (refBlob->size() != outputBlob->size()) {
+        THROW_IE_EXCEPTION << "reference and output blobs have different sizes!";
+    }
+
+    compare(*outputBlob, *refBlob, _threshold);
+
+    }
+}
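+
+// NV12 stores chroma at half resolution, which is why the UV ROI above is the
+// Y ROI with every field halved (this is what the ROI operator/ used by this
+// test does). An explicit equivalent, for illustration only:
+inline ROI uvRoiFromY(const ROI& yRoi) {
+    ROI uv = yRoi;
+    uv.posX /= 2;   // integer halving: a Y offset of 25 maps to a UV offset of 12
+    uv.posY /= 2;
+    uv.sizeX /= 2;
+    uv.sizeY /= 2;
+    return uv;
+}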
+
+// multiple values macro wrapper
+#define MULTI_VALUE(...) __VA_ARGS__
+
+// sizes of the network to be tested
+#define TESTED_DIMS(batch_size) \
+    SizeVector({batch_size, 3, 200, 200}), \
+    SizeVector({batch_size, 3, 300, 300}), \
+    SizeVector({batch_size, 3, 400, 400}), \
+    SizeVector({batch_size, 3, 300, 199}), \
+    SizeVector({batch_size, 3, 199, 300})
+
+// a reduced set of network sizes to be tested
+#define TESTED_DIMS_SMALL(batch_size) \
+    SizeVector({batch_size, 3, 200, 200}), \
+    SizeVector({batch_size, 3, 400, 400})
+
+#define COLOR_FORMATS_RAW \
+    ColorFormat::RAW
+
+#define COLOR_FORMATS_3CH \
+    ColorFormat::BGR, ColorFormat::RGB
+
+#define COLOR_FORMATS_4CH \
+    ColorFormat::BGRX, ColorFormat::RGBX
+
+// #define PLUGING_CASE(_plugin, _test, _params) \
+//     INSTANTIATE_TEST_CASE_P(_plugin##_run, _test, Combine(Values(#_plugin "Plugin"), _params) )
+
+#define PLUGING_CASE_WITH_SUFFIX(_device, _suffix, _test, _params) \
+    INSTANTIATE_TEST_CASE_P(_device##_run##_suffix, _test, Combine(Values(#_device), _params) )
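+
+// Typical use, with an illustrative parameter generator name:
+//   PLUGING_CASE_WITH_SUFFIX(CPU, _smoke, BatchResizeTest, batch_params);
+// instantiates the suite as CPU_run_smoke over Combine(Values("CPU"), batch_params).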
+
+#endif  // USE_OPENCV
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/io_blob_tests/dims_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/io_blob_tests/dims_tests.hpp
new file mode 100644 (file)
index 0000000..09489fc
--- /dev/null
@@ -0,0 +1,201 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <memory>
+#include "common_test_utils/xml_net_builder/xml_net_builder.hpp"
+#include "tests_common.hpp"
+
+#include "functional_test_utils/plugin_cache.hpp"
+#include <string>
+#include <map>
+
+using namespace InferenceEngine;
+
+using test_param = std::tuple<
+    std::string,   // Plugin name
+    std::tuple<
+        Precision  // Network Precision
+    >
+>;
+
+class IO_BlobTest : public ::testing::TestWithParam<test_param> {
+protected:
+    std::string deviceName;
+    std::shared_ptr<InferenceEngine::Core> ie;
+    std::map<std::string, std::string> deviceConfig;
+    Precision netPrc;
+    CNNNetwork net;
+
+    void SetUp() override {
+        // default plugin instantiation
+        deviceName = std::get<0>(GetParam());
+        std::tie(
+                netPrc
+        ) = std::get<1>(GetParam());
+
+        // default model: in->conv->out
+        auto weights = make_shared_blob<uint8_t>({Precision::U8, {0}, Layout::C});
+        std::string model = ConvNet(/*MB*/ 2, weights);
+
+        ie = PluginCache::get().ie();
+
+        // loaded network
+        net = ie->ReadNetwork(model, weights);
+
+        if (deviceName == "HETERO")
+            deviceConfig["TARGET_FALLBACK"] = "GPU,CPU";
+    }
+
+    void TearDown() override {
+        if (deviceName.find(CommonTestUtils::DEVICE_GPU) != std::string::npos) {
+            PluginCache::get().reset();
+        }
+    }
+
+
+    std::string ConvNet(const int batch, TBlob<uint8_t>::Ptr &weights) {
+        if (netPrc == Precision::FP32) {
+            return ConvNetImpl<Precision::FP32>(batch, weights);
+        } else {
+            return ConvNetImpl<Precision::FP16>(batch, weights);
+        }
+    }
+
+    template <Precision::ePrecision PRC>
+    std::string ConvNetImpl(const int batch, TBlob<uint8_t>::Ptr &weights) {
+        using data_t = typename PrecisionTrait<PRC>::value_type;
+
+
+        size_t IC = 3, OC = 3, KH = 3, KW = 3;
+        std::vector<size_t> in{static_cast<size_t>(batch), IC, 25, 25};
+        std::vector<size_t> out{static_cast<size_t>(batch), OC, 25, 25};
+
+        std::map<std::string, std::string> params{
+#define PAR(_key, _val) { _key, std::to_string(_val) }
+                PAR("stride-x", 1),
+                PAR("stride-y", 1),
+                PAR("pad-x", 1),
+                PAR("pad-y", 1),
+                PAR("kernel-x", 3),
+                PAR("kernel-y", 3),
+                PAR("output", 3),
+                PAR("group", 1),
+#undef  PAR
+        };
+
+        std::ostringstream prc_name;
+        prc_name << PRC;
+
+        weights = make_shared_blob<uint8_t>({weights->getTensorDesc().getPrecision(),
+                                            { (OC * IC * KH * KW + OC) * sizeof(data_t) },
+                                            weights->getTensorDesc().getLayout()});
+        weights->allocate();
+        TestsCommon::fill_data(weights->buffer().as<float*>(),
+                  weights->size() / sizeof(float));
+        return CommonTestUtils::V2NetBuilder::buildNetworkWithOneInput("ConvNet", in, prc_name.str())
+                .addLayer("Convolution", prc_name.str(), &params, {{in}, {out}}, OC*IC*KH*KW*sizeof(data_t), OC*sizeof(data_t))
+                .finish(false);
+    }
+};
+
+TEST_P(IO_BlobTest, CheckDefaultValues_In) {
+    auto infos = net.getInputsInfo();
+    ASSERT_EQ(1, infos.size());
+
+    auto in_info = infos["Input0"];
+    ASSERT_EQ(in_info->getLayout(), NCHW);
+    ASSERT_EQ(in_info->getPrecision(), Precision::FP32);
+    ASSERT_EQ(in_info->getTensorDesc().getDims(), SizeVector({2,3,25,25}));
+
+    auto ex_net = ie->LoadNetwork(net, deviceName, deviceConfig);
+    auto inf_req = ex_net.CreateInferRequestPtr();
+    auto blob = inf_req->GetBlob("Input0");
+
+    ASSERT_EQ(blob->getTensorDesc().getLayout(), Layout::NCHW);
+    ASSERT_EQ(blob->getTensorDesc().getPrecision(), Precision::FP32);
+    ASSERT_EQ(blob->getTensorDesc().getDims(), SizeVector({2,3,25,25}));
+
+    auto ext_blob = make_shared_blob<float>({Precision::FP32, {2, 3, 25, 25}, Layout::NCHW});
+    ext_blob->allocate();
+    ASSERT_NO_THROW(inf_req->SetBlob("Input0", ext_blob));
+}
+
+TEST_P(IO_BlobTest, CheckDefaultValues_Out) {
+    auto infos = net.getOutputsInfo();
+    ASSERT_EQ(1, infos.size());
+
+    auto out_info = infos["Convolution1"];
+    ASSERT_EQ(out_info->getLayout(), NCHW);
+    ASSERT_EQ(out_info->getPrecision(), Precision::FP32);
+    ASSERT_EQ(out_info->getTensorDesc().getDims(), SizeVector({2,3,25,25}));
+
+    auto ex_net = ie->LoadNetwork(net, deviceName, deviceConfig);
+    auto inf_req = ex_net.CreateInferRequestPtr();
+    auto blob = inf_req->GetBlob("Convolution1");
+
+    ASSERT_EQ(blob->getTensorDesc().getLayout(), Layout::NCHW);
+    ASSERT_EQ(blob->getTensorDesc().getPrecision(), Precision::FP32);
+    ASSERT_EQ(blob->getTensorDesc().getDims(), SizeVector({2,3,25,25}));
+
+    auto ext_blob = make_shared_blob<float>({Precision::FP32, {2,3,25,25}, Layout::NCHW});
+    ext_blob->allocate();
+    ASSERT_NO_THROW(inf_req->SetBlob("Convolution1", ext_blob));
+}
+
+TEST_P(IO_BlobTest, DISABLED_NoAcceptBadBlobs_In) {
+    auto ex_net = ie->LoadNetwork(net, deviceName, deviceConfig);
+    auto inf_req = ex_net.CreateInferRequestPtr();
+
+    auto in_blob_0 = make_shared_blob<float>({Precision::FP32, {2, 3, 25, 25},     Layout::NCHW}); // not allocated
+    auto in_blob_1 = make_shared_blob<float>({Precision::FP32, {2, 3, 25, 25},     Layout::NHWC}); // wrong layout
+    auto in_blob_2 = make_shared_blob<float>({Precision::FP32, {1, 1, 3*25*25, 2}, Layout::NCHW}); // wrong dims
+    auto in_blob_3 = make_shared_blob<float>({Precision::FP32, {2*3*25*25},        Layout::C});    // wrong dims num
+    auto in_blob_4 = make_shared_blob<uint8_t>({Precision::U8, {2, 3, 25, 25},     Layout::NCHW}); // wrong precision
+
+    // in_blob_0 - is not allocated
+    in_blob_1->allocate();
+    in_blob_2->allocate();
+    in_blob_3->allocate();
+    in_blob_4->allocate();
+
+    ASSERT_THROW(inf_req->SetBlob("Input0", in_blob_0), std::exception);
+    ASSERT_THROW(inf_req->SetBlob("Input0", in_blob_1), std::exception);
+    ASSERT_THROW(inf_req->SetBlob("Input0", in_blob_2), std::exception);
+    ASSERT_THROW(inf_req->SetBlob("Input0", in_blob_3), std::exception);
+    ASSERT_THROW(inf_req->SetBlob("Input0", in_blob_4), std::exception);
+}
+
+TEST_P(IO_BlobTest, DISABLED_NoAcceptBadBlobs_Out) {
+    auto ex_net = ie->LoadNetwork(net, deviceName, deviceConfig);
+    auto inf_req = ex_net.CreateInferRequestPtr();
+
+    auto in_blob_0 = make_shared_blob<float>({Precision::FP32, {2, 3, 25, 25},     Layout::NCHW}); // not allocated
+    auto in_blob_1 = make_shared_blob<float>({Precision::FP32, {2, 3, 25, 25},     Layout::NHWC}); // wrong layout
+    auto in_blob_2 = make_shared_blob<float>({Precision::FP32, {1, 1, 3*25*25, 2}, Layout::NCHW}); // wrong dims
+    auto in_blob_3 = make_shared_blob<float>({Precision::FP32, {2*3*25*25},        Layout::C});    // wrong dims num
+    auto in_blob_4 = make_shared_blob<uint8_t>({Precision::U8, {2, 3, 25, 25},     Layout::NCHW}); // wrong precision
+
+    // in_blob_0 - is not allocated
+    in_blob_1->allocate();
+    in_blob_2->allocate();
+    in_blob_3->allocate();
+    in_blob_4->allocate();
+
+    ASSERT_THROW(inf_req->SetBlob("Convolution1", in_blob_0), std::exception);
+    ASSERT_THROW(inf_req->SetBlob("Convolution1", in_blob_1), std::exception);
+    ASSERT_THROW(inf_req->SetBlob("Convolution1", in_blob_2), std::exception);
+    ASSERT_THROW(inf_req->SetBlob("Convolution1", in_blob_3), std::exception);
+    ASSERT_THROW(inf_req->SetBlob("Convolution1", in_blob_4), std::exception);
+}
+
+static auto params = ::testing::Values(Precision::FP32);  // network precision
+
+static auto params_myriad = ::testing::Values(Precision::FP16);  // network precision
+
+#define PLUGING_CASE(_device, _test, _params) \
+    INSTANTIATE_TEST_CASE_P(_device##_run, _test, ::testing::Combine(::testing::Values(#_device), _params) )
+
+#define PLUGING_CASE_WITH_SUFFIX(_device, _suffix, _test, _params) \
+    INSTANTIATE_TEST_CASE_P(_device##_run##_suffix, _test, ::testing::Combine(::testing::Values(#_device), _params) )
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/io_blob_tests/layout_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/io_blob_tests/layout_tests.hpp
new file mode 100644 (file)
index 0000000..d2adc8c
--- /dev/null
@@ -0,0 +1,228 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include "common_test_utils/xml_net_builder/xml_net_builder.hpp"
+#include "tests_common.hpp"
+#include "precision_utils.h"
+
+#include "functional_test_utils/plugin_cache.hpp"
+
+#include <iostream>
+#include <string>
+#include <map>
+
+using namespace InferenceEngine;
+using namespace CommonTestUtils;
+
+struct conv_param {
+    size_t k_x, k_y;
+    size_t p_x, p_y;
+    size_t s_x, s_y;
+
+    std::vector<size_t> in;
+    std::vector<size_t> out;
+};
+
+using test_param = std::tuple<
+    std::string,                      // Plugin name
+    std::tuple<
+        conv_param,                   // Convolution params
+        std::pair<Precision, float>,  // Network precision
+        Layout, Layout,               // Layout in/out
+        Precision                     // Precision input
+    >
+>;
+
+class LayoutTTTest : public ::testing::TestWithParam<test_param> {
+protected:
+    conv_param cnv;
+    Precision netPrc;
+    float threshold;
+    std::string _device;
+    std::shared_ptr<InferenceEngine::Core> ie;
+    std::map<std::string, std::string> _deviceConfig;
+    Layout l_in, l_out;
+    Precision prc_in;
+
+    std::string model;
+    TBlob<uint8_t>::Ptr weights;
+    test_param p;
+
+    size_t N, IC, IH, IW, OC, OH, OW;
+
+    void SetUp() override {
+        _device = std::get<0>(GetParam());
+        ie = PluginCache::get().ie();
+        std::pair<Precision, float> netPrcThresh;
+        std::tie(
+            cnv,
+            netPrcThresh,
+            l_in, l_out,
+            prc_in
+        ) = std::get<1>(GetParam());
+
+        /*======== Some additional params =========*/
+        N  = cnv.in[0];
+        IC = cnv.in[1];
+        IH = cnv.in[2];
+        IW = cnv.in[3];
+        OC = cnv.out[1];
+        OH = cnv.out[2];
+        OW = cnv.out[3];
+
+        netPrc = netPrcThresh.first;
+        threshold = netPrcThresh.second;
+
+        if (netPrc == Precision::FP32) {
+            prepareNetwork<Precision::FP32>();
+        } else {
+            prepareNetwork<Precision::FP16>();
+        }
+
+        if (_device == "HETERO")
+            _deviceConfig["TARGET_FALLBACK"] = "GPU,CPU";
+    }
+
+    template <Precision::ePrecision PRC>
+    void prepareNetwork() {
+        using data_t = typename PrecisionTrait<PRC>::value_type;
+
+        /*======== Prepare model IR =========*/
+        std::map<std::string, std::string> conv_params = {
+                { "stride-x", std::to_string(cnv.s_x) },
+                { "stride-y", std::to_string(cnv.s_y) },
+                { "pad-x",    std::to_string(cnv.p_x) },
+                { "pad-y",    std::to_string(cnv.p_y) },
+                { "kernel-x", std::to_string(cnv.k_x) },
+                { "kernel-y", std::to_string(cnv.k_y) },
+                { "output",   std::to_string(cnv.out[1])},
+                { "group",    "1" }
+        };
+
+        InOutShapes inout = { {cnv.in}, {cnv.out} };
+
+        size_t KH = cnv.k_y;
+        size_t KW = cnv.k_x;
+        std::ostringstream prc_name;
+        prc_name << PRC;
+
+        model = V2NetBuilder::buildNetworkWithOneInput("ConvNet", cnv.in, prc_name.str())
+                .addLayer("Convolution", prc_name.str(), &conv_params, inout, OC*IC*KH*KW*sizeof(data_t), OC*sizeof(data_t))
+                .finish(false);
+
+        /*======== Prepare model Weights =========*/
+        weights = make_shared_blob<uint8_t>(
+            {Precision::U8, { ( OC*IC*KH*KW + OC )*sizeof(data_t) }, Layout::C});
+        weights->allocate();
+
+        data_t* w_ptr = weights->buffer().as<data_t*>();
+
+        for (size_t oc = 1; oc < OC+1; oc++)
+            for (size_t ic = 1; ic < IC+1; ic++)
+                for (size_t khw = 0; khw < KH*KW; khw++) {
+                    if (PRC == Precision::FP32) {
+                        *w_ptr++ = 1.0 / (ic * KH * KW * IC) * oc;
+                    } else {
+                        *w_ptr++ = PrecisionUtils::f32tof16(1.0 / (ic * KH * KW * IC) * oc);
+                    }
+                }
+
+        for (size_t oc = 1; oc < OC+1; oc++) {
+            if (PRC == Precision::FP32) {
+                *w_ptr++ = oc;
+            } else {
+                *w_ptr++ = PrecisionUtils::f32tof16(oc);
+            }
+        }
+    }
+
+    void TearDown() override {
+        if (_device.find(CommonTestUtils::DEVICE_GPU) != std::string::npos) {
+            PluginCache::get().reset();
+        }
+    }
+
+    template <Precision::ePrecision PRC>
+    Blob::Ptr input() {
+        using data_t = typename PrecisionTrait<PRC>::value_type;
+
+        SizeVector in_dims(cnv.in.begin(), cnv.in.end());
+        Blob::Ptr input = make_shared_blob<data_t>({PRC, in_dims, l_in});
+        input->allocate();
+        data_t* i_ptr = input->buffer().as<data_t*>();
+
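+        // Fill every element with its 1-based channel index; only the memory
+        // order differs between the NCHW and NHWC branches below.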
+        if (l_in == NCHW) {
+            for (size_t n = 0; n < N; n++)
+            for (size_t c = 1; c < IC + 1; c++)
+            for (size_t hw = 0; hw < IH * IW; hw++)
+                *i_ptr++ = c;
+        } else { // NHWC
+            for (size_t n = 0; n < N; n++)
+            for (size_t hw = 0; hw < IH * IW; hw++)
+            for (size_t c = 1; c < IC + 1; c++)
+                *i_ptr++ = c;
+        }
+
+        return input;
+    }
+
+    Blob::Ptr input(Precision prc) {
+        return prc == Precision::FP32 ? input<Precision::FP32>():
+               prc == Precision::U8   ? input<Precision::U8>()  :
+               prc == Precision::I8   ? input<Precision::I8>() :
+               prc == Precision::I16  ? input<Precision::I16>() :
+               prc == Precision::U16  ? input<Precision::U16>() :
+                                        input<Precision::FP32>();
+    }
+
+    void checkOutput(Blob::Ptr output) {
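+        // With the weights above, every input channel contributes c/IC to output
+        // channel c, so the convolution yields c and the bias adds another c; for
+        // the zero-padding convolution used here every value must equal c*2.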
+        float *o_ptr = output->buffer().as<float*>();
+
+        if (l_out == NCHW) {
+            for (size_t n = 0; n < N; n++)
+            for (size_t c = 1; c < OC+1; c++)
+            for (size_t hw = 0; hw < OH*OW; hw++)
+                ASSERT_NEAR(*o_ptr++, c*2, threshold);
+        } else {
+            for (size_t n = 0; n < N; n++)
+            for (size_t hw = 0; hw < OH*OW; hw++)
+            for (size_t c = 1; c < OC+1; c++)
+                ASSERT_NEAR(*o_ptr++, c*2, threshold);
+        }
+    }
+
+};
+
+TEST_P(LayoutTTTest, SomeTest1) {
+    CNNNetwork net = ie->ReadNetwork(model, weights);
+
+    net.getInputsInfo().begin()->second->setPrecision(prc_in);
+    net.getInputsInfo().begin()->second->setLayout(l_in);
+    net.getOutputsInfo().begin()->second->setLayout(l_out);
+
+    auto execNet = ie->LoadNetwork(net, _device, _deviceConfig);
+    auto req = execNet.CreateInferRequest();
+
+    req.SetBlob("Input0", input(prc_in));
+    req.Infer();
+
+    Blob::Ptr output = req.GetBlob("Convolution1");
+    checkOutput(output);
+}
+
+conv_param conv_p = { 3,3, // kernel
+                      0,0, // pads
+                      1,1, // strides
+                      {2,3,15,15},   // in shape
+                      {2,16,13,13}}; // out shape
+
+#define PLUGING_CASE(_device, _test, _params) \
+    INSTANTIATE_TEST_CASE_P(_device##_run, _test, ::testing::Combine(::testing::Values(#_device), _params) )
+
+#define PLUGING_CASE_WITH_SUFFIX(_device, _suffix, _test, _params) \
+    INSTANTIATE_TEST_CASE_P(_device##_run##_suffix, _test, ::testing::Combine(::testing::Values(#_device), _params) )
+
+#define PLUGING_CASE_WITH_PREFIX(_device, _prefix, _test, _params) \
+    INSTANTIATE_TEST_CASE_P(_prefix##_device##_run, _test, ::testing::Combine(::testing::Values(#_device), _params) )
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/lstm/lstm_cell_test.hpp b/inference-engine/tests_deprecated/functional/shared_tests/lstm/lstm_cell_test.hpp
new file mode 100644 (file)
index 0000000..52b8aea
--- /dev/null
@@ -0,0 +1,233 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "plg_test.hpp"
+#include "common_test_utils/xml_net_builder/xml_net_builder.hpp"
+
+#include <cmath>
+#include <vector>
+#include <string>
+
+using namespace InferenceEngine;
+using std::map;
+using std::pair;
+using std::vector;
+using std::string;
+
+static const size_t DataSize = 10;  // Data size
+static const size_t T = 1;   // Sequence length (1 for a single LSTM cell)
+static const size_t StateSize = 5;   // State size
+static const size_t G = 4;   // Number of gates
+
+class LSTMCellNet : CommonTestUtils::V2NetBuilder {
+public:
+    LSTMCellNet(size_t N, size_t S, size_t D): CommonTestUtils::V2NetBuilder(buildNetworkWithOneInput("LSTM_Cell_Net"
+            , { N,D }, "FP32")) {
+        const size_t wSz = S*G*(D+S);
+        const size_t bSz = S*G;
+        const size_t wSz_b = wSz * sizeof(float);
+        const size_t bSz_b = bSz * sizeof(float);
+
+        weights = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, SizeVector{ (wSz_b + bSz_b) }, Layout::C));
+        weights->allocate();
+
+        auto ptr = weights->buffer().as<float*>();
+        w_blob = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector{wSz}, 
+            TensorDesc::getLayoutByDims(SizeVector{wSz})), ptr);
+        b_blob = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector{bSz}, 
+            TensorDesc::getLayoutByDims(SizeVector{bSz})), ptr + wSz);
+
+        // input layer #1 and #2
+        addInputLayer("FP32", { N,S });
+        addInputLayer("FP32", { N,S });
+
+        // layer #3
+        map<string, string> lstm_p {{"hidden_size", std::to_string(S)}};
+        addLayer("LSTMCell", "FP32", &lstm_p,
+                { {{ N,D }, { N,S }, { N,S }},
+                  {{ N,S }, { N,S }} },
+                wSz_b, bSz_b);
+
+        vector<pair<string, string>> edges = {
+                {"0,0", "3,3"},
+                {"1,1", "3,4"},
+                {"2,2", "3,5"}
+        };
+        model = finish(&edges);
+    }
+
+    using Filler = std::function<void(Blob::Ptr)>;
+
+    CNNNetwork net(Filler w_filler = nullptr, Filler b_filler = nullptr) {
+        w_filler(w_blob);
+        b_filler(b_blob);
+
+        Core ie;
+        return ie.ReadNetwork(model, weights);
+    }
+
+private:
+    string model;
+    TBlob<uint8_t>::Ptr weights;
+    Blob::Ptr w_blob;
+    Blob::Ptr b_blob;
+};
+
+static inline bool cmp_near(float res, float ref, float eq_threshold) {
+    constexpr float eps = 1e-5;
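+    // eq_threshold left at the 1e-5 default selects the relative-error branch
+    // below; a caller-supplied threshold is treated as an absolute bound instead.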
+
+    if (eq_threshold != eps) {
+        return std::abs(res-ref) < eq_threshold;
+    }
+
+    auto ref_abs = std::abs(ref);
+    if (ref_abs > eps)
+        return std::abs(res-ref)/ref_abs < eps;
+    else
+        return std::abs(res-ref) < eps;
+}
+
+/**********************************************/
+/***     Test Body       **********************/
+/**********************************************/
+
+struct lstm_cell_param {
+    size_t N;    // Batch size
+    size_t S;    // State size
+    size_t D;    // Data  size
+};
+
+class LSTMCellTestBase : public PlgTest<lstm_cell_param> {
+ public:
+    void runSingleLSTMTest(const std::map<std::string, std::string> & config = {},
+                           float eq_threshold = 1e-5) {
+        auto p = param();
+        const size_t N = p.N;
+        const size_t S = p.S;
+        const size_t D = p.D;
+
+        /* Broadcast value through tensors */
+        const float H0 = 0.3, C0 = 0.77;
+
+        const float Wf = 0.1, Bf = 0.35;
+        const float Wi = 0.2, Bi = 0.25;
+        const float Wc = 0.5, Bc = 0.15;
+        const float Wo = 0.7, Bo = 0.05;
+
+        using Vals = float[T+1];
+        Vals f, i, c, o, C, H, X;
+
+        auto _f = [](float x) { return 1/(1 + std::exp(-x)); };  // sigmoid
+        auto _h = [](float x) { return std::tanh(x); };          // tanh
+
+        H[0] = H0; C[0] = C0;
+
+        for (int t = 1; t < T+1; t++) {  // t = 0 is the initial state, so the time index starts from 1.
+            X[t] = t;
+            f[t] = _f(Wf*(H[t-1] + X[t]) + Bf);
+            i[t] = _f(Wi*(H[t-1] + X[t]) + Bi);
+            c[t] = _h(Wc*(H[t-1] + X[t]) + Bc);
+            o[t] = _f(Wo*(H[t-1] + X[t]) + Bo);
+
+            C[t] = f[t] * C[t-1] + i[t] * c[t];
+            H[t] = o[t] * _h(C[t]);
+        }
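+        // This is the standard LSTM cell recurrence, collapsed to scalars:
+        //   f_t = sigmoid(Wf*(H_{t-1} + X_t) + Bf)    (forget gate)
+        //   i_t = sigmoid(Wi*(H_{t-1} + X_t) + Bi)    (input gate)
+        //   c_t = tanh   (Wc*(H_{t-1} + X_t) + Bc)    (candidate state)
+        //   o_t = sigmoid(Wo*(H_{t-1} + X_t) + Bo)    (output gate)
+        //   C_t = f_t*C_{t-1} + i_t*c_t,  H_t = o_t*tanh(C_t)
+        // The weight filler below makes the real matrix products collapse to this
+        // scalar form: each row holds D copies of W/D and S copies of W/S, so
+        // W*x + U*h sums to W*(x + h) when x and h are constant per tensor.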
+
+        /********  Weight and Input blob filler *****************/
+
+        auto w_filler = [=](Blob::Ptr blob) {
+            assert(blob->size() == G*S*(S+D));
+            auto ptr = blob->buffer().as<float*>();
+
+            float W[] = {Wf, Wi, Wc, Wo};
+            for (int g = 0; g < G; g++)
+                for (int s = 0; s < S; s++) {
+                    for (int i = 0; i < D; i++) *ptr++ = W[g] / D;
+                    for (int i = 0; i < S; i++) *ptr++ = W[g] / S;
+                }
+        };
+
+        auto b_filler = [=](Blob::Ptr blob) {
+            assert(blob->size() == G*S);
+            auto ptr = blob->buffer().as<float*>();
+
+            float B[] = {Bf, Bi, Bc, Bo};
+            for (int g = 0; g < G; g++)
+                for (int s = 0; s < S; s++) *ptr++ = B[g];
+        };
+
+        auto stat_filler = [=](Blob::Ptr blob, float val) {
+            assert(blob->size() == N*S);
+            auto ptr = blob->buffer().as<float*>();
+
+            for (int n = 0; n < N; n++)
+                for (int s = 0; s < S; s++) *ptr++ = val;
+        };
+
+        auto data_filler = [&](Blob::Ptr blob) {
+            assert(blob->size() == N*T*D);
+            auto ptr = blob->buffer().as<float*>();
+
+            for (int n = 0; n < N; n++)
+                for (int d = 0; d < D; d++) *ptr++ = X[1];
+        };
+
+        /*****  Output blob checkers  ************************/
+        auto stat_checker = [=](Blob::Ptr blob, float val) {
+            assert(blob->size() == N*S);
+            auto ptr = blob->buffer().as<float*>();
+
+            bool passed = true;
+            float maxDiff = 0;
+            for (int n = 0; n < N; n++)
+                for (int s = 0; s < S; s++) {
+
+                    if (!cmp_near(*ptr, val, eq_threshold)) {
+                        printf("float eq %dx%d fail: %f : %f\n", n, s, *ptr, val);
+                        passed = false;
+                    }
+                    maxDiff = std::max(std::abs(*ptr - val), maxDiff);
+                    ptr++;
+                }
+            if (eq_threshold != 1e-5) {
+                printf("max diff= %.6f\n", maxDiff);
+            }
+            return passed;
+        };
+
+        /************ Test Body  *****************************/
+
+        LSTMCellNet topology(N, S, D);
+        auto net = topology.net(w_filler, b_filler);
+
+        Core ie;
+        auto execNet = ie.LoadNetwork(net, device_name, config);
+        auto req = execNet.CreateInferRequest();
+
+        auto in_data    = req.GetBlob("Input0");
+        auto in_h_state = req.GetBlob("Input1");
+        auto in_c_state = req.GetBlob("Input2");
+
+        data_filler(in_data);
+        stat_filler(in_h_state, H0);
+        stat_filler(in_c_state, C0);
+
+        req.Infer();
+
+        auto out_h_state = req.GetBlob("LSTMCell3.0");
+        auto out_c_state = req.GetBlob("LSTMCell3.1");
+
+        EXPECT_TRUE(stat_checker(out_h_state, H[T]));
+        EXPECT_TRUE(stat_checker(out_c_state, C[T]));
+    }
+};
+
+using LSTMCellTest  = LSTMCellTestBase;
+
+TEST_P(LSTMCellTest, SingleLSTM) {
+    runSingleLSTMTest();
+}
+
+
+static const lstm_cell_param workload[] = {{1, StateSize, DataSize}, {2, StateSize, DataSize}};
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/lstm/lstm_ir_test.hpp b/inference-engine/tests_deprecated/functional/shared_tests/lstm/lstm_ir_test.hpp
new file mode 100644 (file)
index 0000000..a9e1c9d
--- /dev/null
@@ -0,0 +1,91 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "plg_test.hpp"
+
+#include <string>
+#include <vector>
+#include <ngraph_functions/subgraph_builders.hpp>
+
+// library taken from https://github.com/llohse/libnpy
+#include "npy.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+
+struct ModelInfo {
+    std::string dir, xml, bin;
+};
+
+class LSTM_IR_Test : public PlgTest<ModelInfo> {
+protected:
+    virtual void SetUp() {
+        PlgTest::SetUp();
+        auto p = param();
+    }
+};
+
+TEST_P(LSTM_IR_Test, canParseLSTM) {
+    auto fn_ptr = ngraph::builder::subgraph::makeTIwithLSTMcell();
+    CNNNetwork net(fn_ptr);
+
+    Core ie;
+    auto exec_net = ie.LoadNetwork(net, device_name);
+    auto inf_req = exec_net.CreateInferRequest();
+
+    auto _load_from_npy = [&](std::string name) {
+        std::replace(name.begin(), name.end(), '\\', '_');
+        std::replace(name.begin(), name.end(), '/', '_');
+        auto file_path = name + ".npy";
+
+        std::ifstream npy_file(file_path);
+        std::vector<unsigned long> npy_shape;
+        std::vector<float> npy_data;
+        if (npy_file.good())
+            npy::LoadArrayFromNumpy(file_path, npy_shape, npy_data);
+
+        return npy_data;
+    };
+
+    auto _save_to_npy = [&](std::string name,
+                            const std::vector<unsigned long>& npy_shape,
+                            const std::vector<float>& npy_data) {
+        std::replace(name.begin(), name.end(), '\\', '_');
+        std::replace(name.begin(), name.end(), '/', '_');
+        auto file_path = name + ".npy";
+
+        npy::SaveArrayAsNumpy(file_path, false, (unsigned int)(npy_shape.size()), npy_shape.data(), npy_data);
+    };
+
+    for (auto &info: net.getInputsInfo()) {
+        auto blob = inf_req.GetBlob(info.first);
+        auto npy = _load_from_npy(info.first);
+
+        if (!npy.empty())
+            std::copy_n(npy.data(), npy.size(), blob->buffer().as<float*>());
+    }
+
+    inf_req.Infer();
+
+    for (auto &info : net.getOutputsInfo()) {
+        auto blob = inf_req.GetBlob(info.first);
+        auto npy = _load_from_npy(info.first);
+
+        if (!npy.empty())
+            TestsCommon::compare(blob->buffer().as<float*>(), npy.data(), npy.size());
+
+        /* auto dims = blob->dims();
+
+        std::vector<unsigned long> shape;
+        for (auto d : dims) shape.push_back(d);
+
+        std::vector<float> npy_data(blob->buffer().as<float*>(), blob->buffer().as<float*>() + blob->size());
+        _save_to_npy(plugin_name + "_" + info.first, shape, npy_data); */
+    }
+}
+
+static std::vector<ModelInfo> workload = {
+/*  Directory             |       XML name              |   Bin name    */
+{"Basic_LSTM_S/FP32", "Basic_LSTM_S"},
+};
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/lstm/npy.hpp b/inference-engine/tests_deprecated/functional/shared_tests/lstm/npy.hpp
new file mode 100644 (file)
index 0000000..695b186
--- /dev/null
@@ -0,0 +1,497 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#ifndef NPY_H
+#define NPY_H
+
+#include <complex>
+#include <fstream>
+#include <string>
+#include <iostream>
+#include <sstream>
+#include <cstdint>
+#include <cstring>
+#include <vector>
+#include <stdexcept>
+#include <algorithm>
+#include <cstdio>
+#include <iterator>
+#include <unordered_map>
+
+
+namespace npy {
+
+/* Compile-time test for byte order.
+   If your compiler does not define these by default, you may want to define
+   one of these constants manually.
+   Defaults to little endian order. */
+#if defined(__BYTE_ORDER) && __BYTE_ORDER == __BIG_ENDIAN || \
+    defined(__BIG_ENDIAN__) || \
+    defined(__ARMEB__) || \
+    defined(__THUMBEB__) || \
+    defined(__AARCH64EB__) || \
+    defined(_MIBSEB) || defined(__MIBSEB) || defined(__MIBSEB__)
+const bool big_endian = true;
+#else
+const bool big_endian = false;
+#endif
+
+
+const char magic_string[] = "\x93NUMPY";
+const size_t magic_string_length = 6;
+
+const char little_endian_char = '<';
+const char big_endian_char = '>';
+const char no_endian_char = '|';
+
+constexpr char host_endian_char = ( big_endian ? 
+    big_endian_char : 
+    little_endian_char );
+
+/* npy array length */
+typedef unsigned long int ndarray_len_t;
+
+inline void write_magic(std::ostream& ostream, unsigned char v_major=1, unsigned char v_minor=0) {
+  ostream.write(magic_string, magic_string_length);
+  ostream.put(v_major);
+  ostream.put(v_minor);
+}
+
+inline void read_magic(std::istream& istream, unsigned char& v_major, unsigned char& v_minor) {
+  char buf[magic_string_length+2];
+  istream.read(buf, magic_string_length+2);
+
+  if(!istream) {
+    throw std::runtime_error("io error: failed reading file");
+  }
+
+  if (0 != std::memcmp(buf, magic_string, magic_string_length))
+    throw std::runtime_error("this file does not have a valid npy format.");
+
+  v_major = buf[magic_string_length];
+  v_minor = buf[magic_string_length+1];
+}
+
+// typestring magic
+struct Typestring {
+  private:
+    char c_endian;
+    char c_type;
+    int  len;
+
+  public:
+    inline std::string str() {
+      const size_t max_buflen = 16;
+      char buf[max_buflen];
+      std::sprintf(buf, "%c%c%u", c_endian, c_type, len);
+      return std::string(buf);
+    }
+
+    Typestring(const std::vector<float>& v) 
+      :c_endian {host_endian_char}, c_type {'f'}, len {sizeof(float)} {}
+    Typestring(const std::vector<double>& v) 
+      :c_endian {host_endian_char}, c_type {'f'}, len {sizeof(double)} {}
+    Typestring(const std::vector<long double>& v) 
+      :c_endian {host_endian_char}, c_type {'f'}, len {sizeof(long double)} {}
+
+    Typestring(const std::vector<char>& v) 
+      :c_endian {no_endian_char}, c_type {'i'}, len {sizeof(char)} {}
+    Typestring(const std::vector<short>& v) 
+      :c_endian {host_endian_char}, c_type {'i'}, len {sizeof(short)} {}
+    Typestring(const std::vector<int>& v) 
+      :c_endian {host_endian_char}, c_type {'i'}, len {sizeof(int)} {}
+    Typestring(const std::vector<long>& v)
+      :c_endian {host_endian_char}, c_type {'i'}, len {sizeof(long)} {}
+    Typestring(const std::vector<long long>& v) :c_endian {host_endian_char}, c_type {'i'}, len {sizeof(long long)} {}
+
+    Typestring(const std::vector<unsigned char>& v)
+      :c_endian {no_endian_char}, c_type {'u'}, len {sizeof(unsigned char)} {}
+    Typestring(const std::vector<unsigned short>& v)
+      :c_endian {host_endian_char}, c_type {'u'}, len {sizeof(unsigned short)} {}
+    Typestring(const std::vector<unsigned int>& v)
+      :c_endian {host_endian_char}, c_type {'u'}, len {sizeof(unsigned int)} {}
+    Typestring(const std::vector<unsigned long>& v)
+      :c_endian {host_endian_char}, c_type {'u'}, len {sizeof(unsigned long)} {}
+    Typestring(const std::vector<unsigned long long>& v)
+      :c_endian {host_endian_char}, c_type {'u'}, len {sizeof(unsigned long long)} {}
+
+    Typestring(const std::vector<std::complex<float>>& v)
+      :c_endian {host_endian_char}, c_type {'c'}, len {sizeof(std::complex<float>)} {}
+    Typestring(const std::vector<std::complex<double>>& v)
+      :c_endian {host_endian_char}, c_type {'c'}, len {sizeof(std::complex<double>)} {}
+    Typestring(const std::vector<std::complex<long double>>& v)
+      :c_endian {host_endian_char}, c_type {'c'}, len {sizeof(std::complex<long double>)} {}
+};
+
+inline void parse_typestring( std::string typestring){
+//  std::regex re ("'([<>|])([ifuc])(\\d+)'");
+//  std::smatch sm;
+//
+//  std::regex_match(typestring, sm, re );
+//
+//  if ( sm.size() != 4 ) {
+//    throw std::runtime_error("invalid typestring");
+//  }
+}
+
+namespace pyparse {
+
+/**
+  Removes leading and trailing whitespace
+  */
+inline std::string trim(const std::string& str) {
+  const std::string whitespace = " \t";
+  auto begin = str.find_first_not_of(whitespace);
+
+  if (begin == std::string::npos)
+    return "";
+
+  auto end = str.find_last_not_of(whitespace);
+
+  return str.substr(begin, end-begin+1);
+}
+
+
+inline std::string get_value_from_map(const std::string& mapstr) {
+  size_t sep_pos = mapstr.find_first_of(":");
+  if (sep_pos == std::string::npos)
+    return "";
+
+  std::string tmp = mapstr.substr(sep_pos+1);
+  return trim(tmp);
+}
+
+/**
+   Parses the string representation of a Python dict
+
+   The keys need to be known and may not appear anywhere else in the data.
+ */
+inline std::unordered_map<std::string, std::string> parse_dict(std::string in, std::vector<std::string>& keys) {
+
+  std::unordered_map<std::string, std::string> map;
+
+  if (keys.size() == 0)
+    return map;
+
+  in = trim(in);
+
+  // unwrap dictionary
+  if ((in.front() == '{') && (in.back() == '}'))
+    in = in.substr(1, in.length()-2);
+  else
+    throw std::runtime_error("Not a Python dictionary.");
+
+  std::vector<std::pair<size_t, std::string>> positions;
+
+  for (auto const& value : keys) {
+    size_t pos = in.find( "'" + value + "'" );
+
+    if (pos == std::string::npos)
+      throw std::runtime_error("Missing '"+value+"' key.");
+
+    std::pair<size_t, std::string> position_pair { pos, value };
+    positions.push_back(position_pair);
+  }
+
+  // sort by position in dict
+  std::sort(positions.begin(), positions.end() );
+
+  for(size_t i = 0; i < positions.size(); ++i) {
+    std::string raw_value;
+    size_t begin { positions[i].first };
+    size_t end { std::string::npos };
+
+    std::string key = positions[i].second;
+
+    if ( i+1 < positions.size() )
+      end = positions[i+1].first;
+
+    raw_value = in.substr(begin, end-begin);
+
+    raw_value = trim(raw_value);
+
+    if (raw_value.back() == ',')
+      raw_value.pop_back();
+
+    map[key] = get_value_from_map(raw_value);
+  }
+
+  return map;
+}
+
+/**
+  Parses the string representation of a Python boolean
+  */
+inline bool parse_bool(const std::string& in) {
+  if (in == "True")
+    return true;
+  if (in == "False")
+    return false;
+
+  throw std::runtime_error("Invalid python boolan.");
+}
+
+/**
+  Parses the string representation of a Python str
+  */
+inline std::string parse_str(const std::string& in) {
+  if ((in.front() == '\'') && (in.back() == '\''))
+    return in.substr(1, in.length()-2);
+
+  throw std::runtime_error("Invalid python string.");
+}
+
+/**
+  Parses the string representation of a Python tuple into a vector of its items
+ */
+inline std::vector<std::string> parse_tuple(std::string in) {
+  std::vector<std::string> v;
+  const char separator = ',';
+
+  in = trim(in);
+
+  if ((in.front() == '(') && (in.back() == ')'))
+    in = in.substr(1, in.length()-2);
+  else
+    throw std::runtime_error("Invalid Python tuple.");
+
+  std::istringstream iss(in);
+
+  for (std::string token; std::getline(iss, token, separator);) {
+      v.push_back(token);
+  }
+
+  return v;
+}
+
+template <typename T>
+inline std::string write_tuple(const std::vector<T>& v) {
+  if (v.size() == 0)
+    return "";
+
+  std::ostringstream ss;
+
+  if (v.size() == 1) {
+    ss << "(" << v.front() << ",)";
+  } else {
+    const std::string delimiter = ", ";
+    // v.size() > 1
+    ss << "(";
+    std::copy(v.begin(), v.end()-1, std::ostream_iterator<T>(ss, delimiter.c_str()));
+    ss << v.back();
+    ss << ")";
+  }
+
+  return ss.str();
+}
+
+inline std::string write_boolean(bool b) {
+  if(b)
+    return "True";
+  else
+    return "False";
+}
+
+} // namespace pyparse
+
+
+inline void parse_header(std::string header, std::string& descr, bool& fortran_order, std::vector<ndarray_len_t>& shape) {
+  /*
+     The first 6 bytes are a magic string: exactly "x93NUMPY".
+     The next 1 byte is an unsigned byte: the major version number of the file format, e.g. x01.
+     The next 1 byte is an unsigned byte: the minor version number of the file format, e.g. x00. Note: the version of the file format is not tied to the version of the numpy package.
+     The next 2 bytes form a little-endian unsigned short int: the length of the header data HEADER_LEN.
+     The next HEADER_LEN bytes form the header data describing the array's format. It is an ASCII string which contains a Python literal expression of a dictionary. It is terminated by a newline ('n') and padded with spaces ('x20') to make the total length of the magic string + 4 + HEADER_LEN be evenly divisible by 16 for alignment purposes.
+     The dictionary contains three keys:
+
+     "descr" : dtype.descr
+     An object that can be passed as an argument to the numpy.dtype() constructor to create the array's dtype.
+     "fortran_order" : bool
+     Whether the array data is Fortran-contiguous or not. Since Fortran-contiguous arrays are a common form of non-C-contiguity, we allow them to be written directly to disk for efficiency.
+     "shape" : tuple of int
+     The shape of the array.
+     For repeatability and readability, this dictionary is formatted using pprint.pformat() so the keys are in alphabetic order.
+   */
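+
+  // For example, a version 1.0 header payload accepted by this parser
+  // (trailing newline and space padding omitted):
+  //   {'descr': '<f4', 'fortran_order': False, 'shape': (2, 3), }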
+
+  // remove trailing newline
+  if (header.back() != '\n')
+    throw std::runtime_error("invalid header");
+  header.pop_back();
+
+  // parse the dictionary
+  std::vector<std::string> keys { "descr", "fortran_order", "shape" };
+  auto dict_map = npy::pyparse::parse_dict(header, keys);
+
+  if (dict_map.size() == 0)
+    throw std::runtime_error("invalid dictionary in header");
+
+  std::string descr_s = dict_map["descr"];
+  std::string fortran_s = dict_map["fortran_order"];
+  std::string shape_s = dict_map["shape"];
+
+  // TODO: extract info from typestring
+  parse_typestring(descr_s);
+  // strip the surrounding quotes from the descr string
+  descr = npy::pyparse::parse_str(descr_s);
+
+  // convert literal Python bool to C++ bool
+  fortran_order = npy::pyparse::parse_bool(fortran_s);
+
+  // parse the shape tuple
+  auto shape_v = npy::pyparse::parse_tuple(shape_s);
+  if (shape_v.size() == 0)
+    throw std::runtime_error("invalid shape tuple in header");
+
+  for ( auto item : shape_v ) {
+    ndarray_len_t dim = static_cast<ndarray_len_t>(std::stoul(item));
+    shape.push_back(dim);
+  }
+}
+
+
+inline std::string write_header_dict(const std::string& descr, bool fortran_order, const std::vector<ndarray_len_t>& shape) {
+    std::string s_fortran_order = npy::pyparse::write_boolean(fortran_order);
+    std::string shape_s = npy::pyparse::write_tuple(shape);
+
+    return "{'descr': '" + descr + "', 'fortran_order': " + s_fortran_order + ", 'shape': " + shape_s + ", }";
+}
+
+inline void write_header(std::ostream& out, const std::string& descr, bool fortran_order, const std::vector<ndarray_len_t>& shape_v)
+{
+    std::string header_dict = write_header_dict(descr, fortran_order, shape_v);
+
+    size_t length = magic_string_length + 2 + 2 + header_dict.length() + 1;
+
+    unsigned char version[2] = {1, 0};
+    if (length >= 255*255) {
+      length = magic_string_length + 2 + 4 + header_dict.length() + 1;
+      version[0] = 2;
+      version[1] = 0;
+    }
+    size_t padding_len = 16 - length % 16;
+    std::string padding (padding_len, ' ');
+
+    // write magic
+    write_magic(out, version[0], version[1]);
+
+    // write header length
+    if (version[0] == 1 && version[1] == 0) {
+      char header_len_le16[2];
+      uint16_t header_len = header_dict.length() + padding.length() + 1;
+
+      header_len_le16[0] = (header_len >> 0) & 0xff;
+      header_len_le16[1] = (header_len >> 8) & 0xff;
+      out.write(reinterpret_cast<char *>(header_len_le16), 2);
+    } else {
+      char header_len_le32[4];
+      uint32_t header_len = header_dict.length() + padding.length() + 1;
+
+      header_len_le32[0] = (header_len >> 0) & 0xff;
+      header_len_le32[1] = (header_len >> 8) & 0xff;
+      header_len_le32[2] = (header_len >> 16) & 0xff;
+      header_len_le32[3] = (header_len >> 24) & 0xff;
+      out.write(reinterpret_cast<char *>(header_len_le32), 4);
+    }
+
+    out << header_dict << padding << '\n';
+}
+
+inline std::string read_header(std::istream& istream) {
+    // check magic bytes and version number
+    unsigned char v_major, v_minor;
+    read_magic(istream, v_major, v_minor);
+
+    uint32_t header_length;
+    if (v_major == 1 && v_minor == 0) {
+
+      char header_len_le16[2];
+      istream.read(header_len_le16, 2);
+      // go through unsigned char to avoid sign extension of high bytes
+      header_length = (static_cast<unsigned char>(header_len_le16[0]) << 0)
+                    | (static_cast<unsigned char>(header_len_le16[1]) << 8);
+
+      if((magic_string_length + 2 + 2 + header_length) % 16 != 0) {
+          // TODO: display warning
+      }
+    } else if (v_major == 2 && v_minor == 0) {
+      char header_len_le32[4];
+      istream.read(header_len_le32, 4);
+
+      // go through unsigned char to avoid sign extension of high bytes
+      header_length = (static_cast<unsigned char>(header_len_le32[0]) <<  0)
+                    | (static_cast<unsigned char>(header_len_le32[1]) <<  8)
+                    | (static_cast<unsigned char>(header_len_le32[2]) << 16)
+                    | (static_cast<unsigned char>(header_len_le32[3]) << 24);
+
+      if((magic_string_length + 2 + 4 + header_length) % 16 != 0) {
+        // TODO: display warning
+      }
+    }else{
+       throw std::runtime_error("unsupported file format version");
+    }
+
+    auto buf_v = std::vector<char>();
+    buf_v.resize(header_length);  // resize, not reserve: read() writes through data()
+    istream.read(buf_v.data(), header_length);
+    std::string header(buf_v.data(), header_length);
+
+    return header;
+}
+
+inline ndarray_len_t comp_size(const std::vector<ndarray_len_t>& shape) {
+    ndarray_len_t size = 1;
+    for (ndarray_len_t i : shape )
+      size *= i;
+
+    return size;
+}
+
+template<typename Scalar>
+inline void SaveArrayAsNumpy( const std::string& filename, bool fortran_order, unsigned int n_dims, const unsigned long shape[], const std::vector<Scalar>& data)
+{
+    Typestring typestring_o(data);
+    std::string typestring = typestring_o.str();
+
+    std::ofstream stream( filename, std::ofstream::binary);
+    if(!stream) {
+        throw std::runtime_error("io error: failed to open a file.");
+    }
+
+    std::vector<ndarray_len_t> shape_v(shape, shape+n_dims);
+    write_header(stream, typestring, fortran_order, shape_v);
+
+    auto size = static_cast<size_t>(comp_size(shape_v));
+
+    stream.write(reinterpret_cast<const char*>(data.data()), sizeof(Scalar) * size);
+}
+
+
+template<typename Scalar>
+inline void LoadArrayFromNumpy(const std::string& filename, std::vector<unsigned long>& shape, std::vector<Scalar>& data)
+{
+    std::ifstream stream(filename, std::ifstream::binary);
+    if(!stream) {
+        throw std::runtime_error("io error: failed to open a file.");
+    }
+
+    std::string header = read_header(stream);
+
+    // parse header
+    bool fortran_order;
+    std::string typestr;
+
+    parse_header(header, typestr, fortran_order, shape);
+
+    // check if the typestring matches the given one
+    Typestring typestring_o {data};
+    std::string expect_typestr = typestring_o.str();
+    if (typestr != expect_typestr) {
+      throw std::runtime_error("formatting error: typestrings not matching");
+    }
+
+
+    // compute the data size based on the shape
+    auto size = static_cast<size_t>(comp_size(shape));
+    data.resize(size);
+
+    // read the data
+    stream.read(reinterpret_cast<char*>(data.data()), sizeof(Scalar)*size);
+}
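+// Usage sketch for the two entry points above (the file name is illustrative):
+//
+//   std::vector<float> out = {1.f, 2.f, 3.f, 4.f, 5.f, 6.f};
+//   const unsigned long shape[] = {2, 3};
+//   npy::SaveArrayAsNumpy("example.npy", false, 2, shape, out);
+//
+//   std::vector<unsigned long> in_shape;
+//   std::vector<float> in;
+//   npy::LoadArrayFromNumpy("example.npy", in_shape, in);  // in_shape == {2, 3}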
+
+} // namespace npy
+
+#endif // NPY_H
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/lstm/plg_test.hpp b/inference-engine/tests_deprecated/functional/shared_tests/lstm/plg_test.hpp
new file mode 100644 (file)
index 0000000..0f5b0b6
--- /dev/null
@@ -0,0 +1,73 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+/**
+ * @brief Base test class for per-plugin tests
+ *
+ * Helper for running the same test cases against all plugins.
+ * @file
+ */
+
+#include <gtest/gtest.h>
+#include <algorithm>
+#include <cstddef>
+#include <functional>
+#include <map>
+#include <string>
+#include <tuple>
+#include <tests_common.hpp>
+#include <ie_core.hpp>
+
+/**
+ * @brief Container for plugin_name and test params
+ *
+ * plugin_name is a mandatory field.
+ */
+template<typename P>
+using PlgTestParam = std::tuple<std::string, P>;
+
+/**
+ * @brief Base class for per plugin tests
+ */
+template<typename P = std::nullptr_t>
+class PlgTest : public testing::TestWithParam<PlgTestParam<P>> {
+protected:
+    std::map<std::string, std::string>  config;
+    virtual void SetUp() {
+        device_name = std::get<0>(this->GetParam());
+        // test parameter names may not contain ':', so it is encoded as '_';
+        // map it back here (e.g. HETERO_CPU -> HETERO:CPU)
+        std::transform(device_name.begin(), device_name.end(),
+            device_name.begin(), [] (char v) { return v == '_' ? ':' : v; });
+    }
+
+    const P &param() const {
+        return std::get<1>(this->GetParam());
+    }
+
+    std::string device_name;
+};
+
+/**
+ * @brief Helper to print name
+ */
+template<typename P>
+class Named {
+public:
+    Named(std::function<std::string(P)> clb) : _clb(clb) {}
+
+    const std::string operator() (const testing::TestParamInfo<PlgTestParam<P>> &p) {
+        return _clb(std::get<1>(p.param));
+    }
+private:
+    const std::function<std::string(P)> _clb;
+};
+
+/**
+ * @brief Macro to instantiate a per-plugin test case with a list of parameters.
+ */
+#define RUN_CASE_P_WITH_SUFFIX(_plugin, _suffix, _test, _params) \
+    INSTANTIATE_TEST_CASE_P(_plugin##_run##_suffix, _test, ::testing::Combine(::testing::Values(#_plugin), ::testing::ValuesIn(_params) ))
+
+/**
+ * @brief Macro to instantiate a per-plugin test case with a Cartesian product of parameters.
+ */
+#define RUN_CASE_CP_WITH_SUFFIX(_plugin, _suffix, _test, _params, ...) \
+    INSTANTIATE_TEST_CASE_P(_plugin##_run##_suffix, _test, ::testing::Combine(::testing::Values(#_plugin), _params), __VA_ARGS__ )
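+
+// Usage sketch (test and parameter names are illustrative), assuming a
+// fixture aliased from PlgTest with an int parameter:
+//
+//   using MyPlgTest = PlgTest<int>;
+//   TEST_P(MyPlgTest, Smoke) { /* device_name and param() are available */ }
+//
+//   static const std::vector<int> my_params = {1, 2, 4};
+//   RUN_CASE_P_WITH_SUFFIX(CPU, _smoke, MyPlgTest, my_params);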
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_gen.cpp b/inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_gen.cpp
new file mode 100644 (file)
index 0000000..7166906
--- /dev/null
@@ -0,0 +1,319 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "rnn_gen.hpp"
+#include "rnn_referee.hpp"
+#include "rnn_util.hpp"
+#include "common_test_utils/xml_net_builder/xml_net_builder.hpp"
+#include <ie_core.hpp>
+
+#include <functional>
+#include <map>
+#include <string>
+#include <vector>
+
+using namespace InferenceEngine;
+using std::map;
+using std::pair;
+using std::vector;
+using std::string;
+
+using Shape = InferenceEngine::SizeVector;
+
+RNNGen::RNNGen(size_t batch, size_t seq, CellDesc cell, Mode mode, Direction dir, int axis) :
+        N(batch), T(seq), axis(axis), mode(mode), cell(cell), dir(dir),
+        neg(mode == TI_CSTM) {
+    size_t effective_T = (mode == DYN_SEQ) ? T - 1 : T;
+
+    referee = RNN_Referee::create_referee(cell, N, effective_T, D, S);
+
+    st_dim = {N, S};
+    id_dim = (axis == 1) ? Shape{N, T, D} : Shape{T, N, D};
+    od_dim = (axis == 1) ? Shape{N, T, S} : Shape{T, N, S};
+    seq_l_dim = {N};
+
+    state_num = referee->stateNum();
+    wSzB = referee->wSize() * sizeof(float);
+    bSzB = referee->bSize() * sizeof(float);
+
+    weights = std::make_shared<TBlob<uint8_t>>(TensorDesc(Precision::U8, SizeVector{(wSzB + bSzB)}, Layout::C));
+    weights->allocate();
+
+    auto ptr = weights->buffer().as<float *>();
+    SizeVector w_dims{referee->wSize()};
+    SizeVector b_dims{referee->bSize()};
+    w_blob = make_shared_blob<float>({Precision::FP32, w_dims, TensorDesc::getLayoutByDims(w_dims)}, ptr);
+    b_blob = make_shared_blob<float>({Precision::FP32, b_dims, TensorDesc::getLayoutByDims(b_dims)},
+                                     ptr + referee->wSize());
+}
+
+string RNNGen::model() {
+    auto net_b = CommonTestUtils::V2NetBuilder::buildNetworkWithOneInput("RNN_Net", id_dim, "FP32");
+    for (int i = 0; i < state_num; i++)
+        net_b.addInputLayer("FP32", st_dim);
+
+    if (mode == DYN_SEQ)
+        net_b.addInputLayer("FP32", seq_l_dim);
+
+    if (mode == CELL)
+        add_CELL(net_b);
+    else if (mode == SEQ || mode == DYN_SEQ)
+        add_SEQ(net_b);
+    else {
+        add_TI(net_b);
+    }
+
+    size_t num_input = 1 + state_num + (mode == DYN_SEQ ? 1 : 0);
+    vector<pair<string, string>> edges;
+
+    switch (num_input) {
+        case 4:
+            edges = {
+                    {"0,0", "4,4"},
+                    {"1,1", "4,5"},
+                    {"2,2", "4,6"},
+                    {"3,3", "4,7"},
+            };
+            break;
+        case 3:
+            edges = {
+                    {"0,0", "3,3"},
+                    {"1,1", "3,4"},
+                    {"2,2", "3,5"},
+            };
+            break;
+        case 2:
+            edges = {
+                    {"0,0", "2,2"},
+                    {"1,1", "2,3"},
+            };
+            break;
+    }
+    return net_b.finish(&edges);
+}
+
+static const std::string cell_type(Cell cell) {
+    return cell == LSTM ? "LSTM" :
+           cell == GRU ? "GRU" :
+           cell == GRU_lbr ? "GRU" :
+           cell == RNN ? "RNN" : "Unknown";
+}
+
+static const std::string cell_layer_type(CellDesc cell) {
+    return cell_type(cell.type) + "Cell";
+}
+
+map<string, string> RNNGen::basic_cell_attr() {
+    map<string, string> attr{};
+
+    // Prepare activations attributes
+    string algs, alpha, beta;
+    for (auto &act : cell.acts) {
+        algs += act.alg + ',';
+        alpha += std::to_string(act.alpha) + ',';
+        beta += std::to_string(act.beta) + ',';
+    }
+    algs.pop_back(); // remove last comma
+    alpha.pop_back();
+    beta.pop_back();
+
+    attr["activations"] = algs;
+    attr["activations_alpha"] = alpha;
+    attr["activations_beta"] = beta;
+
+    attr["clip"] = std::to_string(cell.clip);
+    attr["hidden_size"] = std::to_string(S);
+
+    if (cell.type == GRU_lbr)
+        attr["linear_before_reset"] = std::to_string(true);
+
+    return attr;
+}
+
+void RNNGen::add_TI(CommonTestUtils::V2NetBuilder &builder) {
+    /// Generate TI body
+    Shape id_ti = id_dim;
+    Shape od_ti = od_dim;
+    id_ti[axis] = 1;
+    od_ti[axis] = 1;
+
+    std::map<std::string, std::string>
+            cell_attr = basic_cell_attr(),
+            rsh1_attr{{"dim", "-1," + std::to_string(D)}},
+            rsh2_attr{{"dim", (axis == 1 ? "-1,1," : "1,-1,") + std::to_string(S)}},
+            negt_attr{{"scale", "-1"},
+                      {"shift", "0"},
+                      {"power", "1"}};
+
+    CommonTestUtils::InOutShapes cell_inout{{{N, D}},
+                                              {}};
+    for (int i = 0; i < state_num; i++) {
+        cell_inout.inDims.push_back({N, S});
+        cell_inout.outDims.push_back({N, S});
+    }
+
+    auto body_builder = CommonTestUtils::V2NetBuilder::buildBody();
+    body_builder.addLayer("Reshape", "FP32", &rsh1_attr, {{id_ti},
+                                                          {{N, D}}});
+    body_builder.addLayer(cell_layer_type(cell), "FP32", &cell_attr, cell_inout, wSzB, bSzB);
+    body_builder.addLayer("Reshape", "FP32", &rsh2_attr, {{{N, S}},
+                                                          {od_ti}});
+    if (neg)
+        body_builder.addLayer("Power", "FP32", &negt_attr, {{od_ti},
+                                                            {od_ti}});
+
+    // body edges
+    int last_l = 2, last_p = 6;
+    vector<pair<string, string>> body_edges{
+            {"0,1", "1,2"},
+            {"1,4", "2,5"}};
+
+    if (state_num == 2) {
+        body_edges[1] = {"1,5", "2,7"};
+        last_p += 2;
+    }
+
+    if (neg) {
+        using std::to_string;
+        body_edges.push_back({to_string(last_l) + ',' + to_string(last_p),
+                              to_string(last_l + 1) + ',' + to_string(last_p + 1)});
+        last_l += 1;
+        last_p += 2;
+    }
+
+    auto body = body_builder.finish(&body_edges);
+    /// body is generated
+
+    bool fwd = (dir == FWD);
+
+    int st = fwd ? 1 : -1;
+    int bgn = fwd ? 0 : -1;
+    int end = fwd ? -1 : 0;
+
+    CommonTestUtils::InOutShapes ti_inout{{id_dim},
+                                            {od_dim}};
+    for (int i = 0; i < state_num; i++) {
+        ti_inout.inDims.push_back({N, S});
+        ti_inout.outDims.push_back({N, S});
+    }
+
+    const int ll = last_l, lp = last_p;
+    if (state_num == 2) {
+        builder.TILayer(ti_inout, body,
+                /* frm_l | frm_p | to_l | to_p | axis | step | start | end */
+                        {{3, 3, 0, 0, axis, st, bgn, end},
+                         {3, 4, 1, 3, -1},
+                         {3, 5, 1, 4, -1}},
+                        {{3, 6, ll, lp, axis, st, bgn, end},
+                         {3, 7, 1,  5,  -1},
+                         {3, 8, 1,  6,  -1}},
+                        {{1, 5, 1, 3},
+                         {1, 6, 1, 4}});
+    } else {
+        builder.TILayer(ti_inout, body,
+                /* frm_l | frm_p | to_l | to_p | axis | step | start | end */
+                        {{2, 2, 0, 0, axis, st, bgn, end},
+                         {2, 3, 1, 3, -1}},
+                        {{2, 4, ll, lp, axis, st, bgn, end},
+                         {2, 5, 1,  4,  -1}},
+                        {{1, 4, 1, 3}});
+    }
+}
+
+void RNNGen::add_SEQ(CommonTestUtils::V2NetBuilder &builder) {
+    map<string, string> seq_attr = basic_cell_attr();
+
+    string direction = dir == FWD ? "Forward" :
+                       dir == BWD ? "Backward" :
+                       dir == BDR ? "Bidirectional" :
+                       "Unknown";
+
+    seq_attr["direction"] = direction;
+    seq_attr["axis"] = std::to_string(axis);
+
+    CommonTestUtils::InOutShapes inout{{id_dim},
+                                         {od_dim}};
+    for (int i = 0; i < state_num; i++) {
+        inout.inDims.push_back({N, S});
+        inout.outDims.push_back({N, S});
+    }
+
+    if (mode == DYN_SEQ) {
+        inout.inDims.push_back(seq_l_dim);
+    }
+
+    auto seq_type = cell_type(cell.type) + "Sequence";
+    builder.addLayer(seq_type, "FP32", &seq_attr, inout, wSzB, bSzB);
+}
+
+void RNNGen::add_CELL(CommonTestUtils::V2NetBuilder &builder) {
+    auto id = Shape{N, D};
+    auto od = Shape{N, S};
+
+    map<string, string> cell_p = {{"hidden_size", std::to_string(S)}};
+    builder.addLayer("LSTMCell", "FP32", &cell_p,
+                     {{id, {N, S}, {N, S}},
+                      {od, {N, S}, {N, S}}},
+                     wSzB, bSzB);
+}
+
+CNNNetwork RNNGen::net() {
+    referee->wFiller(w_blob);
+    referee->bFiller(b_blob);
+
+    Core ie;
+    return ie.ReadNetwork(model(), weights);
+}
+
+const std::vector<Filler> RNNGen::fillers() const {
+    auto fillers = referee->getDataFillers();
+
+    if (dir == BWD)
+        // Reverse seq dim for input and output
+        fillers[0] = reverse(fillers[0], 1);
+
+    if (axis == 0)
+        // Swap N and T dims
+        fillers[0] = permute(fillers[0], {1, 0, 2});
+
+    // filler for sequence length tensor
+    if (mode == DYN_SEQ) {
+        using namespace std::placeholders;
+        fillers.push_back(std::bind(scalar_filler, _1, SizeVector{N}, T - 1));
+
+        auto zero_shape = id_dim;
+        zero_shape[axis] = 1;
+        Filler zero_filler(std::bind(scalar_filler, _1, zero_shape, 0.0f));
+
+        fillers[0] = concat(fillers[0], zero_filler, axis);
+    }
+    return fillers;
+}
+
+const std::vector<Checker> RNNGen::checkers() const {
+    auto checkers = referee->getDataChecker();
+
+    if (mode == TI_CSTM)
+        // Negative data blob checker. Customization is negative Power layer at the end of TI body
+        checkers[0] = negative(checkers[0]);
+
+    if (dir == BWD)
+        // Reverse seq dim for input and output
+        checkers[0] = reverse(checkers[0], 1);
+
+    if (axis == 0)
+        // Swap N and T dims
+        checkers[0] = permute(checkers[0], {1, 0, 2});
+
+    if (mode == DYN_SEQ) {
+        using namespace std::placeholders;
+
+        auto zero_shape = od_dim;
+        zero_shape[axis] = 1;
+        Checker zero_checker(std::bind(scalar_checker, _1, zero_shape, 0.0f));
+
+        checkers[0] = concat(checkers[0], zero_checker, axis);
+    }
+
+    return checkers;
+}
+
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_gen.hpp b/inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_gen.hpp
new file mode 100644 (file)
index 0000000..346921d
--- /dev/null
@@ -0,0 +1,75 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "common_test_utils/xml_net_builder/xml_net_builder.hpp"
+#include "rnn_referee.hpp"
+#include <cpp/ie_cnn_network.h>
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+enum Mode {
+    CELL,       /**< single LSTMCell layer */
+    SEQ,        /**< single LSTMSeq layer */
+    DYN_SEQ,    /**< single LSTMSeq layer with a sequence-length input */
+    TI,         /**< TI layer with LSTM body */
+    TI_CSTM     /**< TI layer with LSTM plus a negating Power layer in the body */
+};
+
+enum Direction {
+    FWD,        /**< Forward. With stride 1 */
+    BWD,        /**< Backward. With stride -1 */
+    BDR         /**< Bidirectional. With strides 1 and -1 */
+};
+
+/**
+ *  Topology generator for some RNN specific cases
+ */
+class RNNGen {
+public:
+    /** Sequence topology */
+    RNNGen(size_t batch, size_t seq, CellDesc cell, Mode mode, Direction dir, int axis);
+
+    const std::vector<Filler> fillers() const;
+    const std::vector<Checker> checkers() const;
+
+    InferenceEngine::CNNNetwork net();
+
+private:
+    const size_t D = 10;  // Data size
+    const size_t S = 5;   // State size
+    const size_t G = 4;   // Number of gates
+
+    const size_t N;  // Batch
+    const size_t T;  // Sequence
+    const int axis;  // Axis of sequence
+
+    const Mode mode;
+    const CellDesc cell;
+    const Direction dir;
+    const bool neg;
+
+    size_t state_num = 0;
+
+    size_t wSzB = 0;
+    size_t bSzB = 0;
+
+    InferenceEngine::SizeVector seq_l_dim, st_dim, id_dim, od_dim;
+
+    InferenceEngine::TBlob<uint8_t>::Ptr weights;
+    InferenceEngine::Blob::Ptr w_blob, b_blob;
+
+    std::shared_ptr<RNN_Referee> referee;
+
+private:
+    std::string model();
+
+    void add_TI(CommonTestUtils::V2NetBuilder &builder);
+    void add_SEQ(CommonTestUtils::V2NetBuilder &builder);
+    void add_CELL(CommonTestUtils::V2NetBuilder &builder);
+
+    std::map<std::string, std::string> basic_cell_attr();
+};
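+
+// Construction sketch (values are illustrative): batch 2, sequence 3, the
+// default LSTM cell expressed as a single sequence layer, forward, with the
+// time dimension on axis 1:
+//
+//   CellDesc lstm{LSTM, {{"sigmoid",0,0}, {"tanh",0,0}, {"tanh",0,0}}, 0.f};
+//   RNNGen gen(2, 3, lstm, SEQ, FWD, 1);
+//   InferenceEngine::CNNNetwork net = gen.net();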
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_referee.cpp b/inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_referee.cpp
new file mode 100644 (file)
index 0000000..f1212ea
--- /dev/null
@@ -0,0 +1,290 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "rnn_referee.hpp"
+
+#include <algorithm>
+#include <cmath>
+#include <functional>
+#include <string>
+#include <vector>
+
+using namespace InferenceEngine;
+using namespace std::placeholders;
+using std::vector;
+
+class RNN_ReferBase : public RNN_Referee {
+protected:
+    RNN_ReferBase(float clip, size_t D, size_t S, size_t G, size_t Gb, size_t ST_N)
+            : D(D), S(S), G(G), Gb(Gb), state_num(ST_N), clip(clip) {}
+
+    const size_t D, S, G, Gb;
+    const size_t state_num;
+    const float clip;
+
+    vector<float> W, B;
+
+    vector<Filler> _d_filler;
+    vector<Checker> _d_checker;
+
+    const vector<Filler>& getDataFillers()  override { return _d_filler;  }
+    const vector<Checker>& getDataChecker() override { return _d_checker; }
+    size_t wSize()                          override { return G*S*(S+D);  }
+    size_t bSize()                          override { return Gb*S;        }
+    size_t stateNum()                       override { return state_num;  }
+
+    using Act = std::function<float(const float)>;
+
+    static float _clip (const float x, const float clip) {
+        return std::min(std::max(x, -clip), clip);
+    }
+
+    static Act clip_before(Act act, const float clip) {
+        return [=] (const float x) {
+            return act(_clip(x, clip));
+        };
+    }
+
+    Act act(ActivationDesc act) {
+        float alpha = act.alpha;
+        Act res;
+        if (act.alg == "sigmoid")
+            res = [=] (const float x) { return 1 / (1 + std::exp(-x)); };
+        else if (act.alg == "tanh")
+            res = [=] (const float x) { return std::tanh(x); };
+        else if (act.alg == "relu")
+            res = [=] (const float x) { return (x > 0) ? x : alpha*x; };
+        else
+            THROW_IE_EXCEPTION << "Unknown activation type " << act.alg;
+        return res;
+    }
+
+
+public:
+    void wFiller(Blob::Ptr blob) override {
+        IE_ASSERT(blob->size() == wSize());
+        auto ptr = blob->buffer().as<float*>();
+
+        for (int g = 0; g < G; g++)
+        for (int s = 0; s < S; s++) {
+            for (int i = 0; i < D; i++) *ptr++ = W[g] / D;
+            for (int i = 0; i < S; i++) *ptr++ = W[g] / S;
+        }
+    }
+
+    void bFiller(Blob::Ptr blob) override {
+        IE_ASSERT(blob->size() == bSize());
+        auto ptr = blob->buffer().as<float*>();
+
+        for (int g = 0; g < Gb; g++)
+        for (int s = 0; s < S; s++) *ptr++ = B[g];
+    }
+};
+
+#define Vals(_name) std::vector<float> _name(T+1)
+
+class LSTMCell_Refer : public RNN_ReferBase {
+public:
+    LSTMCell_Refer(CellDesc cell, size_t N, size_t T, size_t D, size_t S) : RNN_ReferBase(cell.clip, D, S, 4, 4, 2) {
+        // Some random values in range [0,1]
+        const float H0 = 0.3, C0 = 0.77;
+
+        const float Wf = 0.1, Bf = 0.35;
+        const float Wi = 0.2, Bi = 0.25;
+        const float Wc = 0.5, Bc = 0.15;
+        const float Wo = 0.7, Bo = 0.05;
+
+        auto _f = act(cell.acts[0]);
+        auto _g = act(cell.acts[1]);
+        auto _h = act(cell.acts[2]);
+
+        if (clip > 0.0f) {
+            _f = clip_before(_f, clip);
+            _g = clip_before(_g, clip);
+        }
+
+        Vals(f); Vals(i); Vals(c); Vals(o);
+        Vals(X); Vals(H); Vals(C);
+
+        H[0] = H0;
+        C[0] = C0;
+
+        for (int t = 1; t < T+1; t++) {
+            X[t] = t;
+            f[t] = _f(Wf*(H[t-1] + X[t]) + Bf);
+            i[t] = _f(Wi*(H[t-1] + X[t]) + Bi);
+            c[t] = _g(Wc*(H[t-1] + X[t]) + Bc);
+            o[t] = _f(Wo*(H[t-1] + X[t]) + Bo);
+
+            C[t] = f[t] * C[t-1] + i[t] * c[t];
+            H[t] = o[t] * _h(C[t]);
+        }
+
+        W = {Wf, Wi, Wc, Wo};
+        B = {Bf, Bi, Bc, Bo};
+
+        X.erase(X.begin());  // remove first element (unused zero element)
+        H.erase(H.begin());
+        C.erase(C.begin());
+
+        _d_filler.resize(3);
+        _d_filler[0] = std::bind(vector_filler, _1, SizeVector {N,T,D}, X, 1);
+        _d_filler[1] = std::bind(scalar_filler, _1, SizeVector {N,S}, H0);
+        _d_filler[2] = std::bind(scalar_filler, _1, SizeVector {N,S}, C0);
+
+        _d_checker.resize(3);
+        _d_checker[0] = std::bind(vector_checker, _1, SizeVector {N,T,S}, H, 1);
+        _d_checker[1] = std::bind(scalar_checker, _1, SizeVector {N,S}  , H[T-1]);
+        _d_checker[2] = std::bind(scalar_checker, _1, SizeVector {N,S}  , C[T-1]);
+    }
+};
+
+class GRUCell_Refer : public RNN_ReferBase {
+public:
+    GRUCell_Refer(CellDesc cell, size_t N, size_t T, size_t D, size_t S) : RNN_ReferBase(cell.clip, D, S, 3, 3, 1) {
+        // Some random values in range [0,1]
+        const float H0 = 0.3;
+
+        const float Wz = 0.1, Bz = 0.35;
+        const float Wr = 0.2, Br = 0.25;
+        const float Wh = 0.5, Bh = 0.15;
+
+        auto _f = act(cell.acts[0]);
+        auto _g = act(cell.acts[1]);
+
+        if (clip > 0.0f) {
+            _f = clip_before(_f, clip);
+            _g = clip_before(_g, clip);
+        }
+
+        Vals(z); Vals(r); Vals(h);
+        Vals(X); Vals(H);
+
+        H[0] = H0;
+
+        for (int t = 1; t < T+1; t++) {
+            X[t] = t;
+            z[t] = _f(Wz*(H[t-1] + X[t]) + Bz);
+            r[t] = _f(Wr*(H[t-1] + X[t]) + Br);
+            h[t] = _g(Wh*(H[t-1]*r[t] + X[t]) + Bh);
+            H[t] = (1 - z[t])*h[t] + z[t]*H[t-1];
+        }
+
+        W = {Wz, Wr, Wh};
+        B = {Bz, Br, Bh};
+
+        X.erase(X.begin());
+        H.erase(H.begin());
+
+        _d_filler.resize(2);
+        _d_filler[0] = std::bind(vector_filler, _1, SizeVector {N,T,D}, X, 1);
+        _d_filler[1] = std::bind(scalar_filler, _1, SizeVector {N,S}  , H0);
+
+        _d_checker.resize(2);
+        _d_checker[0] = std::bind(vector_checker, _1, SizeVector {N,T,S}, H, 1);
+        _d_checker[1] = std::bind(scalar_checker, _1, SizeVector {N,S}  , H[T-1]);
+    }
+};
+
+
+class GRUlbrCell_Refer : public RNN_ReferBase {
+public:
+    GRUlbrCell_Refer(CellDesc cell, size_t N, size_t T, size_t D, size_t S) : RNN_ReferBase(cell.clip, D, S, 3, 4, 1) {
+        // Some random values in range [0,1]
+        const float H0 = 0.3;
+
+        const float Wz = 0.1, Bz = 0.35;
+        const float Wr = 0.2, Br = 0.25;
+        const float Wh = 0.5, Bh = 0.15, Bhr = 0.33;
+
+        auto _f = act(cell.acts[0]);
+        auto _g = act(cell.acts[1]);
+
+        if (clip > 0.0f) {
+            _f = clip_before(_f, clip);
+            _g = clip_before(_g, clip);
+        }
+
+        Vals(z); Vals(r); Vals(h);
+        Vals(X); Vals(H);
+
+        H[0] = H0;
+
+        for (int t = 1; t < T+1; t++) {
+            X[t] = 0.1 * t;
+            z[t] = _f(Wz*(H[t-1] + X[t]) + Bz);
+            r[t] = _f(Wr*(H[t-1] + X[t]) + Br);
+            h[t] = _g(Wh*X[t] + r[t]*(Wh*H[t-1] + Bhr) + Bh);
+            H[t] = (1 - z[t])*h[t] + z[t]*H[t-1];
+        }
+
+        W = {Wz, Wr, Wh};
+        B = {Bz, Br, Bh, Bhr};
+
+        X.erase(X.begin());
+        H.erase(H.begin());
+
+        _d_filler.resize(2);
+        _d_filler[0] = std::bind(vector_filler, _1, SizeVector {N,T,D}, X, 1);
+        _d_filler[1] = std::bind(scalar_filler, _1, SizeVector {N,S}  , H0);
+
+        _d_checker.resize(2);
+        _d_checker[0] = std::bind(vector_checker, _1, SizeVector {N,T,S}, H, 1);
+        _d_checker[1] = std::bind(scalar_checker, _1, SizeVector {N,S}  , H[T-1]);
+    }
+};
+
+class RNNCell_Refer : public RNN_ReferBase {
+public:
+    RNNCell_Refer(CellDesc cell, size_t N, size_t T, size_t D, size_t S) : RNN_ReferBase(cell.clip, D, S, 1, 1, 1) {
+        // Some random values in range [0,1]
+        const float H0 = 0.3;
+
+        const float Wh = 0.5, Bh = 0.15;
+
+        auto _f = act(cell.acts[0]);
+        if (clip > 0.0f)
+            _f = clip_before(_f, clip);
+
+        Vals(X); Vals(H);
+
+        H[0] = H0;
+
+        for (int t = 1; t < T+1; t++) {
+            X[t] = t;
+            H[t] = _f(Wh*(H[t-1] +  X[t]) + Bh);
+        }
+
+        W = {Wh};
+        B = {Bh};
+
+        X.erase(X.begin());
+        H.erase(H.begin());
+
+        _d_filler.resize(2);
+        _d_filler[0] = std::bind(vector_filler, _1, SizeVector {N,T,D}, X, 1);
+        _d_filler[1] = std::bind(scalar_filler, _1, SizeVector {N,S}  , H0);
+
+        _d_checker.resize(2);
+        _d_checker[0] = std::bind(vector_checker, _1, SizeVector {N,T,S}, H, 1);
+        _d_checker[1] = std::bind(scalar_checker, _1, SizeVector {N,S}  , H[T-1]);
+    }
+};
+
+std::shared_ptr<RNN_Referee> RNN_Referee::create_referee(CellDesc cell, size_t N, size_t T, size_t D, size_t S) {
+    std::shared_ptr<RNN_Referee> res;
+    switch (cell.type) {
+        case LSTM:
+            res = std::shared_ptr<RNN_Referee>(new LSTMCell_Refer(cell, N, T, D, S));
+            break;
+        case GRU:
+            res = std::shared_ptr<RNN_Referee>(new GRUCell_Refer(cell, N, T, D, S));
+            break;
+        case GRU_lbr:
+            res = std::shared_ptr<RNN_Referee>(new GRUlbrCell_Refer(cell, N, T, D, S));
+            break;
+        case RNN:
+            res = std::shared_ptr<RNN_Referee>(new RNNCell_Refer(cell, N, T, D, S));
+            break;
+    }
+    return res;
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_referee.hpp b/inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_referee.hpp
new file mode 100644 (file)
index 0000000..af1d9d3
--- /dev/null
@@ -0,0 +1,58 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "ie_blob.h"
+#include "rnn_util.hpp"
+#include <memory>
+#include <string>
+#include <vector>
+
+enum Cell {
+    LSTM,    /**< Vanilla LSTMCell */
+    GRU,     /**< Vanilla GRUCell */
+    RNN,     /**< Vanilla RNNCell */
+    GRU_lbr  /**< GRUCell with linear_before_reset */
+};
+
+/**
+ * Descriptor of an activation function
+ * alg : activation name ("sigmoid", "tanh" or "relu")
+ * alpha, beta : optional parameters
+ */
+struct ActivationDesc {
+    std::string alg;
+    float alpha;
+    float beta;
+};
+using ActivationSet = std::vector<ActivationDesc>;
+
+/**
+ * Descriptor of general RNN cell
+ */
+struct CellDesc {
+    Cell type;                 /**< Type of RNN cell */
+    ActivationSet acts;        /**< Activation algorithms */
+    float clip;                /**< Clip value. 0 - no clipping */
+};
+
+/**
+ * Reference scoring for some RNN cells.
+ * Provides weight/bias fillers, input data fillers and output data checkers
+ */
+class RNN_Referee {
+public:
+    static std::shared_ptr<RNN_Referee> create_referee(CellDesc cell, size_t N, size_t T, size_t D, size_t S);
+    virtual ~RNN_Referee() = default;
+
+    virtual void wFiller(InferenceEngine::Blob::Ptr) = 0;
+    virtual void bFiller(InferenceEngine::Blob::Ptr) = 0;
+
+    virtual size_t wSize() = 0;
+    virtual size_t bSize() = 0;
+
+    virtual size_t stateNum() = 0;
+
+    virtual const std::vector<Filler>& getDataFillers() = 0;
+    virtual const std::vector<Checker>& getDataChecker() = 0;
+};
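+
+// Usage sketch (sizes are illustrative): obtain a referee for the default
+// LSTM cell and query the expected weight/bias element counts:
+//
+//   CellDesc lstm{LSTM, {{"sigmoid",0,0}, {"tanh",0,0}, {"tanh",0,0}}, 0.f};
+//   auto ref = RNN_Referee::create_referee(lstm, /*N=*/2, /*T=*/3, /*D=*/10, /*S=*/5);
+//   size_t w_count = ref->wSize();  // G*S*(S+D) floats; G == 4 gates for LSTM
+//   size_t b_count = ref->bSize();  // Gb*S floats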
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_seq_test.hpp b/inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_seq_test.hpp
new file mode 100644 (file)
index 0000000..dd4bfd2
--- /dev/null
@@ -0,0 +1,185 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "rnn_gen.hpp"
+#include "plg_test.hpp"
+
+#include <cmath>
+#include <vector>
+#include <string>
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using std::map;
+using std::pair;
+using std::vector;
+using std::string;
+
+enum Reshape {
+    RESH_NO = 0, /**< No reshape step */
+    RESH_B  = 1, /**< Reshape for batch dim */
+    RESH_T  = 2, /**< Reshape for time dim */
+    RESH_BT = 3  /**< Reshape for both batch and time dims */
+};
+
+using rnn_param = std::tuple<
+    CellDesc,    /**< cell  - Descriptor of RNN cell */
+    float,       /**< clip  - Clip value */
+    Direction,   /**< fwd   - Direction */
+    Mode,        /**< mode  - Modes of LSTM representation */
+    size_t,      /**< N     - Batch size */
+    size_t,      /**< T     - Sequence length */
+    size_t,      /**< axis  - Dimension with T */
+    Reshape      /**< shape - Apply reshape: +1 to the original dims */
+>;
+
+const Named<rnn_param> test_name( [] (const rnn_param &p) {
+    CellDesc _cell; Direction _dir; Mode _mode; Reshape _resh;
+    size_t _N, _S, _axis;
+    float _clip;
+
+    std::tie(_cell,_clip,_dir,_mode,_N,_S,_axis,_resh) = p;
+
+    string res = _cell.type == LSTM    ? "LSTM_" :
+                 _cell.type == GRU     ? "GRU__" :
+                 _cell.type == GRU_lbr ? "GRUl_" : "RNN__";
+    for (auto &act : _cell.acts) res += act.alg[0];
+
+    res += _dir == FWD ? "_FWD" : _dir == BWD ? "_BWD" : _dir == BDR ? "_BDR" : "_XXX";
+    res += _mode == SEQ ? "_SEQ" : _mode == TI ? "__TI" : _mode == TI_CSTM ? "_TIX" : "_XXX";
+    res += (_clip == 0.0f) ? "_c0" : "_cX";
+    res += "_b" + std::to_string(_N);
+    res += "_s" + std::to_string(_S);
+    res += "_axis" + std::to_string(_axis);
+
+    res += _resh == RESH_NO ? "_reshNO" :
+           _resh == RESH_B  ? "__reshB" :
+           _resh == RESH_T  ? "__reshT" :
+           _resh == RESH_BT ? "_reshBT" : "_X";
+    return res;
+});
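+
+// For example, the default LSTM cell run forward as a sequence layer with
+// clip 0, batch 2, sequence length 3, axis 1 and no reshape is printed as
+// "LSTM_stt_FWD_SEQ_c0_b2_s3_axis1_reshNO".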
+
+using RNNSeqTest = PlgTest<rnn_param>;
+
+TEST_P(RNNSeqTest, SingleRNN) {
+    auto p = param();
+
+    auto cell = std::get<0>(p);
+    auto clip = std::get<1>(p);
+    auto dir  = std::get<2>(p);
+    auto mode = std::get<3>(p);
+    auto N    = std::get<4>(p);
+    auto T    = std::get<5>(p);
+    auto axis = std::get<6>(p);
+    auto resh = std::get<7>(p);
+
+    if (device_name == "GPU" && cell.type != LSTM)
+        SKIP();
+
+    cell.clip = clip;
+
+    /************ Test Body  *****************************/
+
+    RNNGen topology(N, T, cell, mode, dir, axis);
+    auto net = topology.net();
+    auto fillers = topology.fillers();
+    auto checkers = topology.checkers();
+
+    // Reshape if requested
+    if (resh != RESH_NO) {
+        const bool resh_b = resh & RESH_B;
+        const bool resh_t = resh & RESH_T;
+
+        auto shapes = net.getInputShapes();
+        for (auto &pair : shapes) {
+            // Blobs with data
+            if (pair.second.size() == 3) {
+                if (resh_b) pair.second[(axis+1)%2]++;
+                if (resh_t) pair.second[axis]++;
+            }
+            // Blobs with state or Seq Len
+            if (pair.second.size() == 1 || pair.second.size() == 2) {
+                if (resh_b) pair.second[0]++;
+            }
+        }
+        net.reshape(shapes);
+
+        // Also need new fillers/checkers for new shapes
+        RNNGen resh_topology(resh_b ? N+1 : N, resh_t ? T+1 : T, cell, mode, dir, axis);
+        fillers = resh_topology.fillers();
+        checkers = resh_topology.checkers();
+    }
+
+    Core ie;
+    auto execNet = ie.LoadNetwork(net, device_name);
+    auto req = execNet.CreateInferRequest();
+
+    ASSERT_TRUE(net.getInputsInfo().size() == fillers.size());
+    ASSERT_TRUE(net.getOutputsInfo().size() == checkers.size());
+
+    int i = 0;
+    for (auto &info : net.getInputsInfo())
+        fillers[i++](req.GetBlob(info.first));
+
+    req.Infer();
+
+    i = 0;
+    for (auto &info : net.getOutputsInfo())
+        EXPECT_TRUE(checkers[i++](req.GetBlob(info.first))) << "Error with #" << i << " output";
+}
+
+const std::vector<CellDesc> cells = {
+  /** LSTM modifications */
+  {LSTM, {{"sigmoid",0,0}, {"tanh",0,0}, {"tanh",0,0}} }, // default
+  {LSTM, {{"tanh",0,0}, {"sigmoid",0,0}, {"relu",0,0}} },
+  /** GRU modifications */
+  {GRU , {{"sigmoid",0,0}, {"tanh",0,0}} }, // default
+  {GRU , {{"tanh",0,0}, {"relu",0,0}} },
+  /** GRU linear_before_reset modifications */
+  {GRU_lbr , {{"sigmoid",0,0}, {"tanh",0,0}} }, // default
+  {GRU_lbr , {{"tanh",0,0}, {"relu",0,0}} },
+  /** RNN modifications */
+  {RNN , {{"tanh",0,0}} },   // default
+  {RNN , {{"sigmoid",0,0}} },
+  {RNN , {{"relu",0,0}} },
+};
+
+#if 0
+// All combinations of the following parameters
+const auto workload = Combine(
+    ValuesIn(cells),          // Cell desc
+    Values(0.0f, 0.7f),       // Clip arg
+    Values(FWD, BWD),         // Direction
+    Values(SEQ, DYN_SEQ,      // Representation mode
+           TI, TI_CSTM),      //
+    Values(1, 3),             // Batch
+    Values(3),                // Sequence size
+    Values(0, 1),             // Axis of sequence
+    Values(RESH_NO, RESH_B,   // Reshape mode for batch, sequence or both
+           RESH_T, RESH_BT)   //
+);
+#else
+// Combinations of the following parameters (small subset for fast CI testing)
+const auto workload = Combine(
+        ValuesIn(cells.begin(),     // Cell desc (only first 7)
+                 cells.begin()+7),  //
+        Values(0.0f, 0.7f),         // Clip arg
+        Values(FWD, BWD),           // Direction
+        Values(SEQ, TI),            // Representation mode
+        Values(2),                  // Batch
+        Values(3),                  // Sequence size
+        Values(0, 1),               // Axis of sequence
+        Values(RESH_NO, RESH_B)     // Reshape mode for batch, sequence or both
+);
+#endif
+
+// Combinations of the following parameters (small subset for fast CI testing)
+const auto dyn_seq_workload = Combine(
+        ValuesIn(std::vector<CellDesc> {cells[0], cells[2], cells[4], cells[6]}),
+        Values(0.0f),               // Clip arg
+        Values(FWD, BWD, BDR),      // Direction
+        Values(DYN_SEQ),            // Representation mode
+        Values(1, 8),               // Batch
+        Values(3, 100),             // Sequence size
+        Values(0, 1),               // Axis of sequence
+        Values(RESH_NO, RESH_B)     // Reshape mode for batch, sequence or both
+);
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_util.cpp b/inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_util.cpp
new file mode 100644 (file)
index 0000000..5db473a
--- /dev/null
@@ -0,0 +1,309 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "rnn_util.hpp"
+
+#include <string>
+#include <cmath>
+
+using namespace InferenceEngine;
+
+#define T_LOOP_RANK 5
+
+/**
+ * @brief Iterate through tensor values and apply an action to each
+ * element.
+ *
+ * The signature of the action is: (data_t &x, int *i) -> void
+ *      x - reference to the tensor element
+ *      i - array of logical indexes
+ *
+ * @tparam T action functor type. Generally a lambda
+ * @param blob blob to iterate through
+ * @param act functor to apply to each value in the tensor
+ */
+template <typename T>
+void T_LOOP(Blob::Ptr &blob, const T &act) {
+
+    const auto &td = blob->getTensorDesc();
+    const auto &dims = td.getDims();
+
+    const auto &blk_d = td.getBlockingDesc();
+    const auto &strides = blk_d.getStrides();
+
+    int D[] = {1, 1, 1, 1, 1};
+    std::copy(dims.begin(), dims.end(), std::end(D) - dims.size() );
+
+    int i[] = {0, 0, 0, 0, 0};
+    int &i0 = i[0], &i1 = i[1], &i2 = i[2], &i3 = i[3], &i4 = i[4];
+
+    int s[] = {0, 0, 0, 0, 0};
+    std::copy(strides.begin(), strides.end(), std::end(s) - dims.size());
+    int &s0 = s[0], &s1 = s[1], &s2 = s[2], &s3 = s[3], &s4 = s[4];
+
+    size_t off_ = blk_d.getOffsetPadding();
+
+    auto *ptr = blob->buffer().as<float*>();
+
+    for (i0 = 0; i0 < D[0]; i0++) { auto off0 = off_ + i0 * s0;
+    for (i1 = 0; i1 < D[1]; i1++) { auto off1 = off0 + i1 * s1;
+    for (i2 = 0; i2 < D[2]; i2++) { auto off2 = off1 + i2 * s2;
+    for (i3 = 0; i3 < D[3]; i3++) { auto off3 = off2 + i3 * s3;
+    for (i4 = 0; i4 < D[4]; i4++) { auto off4 = off3 + i4 * s4; auto &off = off4;
+        act(ptr[off], i);
+    }}}}}
+}
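+
+// Example (illustrative): zero out a blob while observing the logical index
+// array passed to the action:
+//
+//   T_LOOP(blob, [&](float &x, const int *i) {
+//       x = 0.0f;  // i[0]..i[4] are the logical indexes, minor dims last
+//   });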
+
+Checker negative(Checker checker) {
+    return [=] (Blob::Ptr blob) -> bool {
+        auto dims = blob->getTensorDesc().getDims();
+        auto layout = blob->getTensorDesc().getLayout();
+        auto new_blob = make_shared_blob<float>({Precision::FP32, dims, layout});
+        new_blob->allocate();
+
+        float *new_blob_ptr = new_blob->buffer().as<float*>();
+        float *blob_ptr = blob->buffer().as<float*>();
+        int size = blob->size();
+        for (int i = 0; i < size; i++)
+            *new_blob_ptr++ = -(*blob_ptr++);
+
+        return checker(new_blob);
+    };
+}
+
+static void copy_with_reverse(Blob::Ptr &src, Blob::Ptr &dst, int axis) {
+    IE_ASSERT(src->getTensorDesc().getDims() == dst->getTensorDesc().getDims());
+
+    const auto &td = src->getTensorDesc();
+    const auto &dims = td.getDims();
+
+    const auto &blk_d = td.getBlockingDesc();
+    const auto &strides = blk_d.getStrides();
+
+    int D[] = {1, 1, 1, 1, 1};
+    std::copy(dims.begin(), dims.end(), std::end(D) - dims.size() );
+
+    int s[] = {0, 0, 0, 0, 0};
+    std::copy(strides.begin(), strides.end(), std::end(s) - dims.size());
+    int &s0 = s[0], &s1 = s[1], &s2 = s[2], &s3 = s[3], &s4 = s[4];
+
+    size_t off_ = blk_d.getOffsetPadding();
+
+    axis += T_LOOP_RANK - dims.size();
+
+    // to iterate through tensor with reversed one dimension we need to
+    // make stride negative and update offset.
+    int reverse_str = s[axis];
+    s[axis] = -reverse_str;
+    off_ += (D[axis] - 1)*reverse_str;
+
+    auto src_off = [=] (const int *i) {
+        return off_ + i[0]*s0 + i[1]*s1 + i[2]*s2 + i[3]*s3 + i[4]*s4;
+    };
+
+    const auto *src_ptr = src->buffer().as<float*>();
+
+    T_LOOP( dst, [&](float &x, const int *i) {
+        x = src_ptr[ src_off(i) ];
+    });
+}
+
+/** Makes a view blob (ROI) over a parent blob. Does not keep the parent blob alive */
+static Blob::Ptr make_view(const Blob::Ptr &src, const SizeVector dims, const SizeVector offsets) {
+    auto src_dims = src->getTensorDesc().getDims();
+    IE_ASSERT(dims.size() == src_dims.size());
+    IE_ASSERT(dims.size() == offsets.size());
+
+    for (size_t i = 0; i < dims.size(); i++)
+        IE_ASSERT(dims[i] + offsets[i] <= src_dims[i]);
+
+    auto desc = src->getTensorDesc();
+    auto b_desc = desc.getBlockingDesc();
+
+    // move T desc to specified offset
+    const auto new_off = desc.offset(offsets);
+    TensorDesc new_desc { desc.getPrecision(), dims,
+                          BlockingDesc { dims,
+                                         b_desc.getOrder(), new_off,
+                                         b_desc.getOffsetPaddingToData(),
+                                         b_desc.getStrides() }
+    };
+
+    // TODO: Only FP32 supported here
+    IE_ASSERT(desc.getPrecision() == Precision::FP32) << "Current limitation. Only FP32 is supported";
+    return make_shared_blob<float>(new_desc, src->buffer());
+}
+
+Checker reverse(const Checker checker, int axis) {
+    return [=] (Blob::Ptr blob) -> bool {
+        auto dims = blob->getTensorDesc().getDims();
+        auto layout = blob->getTensorDesc().getLayout();
+        Blob::Ptr new_blob = make_shared_blob<float>({Precision::FP32, dims, layout});
+        new_blob->allocate();
+
+        copy_with_reverse(blob, new_blob, axis);
+        return checker(new_blob);
+    };
+}
+
+Filler reverse(const Filler filler, int axis) {
+    return [=] (Blob::Ptr blob) {
+        auto dims = blob->getTensorDesc().getDims();
+        auto layout = blob->getTensorDesc().getLayout();
+        Blob::Ptr new_blob = make_shared_blob<float>({Precision::FP32, dims, layout});
+        new_blob->allocate();
+
+        filler(new_blob);
+        copy_with_reverse(new_blob, blob, axis);
+    };
+}
+
+static void copy_with_permute(Blob::Ptr &src, Blob::Ptr &dst, const std::vector<int> order) {
+    IE_ASSERT(order == std::vector<int>({1,0,2}));
+    IE_ASSERT(src->getTensorDesc().getDims().size() == order.size());
+
+    SizeVector prm_dims, dims = src->getTensorDesc().getDims();
+    for (int i : order) prm_dims.push_back(dims[i]);
+
+    IE_ASSERT(prm_dims == dst->getTensorDesc().getDims());
+
+    size_t stride_2 = 1;
+    size_t stride_1 = prm_dims[2] * stride_2;
+    size_t stride_0 = prm_dims[1] * stride_1;
+
+    float *src_ptr = src->buffer().as<float*>();
+    float *dst_ptr = dst->buffer().as<float*>();
+
+    for (int i0 = 0; i0 < dims[0]; i0++)
+    for (int i1 = 0; i1 < dims[1]; i1++)
+    for (int i2 = 0; i2 < dims[2]; i2++)
+        dst_ptr[i1*stride_0 + i0*stride_1 + i2*stride_2] = *src_ptr++;
+}
+
+Filler permute(const Filler filler, const std::vector<int> order) {
+    return [=] (Blob::Ptr blob) {
+        SizeVector perm_dims, dims = blob->getTensorDesc().getDims();
+        for (int i : order) perm_dims.push_back(dims[i]);
+
+        Blob::Ptr new_blob = make_shared_blob<float>({Precision::FP32, perm_dims, blob->getTensorDesc().getLayout()});
+        new_blob->allocate();
+
+        filler(new_blob);
+        copy_with_permute(new_blob, blob, order);
+    };
+}
+
+Checker permute(const Checker checker, const std::vector<int> order) {
+    return [=] (Blob::Ptr blob) -> bool {
+        SizeVector perm_dims, dims = blob->getTensorDesc().getDims();
+        for (int i : order) perm_dims.push_back(dims[i]);
+
+        Blob::Ptr new_blob = make_shared_blob<float>({Precision::FP32, perm_dims, blob->getTensorDesc().getLayout()});
+        new_blob->allocate();
+
+        copy_with_permute(blob, new_blob, order);
+        return checker(new_blob);
+    };
+}
+
+Checker concat(const Checker checker1, const Checker checker2, int axis) {
+    return [=] (Blob::Ptr blob) -> bool {
+        auto dims = blob->getTensorDesc().getDims();
+
+        const size_t split_size = 1;  // counting from end
+
+        SizeVector dims1(dims);
+        SizeVector offs1(dims.size(), 0);
+        dims1[axis] -= split_size;
+
+        SizeVector dims2 = dims;
+        SizeVector offs2(dims.size(), 0);
+        dims2[axis] = split_size;
+        offs2[axis] = dims1[axis];
+
+        auto blob1 = make_view(blob, dims1, offs1);
+        auto blob2 = make_view(blob, dims2, offs2);
+
+        return checker1(blob1) && checker2(blob2);
+    };
+}
+
+Filler concat(const Filler filler1, const Filler filler2, int axis) {
+    return [=] (Blob::Ptr blob) {
+        auto dims = blob->getTensorDesc().getDims();
+
+        const size_t split_size = 1;  // counting from end
+
+        SizeVector dims1(dims);
+        SizeVector offs1(dims.size(), 0);
+        dims1[axis] -= split_size;
+
+        SizeVector dims2 = dims;
+        SizeVector offs2(dims.size(), 0);
+        dims2[axis] = split_size;
+        offs2[axis] = dims1[axis];
+
+        auto blob1 = make_view(blob, dims1, offs1);
+        auto blob2 = make_view(blob, dims2, offs2);
+
+        filler1(blob1);
+        filler2(blob2);
+    };
+}
+
+static inline bool cmp_near(float res, float ref) {
+    constexpr float eps = 1e-4;
+    auto ref_abs = std::abs(ref);
+    if (ref_abs > eps)
+        return std::abs(res-ref)/ref_abs < eps;
+    else
+        return std::abs(res-ref) < eps;
+}
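+
+// For example, cmp_near(1.00005f, 1.0f) holds (relative error 5e-5 < eps),
+// while cmp_near(1.001f, 1.0f) does not (relative error 1e-3).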
+
+bool scalar_checker(Blob::Ptr blob, SizeVector dims, float val) {
+    IE_ASSERT(blob->getTensorDesc().getDims() == dims);
+
+    bool res = true;
+    T_LOOP(blob, [&](float x, int *i) {
+        if (!cmp_near(x, val))
+            res = false;
+    });
+    return res;
+}
+
+bool vector_checker(Blob::Ptr blob, SizeVector dims, std::vector<float> val, int axis) {
+    IE_ASSERT(blob->getTensorDesc().getDims() == dims);
+    IE_ASSERT(dims[axis] == val.size());
+
+    axis += T_LOOP_RANK - dims.size();
+    bool res = true;
+
+    T_LOOP( blob, [&](float &x, int *i) {
+        if (!cmp_near(x, val[ i[axis] ]))
+            res = false;
+    });
+
+    return res;
+}
+
+void scalar_filler (Blob::Ptr blob, SizeVector dims, float val) {
+    IE_ASSERT(blob->getTensorDesc().getDims() == dims);
+
+    T_LOOP( blob, [&](float &x, int *i) {
+        x = val;
+    });
+}
+
+void vector_filler (Blob::Ptr blob, SizeVector dims, std::vector<float> val, int axis) {
+    IE_ASSERT(blob->getTensorDesc().getDims() == dims);
+    IE_ASSERT(dims[axis] == val.size());
+
+    axis += T_LOOP_RANK - dims.size();
+
+    T_LOOP( blob, [&](float &x, int *i) {
+        x = val[ i[axis] ];
+    });
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_util.hpp b/inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_util.hpp
new file mode 100644 (file)
index 0000000..4199ecd
--- /dev/null
@@ -0,0 +1,36 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "ie_blob.h"
+
+/**
+ * Filler section (Blob::Ptr) -> void
+ */
+using Filler = std::function<void(InferenceEngine::Blob::Ptr)>;
+
+/** Filler conversions */
+Filler reverse(const Filler filler, int axis);
+Filler permute(const Filler filler, const std::vector<int> order);
+Filler concat(const Filler filler1, const Filler filler2, int axis);
+
+/** Some helpful fillers. To use with std::bind() */
+void scalar_filler(InferenceEngine::Blob::Ptr blob, InferenceEngine::SizeVector dims, float val);
+void vector_filler(InferenceEngine::Blob::Ptr blob, InferenceEngine::SizeVector dims, std::vector<float> val, int axis);
+
+/**
+ * Checker section (Blob::Ptr) -> bool
+ */
+using Checker = std::function<bool(InferenceEngine::Blob::Ptr)>;
+
+/** Checker conversions */
+Checker negative(const Checker checker);
+Checker reverse(const Checker checker, int axis);
+Checker permute(const Checker checker, const std::vector<int> order);
+Checker concat(const Checker checker1, const Checker checker2, int axis);
+
+/** Some helpful checkers. To use with std::bind() */
+bool scalar_checker (InferenceEngine::Blob::Ptr blob, InferenceEngine::SizeVector dims, float val);
+bool vector_checker (InferenceEngine::Blob::Ptr blob, InferenceEngine::SizeVector dims, std::vector<float> val, int axis);
\ No newline at end of file
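+
+// Composition sketch, mirroring how rnn_gen.cpp builds its fillers
+// (shape values are illustrative):
+//
+//   using namespace std::placeholders;
+//   Filler base = std::bind(scalar_filler, _1, InferenceEngine::SizeVector{2, 3, 10}, 1.0f);
+//   Filler bwd  = reverse(base, 1);          // fill with the sequence dim reversed
+//   Filler swp  = permute(base, {1, 0, 2});  // fill with batch and time swapped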
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/network_tests/network_i8.hpp b/inference-engine/tests_deprecated/functional/shared_tests/network_tests/network_i8.hpp
new file mode 100644 (file)
index 0000000..d6a3b16
--- /dev/null
@@ -0,0 +1,563 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#pragma once
+
+#include <memory>
+#include <unordered_set>
+#include <xml_helper.hpp>
+
+#include <gtest/gtest.h>
+#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
+#include "ie_precision.hpp"
+#include <tests_common.hpp>
+#include <tests_common_func.hpp>
+#include <multi-device/multi_device_config.hpp>
+#include "low_precision_transformations/transformer.hpp"
+#include <regression_tests.hpp>
+#include "common/validation.hpp"
+#include "low_precision_transformations/concat_multi_channels.hpp"
+#include "low_precision_transformations/convolution.hpp"
+#include "low_precision_transformations/fully_connected.hpp"
+#include "low_precision_transformations/eltwise.hpp"
+#include "low_precision_transformations/scaleshift_to_convolution.hpp"
+#include "ie_util_internal.hpp"
+
+#include "cnn_network_ngraph_impl.hpp"
+
+#define XBYAK_NO_OP_NAMES
+#define XBYAK_UNDEF_JNL
+#include "../../../../thirdparty/mkl-dnn/src/cpu/xbyak/xbyak_util.h"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+
+inline CNNLayerPtr getLayer(const ICNNNetwork& network, const std::string& layerName) {
+    std::vector<CNNLayerPtr> layers = InferenceEngine::details::CNNNetSortTopologically(network);
+    for (CNNLayerPtr layer : layers) {
+        if (layer->name == layerName) {
+            return layer;
+        }
+    }
+
+    return nullptr;
+}
+
+inline void checkLayerOuputPrecision(const ICNNNetwork& network, const std::string& layerName, Precision expectedPrecision) {
+    CNNLayerPtr layer = getLayer(network, layerName);
+    for (DataPtr data : layer->outData) {
+        ASSERT_EQ(expectedPrecision, data->getPrecision()) << " unexpected precision " << data->getPrecision() << " for layer " << layerName;
+    }
+}
+
+struct network_params {
+    std::string pluginName;
+    std::string modelFile;
+    std::string imageName;
+    std::string statFile;
+    std::vector<std::pair<int, float>> refValue;
+    // optional config (used for multi-device)
+    std::map<std::string, std::string> config;
+
+    std::string model() {
+        ModelsPath result;
+        result += kPathSeparator;
+        result += modelFile;
+        return result;
+    }
+
+    std::string weights() {
+        ModelsPath result;
+        result += kPathSeparator;
+        result += testing::FileUtils::fileNameNoExt(modelFile);
+        result += ".bin";
+        return result;
+    }
+
+    std::string image() {
+        std::string result = TestDataHelpers::get_data_path();
+        result += kPathSeparator;
+        result += imageName;
+        return result;
+    }
+
+    std::string stat() {
+        ModelsPath result;
+        result += kPathSeparator;
+        result += statFile;
+        return result;
+    }
+
+    std::string plugin() { return pluginName + "Plugin"; }
+
+    std::string deviceName() {
+        if (pluginName == "MultiDevice") {
+            return "MULTI:CPU";
+        }
+        if (pluginName == "MKLDNN") {
+            return "CPU";
+        }
+
+        return "";
+    }
+
+};
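+
+// Resolution sketch (file names are illustrative): for modelFile "model.xml",
+// model() appends it to the models path and weights() swaps the extension:
+//
+//   network_params p{"MKLDNN", "model.xml", "cat.bmp", "model_stat.txt", {}};
+//   p.model();       // <models path>/model.xml
+//   p.weights();     // <models path>/model.bin
+//   p.deviceName();  // "CPU"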
+
+static LayerTransformation::Params createParam() {
+    return LayerTransformation::Params(
+        false,
+        true,
+        true,
+        LayerTransformation::QuantizedTensorAlignment::None,
+        LayerTransformation::QuantizedTensorAlignment::None,
+        false);
+}
+
+static LayerTransformation::Params createParamU8I8() {
+    return LayerTransformation::Params(
+        false,
+        true,
+        true,
+        LayerTransformation::QuantizedTensorAlignment::None,
+        LayerTransformation::QuantizedTensorAlignment::None,
+        false,
+        true,
+        true,
+        { Precision::U8 },
+        { Precision::I8 });
+}
+
+static LayerTransformation::Params createParamU8U8() {
+    return LayerTransformation::Params(
+        false,
+        true,
+        true,
+        LayerTransformation::QuantizedTensorAlignment::None,
+        LayerTransformation::QuantizedTensorAlignment::None,
+        false,
+        true,
+        true,
+        { Precision::U8 },
+        { Precision::U8 });
+}
+
+static LayerTransformation::Params createParamI8I8() {
+    return LayerTransformation::Params(
+        false,
+        true,
+        true,
+        LayerTransformation::QuantizedTensorAlignment::None,
+        LayerTransformation::QuantizedTensorAlignment::None,
+        false,
+        true,
+        true,
+        { Precision::I8 },
+        { Precision::I8 });
+}
+
+static LayerTransformation::Params createParamCpu() {
+    return LayerTransformation::Params(
+        true,
+        true,
+        true,
+        LayerTransformation::QuantizedTensorAlignment::UpdateLevel,
+        LayerTransformation::QuantizedTensorAlignment::None,
+        true,
+        true,
+        true);
+}
+
+static std::vector<float> generateInput(const size_t size, const bool reverse = false) {
+    std::vector<float> in(size);
+    for (size_t i = 0; i < in.size(); ++i) {
+        in[i] = reverse ? in.size() - i : i;
+    }
+    return in;
+}
+
+
+class TransformationsParams;
+
+class ModelParams {
+public:
+    ModelParams(
+            const std::string name,
+            const std::string irFilePath,
+            const std::string dataFilePath,
+            const std::vector<std::pair<int, float>> referenceOutputDataWithoutTransformations,
+            const std::vector<std::pair<int, float>> referenceOutputDataWithTransformations = {}) :
+            name(name),
+            irFilePath(irFilePath),
+            dataFilePath(dataFilePath),
+            referenceOutputDataWithoutTransformations({ referenceOutputDataWithoutTransformations }),
+            referenceOutputDataWithTransformations((referenceOutputDataWithTransformations.size() != 0ul) ?
+                                                   std::vector<std::vector<std::pair<int, float>>>({ referenceOutputDataWithTransformations }) :
+                                                   std::vector<std::vector<std::pair<int, float>>>({ referenceOutputDataWithoutTransformations })),
+            validation(nullptr),
+            inputs({}),
+            transformations({}) {}
+
+
+    ModelParams(
+            const std::string name,
+            const std::string irFilePath,
+            const std::string dataFilePath,
+            const std::vector<std::pair<int, float>> referenceOutputDataWithoutTransformations,
+            const std::vector<std::pair<int, float>> referenceOutputDataWithTransformations,
+            std::function<void(const TransformationsParams& params, CNNNetworkImplPtr usedNetwork)> validation,
+            const std::vector<std::pair<std::string, std::vector<float>>> inputs = {},
+            const std::vector<std::pair<std::string, std::shared_ptr<LayerTransformation>>> transformations = {}) :
+            name(name),
+            irFilePath(irFilePath),
+            dataFilePath(dataFilePath),
+            referenceOutputDataWithoutTransformations({ referenceOutputDataWithoutTransformations }),
+            referenceOutputDataWithTransformations(referenceOutputDataWithTransformations.size() != 0ul ?
+                                                   std::vector<std::vector<std::pair<int, float>>>({ referenceOutputDataWithTransformations }) :
+                                                   std::vector<std::vector<std::pair<int, float>>>({ referenceOutputDataWithoutTransformations })),
+            validation(validation),
+            inputs(inputs),
+            transformations(transformations) {}
+
+    ModelParams(
+            const std::string name,
+            const std::string irFilePath,
+            const std::string dataFilePath,
+            const std::vector<std::vector<std::pair<int, float>>> referenceOutputDataWithoutTransformations,
+            const std::vector<std::vector<std::pair<int, float>>> referenceOutputDataWithTransformations,
+            std::function<void(const TransformationsParams& params, CNNNetworkImplPtr usedNetwork)> validation) :
+            name(name),
+            irFilePath(irFilePath),
+            dataFilePath(dataFilePath),
+            referenceOutputDataWithoutTransformations(referenceOutputDataWithoutTransformations),
+            referenceOutputDataWithTransformations(referenceOutputDataWithTransformations.size() != 0ul ? referenceOutputDataWithTransformations : referenceOutputDataWithoutTransformations),
+            validation(validation),
+            inputs({}),
+            transformations({}) {}
+
+    const std::string name;
+    const std::string irFilePath;
+    const std::string dataFilePath;
+    const std::vector<std::vector<std::pair<int, float>>> referenceOutputDataWithoutTransformations;
+    const std::vector<std::vector<std::pair<int, float>>> referenceOutputDataWithTransformations;
+    const std::function<void(const TransformationsParams& params, CNNNetworkImplPtr usedNetwork)> validation;
+    const std::vector<std::pair<std::string, std::vector<float>>> inputs;
+    const std::vector<std::pair<std::string, std::shared_ptr<LayerTransformation>>> transformations;
+};
+
+class TransformationsParams {
+public:
+    TransformationsParams(
+            const bool transformationsInPluginEnabled = true,
+            const bool transformationsInTestEnabled = false,
+            const LayerTransformation::Params& params = LayerTransformation::Params(),
+            const std::unordered_set<std::string>& notTransformedLayers = {},
+            const size_t classesCanBeChangedIndex = 9999,
+            const bool compareRawValues = true,
+            const std::unordered_set<std::string>& removedLayers = {}) :
+            pluginName(""),
+            modelParams(ModelParams("", "", "", {})),
+            batchSize(1ul),
+            transformationsInPluginEnabled(transformationsInPluginEnabled),
+            transformationsInTestEnabled(transformationsInTestEnabled),
+            params(params),
+            notTransformedLayers(notTransformedLayers),
+            classesCanBeChangedIndex(classesCanBeChangedIndex),
+            compareRawValues(compareRawValues),
+            removedLayers(removedLayers) {}
+
+    TransformationsParams(
+            const std::string pluginName,
+            const ModelParams modelParams,
+            const size_t batchSize,
+            const bool transformationsInPluginEnabled = true,
+            const bool transformationsInTestEnabled = false,
+            const LayerTransformation::Params& params = LayerTransformation::Params(),
+            const std::unordered_set<std::string>& notTransformedLayers = {},
+            const size_t classesCanBeChangedIndex = 9999,
+            const bool compareRawValues = true,
+            const std::unordered_set<std::string>& removedLayers = {},
+            const std::vector<std::pair<std::string, std::vector<float>>> inputs = {},
+            const std::vector<std::pair<std::string, std::shared_ptr<LayerTransformation>>> transformations = {}) :
+            pluginName(pluginName),
+            modelParams(modelParams),
+            batchSize(batchSize),
+            transformationsInPluginEnabled(transformationsInPluginEnabled),
+            transformationsInTestEnabled(transformationsInTestEnabled),
+            params(params),
+            notTransformedLayers(notTransformedLayers),
+            classesCanBeChangedIndex(classesCanBeChangedIndex),
+            compareRawValues(compareRawValues),
+            removedLayers(removedLayers) {}
+
+    const std::string pluginName;
+    const ModelParams modelParams;
+    const size_t batchSize;
+
+    static std::string getLowPrecisionTransformerSingleLayerTestName(testing::TestParamInfo<TransformationsParams> params) {
+        const TransformationsParams& p = params.param;
+        std::stringstream ss;
+        ss << p.modelParams.name <<
+           "_batch" << p.batchSize <<
+           "_" << (p.transformationsInPluginEnabled ? "inPluginEnabled" : "inPluginDisabled") <<
+           "_" << (p.transformationsInTestEnabled ? "inTestEnabled" : "inTestDisabled") <<
+           "_" << (p.params.supportAsymmetricQuantization ? "asymmetric" : "symmetric") <<
+           "_" << p.params.precisionsOnActivations <<
+           "_" << p.params.precisionsOnWeights <<
+           "_" << p.params.quantizedTensorAlignmentOnActivations;
+        return ss.str();
+    }
+
+    const bool transformationsInPluginEnabled;
+    const bool transformationsInTestEnabled;
+    const LayerTransformation::Params params;
+    const std::unordered_set<std::string> notTransformedLayers;
+    const size_t classesCanBeChangedIndex;
+    const bool compareRawValues;
+    const std::unordered_set<std::string> removedLayers;
+};
+
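+// Fixture that loads a model, optionally applies low-precision transformations (in the test and/or
+// in the plugin), runs inference, and compares the top-K classification results against references.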
+class smoke_NetworkClassifyTest : public TestsCommon, public TestsCommonFunc, public WithParamInterface<TransformationsParams> {
+protected:
+    void classify(
+            network_params p,
+            size_t batch_size = 1,
+            float threshold = 0.005f,
+            const TransformationsParams& transformationsParams = TransformationsParams(),
+            const std::vector<std::pair<std::string, std::vector<float>>>& inputs = {},
+            const std::vector<std::pair<std::string, std::shared_ptr<LayerTransformation>>>& transformations = {}) {
+        CNNNetworkImplPtr usedNetwork;
+        classify(p, batch_size, threshold, transformationsParams, usedNetwork, inputs, transformations);
+    }
+
+    void classify(
+            network_params p,
+            size_t batch_size,
+            float threshold,
+            const TransformationsParams& transformationsParams,
+            CNNNetworkImplPtr& usedNetwork,
+            const std::vector<std::pair<std::string, std::vector<float>>>& inputs = {},
+            const std::vector<std::pair<std::string, std::shared_ptr<LayerTransformation>>>& transformations = {}) {
+
+#ifdef DISPLAY_RESULTS
+        std::cout << std::endl << p.modelFile << ": was started" << std::endl;
+        if (transformationsParams.transformationsInTestEnabled) {
+            std::cout <<
+                "\tenabled: " << (transformationsParams.transformationsInTestEnabled ? "true" : "false") << std::endl <<
+                "\tbatch_size: " << batch_size << std::endl <<
+                "\tupdatePrecision: " << (transformationsParams.params.updatePrecisions ? "true" : "false") << std::endl <<
+                "\tquantizeOutputs: " << (transformationsParams.params.quantizeOutputs ? "true" : "false") << std::endl <<
+                "\tweightsToConst: " << (transformationsParams.params.weightsToConst ? "true" : "false") << std::endl <<
+                "\tquantizedTensorAlignmentOnActivations: " << transformationsParams.params.quantizedTensorAlignmentOnActivations << std::endl <<
+                "\tquantizedTensorAlignmentOnWeights: " << transformationsParams.params.quantizedTensorAlignmentOnWeights << std::endl <<
+                "\troundQuantizedValues: " << (transformationsParams.params.roundQuantizedValues ? "true" : "false") << std::endl <<
+                "\tupdateBiases: " << (transformationsParams.params.updateBiases ? "true" : "false") << std::endl <<
+                "\tsupportAsymmetricQuantization: " << (transformationsParams.params.supportAsymmetricQuantization ? "true" : "false") << std::endl <<
+                "\tprecisionsOnActivations: " << transformationsParams.params.precisionsOnActivations << std::endl <<
+                "\tprecisionsOnWeights: " << transformationsParams.params.precisionsOnWeights << std::endl;
+        } else {
+            std::cout << "\tenabled: " << (transformationsParams.transformationsInTestEnabled ? "true" : "false") << std::endl;
+        }
+#endif
+
+        Core ie;
+        CNNNetwork network;
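+        // An absolute path is read directly from disk; otherwise the in-memory model and weights
+        // supplied by network_params are used.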
+        if (*p.modelFile.begin() == '/') {
+            network = ie.ReadNetwork(p.modelFile);
+        } else {
+            network = ie.ReadNetwork(p.model(), p.weights());
+        }
+
+        if (batch_size != 1)
+            network.setBatchSize(batch_size);
+
+        ie.SetConfig(p.config);
+        if (!p.statFile.empty()) {
+            InferenceEngine::NetworkStatsMap stat = testing::loadStatisticFromFile(p.stat());
+
+            ICNNNetworkStats *pstats;
+            ((ICNNNetwork&)network).getStats(&pstats, nullptr);
+            pstats->setNodesStats(stat);
+        }
+
+        if (transformationsParams.transformationsInTestEnabled) {
+            ICNNNetwork& icnnnetwork = network;
+            auto networkNGraph = dynamic_cast<CNNNetworkNGraphImpl*>(&icnnnetwork);
+            if (networkNGraph) {
+                std::shared_ptr<ICNNNetwork> networkPtr = networkNGraph->getCNNNetwork();
+                network = CNNNetwork(networkPtr);
+            }
+
+            auto originalLayersInfo = LowPrecisionTransformationValidation::getLayers(network);
+            for (const std::string& removedLayer : transformationsParams.removedLayers) {
+                for (auto originalLayerIt = originalLayersInfo.begin(); originalLayerIt != originalLayersInfo.end(); ++originalLayerIt) {
+                    if (removedLayer == originalLayerIt->first) {
+                        originalLayersInfo.erase(originalLayerIt);
+                        break;
+                    }
+                }
+            }
+
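+            // Start from the default LPT pipeline, then register a branch-specific Eltwise
+            // transformation, a Convolution transformation with U8 activations, and a
+            // ScaleShift-to-Convolution cleanup pass.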
+            LowPrecisionTransformations lowPrecisionTransformations = LowPrecisionTransformer::getAllTransformations(transformationsParams.params).
+                    addBranchSpecific<EltwiseTransformation>(LayerTransformation::Params(transformationsParams.params), "Eltwise").
+                    add<ConvolutionTransformation>(
+                    LayerTransformation::Params(transformationsParams.params).setPrecisionsOnActivations({ Precision::U8 }),
+                    "Convolution").
+                    addCleanup<ScaleShiftToConvolutionTransformation>(
+                    LayerTransformation::Params(transformationsParams.params).setPrecisionsOnActivations({ Precision::U8 }),
+                    "ScaleShift");
+
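+            // Test-supplied transformations replace default ones registered under the same key.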
+            for (const auto& transformation : transformations) {
+                auto it = lowPrecisionTransformations.transformations.find(transformation.first);
+                if (it != lowPrecisionTransformations.transformations.end()) {
+                    lowPrecisionTransformations.transformations.erase(it);
+                }
+
+                lowPrecisionTransformations.transformations.emplace(transformation.first, transformation.second);
+            }
+
+            LowPrecisionTransformer transformer(lowPrecisionTransformations);
+            transformer.transform(network);
+
+            LowPrecisionTransformationValidation::validate(
+                    network,
+                    transformationsParams.params,
+                    transformationsParams.notTransformedLayers,
+                    originalLayersInfo);
+        }
+
+        std::map<std::string, std::string> config;
+        if (!transformationsParams.transformationsInPluginEnabled) {
+            config.emplace(PluginConfigInternalParams::KEY_LP_TRANSFORMS_MODE, PluginConfigParams::NO);
+        }
+
+        // Uncomment to control LPT explicitly on devices that define KEY_LP_TRANSFORMS_MODE (e.g. GPU):
+        //config.emplace(
+        //    PluginConfigInternalParams::KEY_LP_TRANSFORMS_MODE,
+        //    transformationsParams.transformationsInPluginEnabled ? PluginConfigParams::YES : PluginConfigParams::NO);
+
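+        // Clone the network before compilation so the validation callback can inspect the loaded version.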
+        usedNetwork = cloneNet(network);
+        ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.deviceName(), config);
+        InferRequest inferRequest = exeNetwork.CreateInferRequest();
+        if (inputs.empty()) {
+            Blob::Ptr src = readInput(p.image(), batch_size);
+            ASSERT_NE(nullptr, src.get()) << "Cannot read Input " << p.image();
+            auto inputsInfo = network.getInputsInfo();
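+            // Three-input models: feed a fixed 3-element FP32 blob to the first input
+            // and the image blob to the remaining two.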
+            if (inputsInfo.size() == 3ul) {
+                std::vector<float> data = { 1.f, 2.f, 3.f };
+                Blob::Ptr blob = make_shared_blob<float>(TensorDesc(Precision::FP32, { 1ul, 3ul }, Layout::NC));
+                blob->allocate();
+                CNNNetworkHelper::fillBlobByFP32(blob, data.data());
+
+                auto it = inputsInfo.begin();
+                inferRequest.SetBlob(it->first, blob);
+
+                ++it;
+                inferRequest.SetBlob(it->first, src);
+
+                ++it;
+                inferRequest.SetBlob(it->first, src);
+            } else {
+                inferRequest.SetBlob(network.getInputsInfo().begin()->first, src);
+            }
+        } else {
+            for (const auto& input : inputs) {
+                Blob::Ptr blob = make_shared_blob<float>(TensorDesc(Precision::FP32, { input.second.size() }, Layout::C));
+                blob->allocate();
+                CNNNetworkHelper::fillBlobByFP32(blob, input.second.data());
+                inferRequest.SetBlob(input.first, blob);
+            }
+        }
+
+        OutputsDataMap outInfo = network.getOutputsInfo();
+        ASSERT_EQ(outInfo.size(), 1);
+        ASSERT_NE(outInfo.begin()->second, nullptr);
+        Blob::Ptr dst = make_shared_blob<float>(outInfo.begin()->second->getTensorDesc());
+        dst->allocate();
+        inferRequest.SetBlob(outInfo.begin()->first, dst);
+
+        inferRequest.Infer();
+
+        for (size_t i = 0; i < batch_size; i++)
+            ASSERT_TRUE(compareTop(*dst.get(), p.refValue, i, threshold, transformationsParams.classesCanBeChangedIndex, transformationsParams.compareRawValues)) << "Doesn't match with ref values";
+    }
+
+    Regression::Builder please() {
+        std::shared_ptr<Core> ie = PluginCache::get().ie();
+        Regression::Builder b(ie);
+        b.usingDevice("CPU");
+
+        return b;
+    }
+
+private:
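+    // True when the layer's single child is a Convolution consuming this layer's output as its weights input.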
+    static bool onWeights(const CNNLayer& layer) {
+        const std::vector<CNNLayerPtr> children = getChildren(layer);
+        return (children.size() == 1) &&
+               (children[0]->type == "Convolution") &&
+               (children[0]->insData.size() >= 2) &&
+               (children[0]->insData[1].lock()->getCreatorLayer().lock()->name == layer.name);
+    }
+
+    static std::vector<CNNLayerPtr> getChildren(const CNNLayer& layer, const std::string& exceptionLayerName = "") {
+        std::vector<CNNLayerPtr> children;
+        for (const DataPtr& outData : layer.outData) {
+            const std::map<std::string, CNNLayerPtr>& inputTo = outData->getInputTo();
+            for (auto it = inputTo.begin(); it != inputTo.end(); ++it) {
+                CNNLayerPtr child = it->second;
+                if (exceptionLayerName.empty() || child->name != exceptionLayerName) {
+                    children.push_back(child);
+                }
+            }
+        }
+        return children;
+    }
+};
+
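+// Parametrized end-to-end test: selects reference values for the current settings, runs classify(),
+// then invokes the model's optional validation callback.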
+class ModelTransformationsTest : public smoke_NetworkClassifyTest {
+protected:
+    void SetUp() override {
+        const TransformationsParams transformationsParam = ::testing::WithParamInterface<TransformationsParams>::GetParam();
+        CNNNetworkImplPtr usedNetwork;
+
+        std::vector<std::pair<int, float>> referenceValues;
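+        // Two reference sets mean ISA-dependent results: index 1 is used on AVX-512 hosts, index 0 otherwise.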
+        if (transformationsParam.params.updatePrecisions &&
+            (transformationsParam.transformationsInPluginEnabled || transformationsParam.transformationsInTestEnabled)) {
+            if (transformationsParam.modelParams.referenceOutputDataWithTransformations.size() == 1) {
+                referenceValues = transformationsParam.modelParams.referenceOutputDataWithTransformations[0];
+            } else {
+                referenceValues = Xbyak::util::Cpu().has(Xbyak::util::Cpu::tAVX512F) ?
+                                  transformationsParam.modelParams.referenceOutputDataWithTransformations[1] :
+                                  transformationsParam.modelParams.referenceOutputDataWithTransformations[0];
+            }
+        } else {
+            if (transformationsParam.modelParams.referenceOutputDataWithoutTransformations.size() == 1) {
+                referenceValues = transformationsParam.modelParams.referenceOutputDataWithoutTransformations[0];
+            } else {
+                referenceValues = Xbyak::util::Cpu().has(Xbyak::util::Cpu::tAVX512F) ?
+                                  transformationsParam.modelParams.referenceOutputDataWithoutTransformations[1] :
+                                  transformationsParam.modelParams.referenceOutputDataWithoutTransformations[0];
+            }
+        }
+
+        network_params p{
+                "MKLDNN",
+                transformationsParam.modelParams.irFilePath,
+                transformationsParam.modelParams.dataFilePath,
+                "",
+                referenceValues
+        };
+
+        classify(p,
+                 transformationsParam.batchSize,
+                 1.f,
+                 transformationsParam,
+                 usedNetwork,
+                 transformationsParam.modelParams.inputs,
+                 transformationsParam.modelParams.transformations);
+
+        if (transformationsParam.modelParams.validation != nullptr) {
+            transformationsParam.modelParams.validation(transformationsParam, usedNetwork);
+        }
+    }
+};
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/activation_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/activation_tests.hpp
new file mode 100644 (file)
index 0000000..e3c50ea
--- /dev/null
@@ -0,0 +1,179 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+#include <cmath>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include "ir_gen_helper.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace single_layer_tests;
+
+
+struct activation_base_params {
+    struct {
+        size_t w;
+        size_t h;
+        size_t c;
+    } in;
+
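+    // Negative slope applied by the default (leaky) ReLU branch of the reference implementation.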
+    float n_clope;
+};
+
+struct activation_test_params : activation_base_params {
+    std::string device_name;
+    std::string activationType;
+
+    activation_test_params(std::string name, activation_base_params params, std::string activationType) :
+            activation_base_params(params), device_name(name), activationType(activationType) {}
+
+};
+
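+// Element-wise reference: applies the activation selected by prm.activationType;
+// unknown types fall back to leaky ReLU with slope n_clope.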
+template <typename data_t>
+void ref_activation(const data_t *src_data, data_t *dst_data, const activation_test_params& prm)
+{
+    size_t IW = prm.in.w;
+    size_t IH = prm.in.h;
+    size_t IC = prm.in.c;
+
+    for (uint32_t c = 0; c < IC; c++) {
+        for (uint32_t h = 0; h < IH; h++) {
+            for (uint32_t w = 0; w < IW; w++) {
+                uint32_t oidx = c * IH * IW
+                                + h * IW + w;
+
+                if (prm.activationType == "exp")
+                    dst_data[oidx] = exp(src_data[oidx]);
+                else if (prm.activationType == "not")
+                    dst_data[oidx] = !(src_data[oidx]);
+                else if (prm.activationType == "sin")
+                    dst_data[oidx] = sin(src_data[oidx]);
+                else if (prm.activationType == "sinh")
+                    dst_data[oidx] = sinh(src_data[oidx]);
+                else if (prm.activationType == "cos")
+                    dst_data[oidx] = cos(src_data[oidx]);
+                else if (prm.activationType == "cosh")
+                    dst_data[oidx] = cosh(src_data[oidx]);
+                else
+                    dst_data[oidx] = src_data[oidx] >= 0.0 ?
+                                     src_data[oidx] :
+                                     src_data[oidx] * prm.n_clope;
+            }
+        }
+    }
+}
+
+class ActivationTest: public TestsCommon,
+                    public WithParamInterface<activation_test_params> {
+    std::string layers_t = R"V0G0N(
+        <layer name="_ACTIVATION_TYPE_" id="1" type="_ACTIVATION_TYPE_" precision="FP32">
+            <input>
+                <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="1">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+)V0G0N";
+
+    std::string edges_t = R"V0G0N(
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+)V0G0N";
+
+    std::string getModel(activation_test_params p) {
+        std::string model = layers_t;
+
+        if (p.activationType == "exp")
+            REPLACE_WITH_STR(model, "_ACTIVATION_TYPE_", "Exp");
+        else if (p.activationType == "not")
+            REPLACE_WITH_STR(model, "_ACTIVATION_TYPE_", "Not");
+        else if (p.activationType == "sin")
+            REPLACE_WITH_STR(model, "_ACTIVATION_TYPE_", "Sin");
+        else if (p.activationType == "sinh")
+            REPLACE_WITH_STR(model, "_ACTIVATION_TYPE_", "Sinh");
+        else if (p.activationType == "cos")
+            REPLACE_WITH_STR(model, "_ACTIVATION_TYPE_", "Cos");
+        else if (p.activationType == "cosh")
+            REPLACE_WITH_STR(model, "_ACTIVATION_TYPE_", "Cosh");
+        else
+            REPLACE_WITH_STR(model, "_ACTIVATION_TYPE_", "ReLU"); // Default value
+
+        REPLACE_WITH_NUM(model, "_IN_", 1);
+        REPLACE_WITH_NUM(model, "_IW_", p.in.w);
+        REPLACE_WITH_NUM(model, "_IH_", p.in.h);
+        REPLACE_WITH_NUM(model, "_IC_", p.in.c);
+
+        model = IRTemplateGenerator::getIRTemplate(p.activationType + "_Only", {1lu, p.in.c, p.in.h, p.in.w}, "FP32", model, edges_t);
+
+        return model;
+    }
+
+protected:
+    void SetUp() override {
+        try {
+            activation_test_params p = ::testing::WithParamInterface<activation_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
+            InputsDataMap in_info_map = net.getInputsInfo();
+            OutputsDataMap out_info_map = net.getOutputsInfo();
+
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+
+            SizeVector dims_src = {1,
+                                   p.in.c,
+                                   p.in.h,
+                                   p.in.w};
+
+            Blob::Ptr inputBlob = inferRequest.GetBlob(in_info_map.begin()->first);
+            float* src = inputBlob->buffer().as<float*>();
+            fill_data(src, inputBlob->size());
+
+            SizeVector dims_dst = dims_src;
+            Blob::Ptr outputBlob = inferRequest.GetBlob(out_info_map.begin()->first);
+
+            TBlob<float> dst_ref({ Precision::FP32, dims_dst, Layout::NCHW });
+            dst_ref.allocate();
+
+            inferRequest.Infer();
+
+            ref_activation<float>(src, dst_ref.data(), p);
+
+            const float* res = outputBlob->buffer().as<float*>();
+            const float* ref = dst_ref.data();
+            compare(res, ref, outputBlob->size());
+
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+#define case_1 activation_base_params({{228, 228, 3}, 0.0})
+
+TEST_P(ActivationTest, TestsActivationFunctions) {}
+
+std::string getTestCaseName(testing::TestParamInfo<activation_test_params> obj) {
+    return obj.param.device_name +
+            "_w" + std::to_string(obj.param.in.w) +
+            "_h" + std::to_string(obj.param.in.h) +
+            "_c" + std::to_string(obj.param.in.c) +
+            "_" + obj.param.activationType;
+}
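+
+// Typical instantiation (hypothetical, device name assumed):
+// INSTANTIATE_TEST_CASE_P(TestsActivation, ActivationTest,
+//     ::testing::Values(activation_test_params("CPU", case_1, "relu")), getTestCaseName);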
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/arg_max_min_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/arg_max_min_tests.hpp
new file mode 100644 (file)
index 0000000..4581cbc
--- /dev/null
@@ -0,0 +1,201 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+#include <cmath>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace std;
+
+static inline int count(std::vector<size_t> dims, size_t start_ind, size_t end_ind) {
+    size_t count = 1;
+    for (size_t i = start_ind; i < end_ind; i++)
+        count *= dims[i];
+    return static_cast<int>(count);
+}
+
+static inline int count(std::vector<size_t> dims, size_t start_ind = 0) {
+    return count(dims, start_ind, dims.size());
+}
+
+struct argMaxMinTF_test_params {
+    std::string device_name;
+    std::string layer_type;
+
+    InferenceEngine::SizeVector in_dim;
+    std::vector<float> in;
+
+    int has_axis;
+    int out_max_val;
+    size_t top_k;
+    int axis;
+
+    InferenceEngine::SizeVector ref_dim;
+    std::vector<float> ref;
+};
+
+
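+// Reference ArgMax/ArgMin: a partial selection sort keeps the top_k elements along the chosen axis,
+// then writes indices and/or values depending on out_max_val and has_axis.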
+static void ref_argmax(float *src_data, float *dst_data, const argMaxMinTF_test_params &p) {
+    int dim, axis_dist;
+    if (p.has_axis) {
+        int axis_ = (p.axis < 0) ? p.axis + static_cast<int>(p.in_dim.size()) : p.axis;
+        dim = static_cast<int>(p.in_dim[axis_]);
+        axis_dist = count(p.in_dim, axis_) / dim;
+    } else {
+        dim = count(p.in_dim, 1);
+        axis_dist = 1;
+    }
+
+    int num = count(p.in_dim) / dim;
+    std::vector<std::pair<float, int> > src_vector(dim);
+
+    for (int i = 0; i < num; ++i) {
+        for (int j = 0; j < dim; ++j) {
+            src_vector[j] = std::make_pair(
+                    src_data[(i / axis_dist * dim + j) * axis_dist + i % axis_dist], j);
+        }
+
+        if (p.layer_type == "ArgMax") {
+            for (size_t j = 0; j < p.top_k; j++) {
+                for (size_t k = src_vector.size() - 1; k > j; k--) {
+                    if (src_vector[k].first > src_vector[k - 1].first) {
+                        std::pair<float, int> tmp = src_vector[k];
+                        src_vector[k] = src_vector[k - 1];
+                        src_vector[k - 1] = tmp;
+                    }
+                }
+            }
+        } else {
+            for (size_t j = 0; j < p.top_k; j++) {
+                for (size_t k = src_vector.size() - 1; k > j; k--) {
+                    if (src_vector[k].first < src_vector[k - 1].first) {
+                        std::pair<float, int> tmp = src_vector[k];
+                        src_vector[k] = src_vector[k - 1];
+                        src_vector[k - 1] = tmp;
+                    }
+                }
+            }
+        }
+        for (size_t j = 0; j < p.top_k; ++j) {
+            if (p.out_max_val) {
+                if (p.has_axis) {
+                    // Produces max_val per axis
+                    dst_data[(i / axis_dist * p.top_k + j) * axis_dist + i % axis_dist] = src_vector[j].first;
+                } else {
+                    // Produces max_ind and max_val
+                    dst_data[2 * i * p.top_k + j] = src_vector[j].second;
+                    dst_data[2 * i * p.top_k + p.top_k + j] = src_vector[j].first;
+                }
+            } else {
+                // Produces max_ind per axis
+                dst_data[(i / axis_dist * p.top_k + j) * axis_dist + i % axis_dist] = src_vector[j].second;
+            }
+        }
+    }
+}
+
+class ArgMaxMinTFTests : public TestsCommon, public WithParamInterface<argMaxMinTF_test_params> {
+    std::string model_t = R"V0G0N(
+<net Name="ArgMin_net" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="input" type="Input" precision="FP32" id="1">
+            <output>
+                <port id="1">
+                    _IDIM_
+                </port>
+            </output>
+        </layer>
+        <layer name="ArgMinTest" id="2" type="_LAYER_TYPE_" precision="FP32">
+            <data top_k="_TOP_K_" out_max_val="_OUT_MAX_VAL_" _AXIS_/>
+            <input>
+                <port id="1">
+                    _IDIM_
+                </port>
+            </input>
+            <output>
+                <port id="2">
+                    _OUT_
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string getModel(argMaxMinTF_test_params p) {
+        std::string model = model_t;
+        std::string inDim;
+        std::string out;
+
+        for (auto& dim : p.in_dim) {
+            inDim += "<dim>";
+            inDim += std::to_string(dim) + "</dim>\n";
+        }
+
+        for (auto& dst : p.ref_dim) {
+            out += "<dim>";
+            out += std::to_string(dst) + "</dim>\n";
+        }
+
+        REPLACE_WITH_STR(model, "_LAYER_TYPE_", p.layer_type);
+        REPLACE_WITH_STR(model, "_IDIM_", inDim);
+        REPLACE_WITH_NUM(model, "_TOP_K_", p.top_k);
+        REPLACE_WITH_NUM(model, "_OUT_MAX_VAL_", p.out_max_val);
+
+        std::string axis;
+        if (p.has_axis)
+            axis += "axis=\"" + std::to_string(p.axis) + "\"";
+
+        REPLACE_WITH_STR(model, "_AXIS_", axis);
+        REPLACE_WITH_STR(model, "_OUT_", out);
+
+        return model;
+    }
+
+protected:
+    void TearDown() override {
+    }
+
+    void SetUp() override {
+        try {
+            TestsCommon::SetUp();
+            argMaxMinTF_test_params p = ::testing::WithParamInterface<argMaxMinTF_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
+            InputsDataMap in_info_map = net.getInputsInfo();
+            OutputsDataMap out_info_map = net.getOutputsInfo();
+
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+
+            Blob::Ptr inputBlob = inferRequest.GetBlob(in_info_map.begin()->first);
+            float* inputData = inputBlob->buffer().as<float*>();
+            memcpy(inputData, &p.in[0], sizeof(float)*p.in.size());
+
+            TBlob<float> dst_ref(out_info_map.begin()->second->getTensorDesc());
+            dst_ref.allocate();
+            ref_argmax(inputData, dst_ref.data(), p);
+
+            inferRequest.Infer();
+
+            Blob::Ptr outputBlob = inferRequest.GetBlob(out_info_map.begin()->first);
+            //  Check results
+            compare(outputBlob->buffer().as<float*>(), dst_ref.buffer().as<float*>(), outputBlob->size());
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(ArgMaxMinTFTests, TestsArgMaxMin) {}
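+
+// Typical instantiation (hypothetical values): build argMaxMinTF_test_params with matching input and
+// reference data, e.g. ::testing::Values(argMaxMinTF_test_params{ "CPU", "ArgMax", /* ... */ }).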
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/bin_conv_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/bin_conv_tests.hpp
new file mode 100644 (file)
index 0000000..8d3c4af
--- /dev/null
@@ -0,0 +1,412 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include <single_layer_common.hpp>
+#include <string>
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using std::vector;
+
+struct bin_conv_base_params {
+    vector<size_t> in_dims;
+    vector<size_t> kernel;
+    vector<size_t> strides;
+    vector<size_t> pads_begin;
+    vector<size_t> pads_end;
+    vector<size_t> dilations;
+
+    size_t out_c;
+    size_t grp_c;
+
+    vector<size_t> out_dims;
+
+    float pad_value;
+};
+
+struct bin_conv_test_params : bin_conv_base_params {
+    std::string device_name;
+
+    bin_conv_test_params(std::string name, bin_conv_base_params params) :
+            bin_conv_base_params(params), device_name(name) {}
+
+};
+
+class BinaryConvolutionOnlyTest : public TestsCommon,
+                            public WithParamInterface<bin_conv_test_params> {
+
+    std::string model_t_4D = R"V0G0N(
+<net name="BinaryConvolution_Only" version="3" precision="FP32" batch="1">
+    <layers>
+        <layer name="in1" type="Input" precision="FP32" id="0">
+            <output>
+                <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="conv1" id="1" type="BinaryConvolution" precision="FP32">
+            <data strides="_KS_"
+                         pads_begin="_PB_" pads_end="_PE_"
+                         kernel="_K_"
+                         dilations="_DL_"
+                         input="_IC_" output="_OC_" group="_GC_"
+                         pad_value="_PV_" mode="_M_"/>
+
+            <weights offset="0" size="_S1_" />
+
+            <input>
+                <port id="1">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="2">
+                    <dim>_IN_</dim>
+                    <dim>_OC_</dim>
+                    <dim>_OH_</dim>
+                    <dim>_OW_</dim>
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
+    </edges>
+</net>
+)V0G0N";
+
+protected:
+
+    size_t calculateOutDim(size_t in_dim, size_t kernel, size_t stride, size_t pad_begin) {
+        return (in_dim + 2lu * pad_begin - kernel) / stride + 1lu;
+    }
+
+    void createBlobs(const bin_conv_test_params &p, TBlob<float>::Ptr &src, TBlob<float>::Ptr &dst, TBlob<float>::Ptr &dst_ref) {
+        auto in_size = p.in_dims.size();
+        auto out_size = p.out_dims.size();
+        SizeVector dims_src;
+        for (int i = in_size; i > 0; i--) {
+            dims_src.insert(dims_src.begin(), p.in_dims[i - 1]);
+        }
+
+        SizeVector dims_dst = {
+            1lu,
+            p.out_c,
+            p.out_dims[out_size - 2] == 0 ? calculateOutDim(p.in_dims[in_size - 2], p.kernel[Y_AXIS], p.strides[Y_AXIS], p.pads_begin[Y_AXIS]) : p.out_dims[out_size - 2],
+            p.out_dims[out_size - 1] == 0 ? calculateOutDim(p.in_dims[in_size - 1], p.kernel[X_AXIS], p.strides[X_AXIS], p.pads_begin[X_AXIS]) : p.out_dims[out_size - 1]
+        };
+
+        Layout layout = NCHW;
+        if (in_size == 5) {
+            layout = NCDHW;
+
+            dims_dst.insert(dims_dst.begin() + 3,
+                p.out_dims.size() > 2 ?
+                (p.out_dims[out_size - 3] == 0 ?
+                    calculateOutDim(p.in_dims[in_size - 3], p.kernel[Z_AXIS], p.strides[Z_AXIS], p.pads_begin[Z_AXIS]) : p.out_dims[out_size - 3]) : 1lu);
+        }
+
+        src = make_shared_blob<float>({Precision::FP32, dims_src, layout});
+        src->allocate();
+
+        dst = make_shared_blob<float>({Precision::FP32, dims_dst, layout});
+        dst->allocate();
+
+        dst_ref = make_shared_blob<float>({Precision::FP32, dims_dst, layout});
+        dst_ref->allocate();
+    }
+
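+    // Weights are bit-packed (Precision::BIN): one bit per filter tap, plus out_c trailing
+    // elements, filled with random packed data.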
+    TBlob<uint8_t>::Ptr fillWeights(const bin_conv_test_params &p) {
+        auto KZ = p.kernel.size() > Z_AXIS ? p.kernel[Z_AXIS] : 1lu;
+        TBlob<uint8_t>::Ptr weights_ptr = make_shared_blob<uint8_t>({Precision::BIN,
+                    {(p.kernel[X_AXIS] * p.kernel[Y_AXIS] * KZ * p.out_c * p.in_dims[1] / p.grp_c + p.out_c)},
+                    Layout::C});
+        weights_ptr->allocate();
+        fill_data_bin_packed(weights_ptr->buffer(), weights_ptr->size());
+        return weights_ptr;
+    }
+
+
+    struct bin_conv_common_params {
+        InferenceEngine::PropertyVector<unsigned int> stride;
+        InferenceEngine::PropertyVector<unsigned int> kernel;
+        InferenceEngine::PropertyVector<unsigned int> pads_begin;
+        InferenceEngine::PropertyVector<unsigned int> pads_end;
+        InferenceEngine::PropertyVector<unsigned int> dilation;
+        std::string auto_pad;
+        size_t group;
+        size_t out_c;
+        float pad_value;
+    };
+
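+    // Naive reference binary convolution: taps outside the input read pad_value;
+    // weights are decoded to +/-1 from the packed bit representation.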
+    void ref_bin_conv_common(const Blob& src,
+                         Blob& dst,
+                         const uint8_t* weights_data,
+                         const bin_conv_common_params& prm) {
+        if (src.getTensorDesc().getLayout() != Layout::NCHW &&
+            src.getTensorDesc().getLayout() != Layout::NCDHW)
+            THROW_IE_EXCEPTION << "Reference binary convolution supports NCHW and NCDHW layouts only";
+        size_t KW = prm.kernel[X_AXIS];
+        size_t KH = prm.kernel[Y_AXIS];
+        size_t KD = prm.kernel.size() > Z_AXIS ? prm.kernel[Z_AXIS] : 1lu;
+
+        size_t SW = prm.stride[X_AXIS];
+        size_t SH = prm.stride[Y_AXIS];
+        size_t SD = prm.stride.size() > Z_AXIS ? prm.stride[Z_AXIS] : 0lu;
+
+        size_t DW = prm.dilation[X_AXIS];
+        size_t DH = prm.dilation[Y_AXIS];
+        size_t DD = prm.dilation.size() > Z_AXIS ? prm.dilation[Z_AXIS] : 0lu;
+
+        size_t PW = prm.pads_begin[X_AXIS];
+        size_t PH = prm.pads_begin[Y_AXIS];
+        size_t PD = prm.pads_begin.size() > Z_AXIS ? prm.pads_begin[Z_AXIS] : 0lu;
+
+        size_t GC = prm.group;
+
+        auto src_dims = src.getTensorDesc().getDims();
+        size_t IW, IH, ID, IC = src_dims[1];
+
+        if (src_dims.size() == 5lu) {
+            IW = src_dims[4];
+            IH = src_dims[3];
+            ID = src_dims[2];
+        } else {
+            IW = src_dims[3];
+            IH = src_dims[2];
+            ID = 1lu;
+        }
+
+        auto dst_dims = dst.getTensorDesc().getDims();
+        size_t OW, OH, OD;
+        size_t OC = prm.out_c;
+
+        if (dst_dims.size() == 5lu) {
+            OW = dst_dims[4];
+            OH = dst_dims[3];
+            OD = dst_dims[2];
+        }
+        else {
+            OW = dst_dims[3];
+            OH = dst_dims[2];
+            OD = 1lu;
+        }
+
+        const auto* src_data = src.cbuffer().as<const float*>();
+        auto* dst_data = dst.buffer().as<float*>();
+
+        int nbits = 8;
+
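+        // Decode one packed weight bit: 1 -> +1.0f, 0 -> -1.0f.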
+        auto extract_weights = [](uint8_t val, uint8_t bit) -> float {
+            return (uint8_t)((val >> bit) & 0x0001) == 1 ? 1.f : -1.f;
+        };
+
+        for (uint32_t g = 0; g < GC; g++) {
+            for (uint32_t oc = 0; oc < OC / GC; oc++) {
+                for (uint32_t od = 0; od < OD; od++) {
+                    for (uint32_t oh = 0; oh < OH; oh++) {
+                        for (uint32_t ow = 0; ow < OW; ow++) {
+                            size_t oidx = g * OC / GC * OD * OH * OW
+                                          + oc * OD * OH * OW
+                                          + od * OH * OW
+                                          + oh * OW
+                                          + ow;
+
+                            dst_data[oidx] = 0.f;
+
+                            for (size_t ic = 0; ic < IC / GC; ic++) {
+                                for (size_t kd = 0; kd < KD; kd++) {
+                                    for (size_t kh = 0; kh < KH; kh++) {
+                                        for (size_t kw = 0; kw < KW; kw++) {
+                                            size_t widx = g * OC / GC * IC / GC * KD * KH * KW
+                                                          + oc * IC / GC * KD * KH * KW
+                                                          + ic * KD * KH * KW
+                                                          + kd * KH * KW
+                                                          + kh * KW
+                                                          + kw;
+                                            float w = extract_weights(weights_data[widx/nbits], (uint8_t)(widx % nbits));
+
+                                            float s;
+
+                                            int32_t iw = ow * SW - PW + kw * DW;
+                                            int32_t ih = oh * SH - PH + kh * DH;
+                                            int32_t id = od * SD - PD + kd * DD;
+                                            if (iw < 0 || iw >= (int32_t) IW ||
+                                                ih < 0 || ih >= (int32_t) IH ||
+                                                id < 0 || id >= (int32_t) ID) {
+                                                s = prm.pad_value;
+                                            } else {
+                                                size_t iidx = g * IC / GC * ID * IH * IW
+                                                              + ic * ID * IH * IW
+                                                              + id * IH * IW
+                                                              + ih * IW
+                                                              + iw;
+                                                s = src_data[iidx];
+                                            }
+
+                                            dst_data[oidx] += s * w;
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    void calculateRef(const TBlob<uint8_t>::Ptr &weights, const bin_conv_test_params &p, const TBlob<float>::Ptr &src,
+                      TBlob<float>::Ptr &dst_ref) {
+        const uint8_t *weights_data = (const uint8_t *)weights->buffer();
+        bin_conv_common_params params;
+        for (size_t i = 0; i < p.kernel.size(); i++)
+            params.kernel.insert(i, p.kernel[i]);
+        for (size_t i = 0; i < p.strides.size(); i++)
+            params.stride.insert(i, p.strides[i]);
+        for (size_t i = 0; i < p.pads_begin.size(); i++)
+            params.pads_begin.insert(i, p.pads_begin[i]);
+        for (size_t i = 0; i < p.dilations.size(); i++)
+            params.dilation.insert(i, p.dilations[i]);
+        params.group = p.grp_c;
+        params.out_c = p.out_c;
+        params.pad_value = p.pad_value;
+
+        ref_bin_conv_common(*src.get(), *dst_ref.get(), weights_data, params);
+    }
+
+    CNNNetwork getNetwork(const TBlob<uint8_t>::Ptr &weights, const bin_conv_test_params &p) {
+        Core ie;
+        return ie.ReadNetwork(getModel(p), weights);
+    }
+
+    virtual void infer(CNNNetwork &network, const bin_conv_test_params &p, TBlob<float>::Ptr &src, TBlob<float>::Ptr &dst) {
+        Core ie;
+        ExecutableNetwork executable_network = ie.LoadNetwork(network, p.device_name);
+        InferRequest inferRequest = executable_network.CreateInferRequest();
+
+        InputsDataMap inputInfo(network.getInputsInfo());
+        inferRequest.SetBlob(inputInfo.begin()->first, src);
+
+        OutputsDataMap outputInfo(network.getOutputsInfo());
+        inferRequest.SetBlob(outputInfo.begin()->first, dst);
+
+        inferRequest.Infer();
+    }
+
+    void SetUp() override {
+        try {
+            auto p = ::testing::WithParamInterface<bin_conv_test_params>::GetParam();
+            TBlob<float>::Ptr src, dst, dst_ref;
+
+            createBlobs(p, src, dst, dst_ref);
+            fill_data_bin(src->data(), src->size());
+
+            auto weights = fillWeights(p);
+            calculateRef(weights, p, src, dst_ref);
+
+            CNNNetwork network = getNetwork(weights, p);
+            infer(network, p, src, dst);
+
+            compare(*dst, *dst_ref);
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+
+    virtual std::string getModel(bin_conv_test_params p) {
+        std::string model;
+        auto in_dims_size = p.in_dims.size();
+        model = model_t_4D;
+
+        REPLACE_WITH_NUM(model, "_IW_", p.in_dims[in_dims_size - 1]);
+        REPLACE_WITH_NUM(model, "_IH_", p.in_dims[in_dims_size - 2]);
+        REPLACE_WITH_NUM(model, "_ID_", p.in_dims[in_dims_size - 3]);
+        REPLACE_WITH_NUM(model, "_IC_", p.in_dims[1]);
+        REPLACE_WITH_NUM(model, "_IN_", p.in_dims[0]);
+
+        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_K_", p.kernel);
+        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_KS_", p.strides);
+        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PB_", p.pads_begin);
+        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PE_", p.pads_end);
+        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_DL_", p.dilations);
+
+        auto out_dims_size = p.out_dims.size();
+        REPLACE_WITH_NUM(model, "_GC_", p.grp_c);
+        REPLACE_WITH_NUM(model, "_OC_", p.out_c);
+        REPLACE_WITH_NUM(model, "_OD_", out_dims_size > 2 ?
+                (p.out_dims[out_dims_size - 3] == 0 ?
+                    calculateOutDim(p.in_dims[in_dims_size - 3], p.kernel[Z_AXIS], p.strides[Z_AXIS], p.pads_begin[Z_AXIS]) : p.out_dims[out_dims_size - 3]) :
+                        1lu);
+        REPLACE_WITH_NUM(model, "_OH_", p.out_dims[out_dims_size - 2] == 0 ?
+                calculateOutDim(p.in_dims[in_dims_size - 2], p.kernel[Y_AXIS], p.strides[Y_AXIS], p.pads_begin[Y_AXIS]) : p.out_dims[out_dims_size - 2]);
+        REPLACE_WITH_NUM(model, "_OW_", p.out_dims[out_dims_size - 1] == 0 ?
+                calculateOutDim(p.in_dims[in_dims_size - 1], p.kernel[X_AXIS], p.strides[X_AXIS], p.pads_begin[X_AXIS]) : p.out_dims[out_dims_size - 1]);
+
+        size_t KD = p.kernel.size() > Z_AXIS ? p.kernel[Z_AXIS] : 1lu;
+
+        int nbits = 8;
+        size_t w_data_size = div_up(p.kernel[X_AXIS] * p.kernel[Y_AXIS] * KD * p.out_c * p.in_dims[1] / p.grp_c, nbits);
+        REPLACE_WITH_NUM(model, "_S1_", w_data_size);
+
+        REPLACE_WITH_NUM(model, "_PV_", p.pad_value);
+        REPLACE_WITH_STR(model, "_M_", "xnor-popcount");
+
+        return model;
+    }
+};
+
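+// Each case lists: in_dims, kernel, strides, pads_begin, pads_end, dilations, out_c, grp_c, out_dims, pad_value.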
+#define case_1  bin_conv_base_params({{1lu, 9lu, 32lu, 16lu},  {2lu, 4lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}, -1.f})
+#define case_2  bin_conv_base_params({{1lu, 9lu, 32lu, 16lu},  {2lu, 4lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}, 0.f})
+#define case_3  bin_conv_base_params({{1lu, 9lu, 32lu, 16lu},  {2lu, 4lu}, {2lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}, -1.f})
+#define case_4  bin_conv_base_params({{1lu, 9lu, 32lu, 16lu},  {2lu, 4lu}, {2lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}, 0.f})
+#define case_5  bin_conv_base_params({{1lu, 9lu, 32lu, 16lu},  {2lu, 4lu}, {2lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}, 1.f})
+#define case_6  bin_conv_base_params({{1lu, 3lu, 40lu, 40lu},  {3lu, 3lu}, {1lu, 2lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 20lu, 1lu, {0lu, 0lu}, 0.f})
+#define case_7  bin_conv_base_params({{1lu, 9lu, 16lu, 32lu},  {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}, -1.f})
+#define case_8  bin_conv_base_params({{1lu, 9lu, 16lu, 32lu},  {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}, 0.f})
+#define case_9  bin_conv_base_params({{1lu, 9lu, 16lu, 32lu},  {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}, 1.f})
+#define case_10 bin_conv_base_params({{1lu, 16lu, 40lu, 40lu}, {3lu, 3lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 16lu, 16lu, {0lu, 0lu}, 0.f})
+#define case_11 bin_conv_base_params({{1lu, 32lu, 16lu, 32lu}, {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {1lu, 1lu}, 32lu, 32lu, {0lu, 0lu}, 0.f})
+#define case_12 bin_conv_base_params({{1lu, 16lu, 40lu, 40lu}, {3lu, 3lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {9lu, 9lu}, 16lu, 16lu, {0lu, 0lu}, 0.f})
+#define case_13 bin_conv_base_params({{1lu, 32lu, 16lu, 32lu}, {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {9lu, 9lu}, 32lu, 32lu, {0lu, 0lu}, 0.f})
+#define case_14 bin_conv_base_params({{1lu, 19lu, 16lu, 32lu}, {3lu, 3lu}, {1lu, 1lu}, {1lu, 1lu}, {1lu, 1lu}, {1lu, 1lu}, 21lu, 1lu, {0lu, 0lu}, -1.f})
+#define case_15 bin_conv_base_params({{1lu, 17lu, 16lu, 32lu}, {3lu, 3lu}, {1lu, 1lu}, {1lu, 1lu}, {1lu, 1lu}, {1lu, 1lu}, 19lu, 1lu, {0lu, 0lu}, 0.f})
+#define case_16 bin_conv_base_params({{1lu, 21lu, 16lu, 32lu}, {3lu, 3lu}, {1lu, 1lu}, {1lu, 1lu}, {1lu, 1lu}, {1lu, 1lu}, 33lu, 1lu, {0lu, 0lu}, 1.f})
+
+TEST_P(BinaryConvolutionOnlyTest, TestsBinaryConvolution) {}
+
+std::string getTestCaseName(testing::TestParamInfo<bin_conv_test_params> obj) {
+    auto in_dims_size = obj.param.in_dims.size();
+    return obj.param.device_name +
+           "_w" + std::to_string(obj.param.in_dims[in_dims_size - 1]) +
+           "_h" + std::to_string(obj.param.in_dims[in_dims_size - 2]) +
+           (obj.param.in_dims.size() > 4 ? "_d" + std::to_string(obj.param.in_dims[in_dims_size - 3]) : "") +
+           "_c" + std::to_string(obj.param.in_dims[1]) +
+           "_kw" + std::to_string(obj.param.kernel[X_AXIS]) +
+           "_kh" + std::to_string(obj.param.kernel[Y_AXIS]) +
+           (obj.param.kernel.size() > Z_AXIS ? "_kd" + std::to_string(obj.param.kernel[Z_AXIS]) : "") +
+           "_sw" + std::to_string(obj.param.strides[X_AXIS]) +
+           "_sh" + std::to_string(obj.param.strides[Y_AXIS]) +
+           (obj.param.strides.size() > Z_AXIS ? "_sd" + std::to_string(obj.param.strides[Z_AXIS]) : "") +
+           "_dilw" + std::to_string(obj.param.dilations[X_AXIS]) +
+           "_dilh" + std::to_string(obj.param.dilations[Y_AXIS]) +
+           (obj.param.dilations.size() > Z_AXIS ? "_dild" + std::to_string(obj.param.dilations[Z_AXIS]) : "") +
+           "_grpc" + std::to_string(obj.param.grp_c) +
+           "_pad_v" + std::to_string(obj.param.pad_value);
+}
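+
+// Typical instantiation (hypothetical, device name assumed):
+// INSTANTIATE_TEST_CASE_P(TestBinaryConvolution, BinaryConvolutionOnlyTest,
+//     ::testing::Values(bin_conv_test_params("CPU", case_1)), getTestCaseName);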
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/conv_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/conv_tests.hpp
new file mode 100644 (file)
index 0000000..892362e
--- /dev/null
@@ -0,0 +1,587 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include <single_layer_common.hpp>
+#include <string>
+#include "conv_ref.hpp"
+#include "common_test_utils/common_layers_params.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using std::vector;
+
+struct conv_base_params {
+    vector<size_t> in_dims;
+    vector<size_t> kernel;
+    vector<size_t> strides;
+    vector<size_t> pads_begin;
+    vector<size_t> pads_end;
+    vector<size_t> dilations;
+
+    size_t out_c;
+    size_t grp_c;
+
+    vector<size_t> out_dims;
+};
+
+struct conv_test_params : conv_base_params {
+    std::string device_name;
+
+    std::string getDeviceName() const {
+        return device_name;
+    }
+    conv_test_params(std::string name, conv_base_params params) :
+            conv_base_params(params), device_name(name) {}
+};
+
+class ConvolutionOnlyTest : public TestsCommon,
+                            public WithParamInterface<conv_test_params> {
+
+    std::string model_t_4D = R"V0G0N(
+<net name="Convolution_Only" version="3" precision="FP32" batch="1">
+    <layers>
+        <layer name="in1" type="Input" precision="FP32" id="0">
+            <output>
+                <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="conv1" id="1" type="Convolution" precision="FP32">
+            <convolution strides="_KS_"
+                         pads_begin="_PB_" pads_end="_PE_"
+                         kernel="_K_"
+                         dilations="_DL_"
+                         output="_OC_" group="_GC_"/>
+
+            <weights offset="0" size="_S1_" />
+            <biases offset="_S1_" size="_S2_" />
+
+            <input>
+                <port id="1">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="2">
+                    <dim>_IN_</dim>
+                    <dim>_OC_</dim>
+                    <dim>_OH_</dim>
+                    <dim>_OW_</dim>
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string model_t_4D_blobs_as_inputs = R"V0G0N(
+<net name="Convolution_Only" version="3" precision="FP32" batch="1">
+    <layers>
+        <layer name="in1" type="Input" precision="FP32" id="0">
+            <output>
+                <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="wei" type="Const" precision="FP32" id="1">
+            <output>
+                <port id="0">
+                    <dim>_OC_</dim>
+                    <dim>_ICG_</dim>
+                    <dim>_KH_</dim>
+                    <dim>_KW_</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="0" size="_S1_"/>
+            </blobs>
+        </layer>
+        <layer name="bias" type="Const" precision="FP32" id="2">
+            <output>
+                <port id="0">
+                    <dim>_OC_</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="_S1_" size="_S2_"/>
+            </blobs>
+        </layer>
+        <layer name="conv1" id="3" type="Convolution" precision="FP32">
+            <convolution strides="_KS_"
+                         pads_begin="_PB_" pads_end="_PE_"
+                         kernel="_K_"
+                         dilations="_DL_"
+                         output="_OC_" group="_GC_"/>
+
+            <input>
+                <port id="1">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+                <port id="2">
+                    <dim>_OC_</dim>
+                    <dim>_ICG_</dim>
+                    <dim>_KH_</dim>
+                    <dim>_KW_</dim>
+                </port>
+                <port id="3">
+                    <dim>_OC_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="4">
+                    <dim>_IN_</dim>
+                    <dim>_OC_</dim>
+                    <dim>_OH_</dim>
+                    <dim>_OW_</dim>
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="3" to-port="1"/>
+        <edge from-layer="1" from-port="0" to-layer="3" to-port="2"/>
+        <edge from-layer="2" from-port="0" to-layer="3" to-port="3"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string model_t_5D = R"V0G0N(
+<net name="Convolution_Only" version="3" precision="FP32" batch="1">
+    <layers>
+        <layer name="in1" type="Input" precision="FP32" id="0">
+            <output>
+                <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_ID_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="conv1" id="1" type="Convolution" precision="FP32">
+            <convolution strides="_KS_"
+                         pads_begin="_PB_"  pads_end="_PE_"
+                         kernel="_K_"
+                         dilations="_DL_"
+                         output="_OC_"  group="_GC_"/>
+
+            <weights offset="0" size="_S1_" />
+            <biases offset="_S1_" size="_S2_" />
+
+            <input>
+                <port id="1">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_ID_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="2">
+                    <dim>_IN_</dim>
+                    <dim>_OC_</dim>
+                    <dim>_OD_</dim>
+                    <dim>_OH_</dim>
+                    <dim>_OW_</dim>
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string model_t_5D_blobs_as_inputs = R"V0G0N(
+<net name="Convolution_Only" version="3" precision="FP32" batch="1">
+    <layers>
+        <layer name="in1" type="Input" precision="FP32" id="0">
+            <output>
+                <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_ID_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="wei" type="Const" precision="FP32" id="1">
+            <output>
+                <port id="0">
+                    <dim>_OC_</dim>
+                    <dim>_ICG_</dim>
+                    <dim>_KD_</dim>
+                    <dim>_KH_</dim>
+                    <dim>_KW_</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="0" size="_S1_"/>
+            </blobs>
+        </layer>
+        <layer name="bias" type="Const" precision="FP32" id="2">
+            <output>
+                <port id="0">
+                    <dim>_OC_</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="_S1_" size="_S2_"/>
+            </blobs>
+        </layer>
+        <layer name="conv1" id="3" type="Convolution" precision="FP32">
+            <convolution strides="_KS_"
+                         pads_begin="_PB_" pads_end="_PE_"
+                         kernel="_K_"
+                         dilations="_DL_"
+                         output="_OC_" group="_GC_"/>
+
+            <input>
+                <port id="1">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_ID_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+                <port id="2">
+                    <dim>_OC_</dim>
+                    <dim>_ICG_</dim>
+                    <dim>_KD_</dim>
+                    <dim>_KH_</dim>
+                    <dim>_KW_</dim>
+                </port>
+                <port id="3">
+                    <dim>_OC_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="4">
+                    <dim>_IN_</dim>
+                    <dim>_OC_</dim>
+                    <dim>_OD_</dim>
+                    <dim>_OH_</dim>
+                    <dim>_OW_</dim>
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="3" to-port="1"/>
+        <edge from-layer="1" from-port="0" to-layer="3" to-port="2"/>
+        <edge from-layer="2" from-port="0" to-layer="3" to-port="3"/>
+    </edges>
+</net>
+)V0G0N";
+
+protected:
+
+    virtual bool blobsAsInputs() { return false; }
+
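+    // Standard convolution output-size formula, assuming symmetric padding
+    // (pads_end == pads_begin). Hand-checked example: in_dim = 16, kernel = 7,
+    // stride = 2, pad_begin = 3 -> (16 + 2*3 - 7) / 2 + 1 = 8.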
+    size_t calculateOutDim(size_t in_dim, size_t kernel, size_t stride, size_t pad_begin) {
+        return (in_dim + 2lu * pad_begin - kernel) / stride + 1lu;
+    }
+
+    void createBlobs(const conv_test_params &p, TBlob<float>::Ptr &src, TBlob<float>::Ptr &dst, TBlob<float>::Ptr &dst_ref) {
+        auto in_size = p.in_dims.size();
+        auto out_size = p.out_dims.size();
+        SizeVector dims_dst = {
+                p.out_dims[out_size - 1] == 0 ?
+                calculateOutDim(p.in_dims[in_size - 1], p.kernel[X_AXIS], p.strides[X_AXIS], p.pads_begin[X_AXIS]) : p.out_dims[out_size - 1],
+                p.out_dims[out_size - 2] == 0 ?
+                calculateOutDim(p.in_dims[in_size - 2], p.kernel[Y_AXIS], p.strides[Y_AXIS], p.pads_begin[Y_AXIS]) : p.out_dims[out_size - 2],
+                p.out_c,
+                1lu};
+        SizeVector dims_src;
+        for (int i = in_size; i > 0; i--) {
+            dims_src.push_back(p.in_dims[i - 1]);
+        }
+
+        Layout layout = NCHW;
+        if (in_size == 5) {
+            layout = NCDHW;
+            dims_dst.insert(dims_dst.begin() + 2, p.out_dims.size() > 2 ?
+                                                  (p.out_dims[out_size - 3] == 0 ?
+                                                   calculateOutDim(p.in_dims[in_size - 3], p.kernel[Z_AXIS], p.strides[Z_AXIS], p.pads_begin[Z_AXIS]) : p.out_dims[out_size - 3]) : 1lu);
+        }
+
+        src = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_src.rbegin(), dims_src.rend()), layout));
+        src->allocate();
+
+        dst = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), layout));
+        dst->allocate();
+
+        dst_ref = make_shared_blob<float>(TensorDesc(Precision::FP32, SizeVector(dims_dst.rbegin(), dims_dst.rend()), layout));
+        dst_ref->allocate();
+    }
+
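+    // The returned U8 blob packs the convolution weights first and the out_c bias
+    // values right after them (calculateRef reads them back in that order).
+    // Hand-checked size for case_1 below (9 input channels, 1x1 kernel, 17 output
+    // channels, group 1): (1*1*1*17*9/1 + 17) * sizeof(float) = 170 * 4 = 680 bytes.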
+    TBlob<uint8_t>::Ptr fillWeights(const conv_test_params &p) {
+        auto KZ = p.kernel.size() > Z_AXIS ? p.kernel[Z_AXIS] : 1lu;
+        TBlob<uint8_t> *weights_ptr = new TBlob<uint8_t>(TensorDesc(Precision::U8,
+                                                                    {(p.kernel[X_AXIS] * p.kernel[Y_AXIS] * KZ * p.out_c * p.in_dims[1] / p.grp_c + p.out_c)
+                                                                     * sizeof(float)}, C));
+        weights_ptr->allocate();
+        fill_data((float *) weights_ptr->buffer(), weights_ptr->size() / sizeof(float));
+        return TBlob<uint8_t>::Ptr(weights_ptr);
+    }
+
+    void calculateRef(const TBlob<uint8_t>::Ptr &weights, const conv_test_params &p, const TBlob<float>::Ptr &src,
+                      TBlob<float>::Ptr &dst_ref) {
+        const float *weights_data = (const float *) weights->buffer();
+        size_t bias_size = p.out_c;
+        size_t weights_size = weights->size() / sizeof(float) - bias_size;
+        const float *bias_data = weights_data + weights_size;
+        CommonTestUtils::conv_common_params params;
+        for (int i = 0; i < p.kernel.size(); i++)
+            params.kernel.insert(i, p.kernel[i]);
+        for (int i = 0; i < p.strides.size(); i++)
+            params.stride.insert(i, p.strides[i]);
+        for (int i = 0; i < p.pads_begin.size(); i++)
+            params.pads_begin.insert(i, p.pads_begin[i]);
+        for (int i = 0; i < p.dilations.size(); i++)
+            params.dilation.insert(i, p.dilations[i]);
+        params.group = p.grp_c;
+        params.out_c = p.out_c;
+        ref_conv_common<float>({ src }, *dst_ref.get(), weights_data, weights_size, bias_data, bias_size, params);
+    }
+
+    CNNNetwork getNetwork(const TBlob<uint8_t>::Ptr &weights, const conv_test_params &p) {
+        Core ie;
+        return ie.ReadNetwork(getModel(p), weights);
+    }
+
+    virtual void infer(CNNNetwork &network, const conv_test_params &p, TBlob<float>::Ptr &src, TBlob<float>::Ptr &dst) {
+        Core ie;
+        ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.getDeviceName());
+        InferRequest inferRequest = exeNetwork.CreateInferRequest();
+        OutputsDataMap outInfo;
+        outInfo = network.getOutputsInfo();
+        inferRequest.SetBlob(network.getInputsInfo().begin()->first, src);
+        inferRequest.SetBlob(outInfo.begin()->first, dst);
+        inferRequest.Infer();
+    }
+
+    void SetUp() override {
+        try {
+            conv_test_params p = ::testing::WithParamInterface<conv_test_params>::GetParam();
+            TBlob<float>::Ptr src, dst, dst_ref;
+            createBlobs(p, src, dst, dst_ref);
+            fill_data(src->data(), src->size());
+            auto weights = fillWeights(p);
+            calculateRef(weights, p, src, dst_ref);
+            CNNNetwork network = getNetwork(weights, p);
+            infer(network, p, src, dst);
+            compare(*dst, *dst_ref);
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+
+    virtual std::string getModel(conv_test_params p) {
+        std::string model;
+        auto in_dims_size = p.in_dims.size();
+        if (in_dims_size == 4) {
+            if (blobsAsInputs())
+                model = model_t_4D_blobs_as_inputs;
+            else
+                model = model_t_4D;
+        } else if (in_dims_size == 5) {
+            if (blobsAsInputs())
+                model = model_t_5D_blobs_as_inputs;
+            else
+                model = model_t_5D;
+        }
+
+        auto out_dims_size = p.out_dims.size();
+
+        size_t KD = p.kernel.size() > Z_AXIS ? p.kernel[Z_AXIS] : 1lu;
+        size_t KH = p.kernel[Y_AXIS];
+        size_t KW = p.kernel[X_AXIS];
+
+        size_t SD = p.strides.size() > Z_AXIS ? p.strides[Z_AXIS] : 1lu;
+        size_t SH = p.strides[Y_AXIS];
+        size_t SW = p.strides[X_AXIS];
+
+        size_t ID = p.in_dims.size() > 4 ? p.in_dims[in_dims_size - 3] : 1lu;
+        size_t IH = p.in_dims[in_dims_size - 2];
+        size_t IW = p.in_dims[in_dims_size - 1];
+
+        size_t OD = p.out_dims.size() > 2 ? p.out_dims[out_dims_size - 3] : 1lu;
+        size_t OH = p.out_dims[out_dims_size - 2];
+        size_t OW = p.out_dims[out_dims_size - 1];
+
+        size_t PD = p.pads_begin.size() > Z_AXIS ? p.pads_begin[Z_AXIS] : 0lu;
+        size_t PH = p.pads_begin[Y_AXIS];
+        size_t PW = p.pads_begin[X_AXIS];
+
+        REPLACE_WITH_NUM(model, "_IW_", IW);
+        REPLACE_WITH_NUM(model, "_IH_", IH);
+        REPLACE_WITH_NUM(model, "_ID_", ID);
+        REPLACE_WITH_NUM(model, "_IC_", p.in_dims[1]);
+        REPLACE_WITH_NUM(model, "_ICG_", p.in_dims[1] / p.grp_c);
+        REPLACE_WITH_NUM(model, "_IN_", p.in_dims[0]);
+
+        REPLACE_WITH_NUM(model, "_KD_", KD);
+        REPLACE_WITH_NUM(model, "_KH_", KH);
+        REPLACE_WITH_NUM(model, "_KW_", KW);
+
+        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_K_", p.kernel);
+        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_KS_", p.strides);
+        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PB_", p.pads_begin);
+        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PE_", p.pads_end);
+        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_DL_", p.dilations);
+
+        REPLACE_WITH_NUM(model, "_GC_", p.grp_c);
+        REPLACE_WITH_NUM(model, "_OC_", p.out_c);
+        REPLACE_WITH_NUM(model, "_OD_", out_dims_size > 2 ? (OD == 0 ? calculateOutDim(ID, KD, SD, PD) : OD) : 1lu);
+        REPLACE_WITH_NUM(model, "_OH_", OH == 0 ? calculateOutDim(IH, KH, SH, PH) : OH);
+        REPLACE_WITH_NUM(model, "_OW_", OW == 0 ? calculateOutDim(IW, KW, SW, PW) : OW);
+
+        size_t w_data_size = (KW * KH * KD * p.out_c * p.in_dims[1] / p.grp_c) * sizeof(float);
+        size_t b_data_size = p.out_c * sizeof(float);
+        REPLACE_WITH_NUM(model, "_S1_", w_data_size);
+        REPLACE_WITH_NUM(model, "_S2_", b_data_size);
+        return model;
+    }
+};
+
+class ConvolutionReshapeTest : public ConvolutionOnlyTest {
+protected:
+    void SetUp() override {
+        try {
+            conv_test_params p = ::testing::WithParamInterface<conv_test_params>::GetParam();
+            TBlob<float>::Ptr src, dst, dst_ref;
+            createBlobs(p, src, dst, dst_ref);
+            fill_data(src->data(), src->size());
+            auto weights = fillWeights(p);
+            calculateRef(weights, p, src, dst_ref);
+            CNNNetwork network = getNetwork(weights, p);
+            updatePaddings(network, p);
+            infer(network, p, src, dst);
+            compare(*dst, *dst_ref);
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+
+    void updatePaddings(const CNNNetwork &network, conv_test_params& p) {
+        auto found = std::find_if(network.begin(), network.end(), [](const CNNLayer::Ptr& layer) {
+            return layer->type == "Convolution";
+        });
+        ASSERT_NE(found, network.end());
+        auto convLayer = std::dynamic_pointer_cast<ConvolutionLayer>(*found);
+        auto allPad = getPaddings(*convLayer.get());
+        p.pads_begin[X_AXIS] = allPad.begin[X_AXIS];
+        p.pads_begin[Y_AXIS] = allPad.begin[Y_AXIS];
+        if (p.pads_begin.size() > Z_AXIS)
+            p.pads_begin[Z_AXIS] = allPad.begin[Z_AXIS];
+    }
+
+    void infer(CNNNetwork &network, const conv_test_params &p, TBlob<float>::Ptr &src, TBlob<float>::Ptr &dst) override {
+        Core ie;
+        ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.getDeviceName());
+        InferRequest inferRequest = exeNetwork.CreateInferRequest();
+        OutputsDataMap outInfo;
+        outInfo = network.getOutputsInfo();
+        inferRequest.SetBlob(network.getInputsInfo().begin()->first, src);
+        inferRequest.SetBlob(outInfo.begin()->first, dst);
+        inferRequest.Infer();
+    }
+
+    std::string getModel(conv_test_params p) override {
+        std::string model = ConvolutionOnlyTest::getModel(p);
+        REPLACE_WITH_STR(model, "convolution", "convolution auto_pad=\"same_upper\"");
+        std::string pads_pattern = "pads_begin=\"";
+        for (int i = p.pads_begin.size(); i > 0; i--) {
+            pads_pattern += std::to_string(p.pads_begin[i - 1]) + ",";
+        }
+        pads_pattern.back() = '\"';  // replace the trailing ',' with the closing quote
+        std::string pads = "pads_begin=\"0,0\"";
+        if (p.pads_begin.size() == 3) {
+            pads = "pads_begin=\"0,0,0\"";
+        }
+        REPLACE_WITH_STR(model, pads_pattern, pads);
+        return model;
+    }
+};
+
+
+class ConvolutionBlobsAsInputsTest : public ConvolutionOnlyTest {
+protected:
+    bool blobsAsInputs() override { return true; }
+};
+
+#define case_1  conv_base_params({{1lu, 9lu, 16lu, 32lu},  {1lu, 1lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}})
+#define case_2  conv_base_params({{1lu, 9lu, 32lu, 16lu},  {2lu, 4lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}})
+#define case_3  conv_base_params({{1lu, 9lu, 32lu, 16lu},  {2lu, 4lu}, {2lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}})
+#define case_4  conv_base_params({{1lu, 3lu, 40lu, 40lu},  {3lu, 3lu}, {1lu, 2lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 20lu, 1lu, {0lu, 0lu}})
+#define case_5  conv_base_params({{1lu, 9lu, 16lu, 32lu},  {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {1lu, 1lu}, 17lu, 1lu, {0lu, 0lu}})
+#define case_6  conv_base_params({{1lu, 3lu, 224lu, 224lu}, {7lu, 7lu}, {2lu, 2lu}, {2lu, 2lu}, {0lu, 0lu}, {1lu, 1lu}, 64lu, 1lu, {112lu, 112lu}})
+#define case_7  conv_base_params({{1lu, 16lu, 40lu, 40lu}, {3lu, 3lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {1lu, 1lu}, 16lu, 16lu, {0lu, 0lu}})
+#define case_8  conv_base_params({{1lu, 32lu, 16lu, 32lu}, {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {1lu, 1lu}, 32lu, 32lu, {0lu, 0lu}})
+#define case_9  conv_base_params({{1lu, 16lu, 40lu, 40lu}, {3lu, 3lu}, {1lu, 1lu}, {0lu, 0lu}, {0lu, 0lu}, {9lu, 9lu}, 16lu, 16lu, {0lu, 0lu}})
+#define case_10 conv_base_params({{1lu, 32lu, 16lu, 32lu}, {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {9lu, 9lu}, 32lu, 32lu, {0lu, 0lu}})
+#define case_11 conv_base_params({{1lu, 4lu, 16lu, 32lu},  {7lu, 7lu}, {2lu, 2lu}, {3lu, 3lu}, {0lu, 0lu}, {9lu, 9lu}, 4lu, 4lu, {0lu, 0lu}})
+#define case_12 conv_base_params({{1lu, 3lu, 224lu, 224lu}, {10lu, 10lu}, {1lu, 1lu}, {4lu, 4lu}, {0lu, 0lu}, {1lu, 1lu}, 4lu, 1lu, {224lu, 224lu}})
+
+#define case_13  conv_base_params({{1lu, 3lu, 16lu, 32lu, 32lu},  {1lu, 1lu, 1lu}, {1lu, 1lu, 1lu}, {0lu, 0lu, 0lu}, {0lu, 0lu, 0lu}, {1lu, 1lu, 1lu}, 17lu, 1lu, {0lu, 0lu, 0lu}})
+#define case_14  conv_base_params({{1lu, 3lu, 16lu, 32lu, 32lu},  {3lu, 3lu, 3lu}, {2lu, 2lu, 1lu}, {0lu, 0lu, 0lu}, {0lu, 0lu, 0lu}, {1lu, 1lu, 1lu}, 64lu, 1lu, {0lu, 0lu, 0lu}})
+
+// NOTE: auto_pad is always same_upper: the IR is generated with zero pads, and the pads taken from the test params are applied to ref_conv after the reshape
+#define case_si_1 conv_base_params({{1lu, 144lu, 75lu, 75lu}, {3lu, 3lu}, {2lu, 2lu}, {1lu, 1lu}, {0lu, 0lu}, {1lu, 1lu}, 144lu, 144lu, {1lu, 1lu}})
+
+TEST_P(ConvolutionOnlyTest, TestsConvolution) {
+}
+
+TEST_P(ConvolutionReshapeTest, TestsReshapeConvolution) {
+}
+
+TEST_P(ConvolutionBlobsAsInputsTest, TestsConvolutionBlobsAsInputs) {
+}
+
+std::string getTestCaseName(testing::TestParamInfo<conv_test_params> obj) {
+    auto in_dims_size = obj.param.in_dims.size();
+    return obj.param.device_name +
+           "_w" + std::to_string(obj.param.in_dims[in_dims_size - 1]) +
+           "_h" + std::to_string(obj.param.in_dims[in_dims_size - 2]) +
+           (obj.param.in_dims.size() > 4 ? "_d" + std::to_string(obj.param.in_dims[in_dims_size - 3]) : "") +
+           "_c" + std::to_string(obj.param.in_dims[1]) +
+           "_kw" + std::to_string(obj.param.kernel[X_AXIS]) +
+           "_kh" + std::to_string(obj.param.kernel[Y_AXIS]) +
+           (obj.param.kernel.size() > Z_AXIS ? "_kd" + std::to_string(obj.param.kernel[Z_AXIS]) : "") +
+           "_sw" + std::to_string(obj.param.strides[X_AXIS]) +
+           "_sh" + std::to_string(obj.param.strides[Y_AXIS]) +
+           (obj.param.strides.size() > Z_AXIS ? "_sd" + std::to_string(obj.param.strides[Z_AXIS]) : "") +
+           "_dilw" + std::to_string(obj.param.dilations[X_AXIS]) +
+           "_dilh" + std::to_string(obj.param.dilations[Y_AXIS]) +
+           (obj.param.dilations.size() > Z_AXIS ? "_dild" + std::to_string(obj.param.dilations[Z_AXIS]) : "") +
+           "_grpc" + std::to_string(obj.param.grp_c);
+}
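+
+// A minimal instantiation sketch (hypothetical: the actual INSTANTIATE_TEST_CASE_P calls
+// live in the per-plugin test files that include this header, and conv_test_params is
+// assumed to be constructible from a device name plus a conv_base_params):
+//
+//     INSTANTIATE_TEST_CASE_P(
+//             TestConvolution, ConvolutionOnlyTest,
+//             ::testing::Values(conv_test_params("CPU", case_1),
+//                               conv_test_params("CPU", case_2)),
+//             getTestCaseName);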
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/deformable_psroi_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/deformable_psroi_tests.hpp
new file mode 100644 (file)
index 0000000..baa2da5
--- /dev/null
@@ -0,0 +1,330 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include "common_test_utils/data_utils.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+
+struct deformable_psroi_test_params {
+    std::string device_name;
+
+    std::vector<size_t> src_dims;
+    std::vector<size_t> bbox_dims;
+    std::vector<size_t> out_dims;
+    float spatial_scale;
+    size_t output_dim;
+    size_t group_size;
+    size_t pooled_height;
+    size_t pooled_width;
+    int part_size;
+    int sample_per_part;
+    bool no_trans;
+    float trans_std;
+    std::vector<size_t> trans_dims;
+};
+
+inline float bilinear_interp(const float* data, const float x, const float y, const int width, const int height) {
+    int x1 = static_cast<int>(std::floor(x));
+    int x2 = static_cast<int>(std::ceil(x));
+    int y1 = static_cast<int>(std::floor(y));
+    int y2 = static_cast<int>(std::ceil(y));
+    float dist_x = x - x1;
+    float dist_y = y - y1;
+    float value11 = data[y1 * width + x1];
+    float value12 = data[y2 * width + x1];
+    float value21 = data[y1 * width + x2];
+    float value22 = data[y2 * width + x2];
+    float value = (1 - dist_x) * (1 - dist_y) * value11 + (1 - dist_x) * dist_y * value12
+                  + dist_x * (1 - dist_y) * value21 + dist_x * dist_y * value22;
+    return value;
+}
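+
+// Hand-worked example for bilinear_interp above: with width = height = 2 and
+// data = {10, 20, 30, 40} (row-major, so the corners are (0,0)=10, (1,0)=20,
+// (0,1)=30, (1,1)=40), the point (x, y) = (0.5, 0.5) mixes all four corners
+// equally: 0.25*10 + 0.25*20 + 0.25*30 + 0.25*40 = 25.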
+
+static void ref_deformable_psroi(const std::vector<Blob::Ptr> &srcs, std::vector<Blob::Ptr> &dsts, deformable_psroi_test_params prm) {
+    float* dst_data = dsts[0]->buffer();
+    const float *bottom_data_beginning = srcs[1]->buffer();
+    const float *bottom_rois_beginning = srcs[0]->buffer();
+
+    SizeVector inDims = srcs[1]->getTensorDesc().getDims();
+    int channels = static_cast<int>(inDims[1]);
+    int height = static_cast<int>(inDims[2]);
+    int width = static_cast<int>(inDims[3]);
+
+    SizeVector outDims = dsts[0]->getTensorDesc().getDims();
+    int nn = static_cast<int>(outDims[0]);
+    int nc = static_cast<int>(outDims[1]);
+    int nh = static_cast<int>(outDims[2]);
+    int nw = static_cast<int>(outDims[3]);
+
+    int real_rois = 0;
+    for (; real_rois < nn; real_rois++) {
+        const float *bottom_rois = bottom_rois_beginning + real_rois * 5;
+        int roi_batch_ind = static_cast<int>(bottom_rois[0]);
+        if (roi_batch_ind == -1) {
+            break;
+        }
+    }
+
+    float *bottom_trans = nullptr;
+    int num_classes = 1;
+    int channels_each_class = prm.output_dim;
+    if (srcs.size() == 3) {
+        bottom_trans = srcs[2]->buffer();
+        num_classes = static_cast<int>(srcs[2]->getTensorDesc().getDims()[1]) / 2;
+        channels_each_class /= num_classes;
+    }
+
+    for (int n = 0; n < real_rois; n++) {
+        const float *bottom_rois = bottom_rois_beginning + n * 5;
+        int roi_batch_ind = static_cast<int>(bottom_rois[0]);
+        float roi_start_w = static_cast<float>(round(bottom_rois[1])) * prm.spatial_scale - 0.5;
+        float roi_start_h = static_cast<float>(round(bottom_rois[2])) * prm.spatial_scale - 0.5;
+        float roi_end_w = static_cast<float>(round(bottom_rois[3]) + 1.0) * prm.spatial_scale - 0.5;
+        float roi_end_h = static_cast<float>(round(bottom_rois[4]) + 1.0) * prm.spatial_scale - 0.5;
+        float roi_width = std::max(static_cast<double>(roi_end_w - roi_start_w), 0.1);
+        float roi_height = std::max(static_cast<double>(roi_end_h - roi_start_h), 0.1);
+
+        for (int c = 0; c < nc; c++) {
+            for (int h = 0; h < nh; h++) {
+                for (int w = 0; w < nw; w++) {
+                    size_t index = n*nc*nh*nw + c*nh*nw + h*nw + w;
+                    dst_data[index] = 0.0f;
+
+                    float bin_size_h = roi_height / static_cast<float>(prm.pooled_height);
+                    float bin_size_w = roi_width  / static_cast<float>(prm.pooled_width);
+
+                    float sub_bin_size_h = bin_size_h / static_cast<float>(prm.sample_per_part);
+                    float sub_bin_size_w = bin_size_w / static_cast<float>(prm.sample_per_part);
+
+                    int part_h = static_cast<int>(std::floor(static_cast<float>(h) / prm.pooled_height * prm.part_size));
+                    int part_w = static_cast<int>(std::floor(static_cast<float>(w) / prm.pooled_width * prm.part_size));
+
+                    int class_id = c / channels_each_class;
+                    float trans_x = prm.no_trans ? 0 :
+                                    bottom_trans[(((n * num_classes + class_id) * 2) * prm.part_size + part_h)
+                                                 * prm.part_size + part_w] * prm.trans_std;
+                    float trans_y = prm.no_trans ? 0 :
+                                    bottom_trans[(((n * num_classes + class_id) * 2 + 1) * prm.part_size + part_h)
+                                                 * prm.part_size + part_w] * prm.trans_std;
+
+                    float wstart = w * bin_size_w + roi_start_w + trans_x * roi_width;
+                    float hstart = h * bin_size_h + roi_start_h + trans_y * roi_height;
+
+                    float sum = 0;
+                    int count = 0;
+                    int gw = (static_cast<float>(w) * prm.group_size / prm.pooled_width );
+                    int gh = (static_cast<float>(h) * prm.group_size / prm.pooled_height );
+                    gw = std::min(std::max(gw, 0), static_cast<int>(prm.group_size - 1));
+                    gh = std::min(std::max(gh, 0), static_cast<int>(prm.group_size - 1));
+
+                    const float* offset_bottom_data = bottom_data_beginning + (roi_batch_ind * channels) * height * width;
+                    for (size_t ih = 0; ih < prm.sample_per_part; ih++) {
+                        for (size_t iw = 0; iw < prm.sample_per_part; iw++) {
+                            float w1 = wstart + iw * sub_bin_size_w;
+                            float h1 = hstart + ih * sub_bin_size_h;
+                            // bilinear interpolation
+                            if (w1 < -0.5 || w1 > width - 0.5 || h1 < -0.5 || h1 > height - 0.5)
+                                continue;
+                            w1 = std::min(std::max(w1, 0.0f), width - 1.0f);
+                            h1 = std::min(std::max(h1, 0.0f), height - 1.0f);
+                            int c1 = static_cast<int>((c * prm.group_size + gh) * prm.group_size + gw);
+                            float val = bilinear_interp(offset_bottom_data + c1 * height * width, w1, h1, width, height);
+                            sum += val;
+                            count++;
+                        }
+                    }
+                    dst_data[index] = count == 0 ? 0 : sum / count;
+                }
+            }
+        }
+    }
+    for (int n = real_rois; n < nn; n++) {
+        for (int c = 0; c < nc; c++) {
+            for (int h = 0; h < nh; h++) {
+                for (int w = 0; w < nw; w++) {
+                    int index = n * nc * nh * nw + c * nh * nw + h * nw + w;
+                    dst_data[index] = 0.0f;
+                }
+            }
+        }
+    }
+}
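+
+// The reference above follows the deformable PSROI pooling recipe: each output bin is
+// shifted by the (optional) transformation offsets scaled by trans_std, sampled on a
+// sample_per_part x sample_per_part grid via bilinear interpolation, and averaged over
+// the in-bounds samples; an ROI row whose batch index is -1 marks the end of the valid
+// ROIs, and the remaining output rows are zero-filled.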
+
+class DeformablePSROIOnlyTest : public TestsCommon,
+                        public WithParamInterface<deformable_psroi_test_params> {
+
+    std::string model_t = R"V0G0N(
+<net name="DeformablePSROIOnly" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="data" id="0" type="Input" precision="FP32">
+            <output>
+                <port id="0">__SRC_DIMS__
+                </port>
+            </output>
+        </layer>
+        <layer name="bbox" id="1" type="Input" precision="FP32">
+            <output>
+                <port id="0">__BBOX_DIMS__
+                </port>
+            </output>
+        </layer>__TRANS__
+        <layer name="psroi" id="3" type="PSROIPooling" precision="FP32">
+            <data mode="bilinear_deformable" no_trans="__NO_TRANS__" spatial_scale="__SPATIAL_SCALE__" output_dim="__OUTPUT_DIM__" part_size="__PART_SIZE__" group_size="__GROUP_SIZE__"
+                pooled_height="__POOLED_HEIGHT__" pooled_width="__POOLED_WIDTH__" spatial_bins_x="__SAMPLE_PER_PART__" spatial_bins_y="__SAMPLE_PER_PART__"__TRANS_PARAMS__/>
+            <input>
+                <port id="0">__SRC_DIMS__
+                </port>
+                <port id="1">__BBOX_DIMS__
+                </port>__TRANS_DIMS__
+            </input>
+            <output>
+                <port id="0">__OUT_DIMS__
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="3" to-port="0"/>
+        <edge from-layer="1" from-port="0" to-layer="3" to-port="1"/>__EDGE_TRANS__
+    </edges>
+</net>
+)V0G0N";
+
+    std::string getModel(deformable_psroi_test_params p) {
+        std::string model = model_t;
+
+        std::string no_trans = "True";
+        std::string trans = "";
+        std::string trans_params = "";
+        std::string trans_dims = "";
+        std::string edge_trans = "";
+        if (!p.no_trans) {
+            no_trans = "False";
+
+            trans = R"VOGON(
+        <layer name="trans" id="2" type="Input" precision="FP32">
+            <output>__TRANS_DIMS__
+            </output>
+        </layer>)VOGON";
+
+            trans_params += " trans_std=\"" + std::to_string(p.trans_std) + "\"";
+
+            trans_dims += "\n                <port id=\"2\">";
+            for (auto &dim : p.trans_dims) {
+                trans_dims += "\n                    <dim>";
+                trans_dims += std::to_string(dim) + "</dim>";
+            }
+            trans_dims += "\n                </port>";
+
+            edge_trans = "\n        <edge from-layer=\"2\" from-port=\"2\" to-layer=\"3\" to-port=\"2\"/>";
+        }
+        REPLACE_WITH_STR(model, "__TRANS__", trans);
+        REPLACE_WITH_STR(model, "__TRANS_PARAMS__", trans_params);
+        REPLACE_WITH_STR(model, "__TRANS_DIMS__", trans_dims);
+        REPLACE_WITH_STR(model, "__EDGE_TRANS__", edge_trans);
+
+        std::string src_dims = "";
+        for (auto &dim : p.src_dims) {
+            src_dims += "\n                    <dim>";
+            src_dims += std::to_string(dim) + "</dim>";
+        }
+        REPLACE_WITH_STR(model, "__SRC_DIMS__", src_dims);
+        std::string bbox_dims = "";
+        for (auto &dim : p.bbox_dims) {
+            bbox_dims += "\n                    <dim>";
+            bbox_dims += std::to_string(dim) + "</dim>";
+        }
+        REPLACE_WITH_STR(model, "__BBOX_DIMS__", bbox_dims);
+        std::string out_dims = "";
+        for (auto &dim : p.out_dims) {
+            out_dims += "\n                    <dim>";
+            out_dims += std::to_string(dim) + "</dim>";
+        }
+        REPLACE_WITH_STR(model, "__OUT_DIMS__", out_dims);
+
+        REPLACE_WITH_STR(model, "__NO_TRANS__", no_trans);
+        REPLACE_WITH_NUM(model, "__SPATIAL_SCALE__", p.spatial_scale);
+        REPLACE_WITH_NUM(model, "__OUTPUT_DIM__", p.output_dim);
+        REPLACE_WITH_NUM(model, "__PART_SIZE__", p.part_size);
+        REPLACE_WITH_NUM(model, "__GROUP_SIZE__", p.group_size);
+        REPLACE_WITH_NUM(model, "__POOLED_HEIGHT__", p.pooled_height);
+        REPLACE_WITH_NUM(model, "__POOLED_WIDTH__", p.pooled_width);
+        REPLACE_WITH_NUM(model, "__SAMPLE_PER_PART__", p.sample_per_part);
+
+        return model;
+    }
+
+protected:
+    virtual void SetUp() {
+        try {
+            deformable_psroi_test_params p = ::testing::WithParamInterface<deformable_psroi_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+
+            std::vector<Blob::Ptr> srcs_vec;
+
+            InputsDataMap in_info_map = net.getInputsInfo();
+            for (auto info : in_info_map) {
+                Blob::Ptr blob = make_shared_blob<float>(
+                        {Precision::FP32, info.second->getTensorDesc().getDims(), Layout::ANY});
+                blob->allocate();
+                if (info.second->name() == "data") {
+                    CommonTestUtils::fill_data_sine(blob->buffer(), blob->size(), 1.0f, 5.0f, 0.1f);
+                } else if (info.second->name() == "bbox") {
+                    CommonTestUtils::fill_data_bbox(blob->buffer(), blob->size(), p.src_dims[2], p.src_dims[3], 1.0f);
+                } else if (info.second->name() == "trans") {
+                    CommonTestUtils::fill_data_sine(blob->buffer(), blob->size(), 0.0f, 10.0f, 1.0f);
+                }
+
+                inferRequest.SetBlob(info.first, blob);
+                srcs_vec.push_back(blob);
+            }
+
+            BlobMap dsts_map;
+            std::vector<Blob::Ptr> dsts_vec;
+
+            OutputsDataMap out_info_map = net.getOutputsInfo();
+            for (auto info : out_info_map) {
+                Blob::Ptr blob = make_shared_blob<float>(
+                        {Precision::FP32, info.second->getTensorDesc().getDims(), Layout::ANY});
+                blob->allocate();
+                inferRequest.SetBlob(info.first, blob);
+                dsts_map[info.first] = blob;
+
+                Blob::Ptr blob_ref = make_shared_blob<float>(
+                        {Precision::FP32, info.second->getTensorDesc().getDims(), Layout::ANY});
+                blob_ref->allocate();
+                dsts_vec.push_back(blob_ref);
+            }
+
+            ref_deformable_psroi(srcs_vec, dsts_vec, p);
+
+            inferRequest.Infer();
+
+            compare(*dsts_map.begin()->second, *dsts_vec[0]);
+
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(DeformablePSROIOnlyTest, TestsDeformable) {}
+
+/*** TBD ***/
+
+
+
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/depth_to_space_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/depth_to_space_tests.hpp
new file mode 100644 (file)
index 0000000..58a4f23
--- /dev/null
@@ -0,0 +1,165 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <cmath>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include <ie_core.hpp>
+
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace std;
+
+
+struct depth_to_space_test_params {
+    std::string device_name;
+    std::string inPrecision;
+    InferenceEngine::SizeVector in_dim;
+    size_t block_size;
+    InferenceEngine::SizeVector ref_dim;
+};
+
+template<typename data_t>
+void ref_depthToSpace(const std::vector<Blob::Ptr> &srcs, std::vector<Blob::Ptr> &dsts, depth_to_space_test_params& prm) {
+    assert(dsts.size() == 1);
+
+    data_t *dst_data = dsts[0]->buffer().as<data_t*>();
+    const data_t *src_data = srcs[0]->buffer().as<data_t*>();
+
+    size_t feature_in = prm.in_dim[1];
+    size_t y_in = prm.in_dim[2];
+    size_t x_in = prm.in_dim[3];
+
+    size_t batch_out = prm.ref_dim[0];
+    size_t feature_out = prm.ref_dim[1];
+    size_t y_out = prm.ref_dim[2];
+    size_t x_out = prm.ref_dim[3];
+    for (size_t batch = 0; batch < batch_out; ++batch) {
+        for (size_t y = 0; y < y_out; ++y) {
+            size_t input_y = y / prm.block_size;
+            size_t offset_y = y % prm.block_size;
+            for (size_t x = 0; x < x_out; ++x) {
+                size_t input_x = x / prm.block_size;
+                size_t offset_x = (x % prm.block_size);
+                size_t offset_feature = (offset_y * prm.block_size + offset_x) * feature_out;
+                for (size_t feature = 0; feature < feature_out; ++feature) {
+                    size_t input_feature = feature + offset_feature;
+                    size_t input_index = (batch * feature_in * y_in * x_in) + (input_feature * y_in * x_in) + (input_y * x_in) + input_x;
+                    size_t output_index = (batch * feature_out * y_out * x_out) + (feature * y_out * x_out) + (y * x_out) + x;
+                    dst_data[output_index] = src_data[input_index];
+                }
+            }
+        }
+    }
+}
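+
+// Hand-worked example for the reference above: with block_size = 2, a 1x4x1x1 input
+// with channels {c0, c1, c2, c3} becomes a 1x1x2x2 output laid out as
+//     c0 c1
+//     c2 c3
+// i.e. output pixel (y, x) takes input channel (y % 2) * block_size + (x % 2).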
+
+class DepthToSpaceTests : public TestsCommon, public WithParamInterface<depth_to_space_test_params> {
+    std::string model_t = R"V0G0N(
+<net Name="Depth2space_net" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="Input0" type="Input" precision="_IPRS_" id="1">
+            <output>
+                <port id="1">
+                    _IDIM_
+                </port>
+            </output>
+        </layer>
+        <layer name="DepthToSpace" id="3" type="DepthToSpace" precision="FP32">
+            <data block_size="_BS_"/>
+            <input>
+                <port id="1">
+                    _IDIM_
+                </port>
+            </input>
+            <output>
+                <port id="3">
+                    _OUT_
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="1" from-port="1" to-layer="3" to-port="1"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string getModel(depth_to_space_test_params p) {
+        std::string model = model_t;
+        std::string inDims;
+        std::string out;
+
+        for (auto& dct : p.in_dim) {
+            inDict += "<dim>";
+            inDict += std::to_string(dct) + "</dim>\n";
+        }
+
+        for (auto& dst : p.ref_dim) {
+            out += "<dim>";
+            out += std::to_string(dst) + "</dim>\n";
+        }
+
+        REPLACE_WITH_STR(model, "_IPRS_", p.inPrecision);
+        REPLACE_WITH_STR(model, "_IDIM_", inDict);
+        REPLACE_WITH_NUM(model, "_BS_", p.block_size);
+        REPLACE_WITH_STR(model, "_OUT_", out);
+
+        return model;
+    }
+
+protected:
+    virtual void TearDown() {
+    }
+
+    virtual void SetUp() {
+        try {
+            depth_to_space_test_params p = ::testing::WithParamInterface<depth_to_space_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+
+            std::vector<Blob::Ptr> srcs_vec;
+            std::vector<Blob::Ptr> dsts_vec;
+            std::vector<Blob::Ptr> out_vec;
+
+            InputsDataMap in_info_map = net.getInputsInfo();
+            for (auto info : in_info_map) {
+                Blob::Ptr blob = make_shared_blob<float>({Precision::FP32, info.second->getTensorDesc().getDims(), NCHW});
+                blob->allocate();
+                fill_data_dbgval(blob->buffer().as<float*>(), blob->size());
+                inferRequest.SetBlob(info.first, blob);
+                srcs_vec.push_back(blob);
+            }
+
+            OutputsDataMap out_info_map = net.getOutputsInfo();
+            for (auto info : out_info_map) {
+                Blob::Ptr blob = make_shared_blob<float>({Precision::FP32, info.second->getTensorDesc().getDims(), NCHW});
+                blob->allocate();
+                inferRequest.SetBlob(info.first, blob);
+                out_vec.push_back(blob);
+
+                Blob::Ptr blob_ref = make_shared_blob<float>({Precision::FP32, info.second->getTensorDesc().getDims(), NCHW});
+                blob_ref->allocate();
+                dsts_vec.push_back(blob_ref);
+            }
+
+            ref_depthToSpace<float>(srcs_vec, dsts_vec, p);
+
+            inferRequest.Infer();
+
+            compare(*out_vec[0], *dsts_vec[0]);
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(DepthToSpaceTests, TestsDepthToSpace) {}
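+
+// A minimal instantiation sketch (hypothetical device and shapes; real instantiations
+// live in the per-plugin test files that include this header). A 1x4x2x2 input with
+// block_size = 2 rearranges to 1x1x4x4:
+//
+//     INSTANTIATE_TEST_CASE_P(
+//             TestsDepthToSpace, DepthToSpaceTests,
+//             ::testing::Values(
+//                     depth_to_space_test_params{"CPU", "FP32", {1, 4, 2, 2}, 2, {1, 1, 4, 4}}));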
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/eltwise_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/eltwise_tests.hpp
new file mode 100644 (file)
index 0000000..a8235b3
--- /dev/null
@@ -0,0 +1,308 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <cmath>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include "common_test_utils/common_layers_params.hpp"
+#include "common_test_utils/data_utils.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+
+struct eltwise_test_params {
+    std::string device_name;
+
+    struct {
+        size_t w;
+        size_t h;
+        size_t c;
+    } in;
+
+    enum opType {
+        Sum = 0, Prod = 1, Max = 2, Sub = 3, Min = 4, Div = 5, Squared_diff = 6, Equal = 7, Not_equal = 8,
+        Less = 9, Less_equal = 10, Greater = 11, Greater_equal = 12, Logical_AND = 13, Logical_OR = 14, Logical_XOR = 15,
+        Floor_mod = 16, Pow = 17
+    };
+
+    opType op;
+
+    size_t inputsNum;
+};
+
+template<typename data_t>
+void ref_eltwise(const std::vector<Blob::Ptr> &srcs, std::vector<Blob::Ptr> &dsts, eltwise_test_params prm) {
+    assert(dsts.size() == 1);
+    data_t *dst_data = dsts[0]->buffer().as<data_t*>();
+
+    const data_t *src_data = srcs[0]->buffer().as<data_t*>();
+
+    for (int i = 0; i < srcs[0]->size(); i++) {
+        dst_data[i] = src_data[i];
+    }
+
+    for (int n = 1; n < srcs.size(); n++) {
+        src_data = srcs[n]->buffer().as<data_t*>();
+
+        for (int i = 0; i < srcs[n]->size(); i++) {
+            switch (prm.op) {
+                case eltwise_test_params::Sum:
+                    dst_data[i] += src_data[i];
+                    break;
+
+                case eltwise_test_params::Prod:
+                    dst_data[i] *= src_data[i];
+                    break;
+
+                case eltwise_test_params::Max:
+                    dst_data[i] = std::max<data_t>(dst_data[i], src_data[i]);
+                    break;
+
+                case eltwise_test_params::Sub:
+                    dst_data[i] -= src_data[i];
+                    break;
+
+                case eltwise_test_params::Min:
+                    dst_data[i] = std::min<data_t>(dst_data[i], src_data[i]);
+                    break;
+
+                case eltwise_test_params::Div:
+                    dst_data[i] /= src_data[i];
+                    break;
+
+                case eltwise_test_params::Squared_diff: {
+                    data_t tmp = (dst_data[i] - src_data[i]);
+                    dst_data[i] = tmp * tmp;
+                    break;
+                }
+
+                case eltwise_test_params::Equal:
+                    dst_data[i] = dst_data[i] == src_data[i];
+                    break;
+
+                case eltwise_test_params::Not_equal:
+                    dst_data[i] = dst_data[i] != src_data[i];
+                    break;
+
+                case eltwise_test_params::Less:
+                    dst_data[i] = dst_data[i] < src_data[i];
+                    break;
+
+                case eltwise_test_params::Less_equal:
+                    dst_data[i] = dst_data[i] <= src_data[i];
+                    break;
+
+                case eltwise_test_params::Greater:
+                    dst_data[i] = dst_data[i] > src_data[i];
+                    break;
+
+                case eltwise_test_params::Greater_equal:
+                    dst_data[i] = dst_data[i] >= src_data[i];
+                    break;
+
+                case eltwise_test_params::Logical_AND:
+                    dst_data[i] = dst_data[i] && src_data[i];
+                    break;
+
+                case eltwise_test_params::Logical_OR:
+                    dst_data[i] = dst_data[i] || src_data[i];
+                    break;
+
+                case eltwise_test_params::Logical_XOR:
+                    dst_data[i] = !dst_data[i] != !src_data[i];
+                    break;
+
+                case eltwise_test_params::Floor_mod: {
+                    // floor_mod(a, b) = a - floor(a / b) * b, where a is the value
+                    // accumulated so far and b is the current input
+                    data_t a = dst_data[i];
+                    data_t b = src_data[i];
+                    dst_data[i] = a - std::floor(a / b) * b;
+                    break;
+                }
+
+                case eltwise_test_params::Pow: {
+                    // the value accumulated so far is the base, the current input the exponent
+                    dst_data[i] = std::pow(dst_data[i], src_data[i]);
+                    break;
+                }
+            }
+        }
+    }
+}
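+
+// The reference above left-folds the operation over the inputs: the first blob seeds
+// dst, and each further blob is combined into it element-wise, so e.g. Sub computes
+// in0 - in1 - in2 for three inputs.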
+
+class EltwiseOnlyTest : public TestsCommon,
+                              public WithParamInterface<eltwise_test_params> {
+
+    std::string model_t = R"V0G0N(
+<Net Name="Eltwise_Only" version="2" precision="FP32" batch="1">
+    <layers>
+        _INPUT_LAYERS_
+        <layer name="eltwise" id="0" type="Eltwise" precision="FP32">
+            <elementwise_data operation="_OP_"/>
+            <input>
+                _ELTWISE_INPUT_PORTS_
+            </input>
+            <output>
+                <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        _EDGES_
+    </edges>
+</net>
+)V0G0N";
+
+    std::string getModel(eltwise_test_params p) {
+        std::string model = model_t;
+        std::string op = p.op == eltwise_test_params::Sum ? "sum" :
+                         p.op == eltwise_test_params::Prod ? "mul" :
+                         p.op == eltwise_test_params::Max ? "max" :
+                         p.op == eltwise_test_params::Sub ? "sub" :
+                         p.op == eltwise_test_params::Min ? "min" :
+                         p.op == eltwise_test_params::Div ? "div" :
+                         p.op == eltwise_test_params::Squared_diff ? "squared_diff" :
+                         p.op == eltwise_test_params::Equal ? "equal" :
+                         p.op == eltwise_test_params::Not_equal ? "not_equal" :
+                         p.op == eltwise_test_params::Less ? "less" :
+                         p.op == eltwise_test_params::Less_equal ? "less_equal" :
+                         p.op == eltwise_test_params::Greater ? "greater" :
+                         p.op == eltwise_test_params::Greater_equal ? "greater_equal" :
+                         p.op == eltwise_test_params::Logical_AND ? "logical_and" :
+                         p.op == eltwise_test_params::Logical_OR ? "logical_or" :
+                         p.op == eltwise_test_params::Logical_XOR ? "logical_xor" :
+                         p.op == eltwise_test_params::Floor_mod ? "floor_mod" :
+                         p.op == eltwise_test_params::Pow ? "pow" :
+                         "sum" /* default */;
+
+        // Generating inputs layers
+        auto generateInput = [](size_t inputId) -> std::string {
+            std::string inputLayer = R"V0G0N(
+        <layer name="data_ID_" type="Input" precision="FP32" id="_ID_">
+            <output>
+                <port id="_ID_">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>)V0G0N";
+            REPLACE_WITH_NUM(inputLayer, "_ID_", inputId);
+            return inputLayer;
+        };
+        std::string tmp;
+
+        for (size_t i = 1; i < p.inputsNum + 1; ++i) {
+            tmp += generateInput(i);
+        }
+
+        REPLACE_WITH_STR(model, "_INPUT_LAYERS_", tmp);
+
+        // Generating Eltwise inputs
+        tmp.clear();
+        auto generateEltwiseInputPort = [](size_t inputId) -> std::string {
+            std::string inputPort = R"V0G0N(
+                <port id="_ID_">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>)V0G0N";
+            REPLACE_WITH_NUM(inputPort, "_ID_", inputId);
+            return inputPort;
+        };
+
+        for (size_t i = p.inputsNum + 1; i < (2 * p.inputsNum) + 1; ++i) {
+            tmp += generateEltwiseInputPort(i);
+        }
+
+        REPLACE_WITH_STR(model, "_ELTWISE_INPUT_PORTS_", tmp);
+
+        // Generating Edges
+        tmp.clear();
+        auto generateEdge = [](size_t inputLayerId, size_t eltwiseInputPortId) -> std::string {
+            std::string edge = R"V0G0N(
+                    <edge from-layer="_INPUT_LAYER_ID_" from-port="_INPUT_LAYER_ID_" to-layer="0" to-port="_ELTWISE_INPUT_PORT_ID_"/>)V0G0N";
+            REPLACE_WITH_NUM(edge, "_INPUT_LAYER_ID_", inputLayerId);
+            REPLACE_WITH_NUM(edge, "_ELTWISE_INPUT_PORT_ID_", eltwiseInputPortId);
+            return edge;
+        };
+
+        for (size_t i = 1; i < p.inputsNum + 1; ++i) {
+            tmp += generateEdge(i, p.inputsNum + i);
+        }
+
+        REPLACE_WITH_STR(model, "_EDGES_", tmp);
+
+        REPLACE_WITH_NUM(model, "_IN_", 1);
+        REPLACE_WITH_NUM(model, "_IC_", p.in.c);
+        REPLACE_WITH_NUM(model, "_IH_", p.in.h);
+        REPLACE_WITH_NUM(model, "_IW_", p.in.w);
+        REPLACE_WITH_STR(model, "_OP_", op);
+        return model;
+    }
+
+ protected:
+    virtual void SetUp() {
+
+        try {
+            eltwise_test_params p = ::testing::WithParamInterface<eltwise_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+
+            std::vector<Blob::Ptr> srcs_vec;
+
+            InputsDataMap in_info_map = net.getInputsInfo();
+            for (auto info : in_info_map) {
+                Blob::Ptr inputBlob = inferRequest.GetBlob(info.first);
+
+                if (p.op != eltwise_test_params::Pow)
+                    CommonTestUtils::fill_data_sine(inputBlob->buffer().as<float*>(), inputBlob->size(), 100, 10, 10);
+                else
+                    CommonTestUtils::fill_data_const(inputBlob->buffer().as<float*>(), inputBlob->size(), 2);
+
+                srcs_vec.push_back(inputBlob);
+            }
+
+            BlobMap dsts_map;
+            std::vector<Blob::Ptr> dsts_vec;
+
+            OutputsDataMap out_info_map = net.getOutputsInfo();
+            for (auto info : out_info_map) {
+                Blob::Ptr outputBlob = inferRequest.GetBlob(info.first);
+                dsts_map[info.first] = outputBlob;
+
+                Blob::Ptr blob_ref = make_shared_blob<float>({Precision::FP32, info.second->getTensorDesc().getDims(), Layout::NCHW});
+                blob_ref->allocate();
+                dsts_vec.push_back(blob_ref);
+            }
+
+            ref_eltwise<float>(srcs_vec, dsts_vec, p);
+
+            inferRequest.Infer();
+
+            compare(*dsts_map.begin()->second, *dsts_vec[0]);
+
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(EltwiseOnlyTest, TestsEltwise) {}
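+
+// A minimal instantiation sketch (hypothetical values; real instantiations live in the
+// per-plugin test files that include this header):
+//
+//     INSTANTIATE_TEST_CASE_P(
+//             TestsEltwise, EltwiseOnlyTest,
+//             ::testing::Values(
+//                     eltwise_test_params{"CPU", {16, 16, 3}, eltwise_test_params::Sum, 2},
+//                     eltwise_test_params{"CPU", {16, 16, 3}, eltwise_test_params::Max, 3}));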
+
+/*** TBD ***/
+
+
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/gather_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/gather_tests.hpp
new file mode 100644 (file)
index 0000000..5ab3e66
--- /dev/null
@@ -0,0 +1,212 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+#include <cmath>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include "ie_memcpy.h"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace std;
+
+
+struct gatherTF_test_params {
+    std::string device_name;
+
+    std::string inIdxPrecision;
+
+    SizeVector in_dim;
+    std::vector<float> in;
+
+    SizeVector dct_dim;
+    std::vector<float> dct;
+
+    int axis;
+
+    SizeVector ref_dim;
+    std::vector<float> ref;
+};
+
+class GatherTFTests : public TestsCommon, public WithParamInterface<gatherTF_test_params> {
+    std::string model_t = R"V0G0N(
+<net Name="Gather_net" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="InputDictionary" type="Input" precision="FP32" id="1">
+            <output>
+                <port id="1">
+                    _IDICT_
+                </port>
+            </output>
+        </layer>
+        <layer name="InputText" type="Input" precision="_IIDXP_" id="2">
+            <output>
+                <port id="2">
+                    _IIDX_
+                </port>
+            </output>
+        </layer>
+        <layer name="gather" id="3" type="Gather" precision="FP32">
+            <data axis="_AX_"/>
+            <input>
+                <port id="1">
+                    _IDICT_
+                </port>
+                <port id="2">
+                    _IIDX_
+                </port>
+            </input>
+            <output>
+                <port id="3">
+                    _OUT_
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="1" from-port="1" to-layer="3" to-port="1"/>
+        <edge from-layer="2" from-port="2" to-layer="3" to-port="2"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string getModel(gatherTF_test_params p) {
+        std::string model = model_t;
+        std::string inIdx;
+        std::string inDict;
+        std::string out;
+
+        for (auto& idx : p.in_dim) {
+            inIdx += "<dim>";
+            inIdx += std::to_string(idx) + "</dim>\n";
+        }
+
+        for (auto& dct : p.dct_dim) {
+            inDict += "<dim>";
+            inDict += std::to_string(dct) + "</dim>\n";
+        }
+
+        for (auto& dst : p.ref_dim) {
+            out += "<dim>";
+            out += std::to_string(dst) + "</dim>\n";
+        }
+
+        REPLACE_WITH_STR(model, "_IIDXP_", p.inIdxPrecision);
+        REPLACE_WITH_STR(model, "_IIDX_", inIdx);
+        REPLACE_WITH_STR(model, "_IDICT_", inDict);
+        REPLACE_WITH_NUM(model, "_AX_", p.axis);
+        REPLACE_WITH_STR(model, "_OUT_", out);
+
+        return model;
+    }
+
+protected:
+    virtual void TearDown() {
+    }
+
+    virtual void SetUp() {
+        try {
+            TestsCommon::SetUp();
+            gatherTF_test_params p = ::testing::WithParamInterface<gatherTF_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+
+            // Input Indexes
+            Blob::Ptr srcIdx;
+            if (p.inIdxPrecision == "I32") {
+                srcIdx = make_shared_blob<int32_t>(
+                        {Precision::I32, p.in_dim, TensorDesc::getLayoutByDims(p.in_dim)});
+                srcIdx->allocate();
+                auto *srcIdxPtr = dynamic_cast<TBlob<int32_t> *>(srcIdx.get());
+                if (srcIdxPtr == nullptr)
+                    FAIL() << "Cannot cast blob to TBlob<int32_t>.";
+
+                int32_t *srcIdxP = (int32_t*)srcIdx->buffer();
+                for (size_t i = 0; i < p.in.size(); i++)
+                    srcIdxP[i] = static_cast<int32_t>(p.in[i]);
+            } else if (p.inIdxPrecision == "FP32") {
+                srcIdx = make_shared_blob<float>(
+                        {Precision::FP32, p.in_dim, TensorDesc::getLayoutByDims(p.in_dim)});
+                srcIdx->allocate();
+                auto *srcIdxPtr = dynamic_cast<TBlob<float> *>(srcIdx.get());
+                if (srcIdxPtr == nullptr)
+                    FAIL() << "Cannot cast blob to TBlob<float>.";
+                ie_memcpy(static_cast<float *>(srcIdx->buffer()), srcIdx->byteSize(), &p.in[0], sizeof(float) * p.in.size());
+            } else if (p.inIdxPrecision == "I8") {
+                srcIdx = make_shared_blob<int8_t>(
+                        {Precision::I8, p.in_dim, TensorDesc::getLayoutByDims(p.in_dim)});
+                srcIdx->allocate();
+                auto *srcIdxPtr = dynamic_cast<TBlob<int8_t> *>(srcIdx.get());
+                if (srcIdxPtr == nullptr)
+                    FAIL() << "Cannot cast blob to TBlob<float>.";
+                int8_t *srcIdxP = (int8_t*)srcIdx->buffer();
+                for (size_t i = 0; i < p.in.size(); i++)
+                    srcIdxP[i] = static_cast<int8_t>(p.in[i]);
+            } else if (p.inIdxPrecision == "I16") {
+                srcIdx = make_shared_blob<int16_t>(
+                        {Precision::I16, p.in_dim, TensorDesc::getLayoutByDims(p.in_dim)});
+                srcIdx->allocate();
+                auto *srcIdxPtr = dynamic_cast<TBlob<int16_t> *>(srcIdx.get());
+                if (srcIdxPtr == nullptr)
+                    FAIL() << "Cannot cast blob to TBlob<int16_t>.";
+                int16_t *srcIdxP = (int16_t*)srcIdx->buffer();
+                for (size_t i = 0; i < p.in.size(); i++)
+                    srcIdxP[i] = static_cast<int16_t>(p.in[i]);
+            }
+
+            //  Input Dictionary
+            Blob::Ptr srcDict = make_shared_blob<float>(
+                    {Precision::FP32, p.dct_dim, TensorDesc::getLayoutByDims(p.dct_dim)});
+            srcDict->allocate();
+            ie_memcpy(srcDict->buffer(), srcDict->byteSize(), &p.dct[0], sizeof(float)*p.dct.size());
+            auto * srcDictPtr = dynamic_cast<TBlob<float>*>(srcDict.get());
+            if (srcDictPtr == nullptr)
+                FAIL() << "Cannot cast blob to TBlob<float>.";
+
+            //  Output Data
+            OutputsDataMap out = net.getOutputsInfo();
+            std::pair<std::string, DataPtr> item = *out.begin();
+            TBlob<float>::Ptr output;
+            output = make_shared_blob<float>(item.second->getTensorDesc());
+            output->allocate();
+            inferRequest.SetBlob(item.first, output);
+
+            //  Infer
+            inferRequest.SetBlob("InputDictionary", srcDict);
+            inferRequest.SetBlob("InputText", srcIdx);
+            inferRequest.Infer();
+
+            //  Check results
+            if (memcmp((*output).data(), &p.ref[0], p.ref.size() * sizeof(float)) != 0)
+                FAIL() << "Wrong result with compare TF reference!";
+        } catch (const details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(GatherTFTests, TestsGather) {}
+
+//  Test data vectors
+std::vector<float> in0 = { 0.f, 1.f, 1.f, 0.f };
+std::vector<float> in1 = { 0.f, 1.f, 2.f, 1.f };
+std::vector<float> dict = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f };
+std::vector<float> dict2D = { 1.f, 2.f, 3.f, 4.f}; // 2x2
+std::vector<float> ref_in0_a0_d223 = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f }; // 2x2x2x3
+std::vector<float> ref_in0_a2_d232 = { 1.f, 2.f, 2.f, 1.f, 3.f, 4.f, 4.f, 3.f, 5.f, 6.f, 6.f, 5.f, 7.f, 8.f, 8.f, 7.f, 9.f, 10.f, 10.f, 9.f, 11.f, 12.f, 12.f, 11.f }; // 2x3x2x2
+std::vector<float> ref_in1_a0_d322 = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 5.f, 6.f, 7.f, 8.f }; // 2x2x2x2
+std::vector<float> ref_in1_a1_d232 = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 3.f, 4.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 9.f, 10.f }; // 2x2x2x2
+std::vector<float> ref_in1_a2_d223 = { 1.f, 2.f, 3.f, 2.f, 4.f, 5.f, 6.f, 5.f, 7.f, 8.f, 9.f, 8.f, 10.f, 11.f, 12.f, 11.f }; // 2x2x2x2
+std::vector<float> ref_in0_a0_d22 = { 1.f, 2.f, 3.f, 4.f, 3.f, 4.f, 1.f, 2.f }; // 2x2x2
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/gather_tree_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/gather_tree_tests.hpp
new file mode 100644 (file)
index 0000000..c9d9515
--- /dev/null
@@ -0,0 +1,266 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include "ir_gen_helper.hpp"
+#include <algorithm>
+#include <utility>
+#include <string>
+#include <memory>
+#include <vector>
+
+using namespace InferenceEngine;
+using namespace ::testing;
+
+struct gather_tree_test_params {
+    SizeVector           in_out_shape;
+    std::vector<int32_t> step_idx;
+    std::vector<int32_t> parent_idx;
+    std::vector<int32_t> max_seq_len;
+    std::vector<int32_t> end_token;
+    std::vector<int32_t> reference;
+    std::string device_name;
+};
+
+
+template <typename data_t>
+void ref_gather_tree(
+    InferenceEngine::TBlob<data_t> &step_idx,
+    InferenceEngine::TBlob<int32_t> &parent_idx,
+    InferenceEngine::TBlob<int32_t> &max_seq_len,
+    InferenceEngine::TBlob<data_t> &end_token,
+    InferenceEngine::TBlob<data_t> &dst
+) {
+    const data_t *step_idxPtr = step_idx.data();
+    const int32_t *parent_idxPtr = parent_idx.data();
+    const int32_t *max_seq_lenPtr = max_seq_len.data();
+    const data_t *end_tokenPtr = end_token.data();
+    data_t *final_idxPtr = dst.data();
+
+    SizeVector step_idx_dims = step_idx.getTensorDesc().getDims();
+    SizeVector parent_idx_dims = parent_idx.getTensorDesc().getDims();
+    SizeVector max_seq_len_dims = max_seq_len.getTensorDesc().getDims();
+    SizeVector final_idx_dims = dst.getTensorDesc().getDims();
+    int32_t max_time = step_idx_dims[0];
+    int32_t batch_size = step_idx_dims[1];
+    int32_t beam_width = step_idx_dims[2];
+
+    if (max_time != parent_idx_dims[0] || max_time != final_idx_dims[0] ||
+        batch_size != parent_idx_dims[1] || batch_size != final_idx_dims[1] || batch_size != max_seq_len_dims[0] ||
+        beam_width != parent_idx_dims[2] || beam_width != final_idx_dims[2]) {
+        FAIL() << " Input/Output tensors dimensions mismatch";
+        return;
+    }
+
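+    // 'time' is intentionally shared across the loops below: the end-token fill
+    // loop leaves it at the last valid step, where the backtracking loop resumes.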
+    for (int32_t time, batch = 0; batch < batch_size; batch++) {
+        for (int32_t beam = 0; beam < beam_width; beam++) {
+            int32_t max_sequence_in_beam = (std::min)(max_time, max_seq_lenPtr[batch]);
+            if (max_sequence_in_beam <= 0)
+                continue;
+
+            for (time = (max_time - 1); time >= max_sequence_in_beam; time--)
+                final_idxPtr[(time * batch_size + batch) * beam_width + beam] = (*end_tokenPtr);
+
+            for (int32_t parent = beam; time >= 0; time--) {
+                if (parent < 0 || parent >= beam_width) {
+                    FAIL() << " Wrong parent index";
+                    return;
+                }
+
+                int32_t idx = (time * batch_size + batch) * beam_width;
+                final_idxPtr[idx + beam] = step_idxPtr[idx + parent];
+                parent = parent_idxPtr[idx + parent];
+            }
+
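+            // Forward pass: after the first end token appears in the restored beam,
+            // all subsequent time steps are overwritten with the end token too.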
+            bool finished = false;
+            data_t *final = &final_idxPtr[batch * beam_width + beam];
+
+            for (time = 0; time < max_sequence_in_beam; time++, final += (batch_size * beam_width)) {
+                if (finished)
+                    (*final) = (*end_tokenPtr);
+                else if ((*final) == (*end_tokenPtr))
+                    finished = true;
+            }
+        }
+    }
+}
+
+class GatherTreeTests : public TestsCommon, public WithParamInterface<gather_tree_test_params> {
+    std::string model_t = R"V0G0N(
+<net Name="GatherTree_net" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="step_idx" type="Input" precision="I32" id="1">
+            <output>
+                <port id="1">
+                    _IN_OUT_
+                </port>
+            </output>
+        </layer>
+        <layer name="parent_idx" type="Input" precision="I32" id="2">
+            <output>
+                <port id="2">
+                    _IN_OUT_
+                </port>
+            </output>
+        </layer>
+        <layer name="max_seq_len" type="Input" precision="I32" id="3">
+            <output>
+                <port id="3">
+                    <dim>_IN2_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="end_token" type="Input" precision="I32" id="4">
+            <output>
+                <port id="4">
+                    <dim>1</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="output" id="2" type="GatherTree" precision="I32">
+            <data/>
+            <input>
+                <port id="1">
+                    _IN_OUT_
+                </port>
+                <port id="2">
+                    _IN_OUT_
+                </port>
+                <port id="3">
+                    <dim>_IN2_</dim>
+                </port>
+                <port id="4">
+                    <dim>1</dim>
+                </port>
+            </input>
+            <output>
+                <port id="5">
+                    _IN_OUT_
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
+        <edge from-layer="2" from-port="2" to-layer="2" to-port="2"/>
+        <edge from-layer="3" from-port="3" to-layer="2" to-port="3"/>
+        <edge from-layer="4" from-port="4" to-layer="2" to-port="4"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string getModel(gather_tree_test_params p) {
+        std::string model = model_t;
+        std::string in_out_shape;
+
+        for (auto& dct : p.in_out_shape) {
+            in_out_shape += "<dim>";
+            in_out_shape += std::to_string(dct) + "</dim>\n";
+        }
+
+        REPLACE_WITH_STR(model, "_IN_OUT_", in_out_shape);
+        REPLACE_WITH_NUM(model, "_IN2_", p.in_out_shape[1]);
+
+        return model;
+    }
+
+protected:
+    virtual void TearDown() {
+    }
+
+    virtual void SetUp() {
+        try {
+            TestsCommon::SetUp();
+            gather_tree_test_params p = ::testing::WithParamInterface<gather_tree_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr());
+            ExecutableNetwork executableNetwork = ie.LoadNetwork(network, p.device_name);
+            InferRequest inferRequest = executableNetwork.CreateInferRequest();
+            // Output Data
+            InferenceEngine::OutputsDataMap out;
+            out = network.getOutputsInfo();
+
+            std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
+
+            InferenceEngine::TBlob<int32_t>::Ptr output;
+            output = InferenceEngine::make_shared_blob<int32_t>(item.second->getTensorDesc());
+            output->allocate();
+
+            // Output Reference
+            InferenceEngine::TBlob<int32_t> dst_ref(item.second->getTensorDesc());
+            dst_ref.allocate();
+
+            // Input Data
+            // step_idx
+            InferenceEngine::Blob::Ptr step_idx;
+            step_idx = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, p.in_out_shape,
+                InferenceEngine::TensorDesc::getLayoutByDims(p.in_out_shape) });
+            step_idx->allocate();
+            memcpy(step_idx->buffer(), &p.step_idx[0], sizeof(int32_t)*p.step_idx.size());
+            auto * step_idxPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(step_idx.get());
+            if (step_idxPtr == nullptr)
+                FAIL() << "Cannot cast blob to TBlob<int32_t>.";
+
+            // parent_idx
+            InferenceEngine::Blob::Ptr parent_idx;
+            parent_idx = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, p.in_out_shape,
+                InferenceEngine::TensorDesc::getLayoutByDims(p.in_out_shape) });
+            parent_idx->allocate();
+            memcpy(parent_idx->buffer(), &p.parent_idx[0], sizeof(int32_t)*p.parent_idx.size());
+            auto * parent_idxPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(parent_idx.get());
+            if (parent_idxPtr == nullptr)
+                FAIL() << "Cannot cast blob to TBlob<int32_t>.";
+
+            // max_seq_len
+            InferenceEngine::Blob::Ptr max_seq_len;
+            InferenceEngine::SizeVector max_seq_len_dim(1, p.in_out_shape[1]);
+            max_seq_len = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, max_seq_len_dim,
+                InferenceEngine::TensorDesc::getLayoutByDims(max_seq_len_dim) });
+            max_seq_len->allocate();
+            memcpy(max_seq_len->buffer(), &p.max_seq_len[0], sizeof(int32_t)*p.max_seq_len.size());
+            auto * max_seq_lenPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(max_seq_len.get());
+            if (max_seq_lenPtr == nullptr)
+                FAIL() << "Cannot cast blob to TBlob<int32_t>.";
+
+            // end_token
+            InferenceEngine::Blob::Ptr end_token;
+            InferenceEngine::SizeVector end_token_dim(1, 1);
+            end_token = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, end_token_dim,
+                InferenceEngine::TensorDesc::getLayoutByDims(end_token_dim) });
+            end_token->allocate();
+            memcpy(static_cast<int32_t*>(end_token->buffer()), &p.end_token[0], sizeof(int32_t));
+            auto * end_tokenPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(end_token.get());
+            if (end_tokenPtr == nullptr)
+                FAIL() << "Cannot cast blob to TBlob<int32_t>.";
+
+            // Reference
+            ref_gather_tree(*step_idxPtr, *parent_idxPtr, *max_seq_lenPtr, *end_tokenPtr, dst_ref);
+
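+            // Sanity-check the reference implementation against the hand-written
+            // expected vector (when one is provided) before comparing with the plugin.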
+            if (p.reference.size())
+                if (memcmp(dst_ref.data(), &p.reference[0], p.reference.size() * sizeof(int32_t)) != 0)
+                    FAIL() << "Wrong result with compare reference vector!";
+
+            // Infer
+            inferRequest.SetBlob("step_idx", step_idx);
+            inferRequest.SetBlob("parent_idx", parent_idx);
+            inferRequest.SetBlob("max_seq_len", max_seq_len);
+            inferRequest.SetBlob("end_token", end_token);
+            inferRequest.SetBlob(network.getOutputsInfo().begin()->first, output);
+            inferRequest.Infer();
+
+            ASSERT_EQ(dst_ref.size(), output->size());
+            for (int i = dst_ref.size()-1; i >= 0; i--)
+               ASSERT_EQ(dst_ref.data()[i], output->data()[i]);
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(GatherTreeTests, TestsGatherTree) {}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/gemm_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/gemm_tests.hpp
new file mode 100644 (file)
index 0000000..b67d689
--- /dev/null
@@ -0,0 +1,492 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include <single_layer_common.hpp>
+#include <string>
+#include <tuple>
+
+using namespace InferenceEngine;
+
+struct gemm_base_params {
+    float alpha;
+    float beta;
+    bool transpose_A;
+    bool transpose_B;
+    SizeVector dims_A;
+    SizeVector dims_B;
+    SizeVector dims_C;
+
+    gemm_base_params() = default;
+    gemm_base_params(float _alpha,
+                     float _beta,
+                     bool _transpose_A,
+                     bool _transpose_B,
+                     SizeVector _dims_A,
+                     SizeVector _dims_B,
+                     SizeVector _dims_C = {})
+        : alpha(_alpha)
+        , beta(_beta)
+        , transpose_A(_transpose_A)
+        , transpose_B(_transpose_B)
+        , dims_A(_dims_A)
+        , dims_B(_dims_B)
+        , dims_C(_dims_C)
+    {}
+
+    virtual void print(std::ostream& os) const {
+        os << "alpha: " << alpha << ", beta: " << beta
+            << ", trans A: " << transpose_A << ", trans B: " << transpose_B
+            << std::endl;
+
+        auto print_dims = [&](std::string name, const SizeVector& dims) {
+            os << name << ": {";
+            if (!dims.empty())
+                os << dims[0];
+            for (size_t i = 1; i < dims.size(); ++i)
+                os << ", " << dims[i];
+            os << "}" << std::endl;
+        };
+
+        print_dims("A", dims_A);
+        print_dims("B", dims_B);
+        print_dims("C", dims_C);
+    }
+
+    virtual SizeVector outDims() const {
+        size_t max_dims_num = std::max(dims_A.size(), dims_B.size());
+        max_dims_num = std::max(dims_C.size(), max_dims_num);
+
+        SizeVector dims_out(max_dims_num);
+        // Process batch dims in reverse for required alignment
+        for (size_t rbi = 0; rbi < max_dims_num - 2; ++rbi) {
+            size_t max_val = 1;
+
+            if (rbi + 2 < dims_A.size()) {
+                auto bi_A = dims_A.size() - rbi - 3;
+                max_val = std::max(max_val, dims_A[bi_A]);
+            }
+            if (rbi + 2 < dims_B.size()) {
+                auto bi_B = dims_B.size() - rbi - 3;
+                max_val = std::max(max_val, dims_B[bi_B]);
+            }
+            if (rbi + 2 < dims_C.size()) {
+                auto bi_C = dims_C.size() - rbi - 3;
+                max_val = std::max(max_val, dims_C[bi_C]);
+            }
+
+            auto bi_out = max_dims_num - rbi - 3;
+            dims_out[bi_out] = max_val;
+        }
+
+        auto y_dim_A = transpose_A ? dims_A.size() - 1 : dims_A.size() - 2;
+        auto x_dim_B = transpose_B ? dims_B.size() - 2 : dims_B.size() - 1;
+        dims_out[dims_out.size() - 1] = dims_B[x_dim_B];
+        dims_out[dims_out.size() - 2] = dims_A[y_dim_A];
+
+        return dims_out;
+    }
+};
+
+
+std::vector<float> ref_gemm(const gemm_base_params& params,
+                            const std::vector<float>& data_A,
+                            const std::vector<float>& data_B,
+                            const std::vector<float>& data_C) {
+    const auto& dims_A = params.dims_A;
+    const auto& dims_B = params.dims_B;
+    const auto& dims_C = params.dims_C;
+
+    bool use_C = !dims_C.empty();
+
+    auto x_A = dims_A[dims_A.size() - 1];
+    auto y_A = dims_A[dims_A.size() - 2];
+    auto x_pitch_A = size_t(1);
+    auto y_pitch_A = x_A;
+
+    auto x_B = dims_B[dims_B.size() - 1];
+    auto y_B = dims_B[dims_B.size() - 2];
+    auto x_pitch_B = size_t(1);
+    auto y_pitch_B = x_B;
+
+    if (params.transpose_A) {
+        std::swap(x_A, y_A);
+        std::swap(x_pitch_A, y_pitch_A);
+    }
+
+    if (params.transpose_B) {
+        std::swap(x_B, y_B);
+        std::swap(x_pitch_B, y_pitch_B);
+    }
+
+    auto dims_out = params.outDims();
+
+    auto x_out = dims_out[dims_out.size() - 1];
+    auto y_out = dims_out[dims_out.size() - 2];
+    auto x_pitch_out = size_t(1);
+    auto y_pitch_out = x_out;
+
+    auto out_batch_num = dims_out.size() - 2;
+
+    // Calculates batch pitches in reverse order
+    auto calculate_batch_pitches = [out_batch_num](const SizeVector& dims) {
+        std::vector<size_t> batch_pitches = { };
+        batch_pitches.reserve(out_batch_num);
+        size_t real_pitch = dims[dims.size() - 2] * dims[dims.size() - 1];
+
+        for (size_t rbi = 0; rbi < out_batch_num; ++rbi) {
+            if (rbi + 2 < dims.size() && dims[dims.size() - rbi - 3] != 1) {
+                batch_pitches.push_back(real_pitch);
+                real_pitch *= dims[dims.size() - rbi - 3];
+            } else {
+                // Set to zero for broadcasting
+                batch_pitches.push_back(0ul);
+            }
+        }
+
+        return batch_pitches;
+    };
+
+    auto batch_pitches_A = calculate_batch_pitches(dims_A);
+    auto batch_pitches_B = calculate_batch_pitches(dims_B);
+    auto batch_pitches_C = use_C ? calculate_batch_pitches(dims_C) : std::vector<size_t>();
+    auto batch_pitches_out = calculate_batch_pitches(dims_out);
+
+    auto k = x_A;
+
+    auto total_out_size = std::accumulate(dims_out.begin(), dims_out.end(), 1ul, std::multiplies<size_t>());
+    std::vector<float> data_out(total_out_size, 0.f);
+
+    // Currently processed batch indices in reverse order
+    std::vector<size_t> current_batch_indices(out_batch_num, 0ul);
+    auto get_current_batch_offset = [&](const std::vector<size_t>& pitches) {
+        return std::inner_product(pitches.begin(), pitches.end(), current_batch_indices.begin(), 0ul);
+    };
+
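+    // Walk every output batch coordinate; the zero pitches produced above make
+    // broadcast inputs reuse their single slice on each iteration.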
+    do {
+        auto batch_offset_A = get_current_batch_offset(batch_pitches_A);
+        auto batch_offset_B = get_current_batch_offset(batch_pitches_B);
+        auto batch_offset_C = use_C ? get_current_batch_offset(batch_pitches_C) : 0ul;
+        auto batch_offset_out = get_current_batch_offset(batch_pitches_out);
+
+        for (size_t yi = 0; yi < y_out; ++yi) {
+            for (size_t xi = 0; xi < x_out; ++xi) {
+
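+                // Computes out = alpha * (A x B) + beta * C (C optional) per output element.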
+                float acc = 0.f;
+                if (params.alpha != 0.f) {
+                    for (size_t ki = 0; ki < k; ++ki) {
+                        auto idx_A = batch_offset_A + yi * y_pitch_A + ki * x_pitch_A;
+                        auto idx_B = batch_offset_B + ki * y_pitch_B + xi * x_pitch_B;
+
+                        acc += data_A[idx_A] * data_B[idx_B];
+                    }
+
+                    acc *= params.alpha;
+                }
+
+                if (use_C && params.beta != 0.f) {
+                    auto idx_C = batch_offset_C + yi * y_pitch_out + xi * x_pitch_out;
+                    acc += params.beta * data_C[idx_C];
+                }
+
+                auto idx_out = batch_offset_out + yi * y_pitch_out + xi * x_pitch_out;
+                data_out[idx_out] = acc;
+            }
+        }
+
+        for (size_t i = 0; i < out_batch_num; ++i) {
+            current_batch_indices[i] += 1;
+            if (current_batch_indices[i] == dims_out[dims_out.size() - 3 - i] &&
+                i != out_batch_num - 1) {  // Don't reset last index as it signals end of calculations
+                current_batch_indices[i] = 0;
+            } else {
+                break;
+            }
+        }
+    } while (current_batch_indices.size() > 0 &&
+             current_batch_indices[current_batch_indices.size() - 1] != dims_out[0]);
+
+    return data_out;
+}
+
+struct gemm_test_params : gemm_base_params {
+    std::string device_name;
+    std::string precision;
+
+    gemm_test_params(std::string name, std::string _precision, gemm_base_params base)
+        : gemm_base_params(base)
+        , device_name(name)
+        , precision(_precision)
+    {}
+
+    gemm_test_params(std::tuple<std::string, std::string, gemm_base_params> wrapper)
+        : gemm_test_params(std::get<0>(wrapper), std::get<1>(wrapper), std::get<2>(wrapper))
+    {}
+
+    void print(std::ostream& os) const override {
+        os << "Device: " << device_name << ", precision: " << precision << std::endl;
+        gemm_base_params::print(os);
+    }
+};
+
+class GemmTestBase : public TestsCommon {
+    std::string model_t = R"V0G0N(
+<net name="GemmSingleLayerTest" version="5" precision="_PRECISION_" batch="1">
+    <layers>
+        <layer name="input_A" type="Input" id="1" precision="_PRECISION_">
+            <output>
+                <port id="0">
+                    _IN_A_DIMS_
+                </port>
+            </output>
+        </layer>
+        <layer name="input_B" type="Input" id="2" precision="_PRECISION_">
+            <output>
+                <port id="0">
+                    _IN_B_DIMS_
+                </port>
+            </output>
+        </layer>
+        _IN_C_LAYER_
+        <layer name="gemm" type="GEMM" id="4" precision="_PRECISION_">
+            <data alpha="_ALPHA_" beta="_BETA_" transpose_a="_TRANS_A_" transpose_b="_TRANS_B_" />
+            <input>
+                <port id="0">
+                    _IN_A_DIMS_
+                </port>
+                <port id="1">
+                    _IN_B_DIMS_
+                </port>
+                _IN_C_GEMM_PORT_
+            </input>
+            <output>
+                <port id="0">
+                    _OUT_DIMS_
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="1" from-port="0" to-layer="4" to-port="0"/>
+        <edge from-layer="2" from-port="0" to-layer="4" to-port="1"/>
+        _IN_C_EDGE_
+    </edges>
+</net>
+)V0G0N";
+
+std::string in_C_layer = R"V0G0N(
+        <layer name="input_C" type="Input" id="3" precision="_PRECISION_">
+            <output>
+                <port id="0">
+                    _IN_C_DIMS_
+                </port>
+            </output>
+        </layer>
+)V0G0N";
+
+std::string in_C_port = R"V0G0N(
+                <port id="2">
+                    _IN_C_DIMS_
+                </port>
+)V0G0N";
+
+std::string in_C_edge = R"V0G0N(
+        <edge from-layer="3" from-port="0" to-layer="4" to-port="2"/>
+)V0G0N";
+
+protected:
+    virtual float getThreshold(const gemm_test_params& params) {
+        if (params.precision == "FP16")
+            return 0.02f;
+        else
+            return 0.01f;
+    }
+
+    std::string getModel(const gemm_test_params& params) {
+        auto model = model_t;
+
+        if (!params.dims_C.empty()) {
+            REPLACE_WITH_STR(model, "_IN_C_LAYER_", in_C_layer);
+            REPLACE_WITH_STR(model, "_IN_C_GEMM_PORT_", in_C_port);
+            REPLACE_WITH_STR(model, "_IN_C_EDGE_", in_C_edge);
+        } else {
+            REPLACE_WITH_STR(model, "_IN_C_LAYER_", "");
+            REPLACE_WITH_STR(model, "_IN_C_GEMM_PORT_", "");
+            REPLACE_WITH_STR(model, "_IN_C_EDGE_", "");
+        }
+
+        REPLACE_WITH_STR(model, "_PRECISION_", params.precision);
+
+        REPLACE_WITH_NUM(model, "_ALPHA_", params.alpha);
+        REPLACE_WITH_NUM(model, "_BETA_", params.beta);
+        REPLACE_WITH_NUM(model, "_TRANS_A_", params.transpose_A);
+        REPLACE_WITH_NUM(model, "_TRANS_B_", params.transpose_B);
+
+        auto get_dims_str = [](const SizeVector& dims) {
+            std::string result;
+            for (const auto& d : dims) {
+                result += "<dim>" + std::to_string(d) + "</dim>\n";
+            }
+            return result;
+        };
+
+        std::string in_A_dims = get_dims_str(params.dims_A);
+        std::string in_B_dims = get_dims_str(params.dims_B);
+        std::string in_C_dims = get_dims_str(params.dims_C);
+        std::string out_dims = get_dims_str(params.outDims());
+
+        REPLACE_WITH_STR(model, "_IN_A_DIMS_", in_A_dims);
+        REPLACE_WITH_STR(model, "_IN_B_DIMS_", in_B_dims);
+        REPLACE_WITH_STR(model, "_IN_C_DIMS_", in_C_dims);
+        REPLACE_WITH_STR(model, "_OUT_DIMS_", out_dims);
+
+        return model;
+    }
+
+    CNNNetwork getNetwork(Core & ie, const gemm_test_params& params) {
+        std::string model = getModel(params);
+
+        CNNNetwork network = ie.ReadNetwork(model, Blob::CPtr());
+
+        network.getInputsInfo().at("input_A")->setPrecision(Precision::FP32);
+        network.getInputsInfo().at("input_B")->setPrecision(Precision::FP32);
+        if (!params.dims_C.empty())
+            network.getInputsInfo().at("input_C")->setPrecision(Precision::FP32);
+
+        network.getOutputsInfo().at("gemm")->setPrecision(Precision::FP32);
+
+        return network;
+    }
+
+    void runTest(const gemm_test_params& test_params,
+                 const std::vector<float>& data_A,
+                 const std::vector<float>& data_B,
+                 const std::vector<float>& data_C,
+                 const std::vector<float>& ref_output) {
+        test_params.print(std::cout);
+
+        Core ie;
+        auto network = getNetwork(ie, test_params);
+        auto exec = ie.LoadNetwork(network, test_params.device_name);
+        auto request = exec.CreateInferRequest();
+
+        auto fill_blob = [&](const char* name, const std::vector<float>& data) {
+            Blob::Ptr blob = request.GetBlob(name);
+
+            auto fill_size = std::min(blob->size(), data.size());
+            auto buffer = blob->buffer().as<float*>();
+
+            for (size_t i = 0; i < fill_size; ++i) {
+                buffer[i] = data[i];
+            }
+        };
+
+        fill_blob("input_A", data_A);
+        fill_blob("input_B", data_B);
+        if (!test_params.dims_C.empty()) {
+            fill_blob("input_C", data_C);
+        }
+
+        request.Infer();
+
+        if (!ref_output.empty()) {
+            Blob::Ptr blob_out = request.GetBlob("gemm");
+            ASSERT_EQ(blob_out->size(), ref_output.size());
+
+            auto buf_out = blob_out->buffer().as<float*>();
+            compare(buf_out, ref_output.data(), blob_out->size(), getThreshold(test_params));
+        }
+    }
+};
+
+using GemmRandomTestParam = std::tuple<
+    std::string,        // plugin
+    std::string,        // precision
+    gemm_base_params>;  // gemm params
+
+class GemmRandomTest : public GemmTestBase, public testing::WithParamInterface<GemmRandomTestParam> {};
+
+// Basic cases: all transposition combinations, 2D-5D
+#define case1  gemm_base_params(1.2f, 3.f,   false, false, {9ul, 11ul}, {11ul, 13ul} )
+#define case2  gemm_base_params(1.2f, 3.f,   false, false, {9ul, 11ul}, {11ul, 13ul}, {9ul, 13ul} )
+#define case3  gemm_base_params(2.5f, 1.2f,  false, false, {7ul, 9ul, 11ul}, {7ul, 11ul, 13ul} )
+#define case4  gemm_base_params(2.5f, 1.2f,  false, false, {7ul, 9ul, 11ul}, {7ul, 11ul, 13ul}, {7ul, 9ul, 13ul} )
+#define case5  gemm_base_params(1.2f, -1.5f, false, false, {3ul, 7ul, 9ul, 11ul}, {3ul, 7ul, 11ul, 13ul})
+#define case6  gemm_base_params(1.2f, -1.5f, false, false, {3ul, 7ul, 9ul, 11ul}, {3ul, 7ul, 11ul, 13ul}, {3ul, 7ul, 9ul, 13ul} )
+#define case7  gemm_base_params(1.2f, -1.5f, false, false, {2ul, 3ul, 7ul, 9ul, 11ul}, {2ul, 3ul, 7ul, 11ul, 13ul})
+#define case8  gemm_base_params(1.2f, -1.5f, false, false, {2ul, 3ul, 7ul, 9ul, 11ul}, {2ul, 3ul, 7ul, 11ul, 13ul}, {2ul, 3ul, 7ul, 9ul, 13ul})
+#define case9  gemm_base_params(1.2f, 3.f,   true,  false, {11ul, 9ul}, {11ul, 13ul} )
+#define case10 gemm_base_params(1.2f, 3.f,   true,  false, {11ul, 9ul}, {11ul, 13ul}, {9ul, 13ul} )
+#define case11 gemm_base_params(2.5f, 1.2f,  true,  false, {7ul, 11ul, 9ul}, {7ul, 11ul, 13ul} )
+#define case12 gemm_base_params(2.5f, 1.2f,  true,  false, {7ul, 11ul, 9ul}, {7ul, 11ul, 13ul}, {7ul, 9ul, 13ul} )
+#define case13 gemm_base_params(1.2f, -1.5f, true,  false, {3ul, 7ul, 11ul, 9ul}, {3ul, 7ul, 11ul, 13ul})
+#define case14 gemm_base_params(1.2f, -1.5f, true,  false, {3ul, 7ul, 11ul, 9ul}, {3ul, 7ul, 11ul, 13ul}, {3ul, 7ul, 9ul, 13ul} )
+#define case15 gemm_base_params(1.2f, -1.5f, true,  false, {2ul, 3ul, 7ul, 11ul, 9ul}, {2ul, 3ul, 7ul, 11ul, 13ul})
+#define case16 gemm_base_params(1.2f, -1.5f, true,  false, {2ul, 3ul, 7ul, 11ul, 9ul}, {2ul, 3ul, 7ul, 11ul, 13ul}, {2ul, 3ul, 7ul, 9ul, 13ul})
+#define case17 gemm_base_params(1.2f, 3.f,   false, true,  {9ul, 11ul}, {13ul, 11ul} )
+#define case18 gemm_base_params(1.2f, 3.f,   false, true,  {9ul, 11ul}, {13ul, 11ul}, {9ul, 13ul} )
+#define case19 gemm_base_params(2.5f, 1.2f,  false, true,  {7ul, 9ul, 11ul}, {7ul, 13ul, 11ul} )
+#define case20 gemm_base_params(2.5f, 1.2f,  false, true,  {7ul, 9ul, 11ul}, {7ul, 13ul, 11ul}, {7ul, 9ul, 13ul} )
+#define case21 gemm_base_params(1.2f, -1.5f, false, true,  {3ul, 7ul, 9ul, 11ul}, {3ul, 7ul, 13ul, 11ul})
+#define case22 gemm_base_params(1.2f, -1.5f, false, true,  {3ul, 7ul, 9ul, 11ul}, {3ul, 7ul, 13ul, 11ul}, {3ul, 7ul, 9ul, 13ul} )
+#define case23 gemm_base_params(1.2f, -1.5f, false, true,  {2ul, 3ul, 7ul, 9ul, 11ul}, {2ul, 3ul, 7ul, 13ul, 11ul})
+#define case24 gemm_base_params(1.2f, -1.5f, false, true,  {2ul, 3ul, 7ul, 9ul, 11ul}, {2ul, 3ul, 7ul, 13ul, 11ul}, {2ul, 3ul, 7ul, 9ul, 13ul})
+#define case25 gemm_base_params(1.2f, 3.f,   true,  true,  {11ul, 9ul}, {13ul, 11ul} )
+#define case26 gemm_base_params(1.2f, 3.f,   true,  true,  {11ul, 9ul}, {13ul, 11ul}, {9ul, 13ul} )
+#define case27 gemm_base_params(2.5f, 1.2f,  true,  true,  {7ul, 11ul, 9ul}, {7ul, 13ul, 11ul} )
+#define case28 gemm_base_params(2.5f, 1.2f,  true,  true,  {7ul, 11ul, 9ul}, {7ul, 13ul, 11ul}, {7ul, 9ul, 13ul} )
+#define case29 gemm_base_params(1.2f, -1.5f, true,  true,  {3ul, 7ul, 11ul, 9ul}, {3ul, 7ul, 13ul, 11ul})
+#define case30 gemm_base_params(1.2f, -1.5f, true,  true,  {3ul, 7ul, 11ul, 9ul}, {3ul, 7ul, 13ul, 11ul}, {3ul, 7ul, 9ul, 13ul} )
+#define case31 gemm_base_params(1.2f, -1.5f, true,  true,  {2ul, 3ul, 7ul, 11ul, 9ul}, {2ul, 3ul, 7ul, 13ul, 11ul})
+#define case32 gemm_base_params(1.2f, -1.5f, true,  true,  {2ul, 3ul, 7ul, 11ul, 9ul}, {2ul, 3ul, 7ul, 13ul, 11ul}, {2ul, 3ul, 7ul, 9ul, 13ul})
+
+// Broadcasting/dimension inference cases
+#define case33 gemm_base_params(1.2f, -1.5f, false, false, {1ul, 1ul, 9ul, 11ul}, {3ul, 7ul, 11ul, 13ul})
+#define case34 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 7ul, 9ul, 11ul}, {1ul, 1ul, 11ul, 13ul})
+#define case35 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 7ul, 9ul, 11ul}, {3ul, 7ul, 11ul, 13ul}, {1ul, 1ul, 9ul, 13ul})
+#define case36 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 1ul, 9ul, 11ul}, {3ul, 7ul, 11ul, 13ul})
+#define case37 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 7ul, 9ul, 11ul}, {3ul, 1ul, 11ul, 13ul})
+#define case38 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 7ul, 9ul, 11ul}, {3ul, 7ul, 11ul, 13ul}, {3ul, 1ul, 9ul, 13ul})
+#define case39 gemm_base_params(1.2f, -1.5f, false, false, {9ul, 11ul}, {3ul, 7ul, 11ul, 13ul})
+#define case40 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 7ul, 9ul, 11ul}, {11ul, 13ul})
+#define case41 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 7ul, 9ul, 11ul}, {3ul, 7ul, 11ul, 13ul}, {9ul, 13ul})
+#define case42 gemm_base_params(1.2f, -1.5f, false, false, {7ul, 9ul, 11ul}, {3ul, 7ul, 11ul, 13ul})
+#define case43 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 7ul, 9ul, 11ul}, {7ul, 11ul, 13ul})
+#define case44 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 7ul, 9ul, 11ul}, {3ul, 7ul, 11ul, 13ul}, {7ul, 9ul, 13ul})
+#define case45 gemm_base_params(1.2f, -1.5f, false, false, {7ul, 9ul, 11ul}, {3ul, 1ul, 11ul, 13ul})
+#define case46 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 1ul, 9ul, 11ul}, {7ul, 11ul, 13ul})
+#define case47 gemm_base_params(1.2f, -1.5f, false, false, {3ul, 1ul, 9ul, 11ul}, {3ul, 1ul, 11ul, 13ul}, {7ul, 9ul, 13ul})
+
+#define all_cases                                                       \
+    case1,  case2,  case3,  case4,  case5,  case6,  case7,  case8,      \
+    case9,  case10, case11, case12, case13, case14, case15, case16,     \
+    case17, case18, case19, case20, case21, case22, case23, case24,     \
+    case25, case26, case27, case28, case29, case30, case31, case32,     \
+    case33, case34, case35, case36, case37, case38,                     \
+    case39, case40, case41, case42, case43, case44,                     \
+    case45, case46, case47
+
+TEST_P(GemmRandomTest, smoke_randomInput) {
+    gemm_test_params params = GetParam();
+
+    auto size_A = std::accumulate(params.dims_A.begin(), params.dims_A.end(), size_t(1), std::multiplies<size_t>());
+    auto size_B = std::accumulate(params.dims_B.begin(), params.dims_B.end(), size_t(1), std::multiplies<size_t>());
+    auto size_C = std::accumulate(params.dims_C.begin(), params.dims_C.end(), size_t(1), std::multiplies<size_t>());
+
+    std::vector<float> data_A(size_A);
+    std::vector<float> data_B(size_B);
+    std::vector<float> data_C(size_C);
+
+    fill_data(data_A.data(), size_A);
+    fill_data(data_B.data(), size_B);
+    fill_data(data_C.data(), size_C);
+
+    auto ref_output = ref_gemm(params, data_A, data_B, data_C);
+
+    runTest(params, data_A, data_B, data_C, ref_output);
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/one_hot_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/one_hot_tests.hpp
new file mode 100644 (file)
index 0000000..f4842b1
--- /dev/null
@@ -0,0 +1,208 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <cmath>
+#include <string>
+#include <vector>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include <ie_core.hpp>
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace std;
+
+struct one_hot_base_params {
+    std::vector<size_t> in;
+    std::vector<size_t> out;
+    int axis;
+    unsigned int depth;
+    float on, off;
+};
+
+struct one_hot_test_params : one_hot_base_params {
+    std::string device_name;
+
+    one_hot_test_params(std::string name, one_hot_base_params params) :
+            one_hot_base_params(params), device_name(name) {}
+};
+
+class OneHotOnlyTestShared: public TestsCommon,
+                        public WithParamInterface<one_hot_test_params> {
+
+    std::string model_t = R"V0G0N(
+<net name="OneHot_Only" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer id="1" name="input" precision="FP32" type="Input">
+            <output>
+                <port id="0">
+                    _IN_
+                </port>
+            </output>
+        </layer>
+        <layer id="2" name="OneHot1" type="OneHot" precision="FP32">
+
+            <data depth="_DEPTH_" axis="_AXIS_"/>
+
+            <input>
+                <port id="1">
+                    _IN_
+                </port>
+            </input>
+            <output>
+                <port id="2">
+                    _OUT_
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string getModel(one_hot_test_params p) {
+        std::string model = model_t;
+
+        std::string in_shape;
+        std::string out_shape;
+
+        for (size_t i = 0; i < p.in.size(); i++) {
+            in_shape += "<dim>";
+            in_shape += std::to_string(p.in[i]) + "</dim>\n";
+        }
+        for (size_t i = 0; i < p.out.size(); i++) {
+            out_shape += "<dim>";
+            out_shape += std::to_string(p.out[i]) + "</dim>\n";
+        }
+
+        REPLACE_WITH_STR(model, "_IN_", in_shape);
+        REPLACE_WITH_STR(model, "_OUT_", out_shape);
+
+        REPLACE_WITH_NUM(model, "_AXIS_", p.axis);
+        REPLACE_WITH_NUM(model, "_DEPTH_", p.depth);
+
+        return model;
+    }
+
+    void ref_one_hot_4d(Blob &src, Blob &dst, one_hot_test_params p)
+    {
+        float *src_ptr = src.buffer().as<float*>();
+        std::size_t src_size = src.size();
+        float *dst_ptr = dst.buffer().as<float*>();
+        std::size_t dst_size = dst.size();
+
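+        // Interpret the output shape as N, C, (D,) H, W; missing trailing dims default to 1.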
+        int out_n = (p.out.size() >= 1) ? p.out[0] : 1;
+        int out_c = (p.out.size() >= 2) ? p.out[1] : 1;
+        int out_d = (p.out.size() == 5) ? p.out[2] : 1;
+        int out_h = (p.out.size() >= 3 && p.out.size() < 5) ? p.out[2] : (p.out.size() == 5) ? p.out[3] : 1;
+        int out_w = (p.out.size() >= 4 && p.out.size() < 5) ? p.out[3] : (p.out.size() == 5) ? p.out[4] : 1;
+
+        int hot_axis = (p.axis == -1) ? p.in.size() : p.axis;
+
+        for (int ob = 0; ob < out_n; ob++) {
+            for (int oc = 0; oc < out_c; oc++) {
+                for (int od = 0; od < out_d; od++) {
+                    for (int oh = 0; oh < out_h; oh++) {
+                        for (int ow = 0; ow < out_w; ow++) {
+                            std::size_t dst_offset = ow + out_w * oh + out_w * out_h * od + out_w * out_h * out_d * oc + out_w * out_h * out_d * out_c * ob;
+                            std::size_t src_offset = 0;
+
+                            std::vector<int> out_dims = {ob, oc, oh, ow};
+                            if (p.out.size() == 5)
+                                out_dims.insert(out_dims.begin() + 2, od);
+                            std::vector<int> in_dims(out_dims.begin(), out_dims.end());
+                            in_dims.erase(in_dims.begin() + hot_axis);
+
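+                            // Flatten the remaining coordinates into a row-major
+                            // offset into the input blob.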
+                            for (int i = 0; i < p.in.size(); i++) {
+                                int mul = 1;
+                                if (i == p.in.size() - 1) {
+                                    src_offset += in_dims[i];
+                                    break;
+                                }
+                                for (int j = i; j < p.in.size(); j++) {
+                                    if (j == i)
+                                        mul *= in_dims[j];
+                                    else
+                                        mul *= p.in[j];
+                                }
+                                src_offset += mul;
+                            }
+
+                            if (out_dims[hot_axis] == src_ptr[src_offset])
+                                dst_ptr[dst_offset] = p.on;
+                            else
+                                dst_ptr[dst_offset] = p.off;
+                        }
+                    }
+                }
+            }
+        }
+    }
+protected:
+    virtual void SetUp() {
+        try {
+            TestsCommon::SetUp();
+            one_hot_test_params p = ::testing::WithParamInterface<one_hot_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+
+            // Output Data
+            OutputsDataMap out = net.getOutputsInfo();
+            std::pair<std::string, DataPtr> item = *out.begin();
+
+            TBlob<float>::Ptr output;
+            output = make_shared_blob<float>(item.second->getTensorDesc());
+            output->allocate();
+            inferRequest.SetBlob(item.first, output);
+
+            // Output Reference
+            TBlob<float> dst_ref(item.second->getTensorDesc());
+            dst_ref.allocate();
+
+            Blob::Ptr src;
+            src = make_shared_blob<float>({ Precision::FP32, p.in, TensorDesc::getLayoutByDims(p.in) });
+            src->allocate();
+            float* s = src->buffer().as<float*>();
+            for (int i = 0; i < src->size(); ++i)
+                s[i] = -1;
+            s[0] = 1;
+            s[1] = 1;
+            inferRequest.SetBlob("input", src);
+
+            inferRequest.Infer();
+
+            // Check results
+            ref_one_hot_4d(*src, dst_ref, p);
+
+            compare(*output, dst_ref);
+        } catch (const details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+#define case_2d_0 one_hot_base_params({{3}, {3, 6},-1, 6, 1.0f, 0.0f })
+#define case_2d_1 one_hot_base_params({{3}, {6, 3}, 0, 6, 1.0f, 0.0f })
+#define case_2d_2 one_hot_base_params({{3}, {3, 6}, 1, 6, 1.0f, 0.0f })
+#define case_3d_0 one_hot_base_params({{3, 2}, {3, 2, 4},-1, 4, 1.0f, 0.0f })
+#define case_3d_1 one_hot_base_params({{3, 2}, {4, 3, 2}, 0, 4, 1.0f, 0.0f })
+#define case_3d_2 one_hot_base_params({{3, 2}, {3, 4, 2}, 1, 4, 1.0f, 0.0f })
+#define case_4d_0 one_hot_base_params({ {1, 3, 2}, {1, 3, 2, 4},-1, 4, 1.0f, 0.0f })
+#define case_4d_1 one_hot_base_params({ {1, 3, 2}, {4, 1, 3, 2}, 0, 4, 1.0f, 0.0f })
+#define case_4d_2 one_hot_base_params({ {1, 3, 2}, {1, 4, 3, 2}, 1, 4, 1.0f, 0.0f })
+#define case_4d_3 one_hot_base_params({ {1, 3, 2}, {1, 3, 4, 2}, 2, 4, 1.0f, 0.0f })
+#define case_5d_0 one_hot_base_params({ {1, 3, 2, 3}, {4, 1, 3, 2, 3}, 0, 4, 1.0f, 0.0f })
+#define case_5d_1 one_hot_base_params({ {1, 3, 2, 3}, {1, 4, 3, 2, 3}, 1, 4, 1.0f, 0.0f })
+#define case_5d_2 one_hot_base_params({ {1, 3, 2, 3}, {1, 3, 4, 2, 3}, 2, 4, 1.0f, 0.0f })
+#define case_5d_3 one_hot_base_params({ {1, 3, 2, 3}, {1, 3, 2, 4, 3}, 3, 4, 1.0f, 0.0f })
+#define case_5d_4 one_hot_base_params({ {1, 3, 2, 3}, {1, 3, 2, 3, 4}, 4, 4, 1.0f, 0.0f })
+
+TEST_P(OneHotOnlyTestShared, TestsOneHot) {}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/pad_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/pad_tests.hpp
new file mode 100644 (file)
index 0000000..7c5deb3
--- /dev/null
@@ -0,0 +1,182 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+#include <algorithm>
+#include <cmath>
+#include <string>
+#include <vector>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace std;
+
+struct padTF_test_params {
+    std::string device;
+    SizeVector in_size;
+    std::vector<float> in;
+    SizeVector pads_begin;
+    SizeVector pads_end;
+    std::string pad_mode;
+    float pad_value;
+    SizeVector ref_size;
+    std::vector<float> ref;
+};
+
+class PadTFTests : public TestsCommon, public WithParamInterface<padTF_test_params> {
+    std::string model_t = R"V0G0N(
+<net Name="Pad_net" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="input" type="Input" precision="FP32" id="1">
+            <output>
+                <port id="1">
+                    _IN_
+                </port>
+            </output>
+        </layer>
+        <layer name="output" id="2" type="Pad" precision="FP32">
+            <data pads_begin="_P_BEGIN_" pads_end="_P_END_" pad_mode="_P_MODE_" pad_value="_P_VAL_"/>
+            <input>
+                <port id="2">
+                    _IN_
+                </port>
+            </input>
+            <output>
+                <port id="3">
+                    _OUT_
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="1" from-port="1" to-layer="2" to-port="2"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string getModel(padTF_test_params p) {
+        std::string model = model_t;
+        std::string in_size;
+        std::string pads_begin;
+        std::string pads_end;
+        std::string ref_size;
+
+        for (auto& src : p.in_size) {
+            in_size += "<dim>";
+            in_size += std::to_string(src) + "</dim>\n";
+        }
+
+        for (auto& pb : p.pads_begin)
+            pads_begin += std::to_string(pb) + ",";
+        pads_begin.pop_back();
+
+        for (auto& pe : p.pads_end)
+            pads_end += std::to_string(pe) + ",";
+        pads_end.pop_back();
+
+        for (auto& dst : p.ref_size) {
+            ref_size += "<dim>";
+            ref_size += std::to_string(dst) + "</dim>\n";
+        }
+
+        REPLACE_WITH_STR(model, "_IN_", in_size);
+        REPLACE_WITH_STR(model, "_P_BEGIN_", pads_begin);
+        REPLACE_WITH_STR(model, "_P_END_", pads_end);
+        REPLACE_WITH_STR(model, "_P_MODE_", p.pad_mode);
+        REPLACE_WITH_NUM(model, "_P_VAL_", p.pad_value);
+        REPLACE_WITH_STR(model, "_OUT_", ref_size);
+        return model;
+    }
+
+protected:
+    virtual void TearDown() {
+    }
+
+    virtual void SetUp() {
+        try {
+            TestsCommon::SetUp();
+            padTF_test_params p = ::testing::WithParamInterface<padTF_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+
+            InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.in_size, InferenceEngine::TensorDesc::getLayoutByDims(p.in_size) });
+            src->allocate();
+            float* psrc = src->buffer().as<float*>();
+            std::copy(p.in.begin(), p.in.end(), psrc);
+            inferRequest.SetBlob("input", src);
+
+            InferenceEngine::Blob::Ptr dst = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.ref_size, InferenceEngine::TensorDesc::getLayoutByDims(p.ref_size) });
+            dst->allocate();
+            inferRequest.SetBlob("output", dst);
+
+            //  Infer
+            inferRequest.Infer();
+
+            //  Check results
+            TBlob<float> dst_ref({ Precision::FP32, p.ref_size, TensorDesc::getLayoutByDims(p.ref_size) });
+            dst_ref.allocate();
+            float* pdst_ref = dst_ref.buffer().as<float*>();
+            std::copy(p.ref.begin(), p.ref.end(), pdst_ref);
+            compare(*dst, dst_ref);
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(PadTFTests, TestsPad) {}
+
+std::string getTestName(testing::TestParamInfo<padTF_test_params> obj) {
+    std::string name = obj.param.device + "_" + obj.param.pad_mode;
+    return name;
+}
+
+//  Examples of the standalone Pad operation input / output:
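+//  (each reference below pads the 3x4 input with pads_begin=(2,2) and pads_end=(1,3))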
+std::vector<float> in =
+{1.f, 2.f, 3.f, 4.f,
+ 5.f, 6.f, 7.f, 8.f,
+ 9.f,10.f,11.f,12.f}; //  3x4
+
+std::vector<float> ref_constant =
+{0.f,0.f,0.f, 0.f, 0.f, 0.f,0.f,0.f,0.f,
+ 0.f,0.f,0.f, 0.f, 0.f, 0.f,0.f,0.f,0.f,
+ 0.f,0.f,1.f, 2.f, 3.f, 4.f,0.f,0.f,0.f,
+ 0.f,0.f,5.f, 6.f, 7.f, 8.f,0.f,0.f,0.f,
+ 0.f,0.f,9.f,10.f,11.f,12.f,0.f,0.f,0.f,
+ 0.f,0.f,0.f, 0.f, 0.f, 0.f,0.f,0.f,0.f}; //  6x9
+
+std::vector<float> ref_edge =
+{1.f,1.f,1.f, 2.f, 3.f, 4.f, 4.f, 4.f, 4.f,
+ 1.f,1.f,1.f, 2.f, 3.f, 4.f, 4.f, 4.f, 4.f,
+ 1.f,1.f,1.f, 2.f, 3.f, 4.f, 4.f, 4.f, 4.f,
+ 5.f,5.f,5.f, 6.f, 7.f, 8.f, 8.f, 8.f, 8.f,
+ 9.f,9.f,9.f,10.f,11.f,12.f,12.f,12.f,12.f,
+ 9.f,9.f,9.f,10.f,11.f,12.f,12.f,12.f,12.f}; //  6x9
+
+std::vector<float> ref_reflect =
+{11.f,10.f,9.f,10.f,11.f,12.f,11.f,10.f,9.f,
+  7.f, 6.f,5.f, 6.f, 7.f, 8.f, 7.f, 6.f,5.f,
+  3.f, 2.f,1.f, 2.f, 3.f, 4.f, 3.f, 2.f,1.f,
+  7.f, 6.f,5.f, 6.f, 7.f, 8.f, 7.f, 6.f,5.f,
+ 11.f,10.f,9.f,10.f,11.f,12.f,11.f,10.f,9.f,
+  7.f, 6.f,5.f, 6.f, 7.f, 8.f, 7.f, 6.f,5.f}; //  6x9
+
+std::vector<float> ref_symmetric =
+{6.f,5.f,5.f, 6.f, 7.f, 8.f, 8.f, 7.f, 6.f,
+ 2.f,1.f,1.f, 2.f, 3.f, 4.f, 4.f, 3.f, 2.f,
+ 2.f,1.f,1.f, 2.f, 3.f, 4.f, 4.f, 3.f, 2.f,
+ 6.f,5.f,5.f, 6.f, 7.f, 8.f, 8.f, 7.f, 6.f,
+10.f,9.f,9.f,10.f,11.f,12.f,12.f,11.f,10.f,
+10.f,9.f,9.f,10.f,11.f,12.f,12.f,11.f,10.f}; //  6x9
+
+#define PLUGING_CASE(_device, _test, __num, ...) \
+    INSTANTIATE_TEST_CASE_P(smoke_##_device##_run##__num, _test, ::testing::Values(padTF_test_params{#_device, __VA_ARGS__}) );
+
+#define PLUGING_CASE_WITH_SUFFIX(_device, _suffix, _test, __num, ...) \
+    INSTANTIATE_TEST_CASE_P(_device##_run##_suffix##__num, _test, ::testing::Values(padTF_test_params{#_device, __VA_ARGS__}) );
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/permute_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/permute_tests.hpp
new file mode 100644 (file)
index 0000000..1206787
--- /dev/null
@@ -0,0 +1,168 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+#include <cmath>
+#include <string>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace std;
+
+
+struct permute_base_params {
+    SizeVector dims;
+    SizeVector order;
+};
+
+struct permute_test_params {
+    std::string device_name;
+    permute_base_params base;
+    permute_test_params(std::string name, permute_base_params params) : device_name(name), base(params) {}
+};
+
+template <typename data_t>
+void ref_permute(const TBlob<data_t> &src, TBlob<data_t> &dst, permute_base_params prm) {
+    const data_t *src_data = src.readOnly();
+    data_t *dst_data = dst.data();
+
+    SizeVector orderedDims;
+    for (auto ord : prm.order) {
+        orderedDims.push_back(src.getTensorDesc().getDims()[ord]);
+    }
+    TensorDesc desc(Precision::FP32, src.getTensorDesc().getDims(), {orderedDims, prm.order});
+
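+    // desc.offset(i) maps the flat logical index i to the physical offset of the
+    // permuted layout, so this element-wise copy realizes the permutation.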
+    for (int i=0; i < src.size(); i++) {
+        dst_data[desc.offset(i)] = src_data[src.getTensorDesc().offset(i)];
+    }
+}
+
+class PermuteOnlyTests: public TestsCommon,
+                        public WithParamInterface<permute_test_params> {
+    std::string model_t = R"V0G0N(
+<Net Name="Power_Only" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="in1" type="Input" precision="FP32" id="0">
+            <output>
+                <port id="0">
+                    __DIMS__
+                </port>
+            </output>
+        </layer>
+        <layer name="permute" id="1" type="Permute" precision="FP32">
+            <data order="_ORDER_"/>
+            <input>
+                <port id="1">
+                    __DIMS__
+                </port>
+            </input>
+            <output>
+                <port id="2">
+                    __DST_DIMS__
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
+    </edges>
+</Net>
+)V0G0N";
+
+protected:
+    std::string getModel(permute_base_params p) {
+        std::string model = model_t;
+        std::string dims;
+        std::string dst_dims;
+        for (auto& dim : p.dims) {
+            dims += "<dim>";
+            dims += std::to_string(dim) + "</dim>\n";
+        }
+
+        std::string order;
+        for (auto& ord : p.order) {
+            if (!order.empty())
+                order += ",";
+            order += std::to_string(ord);
+            dst_dims += "<dim>";
+            dst_dims += std::to_string(p.dims[ord]) + "</dim>\n";
+        }
+
+        REPLACE_WITH_STR(model, "__DIMS__", dims);
+        REPLACE_WITH_STR(model, "__DST_DIMS__", dst_dims);
+        REPLACE_WITH_STR(model, "_ORDER_", order);
+
+        return model;
+    }
+
+    virtual void TearDown() {
+    }
+
+    virtual void SetUp() {
+        try {
+            TestsCommon::SetUp();
+            permute_test_params p = ::testing::WithParamInterface<permute_test_params>::GetParam();
+            std::string model = getModel(p.base);
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+
+            Blob::Ptr src = make_shared_blob<float>({Precision::FP32, p.base.dims,
+                                        TensorDesc::getLayoutByDims(p.base.dims)});
+            src->allocate();
+            fill_data(src->buffer(), src->size());
+
+            auto* srcPtr = dynamic_cast<TBlob<float>*>(src.get());
+
+            if (srcPtr == nullptr)
+                FAIL() << "Cannot cast blob to TBlob<float>.";
+
+            inferRequest.SetBlob("in1", src);
+
+            OutputsDataMap out = net.getOutputsInfo();
+            auto item = *out.begin();
+
+            TBlob<float>::Ptr output;
+            output = make_shared_blob<float>(item.second->getTensorDesc());
+            output->allocate();
+            inferRequest.SetBlob(item.first, output);
+            inferRequest.Infer();
+
+            TensorDesc td(Precision::FP32, p.base.dims,
+                                           TensorDesc::getLayoutByDims(p.base.dims));
+            TBlob<float> dst_ref(td);
+            dst_ref.allocate();
+
+            ref_permute(*srcPtr, dst_ref, p.base);
+
+            compare(*output, dst_ref);
+        } catch (const details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(PermuteOnlyTests, TestsPermute) {}
+
+#define case_1  permute_base_params{{2, 3, 4, 5}, {0, 1, 2, 3}}
+#define case_2  permute_base_params{{2, 3, 4, 5}, {0, 2, 3, 1}}
+#define case_3  permute_base_params{{2, 3, 4, 5}, {0, 2, 1, 3}}
+#define case_4  permute_base_params{{2, 3, 4}, {0, 1, 2}}
+#define case_5  permute_base_params{{2, 3, 4}, {0, 2, 1}}
+#define case_6  permute_base_params{{2, 3}, {0, 1}}
+#define case_7  permute_base_params{{2, 3, 4, 5, 6}, {0, 1, 2, 3, 4}}
+#define case_8  permute_base_params{{2, 3, 4, 5, 6}, {0, 4, 2, 1, 3}}
+#define case_9  permute_base_params{{2, 3, 4, 5, 6}, {0, 2, 4, 3, 1}}
+#define case_10 permute_base_params{{2, 3, 4, 5, 6}, {0, 3, 2, 4, 1}}
+#define case_11 permute_base_params{{2, 8, 2, 2, 4, 5}, {0, 1, 4, 2, 5, 3}}
+#define case_12 permute_base_params{{2, 8, 3, 3, 4, 5}, {0, 1, 4, 2, 5, 3}}
+#define case_13 permute_base_params{{2, 12, 9}, {0, 2, 1}}
+#define case_14 permute_base_params{{2, 8, 3, 3, 4, 5}, {0, 3, 4, 1, 5, 2}}
+#define case_15 permute_base_params{{2, 3, 4, 5}, {0, 1, 3, 2}}
+#define case_16 permute_base_params{{2, 3, 4, 5, 7}, {0, 3, 1, 4, 2}}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/quantize_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/quantize_tests.hpp
new file mode 100644 (file)
index 0000000..357b37c
--- /dev/null
@@ -0,0 +1,331 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+#include <cassert>
+#include <cmath>
+#include <vector>
+#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include "common_test_utils/data_utils.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+
+struct quantize_test_params {
+    std::string device_name;
+
+    struct {
+        size_t n;
+        size_t c;
+        size_t h;
+        size_t w;
+    } in;
+
+    size_t ic_const_blobs;
+    size_t levels;
+    bool reverse_out_vals;
+};
+
+template<typename data_t>
+void ref_quantize(const std::vector<Blob::Ptr> &srcs, std::vector<Blob::Ptr> &dsts, quantize_test_params prm) {
+    assert(dsts.size() == 1);
+
+    const data_t* src_data = srcs[0]->buffer().as<data_t*>();
+    const data_t* input_low_data = srcs[1]->buffer().as<data_t*>();
+    const data_t* input_high_data = srcs[2]->buffer().as<data_t*>();
+    const data_t* output_low_data = srcs[3]->buffer().as<data_t*>();
+    const data_t* output_high_data = srcs[4]->buffer().as<data_t*>();
+
+    data_t* dst_data = dsts[0]->buffer().as<data_t*>();
+
+    size_t N = prm.in.n;
+    size_t C = prm.in.c;
+    size_t H = prm.in.h;
+    size_t W = prm.in.w;
+    size_t ICB = prm.ic_const_blobs;
+
+    for (int n = 0; n < N; n++) {
+        for (int c = 0; c < C; c++) {
+            for (int h = 0; h < H; h++) {
+                for (int w = 0; w < W; w++) {
+                    size_t idx = n*C*H*W + c*H*W + h*W + w;
+
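+                    // FakeQuantize: values outside [input_low, input_high] saturate to
+                    // the output bounds; in-range values snap to one of 'levels' grid
+                    // points and rescale to [output_low, output_high]; per-channel
+                    // ranges are broadcast via c % ICB.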
+                    if (src_data[idx] <= input_low_data[c % ICB])
+                        dst_data[idx] = output_low_data[c % ICB];
+                    else if (src_data[idx] > input_high_data[c % ICB])
+                        dst_data[idx] = output_high_data[c % ICB];
+                    else
+                        dst_data[idx] = roundf((src_data[idx] - input_low_data[c % ICB]) /
+                                               (input_high_data[c % ICB] - input_low_data[c % ICB]) * (prm.levels-1)) /
+                                        (prm.levels-1) * (output_high_data[c % ICB] - output_low_data[c % ICB]) + output_low_data[c % ICB];
+                }
+            }
+        }
+    }
+}
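+
+// Worked example of the formula above (illustrative values, not taken from
+// the tests): with levels = 4 and input/output ranges both [-1, 1], an input
+// of 0.3 maps to
+//   roundf((0.3 + 1) / 2 * 3) / 3 * 2 - 1 = roundf(1.95) / 3 * 2 - 1 = 1/3,
+// i.e. the nearest of the four representable levels {-1, -1/3, 1/3, 1}.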
+
+class QuantizeOnlyTest : public TestsCommon, public WithParamInterface<quantize_test_params> {
+
+    std::string model_t = R"V0G0N(
+<Net Name="Quantize_Only" version="6" precision="FP32" batch="1">
+    <layers>
+        <layer name="data" type="Input" precision="FP32" id="0">
+            <output>
+                <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="input_low" type="Const" precision="FP32" id="1">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>_ICB_</dim>
+                    <dim>1</dim>
+                    <dim>1</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="_O1_" size="_S1_"/>
+            </blobs>
+        </layer>
+        <layer name="input_high" type="Const" precision="FP32" id="2">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>_ICB_</dim>
+                    <dim>1</dim>
+                    <dim>1</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="_O2_" size="_S2_"/>
+            </blobs>
+        </layer>
+        <layer name="output_low" type="Const" precision="FP32" id="3">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>_ICB_</dim>
+                    <dim>1</dim>
+                    <dim>1</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="_O3_" size="_S3_"/>
+            </blobs>
+        </layer>
+        <layer name="output_high" type="Const" precision="FP32" id="4">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>_ICB_</dim>
+                    <dim>1</dim>
+                    <dim>1</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="_O4_" size="_S4_"/>
+            </blobs>
+        </layer>
+        <layer name="quantize" type="FakeQuantize" precision="FP32" id="5">
+            <data levels="_L_"/>
+            <input>
+                <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+                <port id="1">
+                    <dim>1</dim>
+                    <dim>_ICB_</dim>
+                    <dim>1</dim>
+                    <dim>1</dim>
+                </port>
+                <port id="2">
+                    <dim>1</dim>
+                    <dim>_ICB_</dim>
+                    <dim>1</dim>
+                    <dim>1</dim>
+                </port>
+                <port id="3">
+                    <dim>1</dim>
+                    <dim>_ICB_</dim>
+                    <dim>1</dim>
+                    <dim>1</dim>
+                </port>
+                <port id="4">
+                    <dim>1</dim>
+                    <dim>_ICB_</dim>
+                    <dim>1</dim>
+                    <dim>1</dim>
+                </port>
+            </input>
+            <output>
+                <port id="5">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="5" to-port="0"/>
+        <edge from-layer="1" from-port="0" to-layer="5" to-port="1"/>
+        <edge from-layer="2" from-port="0" to-layer="5" to-port="2"/>
+        <edge from-layer="3" from-port="0" to-layer="5" to-port="3"/>
+        <edge from-layer="4" from-port="0" to-layer="5" to-port="4"/>
+    </edges>
+</Net>
+)V0G0N";
+
+    std::string getModel(quantize_test_params p) {
+        std::string model = model_t;
+
+        REPLACE_WITH_NUM(model, "_IN_",  p.in.n);
+        REPLACE_WITH_NUM(model, "_IC_",  p.in.c);
+        REPLACE_WITH_NUM(model, "_IH_",  p.in.h);
+        REPLACE_WITH_NUM(model, "_IW_",  p.in.w);
+        REPLACE_WITH_NUM(model, "_L_",   p.levels);
+        REPLACE_WITH_NUM(model, "_ICB_", p.ic_const_blobs);
+
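+        // The four Const layers read one shared weights blob: input_low,
+        // input_high, output_low and output_high are packed back to back,
+        // ic_const_blobs floats each. Illustrative example: with
+        // ic_const_blobs = 8 the offset/size pairs below resolve to 0/32,
+        // 32/32, 64/32 and 96/32 bytes.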
+        REPLACE_WITH_NUM(model, "_O1_",  0 * p.ic_const_blobs * sizeof(float));
+        REPLACE_WITH_NUM(model, "_S1_",  1 * p.ic_const_blobs * sizeof(float));
+        REPLACE_WITH_NUM(model, "_O2_",  1 * p.ic_const_blobs * sizeof(float));
+        REPLACE_WITH_NUM(model, "_S2_",  1 * p.ic_const_blobs * sizeof(float));
+        REPLACE_WITH_NUM(model, "_O3_",  2 * p.ic_const_blobs * sizeof(float));
+        REPLACE_WITH_NUM(model, "_S3_",  1 * p.ic_const_blobs * sizeof(float));
+        REPLACE_WITH_NUM(model, "_O4_",  3 * p.ic_const_blobs * sizeof(float));
+        REPLACE_WITH_NUM(model, "_S4_",  1 * p.ic_const_blobs * sizeof(float));
+
+        return model;
+    }
+
+protected:
+    virtual void SetUp() {
+
+        try {
+            quantize_test_params p = ::testing::WithParamInterface<quantize_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            std::vector<Blob::Ptr> srcs_vec;
+            Blob::Ptr blob_data = make_shared_blob<float>({Precision::FP32, {p.in.n, p.in.c, p.in.h, p.in.w}, Layout::NCHW});
+            blob_data->allocate();
+            CommonTestUtils::fill_data_sine(blob_data->buffer().as<float*>(), blob_data->size(), 0.f, 2.f, 0.1f);
+            srcs_vec.push_back(blob_data);
+
+            float low_center = p.levels == 2 ? 0.f : -1.f;
+            float high_center = p.levels == 2 ? 0.f : 1.f;
+            float low_val = p.reverse_out_vals ? 1.0f : -1.f;
+            float high_val = p.reverse_out_vals ? -1.0f : 1.f;
+
+            Blob::Ptr input_low_data = make_shared_blob<float>({Precision::FP32, {p.ic_const_blobs}, Layout::C});
+            input_low_data->allocate();
+            CommonTestUtils::fill_data_sine(input_low_data->buffer().as<float*>(), input_low_data->size(), low_center, 2.f, 0.2f);
+            srcs_vec.push_back(input_low_data);
+
+            Blob::Ptr input_high_data = make_shared_blob<float>({Precision::FP32, {p.ic_const_blobs}, Layout::C});
+            input_high_data->allocate();
+            CommonTestUtils::fill_data_sine(input_high_data->buffer().as<float*>(), input_high_data->size(), high_center, 2.f, 0.2f);
+            srcs_vec.push_back(input_high_data);
+
+            Blob::Ptr output_low_data = make_shared_blob<float>({Precision::FP32, { p.ic_const_blobs }, Layout::C});
+            output_low_data->allocate();
+            if (p.levels == 2) {
+                CommonTestUtils::fill_data_const(output_low_data->buffer().as<float*>(), output_low_data->size(), low_val);
+            } else {
+                CommonTestUtils::fill_data_sine(output_low_data->buffer().as<float*>(), output_low_data->size(), low_center, 2.f, 0.3f);
+            }
+            srcs_vec.push_back(output_low_data);
+
+            Blob::Ptr output_high_data = make_shared_blob<float>({Precision::FP32, {p.ic_const_blobs}, Layout::C});
+            output_high_data->allocate();
+            if (p.levels == 2) {
+                CommonTestUtils::fill_data_const(output_high_data->buffer().as<float*>(), output_high_data->size(), high_val);
+            } else {
+                CommonTestUtils::fill_data_sine(output_high_data->buffer().as<float*>(), output_high_data->size(), high_center, 2.f, 0.3f);
+            }
+            srcs_vec.push_back(output_high_data);
+
+            TBlob<uint8_t> *weights_ptr = new TBlob<uint8_t>({Precision::U8, {4 * p.ic_const_blobs * sizeof(float)}, Layout::C});
+            weights_ptr->allocate();
+
+            float* pwei = weights_ptr->buffer().as<float*>();
+            int off = 0;
+            for (int i = 1; i < 5; i++) {
+                float* pdata = srcs_vec[i]->buffer();
+                for (int j = 0; j < p.ic_const_blobs; j++) {
+                    pwei[off++] = pdata[j];
+                }
+            }
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, TBlob<uint8_t>::Ptr(weights_ptr));
+
+            std::map<std::string, std::string> config = {{PluginConfigInternalParams::KEY_LP_TRANSFORMS_MODE, PluginConfigParams::NO}};
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name, config);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+            inferRequest.SetBlob("data", blob_data);
+
+            std::vector<Blob::Ptr> dsts_vec;
+            std::vector<Blob::Ptr> out_vec;
+
+            OutputsDataMap out_info_map = net.getOutputsInfo();
+            for (auto info : out_info_map) {
+                Blob::Ptr blob = make_shared_blob<float>({Precision::FP32, info.second->getDims() , Layout::NCHW});
+                blob->allocate();
+                inferRequest.SetBlob(info.first, blob);
+                out_vec.push_back(blob);
+
+                Blob::Ptr blob_ref = make_shared_blob<float>({Precision::FP32, info.second->getDims(), Layout::NCHW});
+                blob_ref->allocate();
+                dsts_vec.push_back(blob_ref);
+            }
+
+            ref_quantize<float>(srcs_vec, dsts_vec, p);
+
+            inferRequest.Infer();
+
+            compare(*out_vec[0], *dsts_vec[0]);
+
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+// {N, C, H, W}, ic_const_blobs, quantization_levels, reverse_out_vals
+#define case_1 {1, 8, 5, 5}, 1, 2, false
+#define case_2 {1, 8, 5, 5}, 8, 2, false
+#define case_3 {1, 8, 5, 5}, 1, 4, false
+#define case_4 {1, 8, 5, 5}, 8, 4, false
+#define case_5 {1, 8, 5, 4}, 1, 8, false
+#define case_6 {1, 8, 5, 4}, 8, 8, false
+#define case_7 {1, 17, 5, 5}, 1, 2, false
+#define case_8 {1, 17, 5, 5}, 17, 2, false
+#define case_9 {1, 17, 5, 5}, 1, 4, false
+#define case_10 {1, 17, 5, 5}, 17, 4, false
+#define case_11 {1, 17, 5, 4}, 1, 8, false
+#define case_12 {1, 17, 5, 4}, 17, 8, false
+#define case_13 {1, 8, 5, 5}, 1, 2, true
+#define case_14 {1, 8, 5, 5}, 8, 2, true
+#define case_15 {1, 8, 5, 5}, 1, 4, true
+#define case_16 {1, 8, 5, 5}, 8, 4, true
+#define case_17 {1, 8, 5, 4}, 1, 8, true
+#define case_18 {1, 8, 5, 4}, 8, 8, true
+#define case_19 {1, 17, 5, 5}, 1, 2, true
+#define case_20 {1, 17, 5, 5}, 17, 2, true
+#define case_21 {1, 17, 5, 5}, 1, 4, true
+#define case_22 {1, 17, 5, 5}, 17, 4, true
+#define case_23 {1, 17, 5, 4}, 1, 8, true
+#define case_24 {1, 17, 5, 4}, 17, 8, true
+
+TEST_P(QuantizeOnlyTest, TestsQuantize) {}
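+
+// Each case_N macro deliberately omits the leading device_name field of
+// quantize_test_params, so instantiations compose it per plugin. A minimal
+// sketch (the "CPU" device and the suite prefix are assumptions, not part of
+// this header):
+//
+// INSTANTIATE_TEST_CASE_P(smoke_Quantize, QuantizeOnlyTest,
+//         ::testing::Values(quantize_test_params{"CPU", case_1},
+//                           quantize_test_params{"CPU", case_13}));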
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/reduce_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/reduce_tests.hpp
new file mode 100644 (file)
index 0000000..8b67fb7
--- /dev/null
@@ -0,0 +1,402 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+#include <cmath>
+#include <cfloat>   // FLT_MAX for the Min/Max reduce identities
+#include <cstring>  // memcmp against the reference vector
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include "ie_memcpy.h"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace std;
+
+struct reduce_test_params {
+    std::string                 device_name;
+    std::string                 inIdxPrecision;
+    std::string                 reduce_type;
+    bool                        keep_dims;
+    SizeVector in_shape;
+    std::vector<float>          input_tensor;
+    std::vector<int32_t>        axes_for_reduction;
+    SizeVector out_shape;
+    std::vector<float>          reference;
+};
+
+template <typename F>
+void reduce(
+        const float* src_data,
+        SizeVector src_dims,
+        SizeVector srcStrides,
+        float* dst_data,
+        SizeVector dst_dims,
+        SizeVector dstStrides,
+        float init_value,
+        bool keep_dims,
+        SizeVector skip_dims,
+        F func
+) {
+    size_t i, src_idx, dst_idx;
+    for (i = 0; i < dstStrides[0] * dst_dims[0]; ++i)
+        dst_data[i] = init_value;
+
+    SizeVector counters(src_dims.size(), 0);
+    for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx) {
+        if (keep_dims)
+            for (i = 0, dst_idx = 0; i < dst_dims.size(); ++i)
+                dst_idx += (counters[i] % dst_dims[i]) * dstStrides[i];
+        else
+            for (i = 0, dst_idx = 0; i < dst_dims.size(); ++i)
+                dst_idx += counters[skip_dims[i]] * dstStrides[i];
+
+        dst_data[dst_idx] = func(dst_data[dst_idx], src_data[src_idx]);
+        for (int j = src_dims.size() - 1; j >= 0; j--) {
+            counters[j] = (counters[j] + 1) % src_dims[j];
+            if (counters[j] != 0) break;
+        }
+    }
+}
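+
+// Illustrative walk-through (example values only): reducing src_dims {2, 3}
+// over axes {1} with keep_dims = true yields dst_dims {2, 1}, srcStrides
+// {3, 1} and dstStrides {1, 1}. The counters enumerate source coordinates
+// (0,0)..(1,2) in row-major order, so dst_idx = (counters[0] % 2) * 1 and
+// each output slot accumulates exactly the three elements of its row
+// through func.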
+
+void ref_reduce(
+    std::string reduce_type,
+    TBlob<float> &src,
+    bool keep_dims,
+    std::vector<int32_t> axes_for_reduction,
+    TBlob<float> &dst,
+    SizeVector &out_dims
+) {
+    size_t i, src_idx, dst_idx;
+    const float* src_data = src.data();
+    SizeVector src_dims = src.getTensorDesc().getDims();
+    SizeVector srcStrides = src.getTensorDesc().getBlockingDesc().getStrides();
+    float* dst_data = dst.data();
+    SizeVector dst_dims = dst.getTensorDesc().getDims();
+    SizeVector dstStrides = dst.getTensorDesc().getBlockingDesc().getStrides();
+    SizeVector skip_dims;
+
+    if (!dst_dims.size())
+        dst_dims = InferenceEngine::SizeVector(1, 1);
+
+    if (!dstStrides.size())
+        dstStrides = InferenceEngine::SizeVector(1, 1);
+
+    if (axes_for_reduction.size() == 0)
+        FAIL() << " Axes vector must not be empty";
+
+    for (i = 0; i < axes_for_reduction.size(); i++) {
+        int32_t axis = axes_for_reduction[i];
+        if (axis < 0)
+            axis += src_dims.size();
+
+        if (axis >= src_dims.size())
+            FAIL() << " Reduction axis exceeds data tensor dimension";
+        axes_for_reduction[i] = axis;
+    }
+
+    for (size_t j = 0; j < src_dims.size(); j++) {
+        bool found = false;
+        for (size_t axis : axes_for_reduction)
+            if (j == axis) found = true;
+
+        if (!found) {
+            out_dims.push_back(src_dims[j]);
+            if (!keep_dims) skip_dims.push_back(j);
+        }
+        else {
+            if (keep_dims) out_dims.push_back(1);
+        }
+    }
+
+    if (reduce_type == "ReduceAnd") {
+        if (out_dims.size()) {
+            reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 1.0f, keep_dims, skip_dims,
+                   [](float x, float y)->float { return x && y; } );
+        } else {
+            dst_data[0] = 1.0f;
+            for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx)
+                dst_data[0] = dst_data[0] && src_data[src_idx];
+        }
+    } else if (reduce_type == "ReduceL1") {
+        if (out_dims.size()) {
+            reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0.0f, keep_dims, skip_dims,
+                   [](float x, float y)->float { return x + (std::abs)(y); } );
+        } else {
+            dst_data[0] = 0.0f;
+            for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx)
+                dst_data[0] += (std::abs)(src_data[src_idx]);
+        }
+    } else if (reduce_type == "ReduceL2") {
+        if (out_dims.size()) {
+            reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0.0f, keep_dims, skip_dims,
+                   [](float x, float y)->float { return x + y * y; } );
+
+            for (i = 0; i < dstStrides[0] * dst_dims[0]; ++i)
+                dst_data[i] = (std::sqrt)(dst_data[i]);
+        } else {
+            dst_data[0] = 0.0f;
+            for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx)
+                dst_data[0] += src_data[src_idx] * src_data[src_idx];
+            dst_data[0] = sqrt(dst_data[0]);
+        }
+    } else if (reduce_type == "ReduceLogSum") {
+        if (out_dims.size()) {
+            reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0.0f, keep_dims, skip_dims,
+                   [](float x, float y)->float { return x + y; });
+
+            for (i = 0; i < dstStrides[0] * dst_dims[0]; ++i)
+                dst_data[i] = logf(dst_data[i]);
+        } else {
+            dst_data[0] = 0.0f;
+            for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx)
+                dst_data[0] += src_data[src_idx];
+            dst_data[0] = logf(dst_data[0]);
+        }
+    } else if (reduce_type == "ReduceLogSumExp") {
+        if (out_dims.size()) {
+            reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0.0f, keep_dims, skip_dims,
+                   [](float x, float y)->float { return x + expf(y); });
+
+            for (i = 0; i < dstStrides[0] * dst_dims[0]; ++i)
+                dst_data[i] = logf(dst_data[i]);
+        } else {
+            dst_data[0] = 0.0f;
+            for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx)
+                dst_data[0] += expf(src_data[src_idx]);
+            dst_data[0] = logf(dst_data[0]);
+        }
+    } else if (reduce_type == "ReduceMax") {
+        if (out_dims.size()) {
+            reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, -FLT_MAX, keep_dims, skip_dims,
+                   [](float x, float y)->float { return x > y ? x : y; });
+        } else {
+            dst_data[0] = -FLT_MAX;  // note: FLT_MIN would be the smallest positive float, not the max identity
+            for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx)
+                dst_data[0] = dst_data[0] > src_data[src_idx] ? dst_data[0] : src_data[src_idx];
+        }
+    } else if (reduce_type == "ReduceMean") {
+        if (out_dims.size()) {
+            reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0.0f, keep_dims, skip_dims,
+                   [](float x, float y)->float { return x + y; });
+            float reduced_dims_work_amount = 1.f;
+            for (size_t axis : axes_for_reduction) {
+                reduced_dims_work_amount *= static_cast<float>(src_dims[axis]);
+            }
+            for (i = 0; i < dstStrides[0] * dst_dims[0]; ++i)
+                dst_data[i] /= reduced_dims_work_amount;
+        } else {
+            dst_data[0] = 0.0f;
+            for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx)
+                dst_data[0] += src_data[src_idx];
+            dst_data[0] /= static_cast<float>(srcStrides[0] * src_dims[0]);
+        }
+    } else if (reduce_type == "ReduceMin") {
+        if (out_dims.size()) {
+            reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, FLT_MAX, keep_dims, skip_dims,
+                   [](float x, float y)->float { return x < y ? x : y; });
+        } else {
+            dst_data[0] = FLT_MAX;
+            for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx)
+                dst_data[0] = dst_data[0] < src_data[src_idx] ? dst_data[0] : src_data[src_idx];
+        }
+    } else if (reduce_type == "ReduceOr") {
+        if (out_dims.size()) {
+            reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0.0f, keep_dims, skip_dims,
+                   [](float x, float y)->float { return x || y; });
+        } else {
+            dst_data[0] = 0;
+            for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx)
+                dst_data[0] = dst_data[0] || src_data[src_idx];
+        }
+    } else if (reduce_type == "ReduceProd") {
+        if (out_dims.size()) {
+            reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 1.0f, keep_dims, skip_dims,
+                   [](float x, float y)->float { return x * y; });
+        } else {
+            dst_data[0] = 1.0f;
+            for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx)
+                dst_data[0] *= src_data[src_idx];
+        }
+    } else if (reduce_type == "ReduceSum") {
+        if (out_dims.size()) {
+            reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0.0f, keep_dims, skip_dims,
+                   [](float x, float y)->float { return x + y; });
+        } else {
+            dst_data[0] = 0.0f;
+            for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx)
+                dst_data[0] += src_data[src_idx];
+        }
+    } else if (reduce_type == "ReduceSumSquare") {
+        if (out_dims.size()) {
+            reduce(src_data, src_dims, srcStrides, dst_data, dst_dims, dstStrides, 0.0f, keep_dims, skip_dims,
+                   [](float x, float y)->float { return x + y * y; });
+        } else {
+            dst_data[0] = 0.0f;
+            for (src_idx = 0; src_idx < srcStrides[0] * src_dims[0]; ++src_idx)
+                dst_data[0] += src_data[src_idx] * src_data[src_idx];
+        }
+    }
+}
+
+class ReduceTestsShared : public TestsCommon, public WithParamInterface<reduce_test_params> {
+    std::string model_t = R"V0G0N(
+<net Name="Reduce_net" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="input" type="Input" precision="FP32" id="1">
+            <output>
+                <port id="1">
+                    _IN_
+                </port>
+            </output>
+        </layer>
+        <layer id="2" name="input2" precision="_IIDXP_" type="Const">
+            <output>
+                <port id="1">
+                    <dim>_DIM_SIZE_</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="0" size="_DIM_SIZE_"/>
+            </blobs>
+        </layer>
+        <layer name="reduce_REDUCE_TYPE_" id="3" type="_REDUCE_TYPE_" precision="FP32">
+            <data keep_dims="_KEEP_DIMS_" />
+            <input>
+                <port id="1">
+                    _IN_
+                </port>
+                <port id="2">
+                    <dim>_DIM_SIZE_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="3">
+                    _OUT_
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="1" from-port="1" to-layer="3" to-port="1"/>
+        <edge from-layer="2" from-port="1" to-layer="3" to-port="2"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string getModel(reduce_test_params p) {
+        std::string model = model_t;
+        std::string in_shape;
+        std::string out_shape = "";
+
+        for (size_t i = 0; i < p.in_shape.size(); i++) {
+            in_shape += "<dim>";
+            in_shape += std::to_string(p.in_shape[i]) + "</dim>\n";
+        }
+        REPLACE_WITH_STR(model, "_IN_", in_shape);
+        REPLACE_WITH_NUM(model, "_DIM_SIZE_", p.axes_for_reduction.size());
+        REPLACE_WITH_STR(model, "_REDUCE_TYPE_", p.reduce_type);
+        REPLACE_WITH_STR(model, "_IIDXP_", p.inIdxPrecision);
+        REPLACE_WITH_NUM(model, "_KEEP_DIMS_", p.keep_dims);
+
+        for (size_t i = 0; i < p.out_shape.size(); i++) {
+            out_shape += "<dim>";
+            out_shape += std::to_string(p.out_shape[i]) + "</dim>\n";
+        }
+        REPLACE_WITH_STR(model, "_OUT_", out_shape);
+
+        return model;
+    }
+
+protected:
+    virtual void TearDown() {
+    }
+
+    static void fill_data_dbgval(float* data, size_t size) {
+        for (size_t i = 0; i < size; i++) {
+            data[i] = i + 1;
+        }
+    }
+
+    virtual void SetUp() {
+        try {
+            TestsCommon::SetUp();
+            reduce_test_params p = ::testing::WithParamInterface<reduce_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            TBlob<uint8_t> * axes = nullptr;
+            if (p.inIdxPrecision == "I32") {
+                axes = new TBlob<uint8_t>({Precision::U8,
+                    {p.axes_for_reduction.size() * sizeof(int32_t)},
+                    Layout::C});
+                axes->allocate();
+                for (size_t i = 0; i < p.axes_for_reduction.size(); i++) {
+                    ((int32_t *) axes->buffer())[i] = p.axes_for_reduction[i];
+                }
+            } else {
+                axes = new TBlob<uint8_t>({Precision::U8,
+                    { p.axes_for_reduction.size() * sizeof(float) },
+                    Layout::C});
+                axes->allocate();
+                for (size_t i = 0; i < p.axes_for_reduction.size(); i++) {
+                    ((float *) axes->buffer())[i] = p.axes_for_reduction[i];
+                }
+            }
+            
+            Core ie;
+            auto net = ie.ReadNetwork(model, TBlob<uint8_t>::Ptr(axes));
+            OutputsDataMap out = net.getOutputsInfo();
+            std::pair<std::string, DataPtr> item = *out.begin();
+
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+
+            // Input Data
+            Blob::Ptr src;
+            src = make_shared_blob<float>({ Precision::FP32, p.in_shape, TensorDesc::getLayoutByDims(p.in_shape) });
+            src->allocate();
+            if(p.input_tensor.size())
+                ie_memcpy(src->buffer(), src->byteSize(), &p.input_tensor[0], sizeof(float)*p.input_tensor.size());
+            else
+                fill_data_dbgval(src->buffer(), src->size());
+
+            auto* srcPtr = dynamic_cast<TBlob<float>*>(src.get());
+            if (srcPtr == nullptr)
+                FAIL() << "Cannot cast blob to TBlob<float>.";
+
+            // Output Reference
+            TBlob<float> dst_ref(item.second->getTensorDesc());
+            dst_ref.allocate();
+
+            SizeVector out_dims;
+            ref_reduce(p.reduce_type, *srcPtr, p.keep_dims, p.axes_for_reduction, dst_ref, out_dims);
+            if (p.out_shape.size()>0 && out_dims.size() != p.out_shape.size())
+                FAIL() << "Wrong out_shape size!";
+            for (size_t i = 0; i < p.out_shape.size(); i++) {
+                if (out_dims[i] != p.out_shape[i])
+                    FAIL() << "Wrong out_shape dimensions!";
+            }
+            if (p.reference.size())
+                if (memcmp(dst_ref.data(), &p.reference[0], p.reference.size() * sizeof(float)) != 0)
+                    FAIL() << "Wrong result with compare reference vector!";
+
+            // Output Data
+            auto output = make_shared_blob<float>(item.second->getTensorDesc());
+            output->allocate();
+            inferRequest.SetBlob(item.first, output);
+
+            // Input
+            inferRequest.SetBlob("input", src);
+            inferRequest.Infer();
+
+            compare(*output, dst_ref);
+        } catch (const details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(ReduceTestsShared, SharedReduceTests) {}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/resample_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/resample_tests.hpp
new file mode 100644 (file)
index 0000000..fcbfcdd
--- /dev/null
@@ -0,0 +1,269 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <cmath>
+#include <cassert>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace std;
+
+struct resample_test_params {
+    std::string device_name;
+    InferenceEngine::SizeVector in_dims;
+    float factor;
+    std::string type;
+};
+
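+// Tent (triangle) interpolation kernel used by the LINEAR branch below:
+// weight 1 at x == 0, decaying linearly to 0 at |x| >= 1, e.g.
+// triangleCoeff(0.25f) == 0.75f.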
+static inline float triangleCoeff(float x) {
+    return std::max(0.0f, 1 - std::abs(x));
+}
+
+template <typename data_t>
+static void ref_resample(const InferenceEngine::TBlob<data_t> &src, InferenceEngine::TBlob<data_t> &dst, resample_test_params p) {
+    const data_t *src_data = src.readOnly();
+    data_t *dst_data = dst.data();
+
+    size_t ndims = p.in_dims.size();
+
+    size_t N = p.in_dims[0];
+    size_t C = p.in_dims[1];
+    size_t ID = ndims == 5 ? p.in_dims[ndims - 3] : 1;
+    size_t IH = p.in_dims[ndims - 2];
+    size_t IW = p.in_dims[ndims - 1];
+    size_t OD = ndims == 5 ? static_cast<size_t>(ID / p.factor) : 1;
+    size_t OH = static_cast<size_t>(IH / p.factor);
+    size_t OW = static_cast<size_t>(IW / p.factor);
+
+    float fx = static_cast<float>(IW) / static_cast<float>(OW);
+    float fy = static_cast<float>(IH) / static_cast<float>(OH);
+    float fz = static_cast<float>(ID) / static_cast<float>(OD);
+
+    if (p.type == "caffe.ResampleParameter.NEAREST") {
+        for (size_t b = 0; b < N; b++) {
+            for (size_t c = 0; c < C; c++) {
+                const float* in_ptr = src_data + IW * IH * ID * C * b + IW * IH * ID * c;
+                float* out_ptr = dst_data + OW * OH * OD * C * b + OW * OH * OD * c;
+                for (size_t oz = 0; oz < OD; oz++) {
+                    for (size_t oy = 0; oy < OH; oy++) {
+                        for (size_t ox = 0; ox < OW; ox++) {
+                            float ix = ox * fx;
+                            float iy = oy * fy;
+                            float iz = oz * fz;
+
+                            size_t ix_r = static_cast<size_t>(std::floor(ix));
+                            size_t iy_r = static_cast<size_t>(std::floor(iy));
+                            size_t iz_r = static_cast<size_t>(std::floor(iz));
+
+                            out_ptr[oz * OH * OW + oy * OW + ox] = in_ptr[iz_r * IH * IW + iy_r * IW + ix_r];
+                        }
+                    }
+                }
+            }
+        }
+    } else if (p.type == "caffe.ResampleParameter.LINEAR") {
+        size_t kernel_width = 2;
+        bool isDownsample = (fx > 1) || (fy > 1) || (fz > 1);
+        bool antialias = false;
+
+        for (size_t b = 0; b < N; b++) {
+            for (size_t c = 0; c < C; c++) {
+                const float* in_ptr = src_data + IW * IH * ID * C * b + IW * IH * ID * c;
+                float* out_ptr = dst_data + OW * OH * OD * C * b + OW * OH * OD * c;
+
+                for (size_t oz = 0; oz < OD; oz++) {
+                    for (size_t oy = 0; oy < OH; oy++) {
+                        for (size_t ox = 0; ox < OW; ox++) {
+                            float ix = ox * fx + fx / 2.0f - 0.5f;
+                            float iy = oy * fy + fy / 2.0f - 0.5f;
+                            float iz = oz * fz + fz / 2.0f - 0.5f;
+
+                            int ix_r = static_cast<int>(round(ix));
+                            int iy_r = static_cast<int>(round(iy));
+                            int iz_r = static_cast<int>(round(iz));
+
+                            float sum = 0;
+                            float wsum = 0;
+
+                            float ax = 1.0f / (antialias ? fx : 1.0f);
+                            float ay = 1.0f / (antialias ? fy : 1.0f);
+                            float az = 1.0f / (antialias ? fz : 1.0f);
+
+                            int rx = (fx < 1.0f) ? 2 : static_cast<int>(ceil(static_cast<float>(kernel_width) / ax));
+                            int ry = (fy < 1.0f) ? 2 : static_cast<int>(ceil(static_cast<float>(kernel_width) / ay));
+                            int rz = (fz < 1.0f) ? 2 : static_cast<int>(ceil(static_cast<float>(kernel_width) / az));
+
+                            for (int z = iz_r - rz; z <= iz_r + rz; z++) {
+                                for (int y = iy_r - ry; y <= iy_r + ry; y++) {
+                                    for (int x = ix_r - rx; x <= ix_r + rx; x++) {
+                                        if (z < 0 || y < 0 || x < 0 || z >= static_cast<int>(ID) || y >= static_cast<int>(IH) || x >= static_cast<int>(IW))
+                                            continue;
+
+                                        float dx = ix - x;
+                                        float dy = iy - y;
+                                        float dz = iz - z;
+
+                                        float w = ax * triangleCoeff(ax * dx) * ay * triangleCoeff(ay * dy) * az * triangleCoeff(az * dz);
+
+                                        sum += w * in_ptr[z * IH * IW + y * IW + x];
+                                        wsum += w;
+                                    }
+                                }
+                            }
+                            out_ptr[oz * OH * OW + oy * OW + ox] = (!wsum) ? 0 : (sum / wsum);
+                        }
+                    }
+                }
+            }
+        }
+    } else {
+        assert(!"Unsupported resample operation type");
+    }
+}
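+
+// Illustrative NEAREST mapping (example values only): with IH = IW = 4 and
+// factor = 2, OH = OW = 2 and fx = fy = 2, so output pixel (oy, ox) = (1, 1)
+// reads input pixel (floor(1 * 2), floor(1 * 2)) = (2, 2).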
+
+class ResampleTests : public TestsCommon, public WithParamInterface<resample_test_params> {
+    std::string model_t = R"V0G0N(
+<net Name="resample_net" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="input" type="Input" precision="FP32" id="1">
+            <output>
+                <port id="1">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_ID_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="resample" id="2" type="Resample" precision="FP32">
+            <data antialias="_AN_" factor="_F_" type="_T_"/>
+            <input>
+                <port id="1">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_ID_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="2">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_OD_</dim>
+                    <dim>_OH_</dim>
+                    <dim>_OW_</dim>
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string getModel(resample_test_params p) {
+        std::string model = model_t;
+        std::string inDim;
+
+        auto dims_size = p.in_dims.size();
+        if (dims_size == 4) {
+            REMOVE_LINE(model, "<dim>_ID_</dim>");
+            REMOVE_LINE(model, "<dim>_OD_</dim>");
+        }
+
+        REPLACE_WITH_NUM(model, "_IN_", p.in_dims[0]);
+        REPLACE_WITH_NUM(model, "_IC_", p.in_dims[1]);
+        if (dims_size == 5)
+            REPLACE_WITH_NUM(model, "_ID_", p.in_dims[dims_size - 3]);
+        REPLACE_WITH_NUM(model, "_IH_", p.in_dims[dims_size - 2]);
+        REPLACE_WITH_NUM(model, "_IW_", p.in_dims[dims_size - 1]);
+
+        if (dims_size == 5)
+            REPLACE_WITH_NUM(model, "_OD_", (int)(p.in_dims[dims_size - 3] / p.factor));
+        REPLACE_WITH_NUM(model, "_OH_", (int)(p.in_dims[dims_size - 2] / p.factor));
+        REPLACE_WITH_NUM(model, "_OW_", (int)(p.in_dims[dims_size - 1] / p.factor));
+
+        REPLACE_WITH_NUM(model, "_AN_", 0);
+        REPLACE_WITH_NUM(model, "_F_", p.factor);
+        REPLACE_WITH_STR(model, "_T_", p.type);
+
+        return model;
+    }
+
+protected:
+    virtual void TearDown() {
+    }
+
+    virtual void SetUp() {
+        try {
+            TestsCommon::SetUp();
+            resample_test_params p = ::testing::WithParamInterface<resample_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
+            InputsDataMap in_info_map = net.getInputsInfo();
+            OutputsDataMap out_info_map = net.getOutputsInfo();
+
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+
+            InferenceEngine::Layout layout = InferenceEngine::ANY;
+            switch (p.in_dims.size()) {
+            case 4: layout = InferenceEngine::NCHW; break;
+            case 5: layout = InferenceEngine::NCDHW; break;
+            default:
+                FAIL() << "Input dims size not supported in this test.";
+            }
+
+            InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float>({InferenceEngine::Precision::FP32, p.in_dims, layout});
+            src->allocate();
+            // Deterministic ramp input (0, 1, 2, ...) so device and reference
+            // consume identical data.
+            for (size_t i = 0; i < src->size(); i++) {
+                src->buffer().as<float*>()[i] = static_cast<float>(i);
+            }
+
+            auto * srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
+
+            if (srcPtr == nullptr)
+                FAIL() << "Cannot cast blob to TBlob<float>.";
+
+            InferenceEngine::BlobMap srcs;
+            srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("input", src));
+
+            InferenceEngine::OutputsDataMap out;
+            out = net.getOutputsInfo();
+            InferenceEngine::BlobMap outputBlobs;
+
+            std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
+
+            InferenceEngine::TBlob<float>::Ptr output;
+            output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
+            output->allocate();
+            outputBlobs[item.first] = output;
+
+            inferRequest.SetInput(srcs);
+            inferRequest.SetOutput(outputBlobs);
+            inferRequest.Infer();
+
+            InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
+            dst_ref.allocate();
+
+            ref_resample<float>(*srcPtr, dst_ref, p);
+
+            compare(*output, dst_ref);
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(ResampleTests, TestsResample) {}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/select_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/select_tests.hpp
new file mode 100644 (file)
index 0000000..8274fde
--- /dev/null
@@ -0,0 +1,298 @@
+// Copyright (C) 2018-2019 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+#include <cmath>
+#include <cstdlib>     // rand
+#include <numeric>     // std::accumulate
+#include <functional>  // std::multiplies
+#include <string>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace std;
+
+struct select_params
+{
+    std::string device_name;
+    SizeVector input1_tensor;
+    SizeVector input2_tensor;
+    SizeVector mask_tensor;
+    std::string auto_broadcast;
+    bool fail_expected;
+};
+
+class SelectTests : public TestsCommon, public WithParamInterface<select_params> {
+    std::string model_base = R"V0G0N(
+    <net name="Select_net" version="7">
+        <layers>
+            <layer name="cond" type="Input" id="0" version="opset1">
+                <data element_type="boolean" shape="_MASK_SHAPE_"/>
+                <output>
+                    <port id="0" precision="BOOL">_MASK_DIMS_</port>
+                </output>
+            </layer>
+            <layer name="input1" type="Input" id="1" version="opset1">
+                <data element_type="f32" shape="_INPUT1_SHAPE_"/>
+                <output>
+                    <port id="0" precision="FP32">_INPUT1_DIMS_</port>
+                </output>
+            </layer>
+            <layer name="input2" type="Input" id="2" version="opset1">
+                <data element_type="f32" shape="_INPUT2_SHAPE_"/>
+                <output>
+                    <port id="0" precision="FP32">_INPUT2_DIMS_</port>
+                </output>
+            </layer>
+            <layer name="select" id="3" type="Select" version="opset1">
+                <data auto_broadcast="_AUTO_BROADCAST_"/>
+                <input>
+                    <port id="0">_MASK_DIMS_</port>
+                    <port id="1">_INPUT1_DIMS_</port>
+                    <port id="2">_INPUT2_DIMS_</port>
+                </input>
+                <output>
+                    <port id="3" precision="FP32">_OUTPUT_DIMS_</port>
+                </output>
+            </layer>
+        </layers>
+        <edges>
+            <edge from-layer="0" from-port="0" to-layer="3" to-port="0"/>
+            <edge from-layer="1" from-port="0" to-layer="3" to-port="1"/>
+            <edge from-layer="2" from-port="0" to-layer="3" to-port="2"/>
+        </edges>
+    </net>
+    )V0G0N";
+
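+    // Numpy-style broadcast of the three input shapes: each is right-aligned
+    // and padded with leading 1s to at least rank 4, then the maximum is
+    // taken per dimension. Illustrative example: cond {5}, input1 {1, 5} and
+    // input2 {2, 1, 5} extend to {1,1,1,5}, {1,1,1,5} and {1,2,1,5}, giving
+    // an output shape of {1, 2, 1, 5}.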
+    SizeVector get_output_tensor(const SizeVector& cond_dims, const SizeVector& input1_dims, const SizeVector& input2_dims)
+    {
+        auto max_in_size = std::max({cond_dims.size(), input1_dims.size(), input2_dims.size()});
+        auto out_size = std::max(max_in_size, (size_t)4);
+
+        SizeVector cond_dims_extended = cond_dims;
+        SizeVector in1_dims_extended = input1_dims;
+        SizeVector in2_dims_extended = input2_dims;
+
+        cond_dims_extended.insert(cond_dims_extended.begin(), out_size - cond_dims_extended.size(), 1);
+        in1_dims_extended.insert(in1_dims_extended.begin(), out_size - in1_dims_extended.size(), 1);
+        in2_dims_extended.insert(in2_dims_extended.begin(), out_size - in2_dims_extended.size(), 1);
+
+        SizeVector output_tensor(out_size, 1);
+
+        for (size_t i = 0; i < output_tensor.size(); i++) {
+            output_tensor[i] = std::max({ cond_dims_extended[i], in1_dims_extended[i], in2_dims_extended[i] });
+        }
+
+        return output_tensor;
+    }
+
+    std::string getModel(select_params p) {
+        std::string mask_shape_str = "";
+        std::string mask_dims_str = "";
+
+        for (size_t i=0; i<p.mask_tensor.size(); i++) {
+            mask_shape_str += std::to_string(p.mask_tensor[i]);
+            mask_dims_str += "\n                        ";
+            mask_dims_str += "<dim>" + std::to_string(p.mask_tensor[i]) + "</dim>";
+            if (i < p.mask_tensor.size() - 1) {
+                mask_shape_str += ",";
+            } else {
+                mask_dims_str += "\n                    ";
+            }
+        }
+
+        std::string input1_shape_str = "";
+        std::string input1_dims_str = "";
+
+        for (size_t i=0; i<p.input1_tensor.size(); i++) {
+            input1_shape_str += std::to_string(p.input1_tensor[i]);
+            input1_dims_str += "\n                        ";
+            input1_dims_str += "<dim>" + std::to_string(p.input1_tensor[i]) + "</dim>";
+            if (i < p.input1_tensor.size() - 1) {
+                input1_shape_str += ",";
+            } else {
+                input1_dims_str += "\n                    ";
+            }
+        }
+
+        std::string input2_shape_str = "";
+        std::string input2_dims_str = "";
+
+        for (size_t i=0; i<p.input2_tensor.size(); i++) {
+            input2_shape_str += std::to_string(p.input2_tensor[i]);
+            input2_dims_str += "\n                        ";
+            input2_dims_str += "<dim>" + std::to_string(p.input2_tensor[i]) + "</dim>";
+            if (i < p.input2_tensor.size() - 1) {
+                input2_shape_str += ",";
+            } else {
+                input2_dims_str += "\n                    ";
+            }
+        }
+
+        SizeVector output_tensor = get_output_tensor(p.mask_tensor, p.input1_tensor, p.input2_tensor);
+
+        std::string output_shape_str = "";
+        std::string output_dims_str = "";
+
+        for (size_t i=0; i<output_tensor.size(); i++) {
+            output_shape_str += std::to_string(output_tensor[i]);
+            output_dims_str += "\n                        ";
+            output_dims_str += "<dim>" + std::to_string(output_tensor[i]) + "</dim>";
+            if (i < output_tensor.size() - 1) {
+                output_shape_str += ",";
+            } else {
+                output_dims_str += "\n                    ";
+            }
+        }
+
+        REPLACE_WITH_STR(model_base, "_MASK_SHAPE_", mask_shape_str);
+        REPLACE_WITH_STR(model_base, "_MASK_DIMS_", mask_dims_str);
+
+        REPLACE_WITH_STR(model_base, "_INPUT1_SHAPE_", input1_shape_str);
+        REPLACE_WITH_STR(model_base, "_INPUT1_DIMS_", input1_dims_str);
+
+        REPLACE_WITH_STR(model_base, "_INPUT2_SHAPE_", input2_shape_str);
+        REPLACE_WITH_STR(model_base, "_INPUT2_DIMS_", input2_dims_str);
+
+        REPLACE_WITH_STR(model_base, "_OUTPUT_SHAPE_", output_shape_str);
+        REPLACE_WITH_STR(model_base, "_OUTPUT_DIMS_", output_dims_str);
+
+        REPLACE_WITH_STR(model_base, "_AUTO_BROADCAST_", p.auto_broadcast);
+
+        return model_base;
+    }
+
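+    // Maps a logical BFHW coordinate onto a possibly lower-rank or broadcast
+    // tensor: a coordinate outside the tensor's extent collapses to 0, which
+    // mirrors numpy broadcasting on read. Illustrative example: for a rank-2
+    // tensor {3, 5} (H, W), b and f are ignored and the result is h * 5 + w.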
+    size_t get_index_bfhw(SizeVector tensor, size_t b, size_t f, size_t h, size_t w)
+    {
+        if ((tensor.size() < 4) || (b >= tensor[tensor.size() - 4])) b = 0;
+        if ((tensor.size() < 3) || (f >= tensor[tensor.size() - 3])) f = 0;
+        if ((tensor.size() < 2) || (h >= tensor[tensor.size() - 2])) h = 0;
+        if ((tensor.size() < 1) || (w >= tensor[tensor.size() - 1])) w = 0;
+
+        size_t res = 0;
+
+        size_t b_multiplier = 1;
+        if (tensor.size() >= 3) {
+            b_multiplier = std::accumulate(std::end(tensor) - 3, std::end(tensor), 1, std::multiplies<size_t>());
+        } else {
+            b_multiplier = std::accumulate(std::begin(tensor), std::end(tensor), 1, std::multiplies<size_t>());
+        }
+        res += b * b_multiplier;
+
+        size_t f_multiplier = 1;
+        if (tensor.size() >= 2) {
+            f_multiplier = std::accumulate(std::end(tensor) - 2, std::end(tensor), 1, std::multiplies<size_t>());
+        } else {
+            f_multiplier = std::accumulate(std::begin(tensor), std::end(tensor), 1, std::multiplies<size_t>());
+        }
+        res += f * f_multiplier;
+
+        size_t h_multiplier = 1;
+        if (tensor.size() >= 1) {
+            h_multiplier = std::accumulate(std::end(tensor) - 1, std::end(tensor), 1, std::multiplies<size_t>());
+        }
+        res += h * h_multiplier;
+
+        res += w;
+        return res;
+    }
+
+    void check_output(const float* input1, const float* input2, const uint8_t* mask, const float* output, select_params p) {
+
+        SizeVector output_tensor = get_output_tensor(p.mask_tensor, p.input1_tensor, p.input2_tensor);
+
+        size_t b_max = (output_tensor.size() > 0) ? output_tensor[0] : 1;
+        size_t f_max = (output_tensor.size() > 1) ? output_tensor[1] : 1;
+        size_t h_max = (output_tensor.size() > 2) ? output_tensor[2] : 1;
+        size_t w_max = (output_tensor.size() > 3) ? output_tensor[3] : 1;
+
+        for (size_t b = 0; b < b_max; b++) {
+            for (size_t f = 0; f < f_max; f++) {
+                for (size_t h = 0; h < h_max; h++) {
+                    for (size_t w = 0; w < w_max; w++) {
+                        if (mask[get_index_bfhw(p.mask_tensor, b, f, h, w)] == 0)
+                        {
+                            EXPECT_EQ(output[get_index_bfhw(output_tensor, b, f, h, w)],
+                                      input2[get_index_bfhw(p.input2_tensor, b, f, h, w)]);
+                        }
+                        else
+                        {
+                            EXPECT_EQ(output[get_index_bfhw(output_tensor, b, f, h, w)],
+                                      input1[get_index_bfhw(p.input1_tensor, b, f, h, w)]);
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+protected:
+    virtual void TearDown() {
+    }
+
+    virtual void SetUp() {
+        bool fail_expected = false;
+        try {
+            select_params p = ::testing::WithParamInterface<select_params>::GetParam();
+            fail_expected = p.fail_expected;
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(getModel(p), Blob::Ptr());
+            InputsDataMap in_info_map = net.getInputsInfo();
+            OutputsDataMap out_info_map = net.getOutputsInfo();
+
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
+            InferRequest infer_request = executable_network.CreateInferRequest();
+
+            uint8_t* mask;
+            float* input1_ptr, *input2_ptr;
+            auto input_iterator = in_info_map.begin();
+            size_t input1_buffer_size = std::accumulate(std::begin(p.input1_tensor), std::end(p.input1_tensor), 1, std::multiplies<size_t>());
+            size_t input2_buffer_size = std::accumulate(std::begin(p.input2_tensor), std::end(p.input2_tensor), 1, std::multiplies<size_t>());
+
+            // Creating mask buffer
+            // If true, take value from first buffer, if false, take from second
+            Blob::Ptr maskBlob = infer_request.GetBlob(input_iterator->first);
+            mask = maskBlob->buffer().as<uint8_t*>();
+            for (size_t id = 0; id < maskBlob->size(); id++) {
+                mask[id] = (id % 2);
+            }
+            input_iterator++;
+
+            // Inputs random generator
+            Blob::Ptr input1Blob = infer_request.GetBlob(input_iterator->first);
+            input_iterator++;
+            Blob::Ptr input2Blob = infer_request.GetBlob(input_iterator->first);
+            input1_ptr = input1Blob->buffer().as<float*>();
+            input2_ptr = input2Blob->buffer().as<float*>();
+            for (int index = 0; index < input1_buffer_size; index++) {
+                input1_ptr[index] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
+            }
+            for (int index = 0; index < input2_buffer_size; index++) {
+                input2_ptr[index] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
+            }
+
+            // Output allocation
+            Blob::Ptr outputBlob = infer_request.GetBlob(out_info_map.begin()->first);
+
+            infer_request.Infer();
+
+            // Output buffer
+            outputBlob = infer_request.GetBlob(out_info_map.begin()->first);
+            const float* output_ptr = outputBlob->buffer().as<float*>();
+
+            check_output(input1_ptr, input2_ptr, mask, output_ptr, p);
+        }
+        catch (const InferenceEngine::details::InferenceEngineException & e) {
+            if (!fail_expected) {
+                FAIL() << e.what();
+            }
+        }
+    }
+};
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/softmax_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/softmax_tests.hpp
new file mode 100644 (file)
index 0000000..116809c
--- /dev/null
@@ -0,0 +1,244 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+
+
+struct softmax_base_params {
+    struct {
+        size_t w;
+        size_t h;
+        size_t c;
+        size_t n;
+    } in;
+
+    int axis;
+};
+
+struct softmax_test_params : softmax_base_params {
+    std::string device_name;
+    std::string model;
+
+    softmax_test_params(std::string name, softmax_base_params params, std::string model = "4D") :
+            softmax_base_params(params), device_name(name), model(model) {}
+};
+
+template <typename data_t>
+void check_softmax_fwd(const data_t *src_data, softmax_test_params prm)
+{
+  size_t W = prm.in.w;
+  size_t H = prm.in.h;
+  size_t C = prm.in.c;
+  size_t MB = prm.in.n;
+
+  auto off = [=](int n, int c, int h, int w)
+  {
+    return (n * W * H * C + c * W * H + h * W + w);
+  };
+
+  double result = 0.0f;
+
+  if(prm.axis == 0) {
+
+    for (int c = 0; c < C; ++c) {
+      for (int h = 0; h < H; ++h) {
+        for (int w = 0; w < W; ++w) {
+          result = 0.0f;
+          for (int n = 0; n < MB; ++n) {
+            result += src_data[off(n, c, h, w)];
+          }
+
+          ASSERT_NEAR(result, 1.0f, 0.001);
+        }
+      }
+    }
+  }
+  else if(prm.axis == 1) {
+    for (int n = 0; n < MB; ++n) {
+      for (int h = 0; h < H; ++h) {
+        for (int w = 0; w < W; ++w) {
+          result = 0.0f;
+
+          for (int c = 0; c < C; ++c) {
+            result += src_data[off(n, c, h, w)];
+          }
+
+          ASSERT_NEAR(result, 1.0f, 0.001);
+        }
+      }
+    }
+  }
+  else if(prm.axis == 2) {
+    for (int n = 0; n < MB; ++n) {
+      for (int c = 0; c < C; ++c) {
+        for (int w = 0; w < W; ++w) {
+          result = 0.0f;
+
+          for (int h = 0; h < H; ++h) {
+            result += src_data[off(n, c, h, w)];
+          }
+
+          ASSERT_NEAR(result, 1.0f, 0.001);
+        }
+      }
+    }
+  }
+  else if(prm.axis == 3) {
+    for (int n = 0; n < MB; ++n) {
+      for (int c = 0; c < C; ++c) {
+        for (int h = 0; h < H; ++h) {
+          result = 0.0f;
+
+          for (int w = 0; w < W; ++w) {
+            result += src_data[off(n, c, h, w)];
+          }
+
+          ASSERT_NEAR(result, 1.0f, 0.001);
+        }
+      }
+    }
+  }
+}
+
+class SoftmaxOnlyTest: public TestsCommon,
+                    public WithParamInterface<softmax_test_params> {
+
+    std::string model_t = R"V0G0N(
+    <Net Name="SoftmaxOnly" version="2" precision="FP32" batch="_IB_">
+    <layers>
+        <layer name="input_1" type="input" id="0" precision="FP32">
+            <output>
+                <port id="0">
+                    <dim>_IB_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="softmax" id="1" type="Softmax" precision="FP32">
+            <input>
+                <port id="0">
+                    <dim>_IB_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="1">
+                    <dim>_IB_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="0" />
+    </edges>
+</Net>
+)V0G0N";
+
+    std::string model_2D = R"V0G0N(
+    <Net Name="SoftmaxOnly" version="2" precision="FP32" batch="_IB_">
+    <layers>
+        <layer name="input_1" type="input" id="0" precision="FP32">
+            <output>
+                <port id="0">
+                    <dim>_IB_</dim>
+                    <dim>_IC_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="softmax" id="1" type="Softmax" precision="FP32">
+            <input>
+                <port id="0">
+                    <dim>_IB_</dim>
+                    <dim>_IC_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="1">
+                    <dim>_IB_</dim>
+                    <dim>_IC_</dim>
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="0" />
+    </edges>
+</Net>
+)V0G0N";
+
+    std::string getModel(softmax_test_params p) {
+        std::string model = p.model == "2D" ? model_2D :  model_t;
+        REPLACE_WITH_NUM(model, "_IB_", p.in.n);
+        REPLACE_WITH_NUM(model, "_IW_", p.in.w);
+        REPLACE_WITH_NUM(model, "_IH_", p.in.h);
+        REPLACE_WITH_NUM(model, "_IC_", p.in.c);
+        return model;
+    }
+
+protected:
+    virtual void SetUp() {
+
+        try {
+            softmax_test_params p = ::testing::WithParamInterface<softmax_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            bool is2D = p.model == "2D";
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
+
+            InputsDataMap in_info_map = net.getInputsInfo();
+            OutputsDataMap out_info_map = net.getOutputsInfo();
+
+            if (p.in.n != 1) {
+                net.setBatchSize(p.in.n);
+            }
+
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+
+            auto src = inferRequest.GetBlob(in_info_map.begin()->first);
+            auto src_data = src->buffer().as<float*>();
+            // Fill each batch item separately so every image gets fresh input data.
+            for (int i = 0; i < p.in.n; i++) {
+                fill_data(src_data + p.in.w * p.in.h * p.in.c * i, src->size() / p.in.n);
+            }
+
+            inferRequest.Infer();
+            auto dst = inferRequest.GetBlob(out_info_map.begin()->first);
+
+            check_softmax_fwd(dst->buffer().as<float*>(), p);
+
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+#define case_1 softmax_base_params({{228, 228, 3, 1}, 1})
+#define case_8 softmax_base_params({{228, 228, 3, 8}, 1})
+#define case_8_nc softmax_base_params({{1, 1, 228*228*3, 8}, 1})
+
+TEST_P(SoftmaxOnlyTest, TestsSoftmax) {}
+
+std::string getTestCaseName(testing::TestParamInfo<softmax_test_params> obj) {
+    return obj.param.device_name +
+           "_h" + std::to_string(obj.param.in.h) +
+           "_w" + std::to_string(obj.param.in.w) +
+           "_c" + std::to_string(obj.param.in.c) +
+           "_b" + std::to_string(obj.param.in.n);
+}
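+
+// Usage sketch (hypothetical device name; real instantiations live in the per-plugin
+// test code, and the exact softmax_test_params constructor is assumed here):
+//     INSTANTIATE_TEST_CASE_P(TestsSoftmax, SoftmaxOnlyTest,
+//         ::testing::Values(softmax_test_params("CPU", case_1)),
+//         getTestCaseName);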
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/space_to_depth_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/space_to_depth_tests.hpp
new file mode 100644 (file)
index 0000000..84287f1
--- /dev/null
@@ -0,0 +1,182 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <cmath>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include <ie_core.hpp>
+
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace std;
+
+
+struct space_to_depth_test_params {
+    std::string device_name;
+    std::string inPrecision;
+    InferenceEngine::SizeVector in_dim;
+    std::string depth_mode;
+    size_t block_size;
+    InferenceEngine::SizeVector ref_dim;
+};
+
+template<typename data_t>
+void ref_spaceToDepth(const std::vector<Blob::Ptr> &srcs, std::vector<Blob::Ptr> &dsts, space_to_depth_test_params& prm) {
+    assert(dsts.size() == 1);
+
+    data_t *dst_data = dsts[0]->buffer().as<data_t*>();
+    const data_t *src_data = srcs[0]->buffer().as<data_t*>();
+
+    size_t feature_in = prm.in_dim[1];
+    size_t y_in = prm.in_dim[2];
+    size_t x_in = prm.in_dim[3];
+
+    size_t batch_out = prm.ref_dim[0];
+    size_t feature_out = prm.ref_dim[1];
+    size_t y_out = prm.ref_dim[2];
+    size_t x_out = prm.ref_dim[3];
+
+    if (prm.depth_mode != "depth_first" && prm.depth_mode != "blocks_first")
+        FAIL() << " Invalid mode for spaceToDepth: must be \"blocks_first\" or \"depth_first\" only";
+
+    if (prm.block_size < 1)
+        FAIL() << " Invalid block size number: must be greater than or equal to 1";
+
+    if (y_in % prm.block_size != 0 || x_in % prm.block_size != 0)
+        FAIL() << " Invalid spatial dimensions x, y: must be divisible by block size";
+
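+    // Output channel decomposition: for "blocks_first" feature = offset_in * feature_in + f_in,
+    // for "depth_first" feature = f_in * block_size^2 + offset_in. offset_in then selects the
+    // (y, x) position inside the block_size x block_size spatial block being gathered.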
+    for (size_t batch = 0; batch < batch_out; ++batch) {
+        for (size_t feature = 0; feature < feature_out; ++feature) {
+            size_t offset_in, f_in;
+            if (prm.depth_mode == "blocks_first") {
+                offset_in = feature / feature_in;
+                f_in = feature % feature_in;
+            } else {
+                offset_in = feature % (prm.block_size * prm.block_size);
+                f_in = feature / (prm.block_size * prm.block_size);
+            }
+            for (size_t y = 0; y < y_out; ++y) {
+                size_t input_y = (y * prm.block_size) + (offset_in / prm.block_size);
+                for (size_t x = 0; x < x_out; ++x) {
+                    size_t input_x = (x * prm.block_size) + (offset_in % prm.block_size);
+                    size_t f_in_offset = (input_y * x_in) + input_x;
+                    size_t input_index = (batch * feature_in * y_in * x_in) + (f_in * y_in * x_in) + f_in_offset;
+                    size_t output_index = (batch * feature_out * y_out * x_out) + (feature * y_out * x_out) + (y * x_out) + x;
+                    dst_data[output_index] = src_data[input_index];
+                }
+            }
+        }
+    }
+}
+
+class SpaceToDepthTests : public TestsCommon, public WithParamInterface<space_to_depth_test_params> {
+    std::string model_t = R"V0G0N(
+<net Name="Space2depth_net" version="2" precision="FP32" batch="1">
+    <layers>
+        <layer name="Input0" type="Input" precision="_IPRS_" id="1">
+            <output>
+                <port id="1">
+                    _IDIM_
+                </port>
+            </output>
+        </layer>
+        <layer name="SpaceToDepth" id="3" type="SpaceToDepth" precision="FP32">
+            <data block_size="_BS_" depth_mode="_DM_"/>
+            <input>
+                <port id="1">
+                    _IDIM_
+                </port>
+            </input>
+            <output>
+                <port id="3">
+                    _OUT_
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="1" from-port="1" to-layer="3" to-port="1"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string getModel(space_to_depth_test_params p) {
+        std::string model = model_t;
+        std::string inDict;
+        std::string out;
+
+        for (auto& dct : p.in_dim) {
+            inDict += "<dim>";
+            inDict += std::to_string(dct) + "</dim>\n";
+        }
+
+        for (auto& dst : p.ref_dim) {
+            out += "<dim>";
+            out += std::to_string(dst) + "</dim>\n";
+        }
+
+        REPLACE_WITH_STR(model, "_IPRS_", p.inPrecision);
+        REPLACE_WITH_STR(model, "_IDIM_", inDict);
+        REPLACE_WITH_STR(model, "_DM_", p.depth_mode);
+        REPLACE_WITH_NUM(model, "_BS_", p.block_size);
+        REPLACE_WITH_STR(model, "_OUT_", out);
+
+        return model;
+    }
+
+protected:
+    virtual void TearDown() {
+    }
+
+    virtual void SetUp() {
+        try {
+            space_to_depth_test_params p = ::testing::WithParamInterface<space_to_depth_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, Blob::CPtr());
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+
+            std::vector<Blob::Ptr> srcs_vec;
+            std::vector<Blob::Ptr> dsts_vec;
+            std::vector<Blob::Ptr> out_vec;
+
+            InputsDataMap in_info_map = net.getInputsInfo();
+            for (const auto& info : in_info_map) {
+                Blob::Ptr blob = make_shared_blob<float>({Precision::FP32, info.second->getTensorDesc().getDims(), NCHW});
+                blob->allocate();
+                fill_data_dbgval(blob->buffer().as<float*>(), blob->size());
+                inferRequest.SetBlob(info.first, blob);
+                srcs_vec.push_back(blob);
+            }
+
+            OutputsDataMap out_info_map = net.getOutputsInfo();
+            for (const auto& info : out_info_map) {
+                Blob::Ptr blob = make_shared_blob<float>({Precision::FP32, info.second->getTensorDesc().getDims(), NCHW});
+                blob->allocate();
+                inferRequest.SetBlob(info.first, blob);
+                out_vec.push_back(blob);
+
+                Blob::Ptr blob_ref = make_shared_blob<float>({Precision::FP32, info.second->getTensorDesc().getDims(), NCHW});
+                blob_ref->allocate();
+                dsts_vec.push_back(blob_ref);
+            }
+
+            ref_spaceToDepth<float>(srcs_vec, dsts_vec, p);
+
+            inferRequest.Infer();
+
+            compare(*out_vec[0], *dsts_vec[0]);
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(SpaceToDepthTests, TestsSpaceToDepth) {}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/ti_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/ti_tests.hpp
new file mode 100644 (file)
index 0000000..b939b48
--- /dev/null
@@ -0,0 +1,328 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include <string>
+#include <gtest/gtest.h>
+#include <cpp/ie_infer_request.hpp>
+#include <blob_factory.hpp>
+#include <ie_algorithm.hpp>
+#include <precision_utils.h>
+
+#include "plg_test.hpp"
+#include "single_layer_common.hpp"
+#include "ir_gen_helper.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace single_layer_tests;
+
+struct ti_test_params {
+    std::string device_name;
+    std::size_t tensorSize;
+    InferenceEngine::Precision precision;
+};
+
+static void setValuesInBlob(Blob::Ptr blob, float value) {
+    auto dims = blob->getTensorDesc().getDims();
+    auto output_size = details::product(std::begin(dims), std::end(dims));
+    std::vector<float> values_vector(output_size, value);
+
+    if (!blob->is<MemoryBlob>())
+        THROW_IE_EXCEPTION << "Only MemoryBlob is expected here";
+
+    auto m_blob = blob->as<MemoryBlob>();
+    if (m_blob->wmap().as<void*>() == nullptr)
+        blob->allocate();
+
+    CopyVectorToBlob(blob, values_vector);
+}
+
+/*
+         ______________main_ti__________________
+  in1 --|~~ iter  -> add -> plus_one -> next1   |
+  in2 --|~~ prev1 -> add -> out_iter ~~~~~~~~~~~|-- out1
+         ---------------------------------------
+*/
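+// The TI slices "in1" along axis 1 with stride _CHUNK_SIZE_ (3 iterations, since
+// _INPUT_SIZE_ = 3 * _CHUNK_SIZE_); the second body input starts from "in2" and is
+// updated through the back edge on every iteration.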
+
+class TITestBase: public PlgTest<ti_test_params> {
+    std::string model_t = R"V0G0N(
+<net batch="1" name="frozen" version="5">
+       <layers>
+               <layer id="0" name="in1" precision="_PRC_" type="Input">
+                       <output>
+                               <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_INPUT_SIZE_</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="1" name="in2" precision="_PRC_" type="Input">
+                       <output>
+                               <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_CHUNK_SIZE_</dim>
+                               </port>
+                       </output>
+               </layer>
+        <layer id="2" name="main_ti" type="TensorIterator" precision="_PRC_">
+            <input>
+                <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_INPUT_SIZE_</dim>
+                </port>
+                <port id="1">
+                    <dim>_IN_</dim>
+                    <dim>_CHUNK_SIZE_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="2">
+                    <dim>_IN_</dim>
+                    <dim>_INPUT_SIZE_</dim>
+                </port>
+            </output>
+            <port_map>
+                               <input external_port_id="0" internal_layer_id="0" internal_port_id="0" axis="1" stride="_CHUNK_SIZE_"/>
+                               <input external_port_id="1" internal_layer_id="0" internal_port_id="1"/>
+                               <output external_port_id="2" internal_layer_id="0" internal_port_id="2" axis="1" stride="_CHUNK_SIZE_"/>
+                       </port_map>
+                       <back_edges>
+                               <edge from-layer="1" from-port="1" to-layer="0" to-port="1"/>
+                       </back_edges>
+            <body>
+                <layers>
+                    <layer id="0" name="add" precision="_PRC_" type="Eltwise">
+                        <data operation="sum"/>
+                        <input>
+                            <port id="0">
+                                <dim>_IN_</dim>
+                                <dim>_CHUNK_SIZE_</dim>
+                            </port>
+                            <port id="1">
+                                <dim>_IN_</dim>
+                                <dim>_CHUNK_SIZE_</dim>
+                            </port>
+                        </input>
+                        <output>
+                            <port id="2">
+                                <dim>_IN_</dim>
+                                <dim>_CHUNK_SIZE_</dim>
+                            </port>
+                        </output>
+                    </layer>
+                    <layer id="1" name="plus_one" precision="_PRC_" type="Power">
+                        <data scale="1" shift="1" power="1"/>
+                        <input>
+                            <port id="0">
+                                <dim>_IN_</dim>
+                                <dim>_CHUNK_SIZE_</dim>
+                            </port>
+                        </input>
+                        <output>
+                            <port id="1">
+                                <dim>_IN_</dim>
+                                <dim>_CHUNK_SIZE_</dim>
+                            </port>
+                        </output>
+                    </layer>
+                </layers>
+                <edges>
+                    <edge from-layer="0" from-port="2" to-layer="1" to-port="0"/>
+                </edges>
+            </body>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
+        <edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string getModel(const ti_test_params & p) {
+        std::string model = model_t;
+        std::size_t iteration_count = 3;
+
+        REPLACE_WITH_NUM(model, "_IN_", 1);
+        REPLACE_WITH_NUM(model, "_INPUT_SIZE_", iteration_count * p.tensorSize);
+        REPLACE_WITH_NUM(model, "_CHUNK_SIZE_", p.tensorSize);
+        REPLACE_WITH_STR(model, "_PRC_", p.precision.name());
+
+        return model;
+    }
+
+protected:
+    void RunTITest(const std::map<std::string, std::string> & config = {}) {
+
+        try {
+            ti_test_params p = param();
+            std::string model = getModel(p);
+
+            Core ie;
+            auto net = ie.ReadNetwork(model, Blob::CPtr());
+            auto exec = ie.LoadNetwork(net, device_name, config);
+            auto req = exec.CreateInferRequest();
+            setValuesInBlob(req.GetBlob("in1"), 1.0f);
+            setValuesInBlob(req.GetBlob("in2"), 1.0f);
+            req.Infer();
+
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+using TITest  = TITestBase;
+
+TEST_P(TITest, TestsWithUnusedOut) { RunTITest(); }
+
+/*
+  TI body contains const data placeholder
+
+         ______________main_ti__________________
+  in1 --|~~ iter  -> add -> plus_one ~~~~~~~~~~~|-- out1
+        |  const1 -> add                        |
+         ---------------------------------------
+*/
+
+class TITest2Base: public PlgTest<ti_test_params> {
+    std::string model_t = R"V0G0N(
+<net batch="1" name="frozen" version="5">
+       <layers>
+               <layer id="0" name="in1" precision="_PRC_" type="Input">
+                       <output>
+                               <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_INPUT_SIZE_</dim>
+                               </port>
+                       </output>
+               </layer>
+        <layer id="1" name="main_ti" type="TensorIterator" precision="_PRC_">
+            <input>
+                <port id="0">
+                    <dim>_IN_</dim>
+                    <dim>_INPUT_SIZE_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="1">
+                    <dim>_IN_</dim>
+                    <dim>_INPUT_SIZE_</dim>
+                </port>
+            </output>
+            <port_map>
+                               <input external_port_id="0" internal_layer_id="1" internal_port_id="0" axis="1" stride="_CHUNK_SIZE_"/>
+                               <output external_port_id="1" internal_layer_id="2" internal_port_id="1" axis="1" stride="_CHUNK_SIZE_"/>
+                       </port_map>
+            <body>
+                <layers>
+                    <layer id="0" name="const" precision="_PRC_" type="Const">
+                        <output>
+                            <port id="1">
+                                <dim>1</dim>
+                                <dim>_CHUNK_SIZE_</dim>
+                            </port>
+                        </output>
+                        <blobs>
+                            <custom offset="0" size="_SZ_"/>
+                        </blobs>
+                    </layer>
+                    <layer id="1" name="add" precision="_PRC_" type="Eltwise">
+                        <data operation="sum"/>
+                        <input>
+                            <port id="0">
+                                <dim>_IN_</dim>
+                                <dim>_CHUNK_SIZE_</dim>
+                            </port>
+                            <port id="1">
+                                <dim>1</dim>
+                                <dim>_CHUNK_SIZE_</dim>
+                            </port>
+                        </input>
+                        <output>
+                            <port id="2">
+                                <dim>_IN_</dim>
+                                <dim>_CHUNK_SIZE_</dim>
+                            </port>
+                        </output>
+                    </layer>
+                    <layer id="2" name="plus_one" precision="_PRC_" type="Power">
+                        <data scale="1" shift="1" power="1"/>
+                        <input>
+                            <port id="0">
+                                <dim>_IN_</dim>
+                                <dim>_CHUNK_SIZE_</dim>
+                            </port>
+                        </input>
+                        <output>
+                            <port id="1">
+                                <dim>_IN_</dim>
+                                <dim>_CHUNK_SIZE_</dim>
+                            </port>
+                        </output>
+                    </layer>
+                </layers>
+                <edges>
+                    <edge from-layer="0" from-port="1" to-layer="1" to-port="1"/>
+                    <edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
+                </edges>
+            </body>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string getModel(const ti_test_params& p) {
+        std::string model = model_t;
+        std::size_t iteration_count = 3;
+
+        REPLACE_WITH_NUM(model, "_IN_", 1);
+        REPLACE_WITH_NUM(model, "_INPUT_SIZE_", iteration_count * p.tensorSize);
+        REPLACE_WITH_NUM(model, "_CHUNK_SIZE_", p.tensorSize);
+        REPLACE_WITH_STR(model, "_PRC_", p.precision.name());
+        REPLACE_WITH_NUM(model, "_SZ_", p.precision.size() * p.tensorSize);
+
+        return model;
+    }
+
+protected:
+    virtual void RunTITest(const std::map<std::string, std::string> & config = {}) {
+
+        try {
+            ti_test_params p = param();
+            std::string model = getModel(p);
+
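+            // The body "const" layer reads _CHUNK_SIZE_ values from this weights blob at
+            // offset 0, so fill it with ones in the precision under test.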
+            auto weights = make_shared_blob<uint8_t>(TensorDesc {Precision::U8, {p.precision.size() * p.tensorSize}, C});
+            weights->allocate();
+            // The blob dims are in bytes (U8), so derive the element count from the precision size.
+            auto weights_byte_size = details::product(std::begin(weights->getTensorDesc().getDims()), std::end(weights->getTensorDesc().getDims()));
+            auto weights_count = weights_byte_size / p.precision.size();
+            if (p.precision == Precision::FP32) {
+                std::vector<float> weights_vector(weights_count, 1.0f);
+                ie_memcpy(weights->buffer().as<float *>(), weights_byte_size, weights_vector.data(), weights_vector.size() * sizeof(float));
+            } else {
+                //  FP16 case
+                std::vector<ie_fp16> weights_vector(weights_count, PrecisionUtils::f32tof16(1.0f));
+                ie_memcpy(weights->buffer().as<ie_fp16 *>(), weights_byte_size, weights_vector.data(), weights_vector.size() * sizeof(ie_fp16));
+            }
+
+            Core ie;
+            auto net = ie.ReadNetwork(model, weights);
+            auto exec = ie.LoadNetwork(net, device_name, config);
+            auto req = exec.CreateInferRequest();
+            setValuesInBlob(req.GetBlob("in1"), 1.0f);
+            req.Infer();
+
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+using TITest2  = TITest2Base;
+
+TEST_P(TITest2, TestsWithCopy) { RunTITest(); }
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/tile_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/tile_tests.hpp
new file mode 100644 (file)
index 0000000..0ff6234
--- /dev/null
@@ -0,0 +1,131 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+
+#include <ie_core.hpp>
+#include "ir_gen_helper.hpp"
+#include "common_test_utils/data_utils.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace single_layer_tests;
+
+struct tile_test_base_params {
+    SizeVector shape_1;
+    SizeVector shape_2;
+    int axis;
+    int tiles;
+};
+
+struct tile_test_params : public tile_test_base_params {
+    std::string device_name;
+
+    tile_test_params(std::string name, tile_test_base_params params)
+        : tile_test_base_params(params), device_name(name) {}
+};
+
+class TileTest: public TestsCommon, public WithParamInterface<tile_test_params> {
+    std::string model_t = R"V0G0N(
+<net batch="1" name="tile_net" version="5">
+       <layers>
+               <layer id="0" name="data" precision="FP32" type="Input">
+                       <output>
+                               <port id="0">
+                    _SHAPE_1_
+                               </port>
+                       </output>
+               </layer>
+               <layer id="1" name="tile" precision="FP32" type="Tile">
+            <data axis="_AXIS_" tiles="_TILES_" />
+            <input>
+                <port id="0">
+                    _SHAPE_1_
+                </port>
+            </input>
+            <output>
+                               <port id="1">
+                    _SHAPE_2_
+                               </port>
+                       </output>
+               </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string shape_xml(const SizeVector& shape) {
+        std::string res;
+        for (auto dim : shape)
+            res += "<dim>" + std::to_string(dim) + "</dim>";
+        return res;
+    }
+
+    std::string getModel() {
+        auto p = ::testing::WithParamInterface<tile_test_params>::GetParam();
+
+        auto shape_1_xml = shape_xml(p.shape_1);
+        auto shape_2_xml = shape_xml(p.shape_2);
+
+        std::string model = model_t;
+        REPLACE_WITH_STR(model, "_SHAPE_1_", shape_1_xml);
+        REPLACE_WITH_STR(model, "_SHAPE_2_", shape_2_xml);
+        REPLACE_WITH_NUM(model, "_AXIS_", p.axis);
+        REPLACE_WITH_NUM(model, "_TILES_", p.tiles);
+
+        return model;
+    }
+
+protected:
+    virtual void SetUp() {
+        try {
+            auto p = GetParam();
+            std::string model = getModel();
+
+            InferenceEngine::Core ie;
+            auto network = ie.ReadNetwork(model, Blob::CPtr());
+            auto exec = ie.LoadNetwork(network, p.device_name);
+            auto req = exec.CreateInferRequest();
+
+            auto in_blob = req.GetBlob("data");
+            CommonTestUtils::fill_data_const(in_blob, 7);
+
+            req.Infer();
+
+            TensorDesc desc {Precision::FP32, p.shape_2, TensorDesc::getLayoutByDims(p.shape_2)};
+            Blob::Ptr out_ref = make_shared_blob<float>(desc);
+            out_ref->allocate();
+
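+            // Tiling a constant-filled tensor yields the same constant, so the reference
+            // is simply a const-filled blob of the tiled output shape.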
+            CommonTestUtils::fill_data_const(out_ref, 7);
+            compare(*out_ref, *req.GetBlob("tile"));
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+#define case_1  tile_test_base_params{ {1}, {5}, 0, 5 }
+#define case_2  tile_test_base_params{ {2}, {6}, 0, 3 }
+#define case_3  tile_test_base_params{ {1, 3}, {5, 3}, 0, 5 }
+#define case_4  tile_test_base_params{ {1, 3}, {1, 6}, 1, 2 }
+#define case_5  tile_test_base_params{ {1, 2, 3}, {5, 2, 3}, 0, 5 }
+#define case_6  tile_test_base_params{ {1, 2, 3}, {1, 4, 3}, 1, 2 }
+#define case_7  tile_test_base_params{ {1, 2, 3}, {1, 2, 6}, 2, 2 }
+#define case_8  tile_test_base_params{ {1, 2, 3, 4}, {5, 2, 3, 4}, 0, 5 }
+#define case_9  tile_test_base_params{ {1, 2, 3, 4}, {1, 4, 3, 4}, 1, 2 }
+#define case_10 tile_test_base_params{ {1, 2, 3, 4}, {1, 2, 6, 4}, 2, 2 }
+#define case_11 tile_test_base_params{ {1, 2, 3, 4}, {1, 2, 3, 8}, 3, 2 }
+#define case_12 tile_test_base_params{ {1, 2, 3, 4, 2}, {5, 2, 3, 4, 2}, 0, 5 }
+#define case_13 tile_test_base_params{ {1, 2, 3, 4, 2}, {1, 4, 3, 4, 2}, 1, 2 }
+#define case_14 tile_test_base_params{ {1, 2, 3, 4, 2}, {1, 2, 6, 4, 2}, 2, 2 }
+#define case_15 tile_test_base_params{ {1, 2, 3, 4, 2}, {1, 2, 3, 8, 2}, 3, 2 }
+#define case_16 tile_test_base_params{ {1, 2, 3, 4, 2}, {1, 2, 3, 4, 4}, 4, 2 }
+
+TEST_P(TileTest, TestsGeneralTile) {}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/topk_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/topk_tests.hpp
new file mode 100644 (file)
index 0000000..7d6de08
--- /dev/null
@@ -0,0 +1,299 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+#include <cmath>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace std;
+
+struct topk_test_params {
+    std::string          device_name;
+    SizeVector           in_shape;
+    int                  axis;
+    std::vector<size_t>  src_k;
+    std::string          sort;
+    std::string          mode;
+    SizeVector           out_shape;
+    Precision            precision;
+};
+
+static inline int count(std::vector<size_t> dims, size_t start_ind, size_t end_ind) {
+    size_t count = 1;
+    for (size_t i = start_ind; i < end_ind; i++)
+        count *= dims[i];
+    return static_cast<int>(count);
+}
+
+static inline int count(std::vector<size_t> dims, size_t start_ind = 0) {
+    return count(dims, start_ind, dims.size());
+}
+
+template <typename T>
+static void ref_topk(TBlob<T> &src, TBlob<T> &dst_data, TBlob<int> &dst_indx, topk_test_params p) {
+    T* src_data = src.data();
+    T* dst_val = dst_data.data();
+    int* dst_idx = dst_indx.data();
+
+    int dim, axis_dist;
+    int src_k = static_cast<int>(p.src_k[0]);
+
+    SizeVector src_dims = src.getTensorDesc().getDims();
+    int axis_ = p.axis;
+    if (axis_ < 0)
+        axis_ += src_dims.size();
+
+    size_t axis = static_cast<size_t>(axis_);
+
+    if (src_dims.size() < (1 + axis))
+        FAIL() << " Incorrect input parameters dimensions and axis number!";
+
+    const bool mode_max = (p.mode == "max");
+    const bool sort_value = (p.sort == "value");
+
+    int j;
+    for (j = src_dims.size() - 1; j >= 0; j--) {
+        if (src_dims[j] != 1) break;
+    }
+    if (static_cast<size_t>(j) == axis) {
+        dim = count(src_dims, static_cast<size_t>(j));
+        axis_dist = 1;
+    } else {
+        dim = static_cast<int>(src_dims[axis]);
+        axis_dist = count(src_dims, axis) / dim;
+    }
+
+    int num = count(src_dims) / dim;
+    std::vector<std::pair<T, int> > src_vector(src_k);
+
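+    // For each 1D slice along the axis keep a sorted window of the current top-k
+    // candidates: seed it with the first k elements, sift the remaining ones in, and,
+    // when sort != "value", re-sort the window by original index before writing out.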
+    for (int i = 0; i < num; ++i) {
+        src_vector[0] = std::make_pair(src_data[(i / axis_dist * dim) * axis_dist + i % axis_dist], 0);
+        for (j = 1; j < src_k; ++j) {
+            src_vector[j] = std::make_pair(src_data[(i / axis_dist * dim + j) * axis_dist + i % axis_dist], j);
+            if (mode_max) {
+                if (src_vector[j].first > src_vector[j - 1].first)
+                    std::sort(src_vector.begin(), src_vector.begin() + j + 1, std::greater<std::pair<T, int> >());
+            } else {
+                if (src_vector[j].first < src_vector[0].first)
+                    std::sort(src_vector.begin(), src_vector.begin() + j + 1, std::less<std::pair<T, int> >());
+            }
+        }
+
+        for (; j < dim; ++j) {
+            T value = src_data[(i / axis_dist * dim + j) * axis_dist + i % axis_dist];
+            if (mode_max) {
+                if (value > src_vector[src_k - 1].first) {
+                    src_vector[src_k - 1] = std::make_pair(value, j);
+                    std::sort(src_vector.begin(), src_vector.end(), std::greater<std::pair<T, int> >());
+                }
+            } else {
+                if (value < src_vector[0].first) {
+                    src_vector[src_k - 1] = std::make_pair(value, j);
+                    std::sort(src_vector.begin(), src_vector.end(), std::less<std::pair<T, int> >());
+                }
+            }
+        }
+
+        if (!sort_value)
+            std::sort(src_vector.begin(), src_vector.begin() + src_k,
+                      [](const std::pair<T, int> &a, const std::pair<T, int> &b) { return a.second < b.second; });
+
+        for (int j = 0; j < src_k; ++j) {
+            if (axis_dist != 1) {
+                // Produces max_val per axis
+                dst_val[(i / axis_dist * src_k + j) * axis_dist + i % axis_dist] = src_vector[j].first;
+                dst_idx[(i / axis_dist * src_k + j) * axis_dist + i % axis_dist] = src_vector[j].second;
+            } else {
+                // Produces max_ind and max_val
+                dst_val[i * src_k + j] = src_vector[j].first;
+                dst_idx[i * src_k + j] = src_vector[j].second;
+            }
+        }
+    }
+}
+
+template <typename src_data_t>
+class TopKTests : public TestsCommon, public WithParamInterface<topk_test_params> {
+    std::string model_t = R"V0G0N(
+<net Name="TopK_net" version="2" precision="_SRC_DATA_T_" batch="1">
+    <layers>
+        <layer name="value" type="Input" precision="_SRC_DATA_T_" id="1">
+            <output>
+                <port id="1">
+                    _IN_
+                </port>
+            </output>
+        </layer>
+        <layer name="src_k" type="Const" precision="I32" id="2">
+            <output>
+                <port id="2"/>
+            </output>
+            <blobs>
+                <custom offset="0" size="1"/>
+            </blobs>
+        </layer>
+        <layer name="output" id="3" type="TopK">
+            <data axis="_AXIS_" sort="_SORT_" mode="_MODE_"/>
+            <input>
+                <port id="1">
+                    _IN_
+                </port>
+                <port id="2"/>
+            </input>
+            <output>
+                <port id="3" precision="_SRC_DATA_T_">
+                    _OUT_
+                </port>
+                <port id="4" precision="I32">
+                    _OUT_
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="1" from-port="1" to-layer="3" to-port="1"/>
+        <edge from-layer="2" from-port="2" to-layer="3" to-port="2"/>
+    </edges>
+</net>
+)V0G0N";
+
+    std::string getModel(topk_test_params p) {
+        std::string model = model_t;
+        std::string in_shape;
+        std::string out_shape;
+
+        for (size_t i = 0; i < p.out_shape.size(); i++) {
+            out_shape += "<dim>";
+            out_shape += std::to_string(p.out_shape[i]) + "</dim>\n";
+        }
+        REPLACE_WITH_STR(model, "_OUT_", out_shape);
+
+        for (auto& dct : p.in_shape) {
+            in_shape += "<dim>";
+            in_shape += std::to_string(dct) + "</dim>\n";
+        }
+
+        switch (p.precision) {
+            case Precision::FP32:
+                REPLACE_WITH_STR(model, "_SRC_DATA_T_", "FP32"); break;
+            case Precision::I32:
+                REPLACE_WITH_STR(model, "_SRC_DATA_T_", "I32"); break;
+            default:
+                THROW_IE_EXCEPTION << "Unsupported test precision";
+        }
+
+        REPLACE_WITH_STR(model, "_IN_", in_shape);
+        REPLACE_WITH_STR(model, "_SORT_", p.sort);
+        REPLACE_WITH_STR(model, "_MODE_", p.mode);
+        REPLACE_WITH_NUM(model, "_AXIS_", p.axis);
+
+        return model;
+    }
+
+protected:
+    virtual void TearDown() {
+    }
+
+    virtual void SetUp() {
+        try {
+            TestsCommon::SetUp();
+            topk_test_params p = ::testing::WithParamInterface<topk_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            // Pack the requested k values into the weights blob consumed by the "src_k" Const layer.
+            TBlob<uint8_t>::Ptr top_k = make_shared_blob<uint8_t>(
+                TensorDesc(Precision::U8, { p.src_k.size() * sizeof(int32_t) }, Layout::C));
+            top_k->allocate();
+            for (size_t i = 0; i < p.src_k.size(); i++) {
+                top_k->buffer().as<int32_t*>()[i] = static_cast<int32_t>(p.src_k[i]);
+            }
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(model, top_k);
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
+            InferRequest inferRequest = executable_network.CreateInferRequest();
+
+            // Output Data
+            OutputsDataMap out = net.getOutputsInfo();
+            BlobMap outputBlobs;
+
+            auto it = out.begin();
+            std::pair<std::string, DataPtr> item0 = *it;
+            std::pair<std::string, DataPtr> item1 = *(++it);
+
+            typename TBlob<src_data_t>::Ptr output0 = make_shared_blob<src_data_t>(item0.second->getTensorDesc());
+            output0->allocate();
+            inferRequest.SetBlob(item0.first, output0);
+            TBlob<int>::Ptr output1 = make_shared_blob<int>(item1.second->getTensorDesc());
+            output1->allocate();
+            inferRequest.SetBlob(item1.first, output1);
+
+            // Input Data
+            Blob::Ptr src = make_shared_blob<src_data_t>({ p.precision, p.in_shape, TensorDesc::getLayoutByDims(p.in_shape) });
+            src->allocate();
+            for (size_t i = 0; i < src->size(); i++) {
+                src->buffer().as<src_data_t*>()[i] = i % 2 == 0 ? static_cast<src_data_t>(i) : static_cast<src_data_t>(-1.f * i - i * 2);
+            }
+
+            inferRequest.SetBlob("value", src);
+
+            // Output Reference
+            TBlob<src_data_t> dst_data_ref(item0.second->getTensorDesc());
+            dst_data_ref.allocate();
+            TBlob<int> dst_indx_ref(item1.second->getTensorDesc());
+            dst_indx_ref.allocate();
+            auto* srcPtr = dynamic_cast<TBlob<src_data_t>*>(src.get());
+            if (srcPtr == nullptr)
+                FAIL() << "Cannot cast blob to TBlob<src_data_t>.";
+            ref_topk<src_data_t>(*srcPtr, dst_data_ref, dst_indx_ref, p);
+
+            inferRequest.Infer();
+
+            for (size_t i = 0; i < dst_data_ref.size(); i++) {
+                if (dst_data_ref.buffer().template as<src_data_t*>()[i] != output0->buffer().template as<src_data_t*>()[i]) {
+                    FAIL() << "The difference between ref_val " << dst_data_ref.buffer().template as<src_data_t*>()[i] <<
+                              " and res_val " << output0->buffer().template as<src_data_t*>()[i] << " at index " << i;
+                }
+            }
+
+            for (size_t i = 0; i < dst_indx_ref.size(); i++) {
+                if (dst_indx_ref.buffer().as<int*>()[i] != output1->buffer().as<int*>()[i]) {
+                    FAIL() << "The difference between ref_idx " << dst_indx_ref.buffer().as<int*>()[i] <<
+                           " and res_idx " << output1->buffer().as<int*>()[i] << " at index " << i;
+                }
+            }
+        } catch (const details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+using topk_test_int32 = TopKTests<int32_t>;
+using topk_test_fp32 = TopKTests<float>;
+
+TEST_P(topk_test_int32, TestsTopK_I32) {}
+
+TEST_P(topk_test_fp32, TestsTopK_FP32) {}
+
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/variadic_split_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/variadic_split_tests.hpp
new file mode 100644 (file)
index 0000000..48ba82e
--- /dev/null
@@ -0,0 +1,244 @@
+// Copyright (C) 2018-2019 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ie_core.hpp>
+#include <cmath>
+#include <string>
+
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace std;
+
+struct variadic_split_params {
+    std::string device_name;
+    int axis;
+    std::vector<int> variadic_lenghts;
+    SizeVector input_dims;
+    std::vector<SizeVector> output_dims;
+};
+
+class VariadicSplitTests : public TestsCommon, public WithParamInterface<variadic_split_params> {
+    std::string model_base = R"V0G0N(
+    <net name="Activation" version="10">
+        <layers>
+            <layer id="0" name="in1" type="Parameter"  version="opset1">
+                <data element_type="f32" shape="_IB_,_IC_,_IH_,_IW_"/>
+                <output>
+                    <port id="0" precision="FP32">
+                        <dim>_IB_</dim>
+                        <dim>_IC_</dim>
+                        <dim>_IH_</dim>
+                        <dim>_IW_</dim>
+                    </port>
+                </output>
+            </layer>
+            <layer id="1" name="const1" type="Const" version="opset1">
+                           <data offset="0" size="8"/>
+                <output>
+                    <port id="0" precision="I64"/>
+                </output>
+            </layer>
+            <layer id="2" name="const2" type="Const" version="opset1">
+                           <data offset="8" size="_VARIADIC_LENGHTS_BYTE_SIZE_"/>
+                <output>
+                    <port id="0" precision="I64">
+                        <dim>_VARIADIC_LENGHTS_SIZE_</dim>
+                    </port>
+                </output>
+            </layer>
+            <layer id="3" name="split" type="VariadicSplit" version="opset1">
+                <input>
+                    <port id="0" precision="FP32">
+                        <dim>_IB_</dim>
+                        <dim>_IC_</dim>
+                        <dim>_IH_</dim>
+                        <dim>_IW_</dim>
+                    </port>
+                    <port id="1" precision="I64"/>
+                    <port id="2" precision="I64">
+                        <dim>_VARIADIC_LENGHTS_SIZE_</dim>
+                    </port>
+                </input>
+                <output>
+                    _VARIADIC_OUTPUTS_
+                </output>
+            </layer>
+            _OUTPUT_LAYERS_
+        </layers>
+        <edges>
+            <edge from-layer="0" from-port="0" to-layer="3" to-port="0"/>
+            <edge from-layer="1" from-port="0" to-layer="3" to-port="1"/>
+            <edge from-layer="2" from-port="0" to-layer="3" to-port="2"/>
+            _OUTPUT_PORTS_
+        </edges>
+    </net>
+    )V0G0N";
+
+    std::string getModel(variadic_split_params p) {
+        std::string variadic_outputs, output_layers, output_ports;
+
+        size_t variadic_port_id = 3;
+        for (auto& size_vector : p.output_dims) {
+            variadic_outputs += "<port id=\"" + std::to_string(variadic_port_id) + "\" precision=\"FP32\">\n";
+            variadic_outputs += "<dim>" + std::to_string(size_vector[0]) + "</dim>\n";
+            variadic_outputs += "<dim>" + std::to_string(size_vector[1]) + "</dim>\n";
+            variadic_outputs += "<dim>" + std::to_string(size_vector[2]) + "</dim>\n";
+            variadic_outputs += "<dim>" + std::to_string(size_vector[3]) + "</dim>\n";
+            variadic_outputs += "</port>\n";
+            variadic_port_id++;
+        }
+
+        size_t layer_id = 4;
+        size_t layer_name_id = 1;
+        for (auto& size_vector : p.output_dims) {
+            output_layers += "<layer name=\"output" + std::to_string(layer_name_id) +  "\" type=\"Result\" id=\"" + std::to_string(layer_id) + "\" version=\"opset1\">\n";
+            output_layers += "<input>\n";
+            output_layers += "<port id=\"0\" precision=\"FP32\">\n";
+            output_layers += "<dim>" + std::to_string(size_vector[0]) + "</dim>\n";
+            output_layers += "<dim>" + std::to_string(size_vector[1]) + "</dim>\n";
+            output_layers += "<dim>" + std::to_string(size_vector[2]) + "</dim>\n";
+            output_layers += "<dim>" + std::to_string(size_vector[3]) + "</dim>\n";
+            output_layers += "</port>\n";
+            output_layers += "</input>\n";
+            output_layers += "</layer>\n";
+            layer_id++;
+            layer_name_id++;
+        }
+
+        for (size_t id = 3; id < p.variadic_lenghts.size() + 3; id++) {
+            output_ports += "<edge from-layer=\"3\" from-port=\"" + std::to_string(id) + "\" to-layer=\"" + std::to_string(id + 1) + "\" to-port=\"0\"/>\n";
+        }
+
+        REPLACE_WITH_STR(model_base, "_IB_", std::to_string(p.input_dims[0]));
+        REPLACE_WITH_STR(model_base, "_IC_", std::to_string(p.input_dims[1]));
+        REPLACE_WITH_STR(model_base, "_IH_", std::to_string(p.input_dims[2]));
+        REPLACE_WITH_STR(model_base, "_IW_", std::to_string(p.input_dims[3]));
+
+        REPLACE_WITH_STR(model_base, "_VARIADIC_LENGHTS_BYTE_SIZE_", std::to_string(p.variadic_lenghts.size() * sizeof(int64_t)));
+        REPLACE_WITH_STR(model_base, "_VARIADIC_LENGHTS_SIZE_", std::to_string(p.variadic_lenghts.size()));
+        REPLACE_WITH_STR(model_base, "_VARIADIC_OUTPUTS_", variadic_outputs);
+        REPLACE_WITH_STR(model_base, "_OUTPUT_LAYERS_", output_layers);
+        REPLACE_WITH_STR(model_base, "_OUTPUT_PORTS_", output_ports);
+
+        return model_base;
+    }
+
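+    // Linear offset into a dense BFYX (NCHW) buffer: ((b * F + f) * H + h) * W + w.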
+    size_t get_index_bfhw(const SizeVector& tensor, size_t b, size_t f, size_t h, size_t w)
+    {
+        size_t res = 0;
+        res += b * (tensor[1] * tensor[2] * tensor[3]);
+        res += f * (tensor[2] * tensor[3]);
+        res += h * (tensor[3]);
+        res += w;
+        return res;
+    }
+
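+    // Walks every output blob element-by-element and checks it against the matching
+    // region of the input, advancing the input window along the split axis by
+    // variadic_lenghts[output_id] after each output.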
+    void check_buffers_after_split(InferRequest& inf_req, InputsDataMap& inputs, OutputsDataMap& outputs, variadic_split_params vs_params){
+        Blob::Ptr inputBlob = inf_req.GetBlob(inputs.begin()->first);
+        float* src_ptr = inputBlob->buffer().as<float*>();
+
+        size_t outputs_number = outputs.size();
+        std::vector<const float*> output_ptrs(outputs_number);
+
+        // Getting raw output pointers
+        OutputsDataMap::iterator output_it = outputs.begin();
+        for (size_t index = 0; index < outputs_number; ++index) {
+            Blob::Ptr temp_blob = inf_req.GetBlob(output_it->first);
+            output_ptrs[index] = temp_blob->buffer().as<float*>();
+            output_it++;
+        }
+
+        // Getting number of elements inside buffer
+        auto input_tensor = vs_params.input_dims;
+        size_t input_tensor_size = input_tensor[0] * input_tensor[1] * input_tensor[2] * input_tensor[3];
+        std::vector<size_t> output_tensor_sizes(outputs_number);
+        for (size_t output_id = 0; output_id < outputs_number; ++output_id) {
+            auto output_tensors = vs_params.output_dims;
+            output_tensor_sizes[output_id] =
+                output_tensors[output_id][0] * output_tensors[output_id][1] * output_tensors[output_id][2] * output_tensors[output_id][3];
+        }
+
+        // Comparing input and output buffers
+        SizeVector input_it_tensor = { 0, 0, 0, 0 };
+        SizeVector output_tensor = { 0, 0, 0, 0 };
+        for (size_t output_id = 0; output_id < outputs_number; ++output_id) {
+            // Tensor iteration
+            for (size_t b = input_it_tensor[0]; b < input_it_tensor[0] + vs_params.output_dims[output_id][0]; b++) {
+                for (size_t f = input_it_tensor[1]; f < input_it_tensor[1] + vs_params.output_dims[output_id][1]; f++) {
+                    for (size_t h = input_it_tensor[2]; h < input_it_tensor[2] + vs_params.output_dims[output_id][2]; h++) {
+                        for (size_t w = input_it_tensor[3]; w < input_it_tensor[3] + vs_params.output_dims[output_id][3]; w++) {
+                            ASSERT_EQ(
+                                src_ptr[get_index_bfhw(vs_params.input_dims, b, f, h, w)],
+                                output_ptrs[output_id][get_index_bfhw(vs_params.output_dims[output_id], output_tensor[0], output_tensor[1], output_tensor[2], output_tensor[3])]
+                            );
+                            output_tensor[3]++;
+                        }
+                        output_tensor[3] = 0;
+                        output_tensor[2]++;
+                    }
+                    output_tensor[2] = 0;
+                    output_tensor[1]++;
+                }
+                output_tensor[1] = 0;
+                output_tensor[0]++;
+            }
+            output_tensor = { 0, 0, 0, 0 };
+            input_it_tensor[vs_params.axis] += vs_params.variadic_lenghts[output_id];
+        }
+    }
+
+protected:
+    virtual void TearDown() {
+    }
+
+    virtual void SetUp() {
+        try {
+            variadic_split_params p = ::testing::WithParamInterface<variadic_split_params>::GetParam();
+
+            // Fill weights data
+            auto fillBlob = [p](Blob::Ptr& weights) {
+                auto* data = weights->buffer().as<int64_t*>();
+                data[0] = p.axis;
+                size_t id = 1;
+                for (auto& variadic_lenght : p.variadic_lenghts)
+                {
+                    data[id] = variadic_lenght;
+                    id++;
+                }
+            };
+
+            // Allocate weights data for axis + variadic_lenghts vector
+            Blob::Ptr weights;
+            weights = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, { (1 + p.variadic_lenghts.size()) * sizeof(int64_t) }, Layout::C));
+            weights->allocate();
+            fillBlob(weights);
+
+            Core ie;
+            CNNNetwork net = ie.ReadNetwork(getModel(p), weights);
+            InputsDataMap in_info_map = net.getInputsInfo();
+            OutputsDataMap out_info_map = net.getOutputsInfo();
+
+            ExecutableNetwork executable_network = ie.LoadNetwork(net, p.device_name);
+            InferRequest infer_request = executable_network.CreateInferRequest();
+
+            // Generate input data
+            Blob::Ptr inputBlob = infer_request.GetBlob(in_info_map.begin()->first);
+            float* src_ptr = inputBlob->buffer().as<float*>();
+            fill_data(src_ptr, inputBlob->size());
+
+            infer_request.Infer();
+
+            check_buffers_after_split(infer_request, in_info_map, out_info_map, p);
+        }
+        catch (const InferenceEngine::details::InferenceEngineException & e) {
+            FAIL() << e.what();
+        }
+    }
+};
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/common/low_precision_tests_utils.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/common/low_precision_tests_utils.cpp
new file mode 100644 (file)
index 0000000..fd8f17b
--- /dev/null
@@ -0,0 +1,167 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_tests_utils.hpp"
+
+#include <details/ie_cnn_network_tools.h>
+#include <details/caseless.hpp>
+#include <precision_utils.h>
+#include <cmath>
+
+using InferenceEngine::CNNLayerPtr;
+using InferenceEngine::Blob;
+using InferenceEngine::details::CNNNetworkImpl;
+using InferenceEngine::CNNNetwork;
+using InferenceEngine::DataPtr;
+using InferenceEngine::Precision;
+
+// TODO: FP32 detected
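+// Deterministic pseudo-random fill: a sine sweep keyed by element index and initValue,
+// reproducible across runs.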
+void fillDataWithInitValue(float *data, size_t size, float initValue) {
+    for (size_t i = 0lu; i < size; i++) {
+        data[i] = sin((i + initValue + 1.0f) * 0.03f);
+    }
+}
+
+void fillDataWithInitValue(std::vector<float>& data, float initValue) {
+    for (size_t i = 0; i < data.size(); i++) {
+        data[i] = sin((i + initValue + 1.0f) * 0.03f);
+    }
+}
+
+void fillDataWithInitValue(Blob::Ptr& blob, float initValue) {
+    if (blob == nullptr) {
+        THROW_IE_EXCEPTION << "Blob is null";
+    }
+
+    const Precision& precision = blob->getTensorDesc().getPrecision();
+    const size_t dataSize = blob->size();
+    if (precision == Precision::FP32) {
+        float* buffer = blob->buffer().as<float*>();
+        for (size_t i = 0lu; i < dataSize; i++) {
+            buffer[i] = sin((float(i) + initValue + 1.f) * 0.03f);
+        }
+    } else if (precision == Precision::FP16) {
+        short* buffer = blob->buffer().as<short*>();
+        for (size_t i = 0lu; i < dataSize; i++) {
+            buffer[i] = InferenceEngine::PrecisionUtils::f32tof16(sin((float(i) + initValue + 1.f) * 0.03f));
+        }
+    }
+}
+
+void fillDataWithInitValue(CNNLayerPtr layer, const std::string& blobName, float initValue) {
+    if (layer == nullptr) {
+        THROW_IE_EXCEPTION << "layer is null";
+    }
+    if (blobName.empty() && (layer->blobs.size() != 1)) {
+        THROW_IE_EXCEPTION << "layer has several blobs: specify blobName";
+    }
+
+    Blob::Ptr blob = blobName.empty() ? layer->blobs.begin()->second : layer->blobs[blobName];
+    if (blob == nullptr)
+        THROW_IE_EXCEPTION << "Layer '" << layer->name << "' does not have blob '" << blobName << "'";
+    fillDataWithInitValue(blob, initValue);
+}
+
+void fillData(float *dst, size_t size, float value) {
+    std::fill(dst, dst + size, value);
+}
+
+void fillData(float* dst, size_t size, const float* src) {
+    std::copy(src, src + size, dst);
+}
+
+void fillData(float *dst, size_t size, const std::vector<float>& src) {
+    if (size != src.size()) {
+        THROW_IE_EXCEPTION << "values size does not match the destination size";
+    }
+    fillData(dst, size, src.data());
+}
+
+void fillData(Blob::Ptr& blob, float value) {
+    if (blob == nullptr) {
+        THROW_IE_EXCEPTION << "Blob is null";
+    }
+
+    const Precision& precision = blob->getTensorDesc().getPrecision();
+    const size_t dataSize = blob->size();
+    if (precision == Precision::FP32) {
+        fillData(blob->buffer().as<float*>(), dataSize, value);
+    } else if (precision == Precision::FP16) {
+        short* buffer = blob->buffer().as<short*>();
+        for (size_t i = 0lu; i < blob->size(); i++) {
+            buffer[i] = InferenceEngine::PrecisionUtils::f32tof16(value);
+        }
+    }
+}
+
+void fillData(Blob::Ptr& blob, const float* src) {
+    if (blob == nullptr) {
+        THROW_IE_EXCEPTION << "Blob is null";
+    }
+
+    const Precision& precision = blob->getTensorDesc().getPrecision();
+    const size_t dataSize = blob->size();
+    if (precision == Precision::FP32) {
+        fillData(blob->buffer().as<float*>(), dataSize, src);
+    } else if (precision == Precision::FP16) {
+        short* dstData = blob->buffer().as<short*>();
+        InferenceEngine::PrecisionUtils::f32tof16Arrays(dstData, src, dataSize, 1.f, 0.f);
+    } else {
+        THROW_IE_EXCEPTION << "Unsupported precision: " << precision;
+    }
+}
+
+void fillData(Blob::Ptr& blob, const std::vector<float>& src) {
+    fillData(blob, src.data());
+}
+
+void fillData(CNNLayerPtr layer, float value, const std::string& blobName) {
+    if (layer == nullptr) {
+        THROW_IE_EXCEPTION << "layer is null";
+    }
+    if (blobName.empty() && (layer->blobs.size() != 1)) {
+        THROW_IE_EXCEPTION << "layer has several blobs: specify blobName";
+    }
+
+    Blob::Ptr blob = blobName.empty() ? layer->blobs.begin()->second : layer->blobs[blobName];
+    fillData(blob, value);
+}
+
+void fillData(CNNLayerPtr layer, const std::vector<float>& values, const std::string& blobName) {
+    if (layer == nullptr) {
+        THROW_IE_EXCEPTION << "layer is null";
+    }
+    if (blobName.empty() && (layer->blobs.size() != 1)) {
+        THROW_IE_EXCEPTION << "layer has several blobs: specify blobName";
+    }
+
+    Blob::Ptr blob = blobName.empty() ? layer->blobs.begin()->second : layer->blobs[blobName];
+    if (blob->size() != values.size()) {
+        THROW_IE_EXCEPTION << "values size does not match the blob size";
+    }
+
+    fillData(blob, values);
+}
+
+CNNLayerPtr getLayer(const CNNNetwork& network, const std::string& layerName) {
+    std::vector<CNNLayerPtr> layers = InferenceEngine::details::CNNNetSortTopologically(network);
+    for (CNNLayerPtr& layer : layers) {
+        if (layer->name == layerName) {
+            return layer;
+        }
+    }
+
+    return nullptr;
+}
+
+Blob::Ptr getBlob(CNNLayerPtr layer, const std::string& blobName) {
+    if (layer == nullptr) {
+        THROW_IE_EXCEPTION << "layer is null";
+    }
+    if (blobName.empty() && (layer->blobs.size() != 1)) {
+        THROW_IE_EXCEPTION << "layer has several blobs: specify blobName";
+    }
+    Blob::Ptr blob = blobName.empty() ? layer->blobs.begin()->second : layer->blobs[blobName];
+    return blob;
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/common/low_precision_tests_utils.hpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/common/low_precision_tests_utils.hpp
new file mode 100644 (file)
index 0000000..51f95ba
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <vector>
+
+#include <cpp/ie_cnn_network.h>
+#include <cnn_network_impl.hpp>
+
+void fillDataWithInitValue(InferenceEngine::Blob::Ptr& blob, float initValue);
+
+void fillDataWithInitValue(float *data, size_t size, float initValue = 0.0);
+
+void fillDataWithInitValue(std::vector<float>& data, float initValue = 0.0);
+
+void fillDataWithInitValue(InferenceEngine::CNNLayerPtr layer, const std::string& blobName = "", float initValue = 0.0);
+
+void fillData(InferenceEngine::CNNLayerPtr layer, float value, const std::string& blobName = "");
+void fillData(InferenceEngine::CNNLayerPtr layer, const std::vector<float>& values, const std::string& blobName = "");
+
+void fillData(float *dst, size_t size, float value);
+void fillData(float *dst, size_t size, const float* src);
+void fillData(float *dst, size_t size, const std::vector<float>& src);
+
+void fillData(InferenceEngine::Blob::Ptr& blob, float value);
+void fillData(InferenceEngine::Blob::Ptr& blob, const float* src);
+void fillData(InferenceEngine::Blob::Ptr& blob, const std::vector<float>& values);
+
+InferenceEngine::CNNLayerPtr getLayer(const InferenceEngine::CNNNetwork& network, const std::string& layerName);
+
+InferenceEngine::Blob::Ptr getBlob(InferenceEngine::CNNLayerPtr layer, const std::string& blobName);
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/common/validation.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/common/validation.cpp
new file mode 100644 (file)
index 0000000..4d37bb4
--- /dev/null
@@ -0,0 +1,768 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "validation.hpp"
+
+#include <gtest/gtest.h>
+#include <string>
+#include <vector>
+#include <unordered_set>
+
+#include <details/caseless.hpp>
+#include "low_precision_transformations/network_helper.hpp"
+#include "low_precision_transformations/fake_quantize.hpp"
+#include "low_precision_transformations/transformer.hpp"
+
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
+void LowPrecisionTransformationValidation::validate(
+        CNNNetwork& network,
+        // TODO: not correct, quantization parameters are defined per transformation
+        const LayerTransformation::Params& params,
+        const std::unordered_set<std::string>& notTransformedLayers,
+        const std::vector<std::pair<std::string, std::string>>& originalLayersInfo) {
+    validateIntervalsAndLevel(network, params, notTransformedLayers);
+    validateWeightsToConst(network, params, notTransformedLayers);
+    validatePrecision(network, params, notTransformedLayers);
+    validateActivations(network, params, notTransformedLayers);
+    validateScaleShifts(network, params, notTransformedLayers);
+    validateConvolutions(network, params, notTransformedLayers);
+    validateWithReference(network, originalLayersInfo);
+
+    validateAsymmetricPattern(network, params, notTransformedLayers);
+
+    const std::vector<CNNLayerPtr> layers = CNNNetSortTopologically(network);
+    for (const CNNLayerPtr layer : layers) {
+        if (layer->type == "Eltwise") {
+            validateEltwise(network, params, *layer);
+        }
+    }
+
+    // TODO: not ready
+    // validateCustomLayerHandling(network, notTransformedLayers);
+}
+
+std::vector<std::pair<std::string, std::string>> LowPrecisionTransformationValidation::getLayers(const CNNNetwork& network) {
+    std::vector<std::pair<std::string, std::string>> layerNames;
+    const std::vector<CNNLayerPtr> layers = CNNNetSortTopologically(network);
+    for (const CNNLayerPtr layer : layers) {
+        layerNames.push_back(std::pair<std::string, std::string>(layer->name, layer->type));
+    }
+    return layerNames;
+}
+
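+// Checks every FakeQuantize layer: the 'levels' XML param must match the parsed
+// QuantizeLayer::levels member, and output intervals must match the precision
+// selected by getDataPrecision. A worked example (assuming the usual LPT limits):
+// for U8 with levels == 256 the expected interval is [0, 255]; for I8 with
+// levels == 255 it is [-127, 127].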
+void LowPrecisionTransformationValidation::validateIntervalsAndLevel(
+        const CNNNetwork& network,
+        const LayerTransformation::Params& params,
+        const std::unordered_set<std::string>& notTransformedLayers) {
+    const std::vector<CNNLayerPtr> layers = CNNNetSortTopologically(network);
+    for (const CNNLayerPtr layer : layers) {
+        if (notTransformedLayers.find(layer->name) != notTransformedLayers.end()) {
+            continue;
+        }
+
+        if (layer->type == "FakeQuantize") {
+            const size_t levelsAsParam = layer->GetParamAsUInt("levels");
+            QuantizeLayer* quantizeLayer = dynamic_cast<QuantizeLayer*>(layer.get());
+            if (quantizeLayer == nullptr) {
+                THROW_IE_EXCEPTION << "unexpected type";
+            }
+
+            if (levelsAsParam != quantizeLayer->levels) {
+                THROW_IE_EXCEPTION << "level as param " << levelsAsParam << " is not equal level as member " << quantizeLayer->levels;
+            }
+
+            //// TODO: debug only
+            //QuantizationDetails quantizationDetails = QuantizationDetails::getDetails(*layer);
+            //std::cout << layer->name << (CNNNetworkHelper::onWeights(*layer) ? " on weights" : " on activations") <<
+            //    ": levels=" << quantizationDetails.levels <<
+            //    ": input [" << quantizationDetails.inputLowValues[0] << " - " << quantizationDetails.inputHighValues[0]
+            //    << "], output [" << quantizationDetails.outputLowValues[0] << " - " << quantizationDetails.outputHighValues[0] << "]" << std::endl;
+            bool multiBranch = false;
+
+            const std::vector<CNNLayerPtr> children = CNNNetworkHelper::getChildren(*layer, "Pooling");
+            for (const CNNLayerPtr& child : children) {
+                if ((child->type == "Eltwise") || (child->type == "Concat")) {
+                    multiBranch = true;
+                    break;
+                }
+            }
+
+            validateFakeQuantize(layer, params, multiBranch);
+        } else if (layer->type == "Eltwise") {
+            // TODO: FQ on Eltwise specific logic is under development
+        } else if (layer->type == "Concat") {
+            // TODO: FQ on Concat specific logic is under development
+        }
+    }
+}
+
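+// When Params::weightsToConst is enabled, a FakeQuantize on the weights path of a
+// Convolution is expected to be folded into a Const layer; finding one left behind
+// for a layer that was expected to be transformed is a failure.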
+void LowPrecisionTransformationValidation::validateWeightsToConst(
+        const CNNNetwork& network,
+        const LayerTransformation::Params& params,
+        const std::unordered_set<std::string>& notTransformedLayers) {
+    if ((!params.weightsToConst) ||
+        (!std::any_of(
+            params.precisionsOnActivations.begin(),
+            params.precisionsOnActivations.end(),
+            [](const Precision precision) { return precision == Precision::U8; }))) {
+        return;
+    }
+
+    if ((!params.supportAsymmetricQuantization) &&
+        (!std::any_of(params.precisionsOnWeights.begin(), params.precisionsOnWeights.end(), [](const Precision precision) { return precision.isSigned(); }))) {
+        // U8 on weights in symmetric mode is ignored, shifts on weights are not supported
+        return;
+    }
+
+    const std::vector<CNNLayerPtr> layers = InferenceEngine::details::CNNNetSortTopologically(network);
+    for (const CNNLayerPtr layer : layers) {
+        if ((layer->type == "FakeQuantize") && CNNNetworkHelper::onWeights(*layer) && (layer->outData.size() == 1) &&
+            (layer->outData[0]->getInputTo().begin()->second->type == "Convolution")) {
+            CNNLayerPtr childLayer = CNNNetworkHelper::getChildren(*layer)[0];
+            if (params.quantizeOutputs || (childLayer->outData[0]->getInputTo().size() != 0)) {
+                ASSERT_TRUE(notTransformedLayers.find(childLayer->name) != notTransformedLayers.end()) <<
+                    "FakeQuantize on weights was found: " << layer->name <<
+                    " for layer " << childLayer->name;
+            }
+        }
+    }
+}
+
+Precision getInputPrecision(const CNNLayer& layer) {
+    if (layer.insData.size() < 1ul) {
+        THROW_IE_EXCEPTION << "unexpected inputs count";
+    }
+
+    DataPtr layerParentData = layer.insData[0].lock();
+    if (layerParentData == nullptr) {
+        THROW_IE_EXCEPTION << "input data is nullable";
+    }
+
+    CNNLayerPtr layerParent = layerParentData->getCreatorLayer().lock();
+    if (layerParent == nullptr) {
+        THROW_IE_EXCEPTION << "parent is nullable";
+    }
+
+    if ((layer.type == "Convolution") && (layerParent->type == "Eltwise")) {
+        DataPtr eltwiseParentData = layerParent->insData[0].lock();
+        if (eltwiseParentData == nullptr) {
+            THROW_IE_EXCEPTION << "Eltwise parent data is nullable";
+        }
+
+        // TODO: workaround for the first Convolution:
+        // https://jira.devtools.intel.com/browse/CVS-26622
+        // CVS-26622: [IE COMMON][LPT] Check if ScaleShift is dequantization ScaleShift(dequantizationLayersNames) before to apply transformation
+        CNNLayerPtr eltwiseParent = eltwiseParentData->getCreatorLayer().lock();
+        if (eltwiseParent->type == "Input") {
+            return Precision::U8;
+        }
+
+        return eltwiseParentData->getTensorDesc().getPrecision();
+    } else {
+        return layerParentData->getTensorDesc().getPrecision();
+    }
+}
+
+Precision getOutputPrecision(const CNNLayer& layer) {
+    if (layer.outData.size() < 1ul) {
+        THROW_IE_EXCEPTION << "unexpected outputs count";
+    }
+
+    return layer.outData[0]->getTensorDesc().getPrecision();
+}
+
+// TODO: refactor (I8/U8 is used)
+void LowPrecisionTransformationValidation::validatePrecision(
+        const CNNNetwork& network,
+        const LayerTransformation::Params& params,
+        const std::unordered_set<std::string>& notTransformedLayers) {
+    const std::vector<CNNLayerPtr> layers = InferenceEngine::details::CNNNetSortTopologically(network);
+    for (const CNNLayerPtr layer : layers) {
+        if (notTransformedLayers.find(layer->name) != notTransformedLayers.end()) {
+            continue;
+        }
+
+        if ((!params.quantizeOutputs) && (layer->outData[0]->getInputTo().size() == 0ul)) {
+            continue;
+        }
+
+        if (CaselessEq<std::string>()(layer->type, "FakeQuantize") && !isFakeQuantizeBeforeEltwiseOnConvolutionBranch(*layer)) {
+            // TODO: handle if FakeQuantize on weights -> Const on weights transformation is disabled
+            //if (CNNNetworkHelper::onWeights(*layer)) {
+            //    for (const DataPtr data : layer->outData) {
+            //        ASSERT_EQ(Precision::I8, data->getPrecision()) << "FakeQuantize out data on weights has unexpected precision";
+            //    }
+            //}
+
+            if (!params.quantizeOutputs) {
+                const std::vector<CNNLayerPtr> children = CNNNetworkHelper::getChildrenRecursivelyExceptTypes(*layer, { "ScaleShift" });
+                if ((children.size() == 0ul) || (children[0]->outData.size() == 0ul) || (children[0]->outData[0]->getInputTo().size() == 0ul)) {
+                    continue;
+                }
+            }
+
+            const std::vector<CNNLayerPtr> children = CNNNetworkHelper::getChildren(*layer);
+            bool hasDequantizationSS = false;
+            for (const auto& child : children) {
+                if (CaselessEq<std::string>()(child->type, "ScaleShift")) {
+                    hasDequantizationSS = true;
+                    break;
+                }
+            }
+
+            if (params.updatePrecisions && hasDequantizationSS) {
+                // while S8 is not supported on activations
+                for (const DataPtr data : layer->outData) {
+                    ASSERT_TRUE((data->getPrecision() == Precision::U8) || (data->getPrecision() == Precision::I8)) << "'" <<
+                        layer->type << "', name '" <<
+                        layer->name << "' out data on activations has unexpected precision " << data->getPrecision();
+                }
+            }
+        } else if (layer->type == "Const") {
+            if (CNNNetworkHelper::onWeights(*layer)) {
+                // Note: a Const layer on weights can have any original precision - check the original network Const layer precision
+
+                const std::vector<CNNLayerPtr> children = CNNNetworkHelper::getChildrenRecursivelyExceptTypes(*layer, { "Eltwise" });
+                if (children[0]->type == "FakeQuantize") {
+                    // FakeQuantize on weights is possible if weights graph is complex
+                    continue;
+                }
+
+                ASSERT_EQ(1ul, children.size()) <<
+                    "children count " << children.size() <<
+                    " is unexpected for " << layer->type << " '" << layer->name << "' layer on weights";
+                ASSERT_TRUE((children[0]->type == "Convolution") || (children[0]->type == "FullyConnected") || (children[0]->type == "GEMM")) <<
+                    "unexpected child type " << children[0]->type << " '" << children[0]->name << "' for layer " << layer->type << " '" << layer->name << "' on weights";
+
+                if (children[0]->outData[0]->getInputTo().size() == 0) {
+                    // output data precision depends on device
+                    continue;
+                }
+
+                const Precision originalPrecision = getOutputPrecision(*children[0]);
+                const Precision inputPrecision = getInputPrecision(*children[0]);
+                const Precision weightsPrecision = inputPrecision == originalPrecision ? originalPrecision : params.precisionsOnWeights[0];
+
+                if (inputPrecision != originalPrecision) {
+                    ASSERT_TRUE((weightsPrecision == Precision::I8) || (weightsPrecision == Precision::U8)) <<
+                        "unexpected weights precision " << weightsPrecision <<
+                        " for " << children[0]->type << " " << children[0]->name;
+                }
+
+                for (auto it = layer->blobs.begin(); it != layer->blobs.end(); ++it) {
+                    ASSERT_EQ(params.updatePrecisions ? weightsPrecision : originalPrecision, it->second->getTensorDesc().getPrecision()) <<
+                        " constant layer on weights blob precison is not correct" <<
+                        " for " << layer->type << " " << layer->name;;
+                }
+
+                for (const DataPtr data : layer->outData) {
+                    ASSERT_EQ(params.updatePrecisions ? weightsPrecision : originalPrecision, data->getPrecision()) <<
+                        " constant layer " << layer->name << " on weights blob precison is not correct";
+                }
+            }
+        } else if ((layer->type == "Concat") || (layer->type == "Pooling")) {
+            for (const DataPtr data : layer->outData) {
+                if (params.updatePrecisions && (!CNNNetworkHelper::onWeights(*layer))) {
+                    ASSERT_TRUE((data->getPrecision() == Precision::U8) || (data->getPrecision() == Precision::I8)) <<
+                        layer->type << " layer, name '" <<
+                        layer->name << "' out data has unexpected precision " << data->getPrecision();
+                }
+                // ASSERT_EQ(params.updatePrecisions ? Precision::U8 : Precision::FP32, data->getPrecision()) << " " << layer->type << " out data has unexpected precision " << data->getPrecision();
+            }
+        } else if ((layer->type == "Eltwise") || (layer->type == "Convolution")) {
+            for (const DataPtr data : layer->outData) {
+                // TODO: refactor: get original layer output precision from original network
+                ASSERT_TRUE((data->getPrecision() == Precision::FP16) || (data->getPrecision() == Precision::FP32)) << "'" <<
+                    layer->type << "', name '" <<
+                    layer->name << "' out data has unexpected precision " << data->getPrecision();
+            }
+        }
+    }
+}
+
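+// After the transformations a dequantization ScaleShift with per-channel-equal
+// scales and shifts standing right before a zero-slope ReLU should have been
+// fused away; reporting one that survived is the purpose of this check.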
+void LowPrecisionTransformationValidation::validateActivations(
+    const CNNNetwork& network,
+    const LayerTransformation::Params& params,
+    const std::unordered_set<std::string>& notTransformedLayers) {
+    const std::vector<CNNLayerPtr> layers = InferenceEngine::details::CNNNetSortTopologically(network);
+    for (const CNNLayerPtr layer : layers) {
+        if ((notTransformedLayers.find(layer->name) != notTransformedLayers.end()) || (layer->type != "ReLU")) {
+            continue;
+        }
+
+        const std::vector<CNNLayerPtr> reluParents = CNNNetworkHelper::getParentsRecursivelyExceptTypes(*layer, { "Pooling" });
+        if ((reluParents.size() != 1) || (reluParents[0]->type != "ScaleShift")) {
+            continue;
+        }
+
+        const CNNLayerPtr scaleShift = reluParents[0];
+
+        const std::vector<CNNLayerPtr> scaleShiftParents = CNNNetworkHelper::getParentsRecursivelyExceptTypes(*scaleShift, { "Pooling" });
+        // if Convolution is parent then ScaleShift can be generated by clean up transformation
+        if ((scaleShiftParents.size() != 1) || (scaleShiftParents[0]->type == "Convolution")) {
+            continue;
+        }
+
+        const float negativeSlope = layer->GetParamAsFloat("negative_slope", 0.0);
+        if (negativeSlope != 0.0) {
+            continue;
+        }
+
+        const Blob::Ptr weightsBlob = CNNNetworkHelper::getBlob(scaleShift, "weights");
+        auto weights = CNNNetworkHelper::getFloatData(weightsBlob);
+        const std::vector<float> scales = std::vector<float>(weights.get(), weights.get() + weightsBlob->size());
+
+        const Blob::Ptr biasesBlob = CNNNetworkHelper::getBlob(scaleShift, "biases");
+        auto biases = CNNNetworkHelper::getFloatData(biasesBlob);
+        const std::vector<float> shifts = std::vector<float>(biases.get(), biases.get() + biasesBlob->size());
+
+        if (!(std::equal(shifts.begin() + 1, shifts.end(), shifts.begin())) ||
+            !(std::equal(scales.begin() + 1, scales.end(), scales.begin()))) {
+            continue;
+        }
+
+        ASSERT_TRUE(false) << scaleShift->type << " '" << scaleShift->name << "' before " << layer->type << " '" << layer->name << "' was found, but was expected to be fused";
+    }
+}
+
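+// With Params::updateBiases enabled the shift part of a dequantization ScaleShift
+// after a Convolution is expected to be moved into the convolution biases, so the
+// remaining ScaleShift must carry all-zero shifts.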
+void LowPrecisionTransformationValidation::validateScaleShifts(
+    const CNNNetwork& network,
+    const LayerTransformation::Params& params,
+    const std::unordered_set<std::string>& notTransformedLayers) {
+    if (!params.updateBiases) {
+        return;
+    }
+
+    const std::vector<CNNLayerPtr> layers = InferenceEngine::details::CNNNetSortTopologically(network);
+    for (const CNNLayerPtr layer : layers) {
+        if ((notTransformedLayers.find(layer->name) != notTransformedLayers.end()) || (layer->type != "ScaleShift")) {
+            continue;
+        }
+
+        const std::vector<CNNLayerPtr> scaleShiftParents = CNNNetworkHelper::getParentsRecursivelyExceptTypes(*layer, { "Pooling" });
+        if ((scaleShiftParents.size() != 1) || (scaleShiftParents[0]->type != "Convolution")) {
+            continue;
+        }
+
+        const Blob::Ptr biasesBlob = CNNNetworkHelper::getBlob(layer, "biases");
+        auto biases = CNNNetworkHelper::getFloatData(biasesBlob);
+        const std::vector<float> shifts = std::vector<float>(biases.get(), biases.get() + biasesBlob->size());
+
+        ASSERT_TRUE(std::all_of(shifts.begin(), shifts.end(), [](float value) { return value == 0.0; })) <<
+            layer->type << " '" << layer->name << "' after " <<
+            scaleShiftParents[0]->type << " '" << scaleShiftParents[0]->name << "' has not zero shift values";
+    }
+}
+
+void LowPrecisionTransformationValidation::validateConvolutions(
+    const CNNNetwork& network,
+    const LayerTransformation::Params& params,
+    const std::unordered_set<std::string>& notTransformedLayers) {
+    if (!params.updatePrecisions) {
+        return;
+    }
+
+    const std::vector<CNNLayerPtr> layers = InferenceEngine::details::CNNNetSortTopologically(network);
+    for (const CNNLayerPtr layer : layers) {
+        if (layer->type != "Convolution") {
+            continue;
+        }
+
+        CNNLayerPtr parent = CNNNetworkHelper::getParent(*layer, 0ul);
+        const CNNLayerPtr precisionLayer = (parent->type == "Eltwise") ? parent : layer;
+        const Precision precision = precisionLayer->insData[0].lock()->getTensorDesc().getPrecision();
+        ASSERT_NE(Precision::I8, precision) << "unexpected input precision " << precision << " for " << layer->type << " " << layer->name;
+
+        //std::cout << "LowPrecisionTransformationValidation::validateConvolutions: " << layer->type << " " << layer->name << ": " << precision << std::endl;
+    }
+}
+
+void LowPrecisionTransformationValidation::validateWithReference(
+    CNNNetwork& network,
+    const std::vector<std::pair<std::string, std::string>>& originalLayersInfo) {
+    std::unordered_map<std::string, CNNLayerPtr> layersMap;
+    const std::vector<CNNLayerPtr> layers = InferenceEngine::details::CNNNetSortTopologically(network);
+    for (const CNNLayerPtr layer : layers) {
+        layersMap.emplace(layer->name, layer);
+    }
+
+    for (const auto layerInfo : originalLayersInfo) {
+        const auto it = layersMap.find(layerInfo.first);
+
+        // TODO: refactor: transformations move all ScaleShifts
+        if (layerInfo.second == "ScaleShift") {
+            continue;
+        }
+
+        // TODO: refactor: transformations can remove FakeQuantize and Const layers on weights
+        if ((layerInfo.second == "FakeQuantize") || (layerInfo.second == "Const")) {
+            continue;
+        }
+
+        if (it == layersMap.end()) {
+            THROW_IE_EXCEPTION << "Layer '" << layerInfo.first << "' (" << layerInfo.second << ") is absent in transformed network";
+            // std::cout << "Layer '" << layerInfo.first << "' (" << layerInfo.second << ") is absent in transformed network" << std::endl;
+            // continue;
+        }
+
+        // TODO: last layer is ignored
+        if ((it->second->outData.size() != 0) && (it->second->outData[0]->getInputTo().size() == 0)) {
+            continue;
+        }
+
+        if (it->second->type != layerInfo.second) {
+            THROW_IE_EXCEPTION << "Layer '" << layerInfo.first << "' (" << layerInfo.second << ") has unexpected type. Expected value " << it->second->type;
+            // std::cout << "Layer '" << layerInfo.first << "' (" << layerInfo.second << ") has unexpected type. Expected value " << it->second->type << std::endl;
+        }
+    }
+}
+
+void LowPrecisionTransformationValidation::validateCustomLayerHandling(
+    const CNNNetwork& network,
+    const std::unordered_set<std::string>& notTransformedLayers) {
+    const std::vector<CNNLayerPtr> layers = InferenceEngine::details::CNNNetSortTopologically(network);
+    for (const CNNLayerPtr layer : layers) {
+        if (layer->type == "FullyConnected") {
+            const std::vector<CNNLayerPtr> children = CNNNetworkHelper::getChildren(*layer);
+            if ((children.size() == 0) || (children[0]->type != "ScaleShift")) {
+                THROW_IE_EXCEPTION << "Layer " << layer->name << " is not handled";
+            }
+        }
+    }
+}
+
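+// Resolves the target precision and quantization range for a FakeQuantize layer.
+// A rough sketch of the outcome (assuming standard LPT min/max rules): a layer on
+// activations quantized to U8 with 256 levels yields DataPrecision{U8, 0.f, 255.f},
+// while I8 with 256 levels yields DataPrecision{I8, -128.f, 127.f}.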
+DataPrecision LowPrecisionTransformationValidation::getDataPrecision(const CNNLayer& layer, const LayerTransformation::Params& params) {
+    const QuantizationDetails quantizationDetails = QuantizationDetails::getDetails(layer);
+    const bool onWeights = CNNNetworkHelper::onWeights(layer);
+
+    if ((onWeights && (params.precisionsOnWeights.size() > 1ul)) ||
+        ((!onWeights) && (params.precisionsOnActivations.size() > 1ul))) {
+        const LayerTransformation::PrecisionDetails precisionDetails = FakeQuantizeTransformation(params).getPrecisionDetails(quantizationDetails);
+        if (precisionDetails.precision != Precision::UNSPECIFIED) {
+            const std::vector<Precision>& supportedPrecisions = onWeights ? params.precisionsOnWeights : params.precisionsOnActivations;
+            const auto foundIt = std::find(supportedPrecisions.begin(), supportedPrecisions.end(), precisionDetails.precision);
+            if (foundIt != supportedPrecisions.end()) {
+                return DataPrecision(
+                    precisionDetails.precision,
+                    DataPrecision::getMinValue(precisionDetails.precision, quantizationDetails.levels),
+                    DataPrecision::getMaxValue(precisionDetails.precision),
+                    false);
+            }
+        }
+    }
+
+    const Precision precision = onWeights ? *params.precisionsOnWeights.begin() : *params.precisionsOnActivations.begin();
+    return DataPrecision(
+        precision,
+        DataPrecision::getMinValue(precision, quantizationDetails.levels),
+        DataPrecision::getMaxValue(precision),
+        false);
+}
+
+// TODO: quantizedTensorAlignmentOnActivations is used
+void LowPrecisionTransformationValidation::validateFakeQuantize(
+    const CNNLayerPtr& layer,
+    const LayerTransformation::Params& params,
+    const bool multiBranch) {
+
+    if (isFakeQuantizeBeforeEltwiseOnConvolutionBranch(*layer) || isFakeQuantizeBeforeConcat(*layer)) {
+        return;
+    }
+
+    if (!params.quantizeOutputs) {
+        const std::vector<CNNLayerPtr> children = CNNNetworkHelper::getChildren(*layer);
+        for (const CNNLayerPtr& child : children) {
+            for (const DataPtr data : child->outData) {
+                if (data->getInputTo().size() == 0ul) {
+                    return;
+                }
+            }
+        }
+    }
+
+    // TODO: Eltwise doesn't support asymmetric quantization
+    // TODO: make params per transformation
+    // TODO: uncomment
+    //if (params.supportAsymmetricQuantization) {
+    //    if (CNNNetworkHelper::onWeights(*layer) && (params.precisionsOnWeights.size() == 1)) {
+    //        const QuantizationDetails quantizationDetails = QuantizationDetails::getDetails(*layer);
+    //        if (params.precisionsOnWeights.begin()->isSigned()) {
+    //            ASSERT_TRUE(quantizationDetails.hasNegativeOutput());
+    //        } else {
+    //            ASSERT_FALSE(quantizationDetails.hasNegativeOutput());
+    //        }
+    //    } else if ((!CNNNetworkHelper::onWeights(*layer)) && (params.precisionsOnActivations.size() == 1)) {
+    //        const QuantizationDetails quantizationDetails = QuantizationDetails::getDetails(*layer);
+    //        if (params.precisionsOnActivations.begin()->isSigned()) {
+    //            ASSERT_TRUE(quantizationDetails.hasNegativeOutput());
+    //        } else {
+    //            ASSERT_FALSE(quantizationDetails.hasNegativeOutput());
+    //        }
+    //    }
+    //}
+
+    const QuantizationDetails quantizationDetails = QuantizationDetails::getDetails(*layer);
+    // TODO: temporary fix: not possible to get min/max value for I8 if levels were changed
+    if (((quantizationDetails.levels != 255) && (quantizationDetails.levels != 256)) ||
+        (!layer->outData.empty() &&
+        // not quantized
+        ((layer->outData[0]->getTensorDesc().getPrecision() == Precision::FP16) ||
+        (layer->outData[0]->getTensorDesc().getPrecision() == Precision::FP32)))) {
+        return;
+    }
+
+    const DataPrecision dataPrecision = getDataPrecision(*layer, params);
+    for (size_t i = 0; i < quantizationDetails.outputLowValues.size(); ++i) {
+        const auto lowValue = quantizationDetails.outputLowValues[i];
+        const auto highValue = quantizationDetails.outputHighValues[i];
+
+        if (((
+                (params.quantizedTensorAlignmentOnActivations == LayerTransformation::QuantizedTensorAlignment::None) ||
+                (params.quantizedTensorAlignmentOnActivations == LayerTransformation::QuantizedTensorAlignment::UpdateLevel)) &&
+            ((!equals(dataPrecision.min, lowValue)) && (!equals(dataPrecision.max, highValue)))
+            ) ||
+            ((params.quantizedTensorAlignmentOnActivations == LayerTransformation::QuantizedTensorAlignment::UpdateIntervals) &&
+            ((!equals(dataPrecision.min, lowValue)) || (!equals(dataPrecision.max, highValue))))
+            ) {
+            ASSERT_TRUE(false) <<
+                "Output interval [" << lowValue << " - " << highValue <<
+                "] for layer " << layer->name << " is not correct, " <<
+                "expected [" << dataPrecision.min << " - " << dataPrecision.max << "]";
+
+            //// TODO: debug only
+            //std::cout <<
+            //    "Output interval [" << lowValue << " - " << highValue <<
+            //    "] for layer " << layer->name << " is not correct, " <<
+            //    "expected [" << dataPrecision.min << " - " << dataPrecision.max << "]" << std::endl;
+        }
+
+        switch (params.quantizedTensorAlignmentOnActivations) {
+            case LayerTransformation::QuantizedTensorAlignment::None: {
+                if ((dataPrecision.precision == Precision::U8) || (dataPrecision.precision == Precision::I8)) {
+                    if ((quantizationDetails.levels != 255) && (quantizationDetails.levels != 256)) {
+                        ASSERT_TRUE(false) << "unexpected quantization levels " << quantizationDetails.levels <<
+                            " for layer " << layer->name;
+                    }
+                } else {
+                    ASSERT_TRUE(false) << "layer '" << layer->type << "', name '" << layer->name << "' has unexpected precision" << dataPrecision.precision;
+                }
+
+                break;
+            }
+            case LayerTransformation::QuantizedTensorAlignment::UpdateIntervals: {
+                if ((dataPrecision.precision == Precision::U8) || (dataPrecision.precision == Precision::I8)) {
+                    if ((quantizationDetails.levels != 255) && (quantizationDetails.levels != 256)) {
+                        ASSERT_TRUE(false) << "unexpected quantization levels " << quantizationDetails.levels <<
+                            " for layer " << layer->name;
+                    }
+                } else {
+                    ASSERT_TRUE(false) << "layer '" << layer->type << "', name '" << layer->name << "' has unexpected precision" << dataPrecision.precision;
+                }
+
+                break;
+            }
+            case LayerTransformation::QuantizedTensorAlignment::UpdateLevel: {
+                if ((dataPrecision.precision == Precision::U8) || (dataPrecision.precision == Precision::I8)) {
+                    if (quantizationDetails.levels > 256) {
+                        ASSERT_TRUE(false) << "layer '" << layer->type << "', name '" << layer->name << "' has unexpected quantization levels " << quantizationDetails.levels;
+                    }
+
+                    if (dataPrecision.precision == Precision::U8) {
+                        if (quantizationDetails.outputLowValues[0] != 0.0) {
+                            ASSERT_TRUE(false) << "unexpected output interval low value: " << quantizationDetails << " for layer " << layer->name;
+                        }
+                        if (quantizationDetails.levels != (quantizationDetails.outputHighValues[0] + 1)) {
+                            ASSERT_TRUE(false) << "unexpected quantization levels " << quantizationDetails.levels <<
+                                " for layer " << layer->name;
+                        }
+                    } else if (dataPrecision.precision == Precision::I8) {
+                        // FIXME: alignment on weights is temporarily unsupported
+                        if (CNNNetworkHelper::onWeights(*layer)) {
+                            break;
+                        }
+
+                        if (quantizationDetails.levels != (fabs(quantizationDetails.outputLowValues[0]) + quantizationDetails.outputHighValues[0] + 1)) {
+                            ASSERT_TRUE(false) << "unexpected quantization levels " << quantizationDetails.levels << " for layer " << layer->name;
+                        }
+                    }
+                } else {
+                    ASSERT_TRUE(false) << "layer '" << layer->type << "', name '" << layer->name << "' has unexpected precision" << dataPrecision.precision;
+                }
+                break;
+            }
+            default: {
+                THROW_IE_EXCEPTION << "unsupported QuantizedTensorAlignment mode";
+            }
+        }
+
+        if (multiBranch) {
+            if (((dataPrecision.precision == Precision::I8) || (dataPrecision.precision == Precision::U8)) &&
+                (quantizationDetails.levels > 256)) {
+                ASSERT_TRUE(false) << "unexpected quantization levels " << quantizationDetails.levels;
+            }
+
+            // TODO: FQ before Eltwise uses another algorithm - fix it
+            //if ((lowValue < (dataPrecision.min - 0.0001)) || (highValue > (dataPrecision.max + 0.0001))) {
+            //    ASSERT_TRUE(false) <<
+            //        "Output interval [" << lowValue << " - " << highValue << "] for layer " << layer->name <<
+            //        " is not included in [" << dataPrecision.min << " - " << dataPrecision.max << "]";
+
+            //    //// TODO: debug only
+            //    //std::cout <<
+            //    //    "Output interval [" << lowValue << " - " << highValue << "] for layer " << layer->name <<
+            //    //    " is not included in [" << dataPrecision.min << " - " << dataPrecision.max << "]" << std::endl;
+            //}
+        } else {
+            if ((dataPrecision.precision == Precision::I8) || (dataPrecision.precision == Precision::U8)) {
+                // FIXME: alignment on weights is temporarily unsupported
+                if (!CNNNetworkHelper::onWeights(*layer)) {
+                    if ((dataPrecision.precision == Precision::U8) &&
+                        ((!equals(dataPrecision.min, lowValue)) || (!equals(dataPrecision.max, highValue)))) {
+                        ASSERT_TRUE(false) <<
+                            "Output interval [" << lowValue << " - " << highValue <<
+                            "] for layer " << layer->name << " is not correct, " <<
+                            "expected [" << dataPrecision.min << " - " << dataPrecision.max << "]";
+                    }
+                }
+            } else {
+                ASSERT_TRUE(false) << "layer '" << layer->type << "', name '" << layer->name << "' has unexpected precision" << dataPrecision.precision;
+            }
+        }
+    }
+}
+
+bool LowPrecisionTransformationValidation::isFakeQuantizeBeforeEltwiseOnConvolutionBranch(const CNNLayer& fakeQuantize) {
+    // TODO: where is the check for the Convolution branch?
+    const std::vector<CNNLayerPtr> children = CNNNetworkHelper::getChildren(fakeQuantize);
+    if (children.size() == 1lu) {
+        if (CaselessEq<std::string>()(children[0]->type, "Eltwise"))
+            return true;
+        if (CaselessEq<std::string>()(children[0]->type, "ScaleShift")) {
+            const std::vector<CNNLayerPtr> children2 = CNNNetworkHelper::getChildren(*children[0]);
+            return (children2.size() == 1lu) && (CaselessEq<std::string>()(children2[0]->type, "Eltwise"));
+        }
+    }
+    return false;
+}
+
+bool LowPrecisionTransformationValidation::isFakeQuantizeBeforeConcat(const CNNLayer& fakeQuantize) {
+    const std::vector<CNNLayerPtr> children = CNNNetworkHelper::getChildrenRecursivelyExceptTypes(fakeQuantize, { "Pooling" });
+    for (const CNNLayerPtr& child : children) {
+        if (child->type == "Concat") {
+            return true;
+        }
+    }
+    return false;
+}
+
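+// Epsilon comparison used for the interval checks above: with the default max_diff
+// of 0.0001f, equals(127.00005f, 127.f) holds while equals(127.1f, 127.f) does not.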
+inline bool LowPrecisionTransformationValidation::equals(const float value1, const float value2, const float max_diff) {
+    return (std::fabs(value1 - value2) < max_diff);
+}
+
+void LowPrecisionTransformationValidation::validateEltwise(CNNNetwork& network, const LayerTransformation::Params& params, const CNNLayer& eltwise) {
+    if (params.updatePrecisions) {
+        // TODO: refactor: use used transformations to identify is Eltwise transformation or Eltwise CPU transformation used
+        //const std::vector<CNNLayerPtr> parents = CNNNetworkHelper::getParentsRecursivelyExceptTypes(eltwise, { "Pooling", "ScaleShift" });
+        //if ((parents[0]->type == "FakeQuantize") && (parents[1]->type == "FakeQuantize")) {
+        //    const Precision precision0 = parents[0]->outData[0]->getPrecision();
+        //    const Precision precision1 = parents[1]->outData[0]->getPrecision();
+        //    if (
+        //        (((precision0 != Precision::I8) && (precision0 != Precision::U8)) ||
+        //        ((precision1 != Precision::FP32) && (precision1 != Precision::FP16))) &&
+        //        (((precision0 != Precision::FP32) && (precision0 != Precision::FP16)) ||
+        //        ((precision1 != Precision::I8) && (precision1 != Precision::U8)))
+        //        ) {
+        //        ASSERT_TRUE(false) << "layer precisions are not correct: " <<
+        //            parents[0]->name << ", " << parents[0]->precision << " and " <<
+        //            parents[1]->name << ", " << parents[1]->precision;
+        //    }
+        //}
+    }
+}
+
+void LowPrecisionTransformationValidation::validateAsymmetricPattern(
+    const CNNNetwork& network,
+    const LayerTransformation::Params& params,
+    const std::unordered_set<std::string>& notTransformedLayers) {
+    const std::vector<CNNLayerPtr> layers = InferenceEngine::details::CNNNetSortTopologically(network);
+    for (const CNNLayerPtr layer : layers) {
+        if (notTransformedLayers.find(layer->name) != notTransformedLayers.end()) {
+            continue;
+        }
+        validateAsymmetricPattern(*layer, params);
+    }
+}
+
+void LowPrecisionTransformationValidation::validateAsymmetricPattern(const CNNLayer& layer, const LayerTransformation::Params& params) {
+    if (layer.type != "Convolution") {
+        return;
+    }
+
+    if (params.supportAsymmetricQuantization && params.updatePrecisions) {
+        CNNLayerPtr parentOnData = CNNNetworkHelper::getParent(layer, 0ul);
+        if (parentOnData->type == "Eltwise") {
+            validateAsymmetricPatternEltwise(*parentOnData, params);
+        }
+
+        CNNLayerPtr parentOnWeights = CNNNetworkHelper::getParent(layer, 1ul);
+        if (parentOnWeights == nullptr) {
+            THROW_IE_EXCEPTION << "weights layer is absent for " << layer.type << " " << layer.name;
+            // std::cout << "weights layer is absent for " << layer.type << " " << layer.name << std::endl;
+            // return;
+        }
+        if (parentOnWeights->type == "Eltwise") {
+            validateAsymmetricPatternEltwise(*parentOnWeights, params);
+        }
+    }
+}
+
+void LowPrecisionTransformationValidation::validateAsymmetricPatternEltwise(const CNNLayer& eltwise, const LayerTransformation::Params& params) {
+    if ((!eltwise.CheckParamPresence("operation")) || (eltwise.GetParamAsString("operation") != "sub")) {
+        return;
+    }
+
+    const std::vector<CNNLayerPtr> parents = CNNNetworkHelper::getParents(eltwise);
+    for (const CNNLayerPtr& parent : parents) {
+        if (parent->type == "Input") {
+            return;
+        }
+    }
+
+    // TODO: hardcoded for CPU
+    const Precision precision = CNNNetworkHelper::onWeights(eltwise) ? Precision::I8 : Precision::U8;
+    for (const CNNLayerPtr& parent : parents) {
+        if (parent->type == "Const") {
+            validateEmptyConst(*parent, params);
+        }
+
+        ASSERT_EQ(1, parent->outData.size());
+        ASSERT_EQ(precision, parent->outData[0]->getPrecision()) <<
+            "layer " << parent->type << " '" << parent->name <<
+            "' has unexpected precision " << parent->outData[0]->getPrecision() <<
+            ", expected: " << precision;
+    }
+}
+
+void LowPrecisionTransformationValidation::validateEmptyConst(const CNNLayer& layer, const LayerTransformation::Params& params) {
+    if (layer.type == "Const") {
+        const Precision precision = layer.outData[0]->getTensorDesc().getPrecision();
+        if (params.updatePrecisions) {
+            // TODO: get correct precision here
+            ASSERT_TRUE((precision == Precision::U8) || (precision == Precision::I8));
+        } else {
+            ASSERT_TRUE((precision == Precision::FP32) || (precision == Precision::FP16));
+        }
+
+        const auto it = layer.blobs.find("custom");
+        ASSERT_NE(layer.blobs.end(), it);
+        const Blob::Ptr blob = it->second;
+        std::shared_ptr<float> buffer = CNNNetworkHelper::getFloatData(blob);
+        ASSERT_TRUE(std::any_of(buffer.get(), buffer.get() + blob->size(), [](const float value) { return value != 0.0; })) <<
+            layer.type << " layer '" << layer.name << "' has " << blob->getTensorDesc().getPrecision() << " zero values blob";
+    }
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/common/validation.hpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/common/validation.hpp
new file mode 100644 (file)
index 0000000..c0864b7
--- /dev/null
@@ -0,0 +1,363 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <string>
+#include <vector>
+#include <memory>
+#include <unordered_map>
+#include <unordered_set>
+
+#include "details/ie_cnn_network_tools.h"
+#include <details/caseless.hpp>
+#include "low_precision_transformations/network_helper.hpp"
+#include "low_precision_transformations/layer_transformation.hpp"
+
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
+class LowPrecisionChainValidation {
+public:
+    class Chain : public std::unordered_set<std::string> {
+    public:
+        Chain(const Precision precision) : precision(precision) {}
+        const Precision precision;
+        bool exist(const std::vector<std::string>& layerNames) const {
+            for (const std::string& layerName : layerNames) {
+                if (find(layerName) == end()) {
+                    return false;
+                }
+            }
+            return true;
+        }
+    };
+
+    using ChainsVector = std::vector<std::shared_ptr<Chain>>;
+
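+    // Usage sketch (the layer names are hypothetical): collect precision-uniform
+    // chains reachable from a start layer, then check that some expected layers
+    // ended up in one chain:
+    //   auto chains = LowPrecisionChainValidation::validate(network, startLayer, nullptr);
+    //   for (const auto& chain : chains) {
+    //       if (chain->exist({ "conv1", "relu1" })) { /* both layers share chain->precision */ }
+    //   }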
+    static ChainsVector validate(
+        const CNNNetwork& network,
+        const CNNLayerPtr layer,
+        const CNNLayerPtr endLayer) {
+        std::unordered_map<std::string, Precision> precisionByPort;
+        analyse(network, precisionByPort);
+
+        std::unordered_map<std::string, std::shared_ptr<InternalChain>> handledLayers;
+
+        InternalChainsMap chains;
+        const std::shared_ptr<InternalChain> chain = std::make_shared<InternalChain>(handledLayers.size(), layer->outData[0]->getTensorDesc().getPrecision());
+        chains.emplace(chain->id, chain);
+
+        std::unordered_map<size_t, std::unordered_set<size_t>> hasToBeMerged;
+
+        validate(
+            layer,
+            endLayer,
+            precisionByPort,
+            handledLayers,
+            chains,
+            chains[0],
+            layer->outData[0]->getTensorDesc().getPrecision(),
+            hasToBeMerged);
+
+        auto it = hasToBeMerged.begin();
+        while (it != hasToBeMerged.end()) {
+            const size_t destinationChainId = it->first;
+            const auto destinationChainIt = chains.find(destinationChainId);
+            if (destinationChainIt == chains.end()) {
+                THROW_IE_EXCEPTION << "chain with id was not found " << destinationChainId;
+            }
+
+            const std::shared_ptr<InternalChain> destinationChain = destinationChainIt->second;
+
+            for (auto const sourceChainId : it->second) {
+                const auto sourceChainIt = chains.find(sourceChainId);
+                if (sourceChainIt == chains.end()) {
+                    THROW_IE_EXCEPTION << "chain with id was not found " << sourceChainId;
+                }
+
+                std::shared_ptr<InternalChain> sourceChain = sourceChainIt->second;
+                for (auto sourceIt = sourceChain->begin(); sourceIt != sourceChain->end(); ++sourceIt) {
+                    destinationChain->emplace(*sourceIt);
+                }
+
+                chains.erase(sourceChainIt);
+            }
+
+            hasToBeMerged.erase(it);
+            it = hasToBeMerged.begin();
+        }
+
+        ChainsVector resultChains;
+        for (auto internalChainIt : chains) {
+            auto internalChain = internalChainIt.second;
+            std::shared_ptr<Chain> chain = std::make_shared<Chain>(internalChain->precision);
+            resultChains.push_back(chain);
+            for (auto layerNameIt = internalChain->begin(); layerNameIt != internalChain->end(); ++layerNameIt) {
+                chain->insert(*layerNameIt);
+            }
+        }
+        return resultChains;
+    }
+
+private:
+    class InternalChain : public std::unordered_set<std::string> {
+    public:
+        InternalChain(const size_t id, const Precision precision) : id(id), precision(precision) {}
+        const size_t id;
+        const Precision precision;
+    };
+
+    using InternalChainsMap = std::map<size_t, std::shared_ptr<InternalChain>>;
+
+    static void validate(
+        const CNNLayerPtr layer,
+        const CNNLayerPtr endLayer,
+        const std::unordered_map<std::string, Precision>& precisionByPort,
+        std::unordered_map<std::string, std::shared_ptr<InternalChain>>& handledLayers,
+        InternalChainsMap& chains,
+        std::shared_ptr<InternalChain> chain,
+        const Precision chainPrecision,
+        std::unordered_map<std::size_t, std::unordered_set<size_t>>& hasToBeMerged) {
+        const auto handledLayerIt = handledLayers.find(layer->name);
+        if (handledLayerIt != handledLayers.end()) {
+            if (chain->precision == handledLayerIt->second->precision) {
+                const auto it = hasToBeMerged.find(handledLayerIt->second->id);
+                std::unordered_set<size_t>& fused = it == hasToBeMerged.end() ?
+                    hasToBeMerged.emplace(handledLayerIt->second->id, std::unordered_set<size_t>()).first->second :
+                    it->second;
+                fused.insert(chain->id);
+            }
+            return;
+        }
+
+        handledLayers.emplace(layer->name, chain);
+
+        chain->insert(layer->name);
+
+        if ((endLayer != nullptr) && (layer->name == endLayer->name)) {
+            return;
+        }
+
+        for (size_t outDataIndex = 0; outDataIndex < layer->outData.size(); ++outDataIndex) {
+            DataPtr outData = layer->outData[outDataIndex];
+            const std::map<std::string, CNNLayerPtr> inputTo = outData->getInputTo();
+            const Precision parentOutPrecision = getDataPrecision(precisionByPort, *layer, outDataIndex);
+
+            for (auto it = inputTo.begin(); it != inputTo.end(); it++) {
+                const CNNLayerPtr child = it->second;
+
+                for (size_t childOutDataIndex = 0ul; childOutDataIndex < child->outData.size(); ++childOutDataIndex) {
+                    const Precision childOutPrecision = getDataPrecision(precisionByPort, *child, childOutDataIndex);
+                    if (parentOutPrecision == childOutPrecision) {
+                        validate(child, endLayer, precisionByPort, handledLayers, chains, chain, chainPrecision, hasToBeMerged);
+                    } else {
+                        std::shared_ptr<InternalChain> childChain = std::make_shared<InternalChain>(handledLayers.size(), childOutPrecision);
+                        chains.emplace(childChain->id, childChain);
+                        validate(child, endLayer, precisionByPort, handledLayers, chains, childChain, childOutPrecision, hasToBeMerged);
+                    }
+                }
+            }
+        }
+    }
+
+    static void analyse(const CNNNetwork& network, std::unordered_map<std::string, Precision>& precisionByPort) {
+        std::unordered_set<std::string> handledLayers;
+
+        const std::vector<CNNLayerPtr> layers = CNNNetSortTopologically(network);
+        for (const CNNLayerPtr layer : layers) {
+            if (handledLayers.find(layer->name) != handledLayers.end()) {
+                continue;
+            }
+
+            if (analyseAsymmetricQuantizationPattern(*layer, precisionByPort, handledLayers) != Precision::UNSPECIFIED) {
+                continue;
+            }
+
+            if (analyseSymmetricQuantizationPattern(*layer, precisionByPort, handledLayers) != Precision::UNSPECIFIED) {
+                continue;
+            }
+
+            fillPrecisionByPort(*layer, Precision::UNSPECIFIED, precisionByPort);
+            handledLayers.emplace(layer->name);
+        }
+    }
+
+    static void fillPrecisionByPort(
+        const CNNLayer& layer,
+        const Precision precision,
+        std::unordered_map<std::string, Precision>& precisionByPort) {
+        for (size_t outDataIndex = 0; outDataIndex < layer.outData.size(); ++outDataIndex) {
+            DataPtr outData = layer.outData[outDataIndex];
+            const std::string outDataId = getDataId(layer, outDataIndex);
+            if (precisionByPort.find(outDataId) != precisionByPort.end()) {
+                continue;
+            }
+
+            precisionByPort.emplace(outDataId, precision == Precision::UNSPECIFIED ? outData->getTensorDesc().getPrecision() : precision);
+        }
+    }
+
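+    // Each output port is keyed as "<layerName>.outputPort<index>", e.g. the first
+    // output of a (hypothetical) layer "conv1" maps to "conv1.outputPort0".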
+    static std::string getDataId(const CNNLayer& layer, const size_t dataIndex) {
+        return layer.name + ".outputPort" + std::to_string(dataIndex);
+    }
+
+    static Precision getDataPrecision(const std::unordered_map<std::string, Precision>& precisionByPort, const CNNLayer& layer, const size_t dataIndex) {
+        const auto precisionIt = precisionByPort.find(getDataId(layer, dataIndex));
+        if (precisionIt == precisionByPort.end()) {
+            THROW_IE_EXCEPTION <<
+                "Precision for data '" << getDataId(layer, dataIndex) <<
+                "' was not found for layer " << layer.type << " " << layer.name;
+        }
+        return precisionIt->second;
+    }
+
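+    // Recognizes the asymmetric quantization subgraph and propagates one precision
+    // through it:
+    //   FakeQuantize   Const
+    //          \        /
+    //        Eltwise("sub")  -->  Convolution  -->  FakeQuantize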
+    static Precision analyseAsymmetricQuantizationPattern(
+        const CNNLayer& layer,
+        std::unordered_map<std::string, Precision>& precisionByPort,
+        std::unordered_set<std::string>& handledLayers) {
+        if (!CaselessEq<std::string>()(layer.type, "Eltwise")) {
+            return Precision::UNSPECIFIED;
+        }
+
+        const std::vector<CNNLayerPtr> parents = CNNNetworkHelper::getParents(layer);
+        if ((parents.size() != 2ul) ||
+            (!CaselessEq<std::string>()(parents[0]->type, "FakeQuantize")) ||
+            (!CaselessEq<std::string>()(parents[1]->type, "Const")) ||
+            CNNNetworkHelper::getParents(*parents[1]).size() != 0) {
+            return Precision::UNSPECIFIED;
+        }
+
+        const std::vector<CNNLayerPtr> children = CNNNetworkHelper::getChildren(layer);
+        if ((children.size() != 1ul) || (!CaselessEq<std::string>()(children[0]->type, "Convolution"))) {
+            return Precision::UNSPECIFIED;
+        }
+
+        const std::vector<CNNLayerPtr> convolutionChildren = CNNNetworkHelper::getChildren(*children[0]);
+        if ((convolutionChildren.size() != 1ul) || (!CaselessEq<std::string>()(convolutionChildren[0]->type, "FakeQuantize"))) {
+            return Precision::UNSPECIFIED;
+        }
+
+        const Precision precisionBefore = CNNNetworkHelper::getPrecisionParent(layer);
+        const Precision precisionAfterFakeQuantize = convolutionChildren[0]->outData[0]->getTensorDesc().getPrecision();
+        const Precision precision = (precisionBefore == precisionAfterFakeQuantize) ? precisionAfterFakeQuantize : layer.outData[0]->getTensorDesc().getPrecision();
+
+        fillPrecisionByPort(layer, precision, precisionByPort);
+        handledLayers.emplace(layer.name);
+        handledLayers.emplace(children[0]->name);
+
+        return precision;
+    }
+
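+    // Recognizes the symmetric pattern: Convolution / FullyConnected / GEMM followed
+    // by exactly one FakeQuantize child; the matched layer inherits the FakeQuantize
+    // output precision when it equals the precision before the layer.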
+    static Precision analyseSymmetricQuantizationPattern(
+        const CNNLayer& layer,
+        std::unordered_map<std::string, Precision>& precisionByPort,
+        std::unordered_set<std::string>& handledLayers) {
+        if ((!CaselessEq<std::string>()(layer.type, "Convolution")) &&
+            (!CaselessEq<std::string>()(layer.type, "FullyConnected")) &&
+            (!CaselessEq<std::string>()(layer.type, "GEMM"))) {
+            return Precision::UNSPECIFIED;
+        }
+
+        const std::vector<CNNLayerPtr> children = CNNNetworkHelper::getChildren(layer);
+        if ((children.size() != 1ul) || (!CaselessEq<std::string>()(children[0]->type, "FakeQuantize"))) {
+            return Precision::UNSPECIFIED;
+        }
+
+        const Precision precisionBefore = CNNNetworkHelper::getPrecisionParent(layer, 0ul);
+        const Precision precisionAfterFakeQuantize = children[0]->outData[0]->getTensorDesc().getPrecision();
+        const Precision precision = (precisionBefore == precisionAfterFakeQuantize) ? precisionAfterFakeQuantize : layer.outData[0]->getTensorDesc().getPrecision();
+
+        // TODO: convolution weights and biases layers are skipped
+        fillPrecisionByPort(layer, precision, precisionByPort);
+        handledLayers.emplace(layer.name);
+
+        return precision;
+    }
+};
+
+class LowPrecisionTransformationValidation {
+public:
+    static void validate(
+            InferenceEngine::CNNNetwork& network,
+            // TODO: not correct, quantization parameters are defined per transformation
+            const InferenceEngine::details::LayerTransformation::Params& params,
+            const std::unordered_set<std::string>& notTransformedLayers = {},
+            const std::vector<std::pair<std::string, std::string>>& originalLayersInfo = {});
+
+    static std::vector<std::pair<std::string, std::string>> getLayers(const InferenceEngine::CNNNetwork& network);
+
+    static void validateIntervalsAndLevel(
+            const InferenceEngine::CNNNetwork& network,
+            const InferenceEngine::details::LayerTransformation::Params& params,
+            const std::unordered_set<std::string>& notTransformedLayers);
+
+    static void validateWeightsToConst(
+            const InferenceEngine::CNNNetwork& network,
+            const InferenceEngine::details::LayerTransformation::Params& params,
+            const std::unordered_set<std::string>& notTransformedLayers);
+
+    // TODO: refactor (I8/U8 is used)
+    static void validatePrecision(
+            const InferenceEngine::CNNNetwork& network,
+            const InferenceEngine::details::LayerTransformation::Params& params,
+            const std::unordered_set<std::string>& notTransformedLayers);
+
+    static void validateActivations(
+        const InferenceEngine::CNNNetwork& network,
+        const InferenceEngine::details::LayerTransformation::Params& params,
+        const std::unordered_set<std::string>& notTransformedLayers);
+
+    static void validateScaleShifts(
+        const InferenceEngine::CNNNetwork& network,
+        const InferenceEngine::details::LayerTransformation::Params& params,
+        const std::unordered_set<std::string>& notTransformedLayers);
+
+    static void validateConvolutions(
+        const InferenceEngine::CNNNetwork& network,
+        const InferenceEngine::details::LayerTransformation::Params& params,
+        const std::unordered_set<std::string>& notTransformedLayers);
+
+    static void validateWithReference(
+        InferenceEngine::CNNNetwork& network,
+        const std::vector<std::pair<std::string, std::string>>& originalLayersInfo);
+
+    static void validateCustomLayerHandling(
+        const InferenceEngine::CNNNetwork& network,
+        const std::unordered_set<std::string>& notTransformedLayers);
+
+private:
+    static InferenceEngine::details::DataPrecision getDataPrecision(
+        const InferenceEngine::CNNLayer& layer,
+        const InferenceEngine::details::LayerTransformation::Params& params);
+
+    // TODO: quantizedTensorAlignmentOnActivations is used
+    static void validateFakeQuantize(
+        const InferenceEngine::CNNLayerPtr& layer,
+        const InferenceEngine::details::LayerTransformation::Params& params,
+        const bool multiBranch);
+
+    static bool isFakeQuantizeBeforeEltwiseOnConvolutionBranch(const InferenceEngine::CNNLayer& fakeQuantize);
+
+    static bool isFakeQuantizeBeforeConcat(const InferenceEngine::CNNLayer& fakeQuantize);
+
+    static inline bool equals(const float value1, const float value2, const float max_diff = 0.0001f);
+
+    static void validateEltwise(
+        InferenceEngine::CNNNetwork& network,
+        const InferenceEngine::details::LayerTransformation::Params& params,
+        const InferenceEngine::CNNLayer& eltwise);
+
+    static void validateAsymmetricPattern(
+        const InferenceEngine::CNNNetwork& network,
+        const InferenceEngine::details::LayerTransformation::Params& params,
+        const std::unordered_set<std::string>& notTransformedLayers);
+
+    static void validateAsymmetricPattern(const InferenceEngine::CNNLayer& layer, const InferenceEngine::details::LayerTransformation::Params& params);
+
+    static void validateAsymmetricPatternEltwise(const InferenceEngine::CNNLayer& eltwise, const InferenceEngine::details::LayerTransformation::Params& params);
+
+    static void validateEmptyConst(const InferenceEngine::CNNLayer& layer, const InferenceEngine::details::LayerTransformation::Params& params);
+};
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/concat_multi_branch_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/concat_multi_branch_test.cpp
new file mode 100644 (file)
index 0000000..10ac172
--- /dev/null
@@ -0,0 +1,627 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+std::string ConcatMultiBranchTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    std::string layers = layersTemplate;
+    // TODO: hard-coded values
+
+    size_t totalOffset = 0;
+
+    REPLACE_WITH_NUM(layers, "DATA_CONST_INPUT_LOW_OFFSET_1", totalOffset);
+    totalOffset += 4;
+    REPLACE_WITH_NUM(layers, "DATA_CONST_INPUT_HIGHT_OFFSET_1", totalOffset);
+    totalOffset += 4;
+    REPLACE_WITH_NUM(layers, "DATA_CONST_OUTPUT_LOW_OFFSET_1", totalOffset);
+    totalOffset += 4;
+    REPLACE_WITH_NUM(layers, "DATA_CONST_OUTPUT_HIGH_OFFSET_1", totalOffset);
+    totalOffset += 4;
+
+    REPLACE_WITH_NUM(layers, "DATA_CONST_INPUT_LOW_OFFSET_2", totalOffset);
+    totalOffset += 4;
+    REPLACE_WITH_NUM(layers, "DATA_CONST_INPUT_HIGHT_OFFSET_2", totalOffset);
+    totalOffset += 4;
+    REPLACE_WITH_NUM(layers, "DATA_CONST_OUTPUT_LOW_OFFSET_2", totalOffset);
+    totalOffset += 4;
+    REPLACE_WITH_NUM(layers, "DATA_CONST_OUTPUT_HIGH_OFFSET_2", totalOffset);
+    totalOffset += 4;
+
+    REPLACE_WITH_NUM(layers, "DATA_CONST_INPUT_LOW_OFFSET_3", totalOffset);
+    totalOffset += 4;
+    REPLACE_WITH_NUM(layers, "DATA_CONST_INPUT_HIGHT_OFFSET_3", totalOffset);
+    totalOffset += 4;
+    REPLACE_WITH_NUM(layers, "DATA_CONST_OUTPUT_LOW_OFFSET_3", totalOffset);
+    totalOffset += 4;
+    REPLACE_WITH_NUM(layers, "DATA_CONST_OUTPUT_HIGH_OFFSET_3", totalOffset);
+    totalOffset += 4;
+
+    REPLACE_WITH_NUM(layers, "WEIGHTS_CONST_INPUT_OFFSET", totalOffset);
+    totalOffset += 6 * 6 * 3 * 3 * 4;
+    REPLACE_WITH_NUM(layers, "WEIGHTS_CONST_INPUT_SIZE", 6 * 6 * 3 * 3 * 4);
+
+    REPLACE_WITH_NUM(layers, "WEIGHTS_CONST_INPUT_LOW_OFFSET", totalOffset);
+    totalOffset += 4;
+    REPLACE_WITH_NUM(layers, "WEIGHTS_CONST_INPUT_HIGHT_OFFSET", totalOffset);
+    totalOffset += 4;
+    REPLACE_WITH_NUM(layers, "WEIGHTS_CONST_OUTPUT_LOW_OFFSET", totalOffset);
+    totalOffset += 4;
+    REPLACE_WITH_NUM(layers, "WEIGHTS_CONST_OUTPUT_HIGH_OFFSET", totalOffset);
+    totalOffset += 4;
+
+    REPLACE_WITH_NUM(layers, "BIASES_CONST_OFFSET", totalOffset);
+    totalOffset += 6 * 4;
+    REPLACE_WITH_NUM(layers, "BIASES_CONST_SIZE", 6 * 4);
+
+    REPLACE_WITH_NUM(layers, "DATA_CONST_INPUT_LOW_OFFSET_4", totalOffset);
+    totalOffset += 4;
+    REPLACE_WITH_NUM(layers, "DATA_CONST_INPUT_HIGHT_OFFSET_4", totalOffset);
+    totalOffset += 4;
+    REPLACE_WITH_NUM(layers, "DATA_CONST_OUTPUT_LOW_OFFSET_4", totalOffset);
+    totalOffset += 4;
+    REPLACE_WITH_NUM(layers, "DATA_CONST_OUTPUT_HIGH_OFFSET_4", totalOffset);
+    totalOffset += 4;
+
+    REPLACE_WITH_NUM(layers, "DEQUANTIZE_SCALESHIFT_WEIGHTS_OFFSET", totalOffset);
+    totalOffset += 24;
+    REPLACE_WITH_NUM(layers, "DEQUANTIZE_SCALESHIFT_BIASES_OFFSET", totalOffset);
+    totalOffset += 24;
+
+    REPLACE_WITH_STR(layers, "_PR_", p._network_precision);
+
+    const std::string model = IRTemplateGenerator::getIRTemplate(
+        "TransformationsTest",
+        { { 1lu, 3, 299, 299 }, { 1lu, 3, 299, 299 } },
+        p._network_precision,
+        layers,
+        edgesTemplate,
+        6);
+
+    return model;
+}
+
+std::string ConcatMultiBranchTestModel::getName() const {
+    return "ConcatMultiBranchTestModel";
+}
+
+bool ConcatMultiBranchTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer(LowPrecisionTransformer::getAllTransformations(params));
+    transformer.transform(network);
+    return true;
+}
+
+void ConcatMultiBranchTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "branch1/dataConstInputLow1"), 255.0 / 200.0, "custom");
+    fillData(getLayer(network, "branch1/dataConstInputHigh1"), 255.0 / 100.0, "custom");
+    fillData(getLayer(network, "branch1/dataConstOutputLow1"), 255.0 / 200.0, "custom");
+    fillData(getLayer(network, "branch1/dataConstOutputHigh1"), 255.0 / 100.0, "custom");
+
+    fillData(getLayer(network, "branch1/dataConstInputLow2"), 255.0 / 400.0, "custom");
+    fillData(getLayer(network, "branch1/dataConstInputHigh2"), 255.0 / 200.0, "custom");
+    fillData(getLayer(network, "branch1/dataConstOutputLow2"), 255.0 / 400.0, "custom");
+    fillData(getLayer(network, "branch1/dataConstOutputHigh2"), 255.0 / 200.0, "custom");
+
+    fillData(getLayer(network, "branch2/dataConstInputLow3"), 255.0 / 200.0, "custom");
+    fillData(getLayer(network, "branch2/dataConstInputHigh3"), 255.0 / 100.0, "custom");
+    fillData(getLayer(network, "branch2/dataConstOutputLow3"), 255.0 / 200.0, "custom");
+    fillData(getLayer(network, "branch2/dataConstOutputHigh3"), 255.0 / 100.0, "custom");
+
+    fillData(getLayer(network, "branch2/weightsConstInput"), 0.0, "custom");
+    fillData(getLayer(network, "branch2/weightsConstInputLow"), 0.0, "custom");
+    fillData(getLayer(network, "branch2/weightsConstInputHigh"), 255.0 / 200.0, "custom");
+    fillData(getLayer(network, "branch2/weightsConstOutputLow"), 0.0, "custom");
+    fillData(getLayer(network, "branch2/weightsConstOutputHigh"), 255.0 / 200.0, "custom");
+
+    fillData(getLayer(network, "branch2/biasesConst"), { 1.0, 2.0, 3.0, 4.0, 5.0, 6.0 });
+
+    fillData(getLayer(network, "branch2/dataConstInputLow4"), 255.0 / 800.0, "custom");
+    fillData(getLayer(network, "branch2/dataConstInputHigh4"), 255.0 / 400.0, "custom");
+    fillData(getLayer(network, "branch2/dataConstOutputLow4"), 255.0 / 800.0, "custom");
+    fillData(getLayer(network, "branch2/dataConstOutputHigh4"), 255.0 / 400.0, "custom");
+}
+
+const std::string ConcatMultiBranchTestModel::layersTemplate = R"V0G0N(
+<layer name="branch1/dataConstInputLow1" type="Const" precision="_PR_" id="102">
+    <output>
+        <port id="0">
+            <dim>1</dim>
+        </port>
+    </output>
+    <blobs>
+        <custom offset="DATA_CONST_INPUT_LOW_OFFSET_1" size="4"/>
+    </blobs>
+</layer>
+<layer name="branch1/dataConstInputHigh1" type="Const" precision="_PR_" id="103">
+    <output>
+        <port id="0">
+            <dim>1</dim>
+        </port>
+    </output>
+    <blobs>
+        <custom offset="DATA_CONST_INPUT_HIGHT_OFFSET_1" size="4"/>
+    </blobs>
+</layer>
+
+<layer name="branch1/dataConstOutputLow1" type="Const" precision="_PR_" id="104">
+    <output>
+        <port id="0">
+            <dim>1</dim>
+        </port>
+    </output>
+    <blobs>
+        <custom offset="DATA_CONST_OUTPUT_LOW_OFFSET_1" size="4"/>
+    </blobs>
+</layer>
+<layer name="branch1/dataConstOutputHigh1" type="Const" precision="_PR_" id="105">
+    <output>
+        <port id="0">
+            <dim>1</dim>
+        </port>
+    </output>
+    <blobs>
+        <custom offset="DATA_CONST_OUTPUT_HIGH_OFFSET_1" size="4"/>
+    </blobs>
+</layer>
+
+<layer name="branch1/dataFakeQuantize1" type="FakeQuantize" precision="_PR_" id="106">
+    <data levels="256" />
+    <input>
+        <port id="0">
+            <dim>1</dim>
+            <dim>3</dim>
+            <dim>299</dim>
+            <dim>299</dim>
+        </port>
+        <port id="1">
+            <dim>1</dim>
+        </port>
+        <port id="2">
+            <dim>1</dim>
+        </port>
+        <port id="3">
+            <dim>1</dim>
+        </port>
+        <port id="4">
+            <dim>1</dim>
+        </port>
+    </input>
+    <output>
+        <port id="5">
+            <dim>1</dim>
+            <dim>3</dim>
+            <dim>299</dim>
+            <dim>299</dim>
+        </port>
+    </output>
+</layer>
+
+<layer name="branch1/dataConstInputLow2" type="Const" precision="_PR_" id="107">
+    <output>
+        <port id="0">
+            <dim>1</dim>
+        </port>
+    </output>
+    <blobs>
+        <custom offset="DATA_CONST_INPUT_LOW_OFFSET_2" size="4"/>
+    </blobs>
+</layer>
+<layer name="branch1/dataConstInputHigh2" type="Const" precision="_PR_" id="108">
+    <output>
+        <port id="0">
+            <dim>1</dim>
+        </port>
+    </output>
+    <blobs>
+        <custom offset="DATA_CONST_INPUT_HIGHT_OFFSET_2" size="4"/>
+    </blobs>
+</layer>
+
+<layer name="branch1/dataConstOutputLow2" type="Const" precision="_PR_" id="109">
+        <output>
+            <port id="0">
+                <dim>1</dim>
+            </port>
+        </output>
+    <blobs>
+        <custom offset="DATA_CONST_OUTPUT_LOW_OFFSET_2" size="4"/>
+    </blobs>
+</layer>
+<layer name="branch1/dataConstOutputHigh2" type="Const" precision="_PR_" id="110">
+        <output>
+            <port id="0">
+                <dim>1</dim>
+            </port>
+        </output>
+    <blobs>
+        <custom offset="DATA_CONST_OUTPUT_HIGH_OFFSET_2" size="4"/>
+    </blobs>
+</layer>
+
+
+<layer name="branch1/dataFakeQuantize2" type="FakeQuantize" precision="_PR_" id="111">
+    <data levels="256" />
+    <input>
+        <port id="0">
+            <dim>1</dim>
+            <dim>3</dim>
+            <dim>299</dim>
+            <dim>299</dim>
+        </port>
+        <port id="1">
+            <dim>1</dim>
+        </port>
+        <port id="2">
+            <dim>1</dim>
+        </port>
+        <port id="3">
+            <dim>1</dim>
+        </port>
+        <port id="4">
+            <dim>1</dim>
+        </port>
+    </input>
+    <output>
+        <port id="5">
+            <dim>1</dim>
+            <dim>3</dim>
+            <dim>299</dim>
+            <dim>299</dim>
+        </port>
+    </output>
+</layer>
+
+<layer name="branch1/concat" type="Concat" precision="_PR_" id="113">
+    <data axis="1" />
+    <input>
+        <port id="0">
+            <dim>1</dim>
+            <dim>3</dim>
+            <dim>299</dim>
+            <dim>299</dim>
+        </port>
+        <port id="1">
+            <dim>1</dim>
+            <dim>3</dim>
+            <dim>299</dim>
+            <dim>299</dim>
+        </port>
+
+    </input>
+    <output>
+        <port id="2">
+            <dim>1</dim>
+            <dim>6</dim>
+            <dim>299</dim>
+            <dim>299</dim>
+        </port>
+    </output>
+</layer>
+
+<layer name="branch2/dataConstInputLow3" type="Const" precision="_PR_" id="207">
+        <output>
+            <port id="0">
+                <dim>1</dim>
+            </port>
+        </output>
+    <blobs>
+        <custom offset="DATA_CONST_INPUT_LOW_OFFSET_3" size="4"/>
+    </blobs>
+</layer>
+<layer name="branch2/dataConstInputHigh3" type="Const" precision="_PR_" id="208">
+        <output>
+            <port id="0">
+                <dim>1</dim>
+            </port>
+        </output>
+    <blobs>
+        <custom offset="DATA_CONST_INPUT_HIGHT_OFFSET_3" size="4"/>
+    </blobs>
+</layer>
+
+<layer name="branch2/dataConstOutputLow3" type="Const" precision="_PR_" id="209">
+        <output>
+            <port id="0">
+                <dim>1</dim>
+            </port>
+        </output>
+    <blobs>
+        <custom offset="DATA_CONST_OUTPUT_LOW_OFFSET_3" size="4"/>
+    </blobs>
+</layer>
+<layer name="branch2/dataConstOutputHigh3" type="Const" precision="_PR_" id="210">
+        <output>
+            <port id="0">
+                <dim>1</dim>
+            </port>
+        </output>
+    <blobs>
+        <custom offset="DATA_CONST_OUTPUT_HIGH_OFFSET_3" size="4"/>
+    </blobs>
+</layer>
+
+
+<layer name="branch2/dataFakeQuantize3" type="FakeQuantize" precision="_PR_" id="211">
+    <data levels="256" />
+    <input>
+        <port id="0">
+            <dim>1</dim>
+            <dim>6</dim>
+            <dim>299</dim>
+            <dim>299</dim>
+        </port>
+        <port id="1">
+            <dim>1</dim>
+        </port>
+        <port id="2">
+            <dim>1</dim>
+        </port>
+        <port id="3">
+            <dim>1</dim>
+        </port>
+        <port id="4">
+            <dim>1</dim>
+        </port>
+    </input>
+    <output>
+        <port id="5">
+            <dim>1</dim>
+            <dim>6</dim>
+            <dim>299</dim>
+            <dim>299</dim>
+        </port>
+    </output>
+</layer>
+
+
+<layer name="branch2/weightsConstInput" type="Const" precision="_PR_" id="212">
+    <output>
+        <port id="0">
+            <dim>6</dim>
+            <dim>6</dim>
+            <dim>3</dim>
+            <dim>3</dim>
+        </port>
+    </output>
+    <blobs>
+        <custom offset="WEIGHTS_CONST_INPUT_OFFSET" size="WEIGHTS_CONST_INPUT_SIZE"/>
+    </blobs>
+</layer>
+<layer name="branch2/weightsConstInputLow" type="Const" precision="_PR_" id="213">
+    <output>
+        <port id="0">
+            <dim>1</dim>
+        </port>
+    </output>
+    <blobs>
+        <custom offset="WEIGHTS_CONST_INPUT_LOW_OFFSET" size="4"/>
+    </blobs>
+</layer>
+<layer name="branch2/weightsConstInputHigh" type="Const" precision="_PR_" id="214">
+    <output>
+        <port id="0">
+            <dim>1</dim>
+        </port>
+    </output>
+    <blobs>
+        <custom offset="WEIGHTS_CONST_INPUT_HIGHT_OFFSET" size="4"/>
+    </blobs>
+</layer>
+
+<layer name="branch2/weightsConstOutputLow" type="Const" precision="_PR_" id="215">
+    <output>
+    <port id="0">
+        <dim>1</dim>
+    </port>
+    </output>
+    <blobs>
+        <custom offset="WEIGHTS_CONST_OUTPUT_LOW_OFFSET" size="4"/>
+    </blobs>
+</layer>
+<layer name="branch2/weightsConstOutputHigh" type="Const" precision="_PR_" id="216">
+    <output>
+        <port id="0">
+            <dim>1</dim>
+        </port>
+    </output>
+    <blobs>
+        <custom offset="WEIGHTS_CONST_OUTPUT_HIGH_OFFSET" size="4"/>
+    </blobs>
+</layer>
+
+
+<layer name="branch2/weightsFakeQuantize" type="FakeQuantize" precision="_PR_" id="218">
+    <data levels="256" />
+    <input>
+        <port id="0">
+            <dim>6</dim>
+            <dim>6</dim>
+            <dim>3</dim>
+            <dim>3</dim>
+        </port>
+        <port id="1">
+            <dim>1</dim>
+        </port>
+        <port id="2">
+            <dim>1</dim>
+        </port>
+        <port id="3">
+            <dim>1</dim>
+        </port>
+        <port id="4">
+            <dim>1</dim>
+        </port>
+    </input>
+    <output>
+        <port id="5">
+            <dim>6</dim>
+            <dim>6</dim>
+            <dim>3</dim>
+            <dim>3</dim>
+        </port>
+    </output>
+</layer>
+
+<layer name="branch2/biasesConst" type="Const" precision="_PR_" id="219">
+    <output>
+        <port id="0">
+            <dim>6</dim>
+        </port>
+    </output>
+    <blobs>
+        <custom offset="BIASES_CONST_OFFSET" size="BIASES_CONST_SIZE"/>
+    </blobs>
+</layer>
+
+
+<layer name="branch2/convolution" precision="_PR_" type="Convolution" id="220">
+                <data auto_pad="valid" dilations="1,1" group="1" kernel="3,3" output="6" pads_begin="0,0" pads_end="0,0" strides="1,1"/>
+                <input>
+                        <port id="0">
+                                <dim>1</dim>
+                                <dim>6</dim>
+                                <dim>299</dim>
+                                <dim>299</dim>
+                        </port>
+                        <port id="1">
+                                <dim>6</dim>
+                                <dim>6</dim>
+                                <dim>3</dim>
+                                <dim>3</dim>
+                        </port>
+                        <port id="2">
+                                <dim>6</dim>
+                        </port>
+                </input>
+                <output>
+                        <port id="3">
+                                <dim>1</dim>
+                                <dim>6</dim>
+                                <dim>299</dim>
+                                <dim>299</dim>
+                        </port>
+                </output>
+        </layer>
+
+<layer name="branch2/dataConstInputLow4" type="Const" precision="_PR_" id="222">
+    <output>
+        <port id="0">
+            <dim>1</dim>
+        </port>
+    </output>
+    <blobs>
+        <custom offset="DATA_CONST_INPUT_LOW_OFFSET_4" size="4"/>
+    </blobs>
+</layer>
+<layer name="branch2/dataConstInputHigh4" type="Const" precision="_PR_" id="223">
+    <output>
+        <port id="0">
+            <dim>1</dim>
+        </port>
+    </output>
+    <blobs>
+        <custom offset="DATA_CONST_INPUT_HIGHT_OFFSET_4" size="4"/>
+    </blobs>
+</layer>
+
+<layer name="branch2/dataConstOutputLow4" type="Const" precision="_PR_" id="224">
+    <output>
+        <port id="0">
+            <dim>1</dim>
+        </port>
+    </output>
+    <blobs>
+        <custom offset="DATA_CONST_OUTPUT_LOW_OFFSET_4" size="4"/>
+    </blobs>
+</layer>
+<layer name="branch2/dataConstOutputHigh4" type="Const" precision="_PR_" id="225">
+    <output>
+        <port id="0">
+            <dim>1</dim>
+        </port>
+    </output>
+    <blobs>
+        <custom offset="DATA_CONST_OUTPUT_HIGH_OFFSET_4" size="4"/>
+    </blobs>
+</layer>
+
+<layer name="branch2/dataFakeQuantize4" type="FakeQuantize" precision="_PR_" id="226">
+    <data levels="256" />
+    <input>
+        <port id="0">
+            <dim>1</dim>
+            <dim>6</dim>
+            <dim>299</dim>
+            <dim>299</dim>
+        </port>
+        <port id="1">
+            <dim>1</dim>
+        </port>
+        <port id="2">
+            <dim>1</dim>
+        </port>
+        <port id="3">
+            <dim>1</dim>
+        </port>
+        <port id="4">
+            <dim>1</dim>
+        </port>
+    </input>
+    <output>
+        <port id="5">
+            <dim>1</dim>
+            <dim>6</dim>
+            <dim>299</dim>
+            <dim>299</dim>
+        </port>
+    </output>
+</layer>
+
+<layer name="branch2/concat" type="Concat" precision="_PR_" id="227">
+    <input>
+        <port id="0">
+            <dim>1</dim>
+            <dim>6</dim>
+            <dim>299</dim>
+            <dim>299</dim>
+        </port>
+        <port id="1">
+            <dim>1</dim>
+            <dim>6</dim>
+            <dim>299</dim>
+            <dim>299</dim>
+        </port>
+
+    </input>
+    <output>
+        <port id="2">
+            <dim>1</dim>
+            <dim>12</dim>
+            <dim>299</dim>
+            <dim>299</dim>
+        </port>
+    </output>
+</layer>
+
+
+<layer name="outputPower" type="Power" precision="_PR_" id="300">
+    <power_data power="1" scale="1" shift="0"/>
+    <input>
+        <port id="0">
+            <dim>1</dim>
+            <dim>12</dim>
+            <dim>299</dim>
+            <dim>299</dim>
+        </port>
+    </input>
+    <output>
+        <port id="1">
+            <dim>1</dim>
+            <dim>12</dim>
+            <dim>299</dim>
+            <dim>299</dim>
+        </port>
+    </output>
+</layer>
+
+)V0G0N";
\ No newline at end of file
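
A note on the pattern used throughout getModel() above: all Const blobs live in one shared weights buffer, and each *_OFFSET placeholder in the IR template is rewritten to the current running byte offset, which then advances by that blob's size (4 bytes per FP32 scalar, 6 * 6 * 3 * 3 * 4 bytes for the convolution weights, and so on). A minimal self-contained sketch of that substitution; replaceWithNum below is a stand-in for the repository's REPLACE_WITH_NUM macro, assumed for illustration:

    #include <cstddef>
    #include <iostream>
    #include <string>

    // Replace every occurrence of 'token' in 'text' with the decimal value.
    static void replaceWithNum(std::string& text, const std::string& token, size_t value) {
        const std::string num = std::to_string(value);
        for (size_t pos = text.find(token); pos != std::string::npos; pos = text.find(token, pos + num.size())) {
            text.replace(pos, token.size(), num);
        }
    }

    int main() {
        std::string ir = R"(<custom offset="LOW_OFFSET" size="4"/><custom offset="HIGH_OFFSET" size="4"/>)";
        size_t totalOffset = 0;                          // running byte offset into the shared blob
        replaceWithNum(ir, "LOW_OFFSET", totalOffset);
        totalOffset += 4;                                // one FP32 scalar consumed
        replaceWithNum(ir, "HIGH_OFFSET", totalOffset);
        std::cout << ir << std::endl;                    // offsets resolve to 0 and 4
    }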
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/concat_multi_channels_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/concat_multi_channels_test.cpp
new file mode 100644 (file)
index 0000000..221b9f6
--- /dev/null
@@ -0,0 +1,77 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+#include "low_precision_transformations/eltwise.hpp"
+#include "low_precision_transformations/concat_multi_channels.hpp"
+
+std::string ConcatMultiChannelTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+//    ASSERT_EQ(2, p.inputDimensions.size());
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    const size_t axis = 1; // should be passed in 'p' argument
+
+    std::vector<size_t> concat_out_dims = p.inputDimensions[0];
+    concat_out_dims[axis] += p.inputDimensions[1][axis];
+
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = {
+        {"levels", "256"}
+    };
+    std::map<std::string, std::string> concat_params = {
+        {"axis", "1"}
+    };
+    std::map<std::string, std::string> power_params = {
+        {"power", "1"}, {"scale", "1"}, {"shift", "0"}
+    };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "10,10"}, {"1,1", "11,16"}, // Inputs to FakeQuantize
+        {"2,2", "10,11"}, {"3,3", "10,12"}, {"4,4", "10,13"}, {"5,5", "10,14"}, // Const layers
+        {"6,6", "11,17"}, {"7,7", "11,18"}, {"8,8", "11,19"}, {"9,9", "11,20"}, // Const layers
+        {"10,15", "12,22"}, {"11,21", "12,23"} // FakeQuantize to Concat
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput(
+            "Concat_transformations_", p.inputDimensions[0], p._network_precision)
+        .addInputLayer(p._network_precision, p.inputDimensions[1])
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}}})
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{p.inputDimensions[1], {1}, {1}, {1}, {1}}, {{p.inputDimensions[1]}}})
+        .addLayer("Concat", p._network_precision, &concat_params, { {p.inputDimensions[0], p.inputDimensions[1]}, { concat_out_dims }})
+        .finish(&edges);
+}
+
+std::string ConcatMultiChannelTestModel::getName() const {
+    return "ConcatMultiChannelTestModel";
+}
+
+bool ConcatMultiChannelTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer(LowPrecisionTransformer::getAllTransformations(params).
+        addBranchSpecific<ConcatMultiChannelsTransformation>(params, "Concat")
+    );
+    transformer.transform(network);
+    return true;
+}
+
+void ConcatMultiChannelTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "Const2"), 0.0, "custom");
+    fillData(getLayer(network, "Const3"), 255.0 / 10.0, "custom");
+    fillData(getLayer(network, "Const4"), 0.0, "custom");
+    fillData(getLayer(network, "Const5"), 255.0 / 10.0, "custom");
+
+    fillData(getLayer(network, "Const6"), -255.0 / 400.0, "custom");
+    fillData(getLayer(network, "Const7"), 255.0 / 200.0, "custom");
+    fillData(getLayer(network, "Const8"), -255.0 / 400.0, "custom");
+    fillData(getLayer(network, "Const9"), 255.0 / 200.0, "custom");
+}
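
The FakeQuantize layers these models build (levels=256) clamp each value to the input interval, snap it to one of levels-1 equal steps, and rescale into the output interval. A scalar reference implementation following the OpenVINO operation specification; an illustrative sketch, not code from the test suite:

    #include <algorithm>
    #include <cassert>
    #include <cmath>

    float fakeQuantize(float x, float inLow, float inHigh, float outLow, float outHigh, int levels) {
        const float clamped = std::min(std::max(x, inLow), inHigh);                      // clamp to input range
        const float q = std::round((clamped - inLow) / (inHigh - inLow) * (levels - 1)); // integer level
        return q / (levels - 1) * (outHigh - outLow) + outLow;                           // rescale to output range
    }

    int main() {
        // With the [0, 255/10] interval filled by resetTransformation() above, the
        // quantization step is 0.1, so 1.3 survives the round trip almost exactly.
        const float y = fakeQuantize(1.3f, 0.f, 25.5f, 0.f, 25.5f, 256);
        assert(std::fabs(y - 1.3f) < 1e-5f);
    }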
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/concat_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/concat_test.cpp
new file mode 100644 (file)
index 0000000..a326655
--- /dev/null
@@ -0,0 +1,167 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+#include "low_precision_transformations/concat.hpp"
+#include "low_precision_transformations/eltwise.hpp"
+
+ConcatTestModel::ConcatTestModel(
+    const bool signedIntervals,
+    const bool symmetricInterval,
+    const bool multiChannel,
+    const std::vector<size_t>& constInputDimentions) :
+    signedIntervals(signedIntervals),
+    symmetricInterval(symmetricInterval),
+    multiChannel(multiChannel),
+    constInputDimentions(constInputDimentions) {}
+
+std::string ConcatTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+//    ASSERT_EQ(2, p.inputDimensions.size());
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    const size_t axis = 1; // should be passed in 'p' argument
+
+    std::vector<size_t> concat_out_dims = p.inputDimensions[0];
+    concat_out_dims[axis] += p.inputDimensions[1][axis];
+
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = {{"levels", "256"}};
+    std::map<std::string, std::string> concat_params = {{"axis", "1"}};
+    std::map<std::string, std::string> power_params = { {"power", "1"}, {"scale", "1"}, {"shift", "0"} };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "10,10"}, {"1,1", "11,16"}, // Inputs to FakeQuantize
+        {"2,2", "10,11"}, {"3,3", "10,12"}, {"4,4", "10,13"}, {"5,5", "10,14"}, // Const layers
+        {"6,6", "11,17"}, {"7,7", "11,18"}, {"8,8", "11,19"}, {"9,9", "11,20"}, // Const layers
+        {"10,15", "12,22"}, {"11,21", "12,23"} // FakeQuantize to Concat
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput(
+            "Concat_transformations_", p.inputDimensions[0], p._network_precision)
+        .addInputLayer(p._network_precision, p.inputDimensions[1])
+        .addLayer("Const", p._network_precision, &const_params, {{}, {constInputDimentions}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {constInputDimentions}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {constInputDimentions}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {constInputDimentions}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {constInputDimentions}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {constInputDimentions}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {constInputDimentions}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {constInputDimentions}}, type_size, 0)
+        .addLayer(
+            "FakeQuantize",
+            p._network_precision,
+            &fake_quantize_params,
+            {{p.inputDimensions[0], constInputDimentions, constInputDimentions, constInputDimentions, constInputDimentions}, {{p.inputDimensions[0]}}},
+            "fakeQuantize1")
+        .addLayer(
+            "FakeQuantize",
+            p._network_precision,
+            &fake_quantize_params,
+            {{p.inputDimensions[1], constInputDimentions, constInputDimentions, constInputDimentions, constInputDimentions}, {{p.inputDimensions[1]}}},
+            "fakeQuantize2")
+        .addLayer("Concat", p._network_precision, &concat_params, { {p.inputDimensions[0], p.inputDimensions[1]}, { concat_out_dims }}, "concat")
+        .finish(&edges);
+}
+
+std::string ConcatTestModel::getName() const {
+    return std::string("ConcatTestModel") +
+        (signedIntervals ? "_Signed" : "_Unsigned") +
+        (symmetricInterval ? "_Symmetric" : "_Asymmetric") +
+        (multiChannel ? "_MultiChannel" : "_OneChannel") +
+        (constInputDimentions.size() == 1ul ? "" : ("_const" + std::to_string(constInputDimentions.size()) + "D"));
+}
+
+bool ConcatTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformations transformations = getLowPrecisionTransformations(params);
+
+    if (!multiChannel) {
+        // avoid ConcatMultiChannelsTransformation
+        transformations = transformations.
+            removeBranchSpecificTransformations("Concat").
+            addBranchSpecific<ConcatTransformation>(params, "Concat");
+    }
+
+    LowPrecisionTransformer transformer(transformations);
+    transformer.transform(network);
+
+    const CNNLayerPtr concatLayer = network.getLayerByName("concat");
+    if (concatLayer == nullptr) {
+        THROW_IE_EXCEPTION << "concat layer was not found";
+    }
+
+    const std::vector<size_t> dims = concatLayer->outData[0]->getDims();
+    if (dims.size() == 4ul) {
+        const CNNLayerPtr fakeQuantizeLayer1 = network.getLayerByName("fakeQuantize1");
+        QuantizeLayer* fakeQuantize1 = dynamic_cast<QuantizeLayer*>(fakeQuantizeLayer1.get());
+        if (fakeQuantize1 == nullptr) {
+            THROW_IE_EXCEPTION << "incorrect type for layer " << fakeQuantizeLayer1->name;
+        }
+        if (fakeQuantize1->levels == 0) {
+            // TODO: validation of the transformed 'levels' value is not implemented
+        }
+
+        const CNNLayerPtr fakeQuantizeLayer2 = network.getLayerByName("fakeQuantize2");
+        QuantizeLayer* fakeQuantize2 = dynamic_cast<QuantizeLayer*>(fakeQuantizeLayer2.get());
+        if (fakeQuantize2 == nullptr) {
+            THROW_IE_EXCEPTION << "incorrect type for layer " << fakeQuantizeLayer2->name;
+        }
+        if (fakeQuantize2->levels == 0) {
+            // TODO: validation of the transformed 'levels' value is not implemented
+        }
+    } else if (dims.size() == 2ul) {
+        if (concatLayer->outData[0]->getInputTo().size() != 0ul) {
+            THROW_IE_EXCEPTION << "2D is not supported";
+        }
+    }
+    return true;
+}
+
+void ConcatTestModel::resetTransformation(CNNNetwork& network) const {
+    const float intervalsCoefficient = 0.5f;
+    if (signedIntervals) {
+        const float symmetricCoefficient = symmetricInterval ? 1.f : 0.5f;
+        fillData(getLayer(network, "Const2"), (-128.f / 20.0) * symmetricCoefficient * intervalsCoefficient, "custom");
+        fillData(getLayer(network, "Const3"), (127.f / 20.0) * symmetricCoefficient * intervalsCoefficient, "custom");
+        fillData(getLayer(network, "Const4"), (-128.f / 20.0) * symmetricCoefficient * intervalsCoefficient, "custom");
+        fillData(getLayer(network, "Const5"), (127.f / 20.0) * symmetricCoefficient * intervalsCoefficient, "custom");
+
+        fillData(getLayer(network, "Const6"), (-128.f / 20.0) * symmetricCoefficient, "custom");
+        fillData(getLayer(network, "Const7"), 127.f / 20.0, "custom");
+        fillData(getLayer(network, "Const8"), (-128.f / 20.0) * symmetricCoefficient, "custom");
+        fillData(getLayer(network, "Const9"), 127.f / 20.0, "custom");
+
+    } else {
+        const float shift = symmetricInterval ? 0.f : (255.f / 20.0) / 4.f;
+        fillData(getLayer(network, "Const2"), (0.0 + shift) * intervalsCoefficient, "custom");
+        fillData(getLayer(network, "Const3"), (255.f / 20.0) * intervalsCoefficient, "custom");
+        fillData(getLayer(network, "Const4"), (0.0 + shift) * intervalsCoefficient, "custom");
+        fillData(getLayer(network, "Const5"), (255.f / 20.0) * intervalsCoefficient, "custom");
+
+        fillData(getLayer(network, "Const6"), 0.f, "custom");
+        fillData(getLayer(network, "Const7"), 255.f / 20.0, "custom");
+        fillData(getLayer(network, "Const8"), 0.f, "custom");
+        fillData(getLayer(network, "Const9"), 255.f / 20.0, "custom");
+    }
+}
+
+float ConcatTestModel::getThreshold(const std::string& device_name, const Precision precision, LayerTransformation::Params& params) const {
+    if (device_name == "CPU") {
+        if (params.updatePrecisions) {
+            // FakeQuantize intervals are rounded in INT8 and, as a result, the threshold is increased
+            return 0.0250001f;
+        }
+    }
+
+    if (device_name == "GPU") {
+        if (precision == Precision::FP32) {
+            return 0.00200001f;
+        } else {
+            return 0.00062f;
+        }
+    }
+
+    return precision == Precision::FP16 ? 0.0005f : 0.0003f;
+}
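
A note on the intervals chosen in resetTransformation() above: the signed case spans [-128/20, 127/20] and the unsigned case [0, 255/20]; both cover 255 quantization steps, so they describe the same per-step scale of 1/20. A quick check of that arithmetic, illustrative only:

    #include <cassert>
    #include <cmath>

    int main() {
        const float signedLow = -128.f / 20.f, signedHigh = 127.f / 20.f; // int8 limits / 20
        const float unsignedLow = 0.f, unsignedHigh = 255.f / 20.f;       // uint8 limits / 20
        const float signedScale = (signedHigh - signedLow) / 255.f;
        const float unsignedScale = (unsignedHigh - unsignedLow) / 255.f;
        assert(std::fabs(signedScale - unsignedScale) < 1e-6f);           // both are 0.05
    }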
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_dequantization_scaleshift_and_quantize_on_activations_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_dequantization_scaleshift_and_quantize_on_activations_test.cpp
new file mode 100644 (file)
index 0000000..8c8a293
--- /dev/null
@@ -0,0 +1,64 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+std::string ConvolutionAndDequantizationScaleShiftAndQuantizeOnActivationsTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(float);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(short);
+
+    CommonTestUtils::conv_common_params conv =
+            { {2, 2}, {3, 3}, {0, 0}, {0, 0}, {1, 1}, "", 1, 32, true, true };
+    std::vector<size_t> convOutShape(p.inputDimensions[0].size());
+    getConvOutShape(p.inputDimensions[0], conv, convOutShape);
+
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = {
+        {"levels", "256"}
+    };
+    std::map<std::string, std::string> power_params = {
+        {"power", "1"}, {"scale", "1"}, {"shift", "0"}
+    };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, {"1,2", "6,7"},
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // Const layers
+        {"6,12", "7,13"}, // Fake quantize to Convolution
+        {"7,14", "8,15"} // Convolution to Power
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput(
+            "dequantizeScaleShift_", p.inputDimensions[0], p._network_precision)
+        .addLayer("ScaleShift", p._network_precision, &const_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}}, p.inputDimensions[0][1] * type_size, p.inputDimensions[0][1] * type_size)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}}})
+        .convolutionLayer(p._network_precision, {{p.inputDimensions[0]}, {convOutShape}}, conv)
+        .addLayer("Power", p._network_precision, &power_params, {{convOutShape}, {convOutShape}})
+        .finish(&edges);
+}
+
+std::string ConvolutionAndDequantizationScaleShiftAndQuantizeOnActivationsTestModel::getName() const {
+    return "ConvolutionAndDequantizationScaleShiftAndQuantizeOnActivationsTestModel";
+}
+
+bool ConvolutionAndDequantizationScaleShiftAndQuantizeOnActivationsTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer = getLowPrecisionTransformer(params);
+    transformer.transform(network);
+    return true;
+}
+
+void ConvolutionAndDequantizationScaleShiftAndQuantizeOnActivationsTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "ScaleShift1"), 3, "weights");
+    fillData(getLayer(network, "ScaleShift1"), 5, "biases");
+    fillData(getLayer(network, "Const2"), -128.0, "custom");
+    fillData(getLayer(network, "Const3"), 127.0, "custom");
+    fillData(getLayer(network, "Const4"), -128.0, "custom");
+    fillData(getLayer(network, "Const5"), 127.0, "custom");
+    fillDataWithInitValue(getLayer(network, "Convolution7"), "weights", 1.234);
+    fillDataWithInitValue(getLayer(network, "Convolution7"), "biases", 5.678);
+}
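
The dequantization ScaleShift in this model, once resetTransformation() fills weights=3 and biases=5, applies the affine map y = 3*x + 5 ahead of the FakeQuantize. A minimal sketch of that layer semantics, with the single fill value broadcast over all channels; not Inference Engine code:

    #include <cassert>
    #include <vector>

    std::vector<float> scaleShift(const std::vector<float>& x, float weight, float bias) {
        std::vector<float> y(x.size());
        for (size_t i = 0; i < x.size(); ++i)
            y[i] = weight * x[i] + bias; // elementwise affine map
        return y;
    }

    int main() {
        const auto y = scaleShift({1.f, 2.f}, 3.f, 5.f); // the fill values above
        assert(y[0] == 8.f && y[1] == 11.f);
    }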
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_dequantization_scaleshifts_on_activations_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_dequantization_scaleshifts_on_activations_test.cpp
new file mode 100644 (file)
index 0000000..1a6ca54
--- /dev/null
@@ -0,0 +1,49 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+std::string ConvolutionAndDequantizationScaleShiftsOnActivationsTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    std::map<std::string, std::string> scale_shift_params = {};
+    CommonTestUtils::conv_common_params conv =
+            { {1, 1}, {3, 3}, {0, 0}, {0, 0}, {1, 1}, "", 1, 32, true, true };
+    std::vector<size_t> convOutShape(p.inputDimensions[0].size());
+    getConvOutShape(p.inputDimensions[0], conv, convOutShape);
+
+    std::map<std::string, std::string> power_params = {
+        {"power", "1"}, {"scale", "1"}, {"shift", "0"}
+    };
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, {"1,2", "2,3"}, {"2,4", "3,5"}
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput(
+            "Conv_ScaleShift_transformations", p.inputDimensions[0], p._network_precision)
+        .addLayer("ScaleShift", p._network_precision, &scale_shift_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}}, p.inputDimensions[0][1] * type_size, p.inputDimensions[0][1] * type_size)
+        .convolutionLayer(p._network_precision, {{p.inputDimensions[0]}, {convOutShape}}, conv)
+        .addLayer("Power", p._network_precision, &power_params, {{convOutShape}, {convOutShape}})
+        .finish(&edges);
+}
+
+std::string ConvolutionAndDequantizationScaleShiftsOnActivationsTestModel::getName() const {
+    return "ConvolutionAndDequantizationScaleShiftsOnActivationsTestModel";
+}
+
+bool ConvolutionAndDequantizationScaleShiftsOnActivationsTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer = getLowPrecisionTransformer(params);
+    transformer.transform(network);
+    return true;
+}
+
+void ConvolutionAndDequantizationScaleShiftsOnActivationsTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "ScaleShift1"), 3.f, "weights");
+    fillData(getLayer(network, "ScaleShift1"), 4.f, "biases");
+
+    fillDataWithInitValue(getLayer(network, "Convolution2"), "weights", 1.234f);
+    fillDataWithInitValue(getLayer(network, "Convolution2"), "biases", 5.678f);
+}
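
Why a dequantization ScaleShift sitting directly on a Convolution's activations is a transformation target: the affine input map x' = s*x + b can be absorbed into the convolution, since conv(W, s*x + b) equals conv(s*W, x) plus a constant bias contribution. Shown below for one output position reduced to a dot product, using this model's fill values s=3, b=4; a sketch of the algebra only, not the transformation code:

    #include <cassert>
    #include <cmath>
    #include <vector>

    int main() {
        const std::vector<float> w = {1.f, 2.f, 3.f}; // weights across input channels
        const std::vector<float> x = {4.f, 5.f, 6.f}; // activations at one position
        const float s = 3.f, b = 4.f;                 // ScaleShift1 weights/biases above

        float direct = 0.f, folded = 0.f, foldedBias = 0.f;
        for (size_t c = 0; c < w.size(); ++c) {
            direct += w[c] * (s * x[c] + b); // ScaleShift applied first, then convolved
            folded += (w[c] * s) * x[c];     // scale folded into the weights
            foldedBias += w[c] * b;          // shift folded into the bias
        }
        assert(std::fabs(direct - (folded + foldedBias)) < 1e-4f); // both give 120
    }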
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_pooling_and_quantize_on_activations_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_pooling_and_quantize_on_activations_test.cpp
new file mode 100644 (file)
index 0000000..8210bbb
--- /dev/null
@@ -0,0 +1,65 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+std::string ConvolutionAndPoolingAndQuantizeOnActivationsTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    CommonTestUtils::pool_common_params pooling =
+            { {2, 2}, {3, 3}, {0, 0}, {0, 0}, "valid", false, true };
+    std::vector<size_t> poolOutShape(p.inputDimensions[0].size());
+    getPoolOutShape(p.inputDimensions[0], pooling, poolOutShape);
+
+    CommonTestUtils::conv_common_params conv =
+            { {1, 1}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, "valid", 1, 80, true, true };
+    std::vector<size_t> convOutShape(poolOutShape.size());
+    getConvOutShape(poolOutShape, conv, convOutShape);
+
+    std::map<std::string, std::string> power_params = {
+        {"power", "1"}, {"scale", "1"}, {"shift", "0"}
+    };
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = {
+        {"levels", "256"}
+    };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "5,5"},  // FQ
+        {"1,1", "5,6"}, {"2,2", "5,7"}, {"3,3", "5,8"}, {"4,4", "5,9"}, // const
+        {"5,10", "6,11"}, {"6,12", "7,13"} // Pool, Conv
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput(
+            "Conv_ScaleShift_transformations", p.inputDimensions[0], p._network_precision)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}}})
+        .poolingLayer(p._network_precision, {{p.inputDimensions[0]}, {poolOutShape}}, pooling)
+        .convolutionLayer(p._network_precision, {{poolOutShape}, {convOutShape}}, conv)
+        .finish(&edges);
+}
+
+std::string ConvolutionAndPoolingAndQuantizeOnActivationsTestModel::getName() const {
+    return "ConvolutionAndPoolingAndQuantizeOnActivationsTestModel";
+}
+
+bool ConvolutionAndPoolingAndQuantizeOnActivationsTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer = getLowPrecisionTransformer(params);
+    transformer.transform(network);
+    return true;
+}
+
+void ConvolutionAndPoolingAndQuantizeOnActivationsTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "Const1"), -128.f / 20.f, "custom");
+    fillData(getLayer(network, "Const2"), 127.f / 20.f, "custom");
+    fillData(getLayer(network, "Const3"), -128.f / 20.f, "custom");
+    fillData(getLayer(network, "Const4"), 127.f / 20.f, "custom");
+    fillDataWithInitValue(getLayer(network, "Convolution7"), "weights", 1.234f);
+    fillDataWithInitValue(getLayer(network, "Convolution7"), "biases", 5.678f);
+}
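
getPoolOutShape() and getConvOutShape() used above both come down to the same window arithmetic for 'valid' padding: out = (in - kernel) / stride + 1 per spatial dimension (the helpers come from the shared test utilities; their exact signatures are assumed here). A quick check with this model's 3x3, stride-2 pooling followed by the 1x1, stride-1 convolution, for an assumed 299x299 input:

    #include <cassert>
    #include <cstddef>

    size_t windowOutDim(size_t in, size_t kernel, size_t stride) {
        return (in - kernel) / stride + 1; // 'valid' padding, dilation 1
    }

    int main() {
        assert(windowOutDim(299, 3, 2) == 149); // e.g. 299x299 shrinks to 149x149 after pooling
        assert(windowOutDim(149, 1, 1) == 149); // the 1x1 convolution keeps the spatial size
    }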
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_activations_and_weights_simple_base_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_activations_and_weights_simple_base_test.cpp
new file mode 100644 (file)
index 0000000..c84fe27
--- /dev/null
@@ -0,0 +1,54 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+std::string ConvolutionAndQuantizeOnActivationsAndWeightsBaseTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    CommonTestUtils::conv_common_params conv =
+            { {1, 1}, {3, 3}, {0, 0}, {0, 0}, {1, 1}, "valid", 1, 32, false, false };
+
+    std::vector<size_t> convOutShape(p.inputDimensions[0].size());
+    getConvOutShape(p.inputDimensions[0], conv, convOutShape);
+
+    std::vector<size_t> weightsConstInputDims = { 32lu, 32lu, 3lu, 3lu };
+    std::vector<size_t> biasesConvolutionConstDims = { conv.out_c };
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = {
+        {"levels", "256"}
+    };
+    std::map<std::string, std::string> power_params = {
+        {"power", "1"}, {"scale", "1"}, {"shift", "0"}
+    };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, {"1,2", "6,7"}, // Power
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // Const layers
+        {"7,13", "12,18"}, {"8,14", "12,19"}, {"9,15", "12,20"}, {"10,16", "12,21"}, {"11,17", "12,22"}, // Const layers
+        {"6,12", "14,25"},  {"12,23", "14,26"}, // Fake quantize to Conv
+        {"13,24", "14,27"} // biases to Conv
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput(
+            "QuantizationOnWeights", p.inputDimensions[0], p._network_precision)
+        .addLayer("Power", p._network_precision, &power_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}})
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}}})
+        .addLayer("Const", p._network_precision, &const_params, {{}, {weightsConstInputDims}},
+                std::accumulate(weightsConstInputDims.begin(), weightsConstInputDims.end(), 1lu, std::multiplies<size_t>()) * type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{weightsConstInputDims, {1}, {1}, {1}, {1}}, {{weightsConstInputDims}}})
+        .addLayer("Const", p._network_precision, &const_params, {{}, {biasesConvolutionConstDims}}, type_size * conv.out_c, 0)
+        .convolutionLayer(p._network_precision, {{p.inputDimensions[0], weightsConstInputDims, biasesConvolutionConstDims }, {convOutShape}}, conv)
+        .finish(&edges);
+}
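
The weights Const layer above sizes its blob as the product of its dimensions times the element size via std::accumulate; for the 32x32x3x3 FP32 kernel that is 36864 bytes. A quick check of that arithmetic, illustrative only:

    #include <cassert>
    #include <cstddef>
    #include <functional>
    #include <numeric>
    #include <vector>

    int main() {
        const std::vector<size_t> weightsConstInputDims = {32, 32, 3, 3};
        const size_t typeSize = 4; // FP32; 2 for FP16
        const size_t bytes = std::accumulate(weightsConstInputDims.begin(), weightsConstInputDims.end(),
                                             size_t{1}, std::multiplies<size_t>()) * typeSize;
        assert(bytes == 36864);
    }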
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_activations_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_activations_test.cpp
new file mode 100644 (file)
index 0000000..87b0475
--- /dev/null
@@ -0,0 +1,60 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+std::string ConvolutionAndQuantizeOnActivationsTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    CommonTestUtils::conv_common_params conv =
+            { {2, 2}, {3, 3}, {0, 0}, {0, 0}, {1, 1}, "", 1, 32, true, true };
+    std::vector<size_t> convOutShape(p.inputDimensions[0].size());
+    getConvOutShape(p.inputDimensions[0], conv, convOutShape);
+
+    std::map<std::string, std::string> power_params = {
+        {"power", "1"}, {"scale", "1"}, {"shift", "0"}
+    };
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = {
+        {"levels", "256"}
+    };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "5,5"},  // FQ
+        {"1,1", "5,6"}, {"2,2", "5,7"}, {"3,3", "5,8"}, {"4,4", "5,9"}, // const
+        {"5,10", "6,11"}, {"6,12", "7,13"} // Pool, Conv, power
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput(
+            "Conv_ScaleShift_transformations", p.inputDimensions[0], p._network_precision)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}}})
+        .convolutionLayer(p._network_precision, {{p.inputDimensions[0]}, {convOutShape}}, conv)
+        .addLayer("Power", p._network_precision, &power_params, {{convOutShape}, {convOutShape}})
+        .finish(&edges);
+}
+
+std::string ConvolutionAndQuantizeOnActivationsTestModel::getName() const {
+    return "ConvolutionAndQuantizeOnActivationsTestModel";
+}
+
+bool ConvolutionAndQuantizeOnActivationsTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer = getLowPrecisionTransformer(params);
+    transformer.transform(network);
+    return true;
+}
+
+void ConvolutionAndQuantizeOnActivationsTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "Const1"), -128.0 / 20.0, "custom");
+    fillData(getLayer(network, "Const2"), 127.0 / 20.0, "custom");
+    fillData(getLayer(network, "Const3"), -128.0 / 20.0, "custom");
+    fillData(getLayer(network, "Const4"), 127.0 / 20.0, "custom");
+    fillDataWithInitValue(getLayer(network, "Convolution6"), "weights", 1.234);
+    fillDataWithInitValue(getLayer(network, "Convolution6"), "biases", 5.678);
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_signed_activations_and_inverted_weights_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_signed_activations_and_inverted_weights_test.cpp
new file mode 100644 (file)
index 0000000..bdf3314
--- /dev/null
@@ -0,0 +1,53 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+void ConvolutionAndQuantizeOnSignedActivationsAndInvertedWeightsTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "Const2"), -128.f / 4.f, "custom");
+    fillData(getLayer(network, "Const3"), 127.f / 4.f, "custom");
+    fillData(getLayer(network, "Const4"), -128.f / 4.f, "custom");
+    fillData(getLayer(network, "Const5"), 127.f / 4.f, "custom");
+
+    fillDataWithInitValue(getLayer(network, "Const7"), "custom", 1.234);
+
+    fillData(getLayer(network, "Const8"), 1.28f, "custom");
+    fillData(getLayer(network, "Const9"), -1.27f, "custom");
+    fillData(getLayer(network, "Const10"), 1.28f, "custom");
+    fillData(getLayer(network, "Const11"), -1.27f, "custom");
+
+    fillDataWithInitValue(getLayer(network, "Const13"), "custom", 2.123f);
+}
+
+std::string ConvolutionAndQuantizeOnSignedActivationsAndInvertedWeightsTestModel::getName() const {
+    return "ConvolutionAndQuantizeOnSignedActivationsAndInvertedWeightsTestModel";
+}
+
+bool ConvolutionAndQuantizeOnSignedActivationsAndInvertedWeightsTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer = getLowPrecisionTransformer(params);
+    transformer.transform(network);
+
+    if (std::any_of(
+        params.precisionsOnActivations.begin(),
+        params.precisionsOnActivations.end(),
+        [](const Precision precision) { return precision == Precision::U8; }) &&
+        params.quantizeOutputs) {
+        CNNLayerPtr scaleShift = CNNNetworkHelper::getLayer(network, "Convolution14");
+        if (scaleShift->type != "ScaleShift") {
+            THROW_IE_EXCEPTION << "unexpected type '" << scaleShift->type << "' of the last output dequantization layer " << scaleShift->name;
+        }
+
+        if (params.updateBiases) {
+            const Blob::Ptr shiftsBlob = CNNNetworkHelper::getBlob(scaleShift, "biases");
+            std::shared_ptr<float> shiftsBuffer = CNNNetworkHelper::getFloatData(shiftsBlob);
+            for (size_t i = 0ul; i < shiftsBlob->size(); ++i) {
+                if (shiftsBuffer.get()[i] != 0.0) {
+                    THROW_IE_EXCEPTION << "unexpected dequantization shift value";
+                }
+            }
+        }
+    }
+
+    return true;
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_signed_activations_and_weights_negative_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_signed_activations_and_weights_negative_test.cpp
new file mode 100644 (file)
index 0000000..bafc5e0
--- /dev/null
@@ -0,0 +1,59 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+void ConvolutionAndQuantizeOnSignedActivationsAndWeightsNegativeTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "Const2"), -128.f / 4.f, "custom");
+    fillData(getLayer(network, "Const3"), 127.f / 4.f, "custom");
+    fillData(getLayer(network, "Const4"), -128.f / 4.f, "custom");
+    fillData(getLayer(network, "Const5"), 127.f / 4.f, "custom");
+
+    fillDataWithInitValue(getLayer(network, "Const7"), "custom", 1.234);
+
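+    // Kept for reference: the commented-out values below match the positive-weights
+    // variant of this test; the active values invert the interval sign instead.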
+    //fillData(getLayer(network, "Const8"), 0.f, "custom");
+    //fillData(getLayer(network, "Const9"), 255.f / 40.f, "custom");
+    //fillData(getLayer(network, "Const10"), 0.f, "custom");
+    //fillData(getLayer(network, "Const11"), 255.f / 40.f, "custom");
+
+    fillData(getLayer(network, "Const8"), -255.f / 40.f, "custom");
+    fillData(getLayer(network, "Const9"), 0.f, "custom");
+    fillData(getLayer(network, "Const10"), -255.f / 40.f, "custom");
+    fillData(getLayer(network, "Const11"), 0.f, "custom");
+
+
+    fillDataWithInitValue(getLayer(network, "Const13"), "custom", 2.123f);
+}
+
+std::string ConvolutionAndQuantizeOnSignedActivationsAndWeightsNegativeTestModel::getName() const {
+    return "ConvolutionAndQuantizeOnSignedActivationsAndWeightsNegativeTestModel";
+}
+
+bool ConvolutionAndQuantizeOnSignedActivationsAndWeightsNegativeTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer = getLowPrecisionTransformer(params);
+    transformer.transform(network);
+
+    if (std::any_of(
+        params.precisionsOnActivations.begin(),
+        params.precisionsOnActivations.end(),
+        [](const Precision precision) { return precision == Precision::U8; }) &&
+        params.quantizeOutputs) {
+        CNNLayerPtr scaleShift = CNNNetworkHelper::getLayer(network, "Convolution14");
+        if (scaleShift->type != "ScaleShift") {
+            THROW_IE_EXCEPTION << "unexpected type '" << scaleShift->type << "' of the last output dequantization layer " << scaleShift->name;
+        }
+
+        if (params.updateBiases) {
+            const Blob::Ptr shiftsBlob = CNNNetworkHelper::getBlob(scaleShift, "biases");
+            std::shared_ptr<float> shiftsBuffer = CNNNetworkHelper::getFloatData(shiftsBlob);
+            for (size_t i = 0ul; i < shiftsBlob->size(); ++i) {
+                if (shiftsBuffer.get()[i] != 0.0) {
+                    THROW_IE_EXCEPTION << "unexpected dequantization shift value";
+                }
+            }
+        }
+    }
+
+    return true;
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_signed_activations_and_weights_positive_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_signed_activations_and_weights_positive_test.cpp
new file mode 100644 (file)
index 0000000..dbc79fe
--- /dev/null
@@ -0,0 +1,53 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+void ConvolutionAndQuantizeOnSignedActivationsAndWeightsPositiveTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "Const2"), -128.f / 4.f, "custom");
+    fillData(getLayer(network, "Const3"), 127.f / 4.f, "custom");
+    fillData(getLayer(network, "Const4"), -128.f / 4.f, "custom");
+    fillData(getLayer(network, "Const5"), 127.f / 4.f, "custom");
+
+    fillDataWithInitValue(getLayer(network, "Const7"), "custom", 1.234);
+
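+    // Weight intervals: the non-negative range [0, 255/40] (the "positive" case).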
+    fillData(getLayer(network, "Const8"), 0.f, "custom");
+    fillData(getLayer(network, "Const9"), 255.f / 40.f, "custom");
+    fillData(getLayer(network, "Const10"), 0.f, "custom");
+    fillData(getLayer(network, "Const11"), 255.f / 40.f, "custom");
+
+    fillDataWithInitValue(getLayer(network, "Const13"), "custom", 2.123f);
+}
+
+std::string ConvolutionAndQuantizeOnSignedActivationsAndWeightsPositiveTestModel::getName() const {
+    return "ConvolutionAndQuantizeOnSignedActivationsAndWeightsPositiveTestModel";
+}
+
+bool ConvolutionAndQuantizeOnSignedActivationsAndWeightsPositiveTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer = getLowPrecisionTransformer(params);
+    transformer.transform(network);
+
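+    // Same post-condition as the negative variant: the last output layer must
+    // be a dequantization ScaleShift with zero shifts when biases are updated.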
+    if (std::any_of(
+        params.precisionsOnActivations.begin(),
+        params.precisionsOnActivations.end(),
+        [](const Precision precision) { return precision == Precision::U8; }) &&
+        params.quantizeOutputs) {
+        CNNLayerPtr scaleShift = CNNNetworkHelper::getLayer(network, "Convolution14");
+        if (scaleShift->type != "ScaleShift") {
+            THROW_IE_EXCEPTION << "unexpected last output dequantization layer type " << scaleShift->type << " for layer " << scaleShift->name;
+        }
+
+        if (params.updateBiases) {
+            const Blob::Ptr shiftsBlob = CNNNetworkHelper::getBlob(scaleShift, "biases");
+            std::shared_ptr<float> shiftsBuffer = CNNNetworkHelper::getFloatData(shiftsBlob);
+            for (size_t i = 0ul; i < shiftsBlob->size(); ++i) {
+                if (shiftsBuffer.get()[i] != 0.0) {
+                    THROW_IE_EXCEPTION << "unexpected dequantization shift value";
+                }
+            }
+        }
+    }
+
+    return true;
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_unsigned_activations_and_weights_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_unsigned_activations_and_weights_test.cpp
new file mode 100644 (file)
index 0000000..9bbee0d
--- /dev/null
@@ -0,0 +1,51 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
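+// Unsigned case: the activation interval [63.5, 127] is entirely positive,
+// while the weight interval [-1.275/2, 1.275] is asymmetric around zero.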
+void ConvolutionAndQuantizeOnUnsignedActivationsAndWeightsTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "Const2"), 63.5f, "custom");
+    fillData(getLayer(network, "Const3"), 127.f, "custom");
+    fillData(getLayer(network, "Const4"), 63.5f, "custom");
+    fillData(getLayer(network, "Const5"), 127.f, "custom");
+
+    fillDataWithInitValue(getLayer(network, "Const7"), "custom", 1.234f);
+
+    fillData(getLayer(network, "Const8"), -1.275f / 2.f, "custom");
+    fillData(getLayer(network, "Const9"), 1.275f, "custom");
+    fillData(getLayer(network, "Const10"), -1.275f / 2.f, "custom");
+    fillData(getLayer(network, "Const11"), 1.275f, "custom");
+
+    fillDataWithInitValue(getLayer(network, "Const13"), "custom", 2.123f);
+}
+
+std::string ConvolutionAndQuantizeOnUnsignedActivationsAndWeightsTestModel::getName() const {
+    return "ConvolutionAndQuantizeOnUnsignedActivationsAndWeightsTestModel";
+}
+
+bool ConvolutionAndQuantizeOnUnsignedActivationsAndWeightsTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer = getLowPrecisionTransformer(params);
+    transformer.transform(network);
+
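+    // When outputs are quantized, the transformed convolution keeps the
+    // suffix "_original" and a dequantization ScaleShift takes over the name
+    // "Convolution14", so the last two layers in topological order are checked.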
+    if (params.quantizeOutputs) {
+        const std::vector<CNNLayerPtr> layers = CNNNetSortTopologically(network);
+
+        const CNNLayerPtr convolution = layers[layers.size() - 2];
+        if ((convolution->type != "Convolution") || (convolution->name != "Convolution14_original")) {
+            THROW_IE_EXCEPTION << "unexpected layer type '" << convolution->type << "' or name '" << convolution->name << "'";
+        }
+
+        const CNNLayerPtr dequantizationScaleShift = layers[layers.size() - 1];
+        if ((dequantizationScaleShift->type != "ScaleShift") || (dequantizationScaleShift->name != "Convolution14")) {
+            THROW_IE_EXCEPTION << "unexpected layer type '" << dequantizationScaleShift->type << "' or name '" << dequantizationScaleShift->name << "'";
+        }
+    }
+
+    return true;
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_weights_with_multi_output_intervals_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_weights_with_multi_output_intervals_test.cpp
new file mode 100644 (file)
index 0000000..32bc7bc
--- /dev/null
@@ -0,0 +1,85 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
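+// Weights are quantized with per-output-channel intervals: the output low and
+// high constants have shape 64x1x1x1 instead of being scalars.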
+std::string ConvolutionAndQuantizeOnWeightsWithMultiOutputIntervalsTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    CommonTestUtils::conv_common_params conv =
+            { {1, 1}, {3, 3}, {0, 0}, {0, 0}, {1, 1}, "valid", 1, 64, false, false };
+    std::vector<size_t> convOutShape(p.inputDimensions[0].size());
+    getConvOutShape(p.inputDimensions[0], conv, convOutShape);
+
+    std::vector<size_t> weightsConstInputDims = { 64lu, 32lu, 3lu, 3lu };
+    std::vector<size_t> weightsConstOutputDims = { 64lu, 1lu, 1lu, 1lu };
+    std::vector<size_t> biasesConvolutionConstDims = { conv.out_c };
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = {
+        {"levels", "256"}
+    };
+    std::map<std::string, std::string> power_params = {
+        {"power", "1"}, {"scale", "1"}, {"shift", "0"}
+    };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, {"1,2", "6,7"}, // Power
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // Const layers
+        {"7,13", "12,18"}, {"8,14", "12,19"}, {"9,15", "12,20"}, {"10,16", "12,21"}, {"11,17", "12,22"}, // Const layers
+        {"6,12", "14,25"},  {"12,23", "14,26"}, // Fake quantize to Conv
+        {"13,24", "14,27"} // biases to Conv
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput(
+            "QuantizationOnWeights", p.inputDimensions[0], p._network_precision)
+        .addLayer("Power", p._network_precision, &power_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}})
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}}})
+        .addLayer("Const", p._network_precision, &const_params, {{}, {weightsConstInputDims}},
+                std::accumulate(weightsConstInputDims.begin(), weightsConstInputDims.end(), 1lu, std::multiplies<size_t>()) * type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {weightsConstOutputDims}},
+                std::accumulate(weightsConstOutputDims.begin(), weightsConstOutputDims.end(), 1lu, std::multiplies<size_t>()) * type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {weightsConstOutputDims}},
+                std::accumulate(weightsConstOutputDims.begin(), weightsConstOutputDims.end(), 1lu, std::multiplies<size_t>()) * type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{weightsConstInputDims, {1}, {1}, weightsConstOutputDims, weightsConstOutputDims}, {{weightsConstInputDims}}})
+        .addLayer("Const", p._network_precision, &const_params, {{}, {biasesConvolutionConstDims}}, type_size * conv.out_c, 0)
+        .convolutionLayer(p._network_precision, {{p.inputDimensions[0], weightsConstInputDims, biasesConvolutionConstDims }, {convOutShape}}, conv)
+        .finish(&edges);
+}
+
+std::string ConvolutionAndQuantizeOnWeightsWithMultiOutputIntervalsTestModel::getName() const {
+    return "ConvolutionAndQuantizeOnWeightsWithMultiOutputIntervalsTestModel";
+}
+
+bool ConvolutionAndQuantizeOnWeightsWithMultiOutputIntervalsTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer = getLowPrecisionTransformer(params);
+    transformer.transform(network);
+    return true;
+}
+
+void ConvolutionAndQuantizeOnWeightsWithMultiOutputIntervalsTestModel::resetTransformation(CNNNetwork& network) const {
+    // Integer-valued interval bounds for the range test.
+    fillData(getLayer(network, "Const2"), 0.0, "custom");
+    fillData(getLayer(network, "Const3"), 255.0, "custom");
+    fillData(getLayer(network, "Const4"), 0.0, "custom");
+    fillData(getLayer(network, "Const5"), 255.0, "custom");
+
+    fillData(getLayer(network, "Const7"), 4.0, "custom");
+
+    fillData(getLayer(network, "Const8"), -128.0, "custom");
+    fillData(getLayer(network, "Const9"), 127.0, "custom");
+    fillData(getLayer(network, "Const10"), -128.0, "custom");
+    fillData(getLayer(network, "Const11"), 127.0, "custom");
+
+    fillData(getLayer(network, "Const13"), 5.0, "custom");
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_weights_without_const_transformation_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_and_quantize_on_weights_without_const_transformation_test.cpp
new file mode 100644 (file)
index 0000000..9b6e7f2
--- /dev/null
@@ -0,0 +1,87 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
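+// Same quantized-weights topology, but transform() runs the low precision
+// pipeline with the "Const" transformation removed from the set.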
+std::string ConvolutionAndQuantizeOnWeightsWithoutConstTransformationTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    CommonTestUtils::conv_common_params conv =
+            { {1, 1}, {3, 3}, {0, 0}, {0, 0}, {1, 1}, "valid", 1, 32, false, false };
+    std::vector<size_t> convOutShape(p.inputDimensions[0].size());
+    getConvOutShape(p.inputDimensions[0], conv, convOutShape);
+
+    std::vector<size_t> weightsConstInputDims = { 32lu, 32lu, 3lu, 3lu };
+    std::vector<size_t> biasesConvolutionConstDims = { conv.out_c };
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = {
+        {"levels", "256"}
+    };
+    std::map<std::string, std::string> power_params = {
+        {"power", "1"}, {"scale", "1"}, {"shift", "0"}
+    };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, {"1,2", "6,7"}, // Power
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // Const layers
+        {"7,13", "12,18"}, {"8,14", "12,19"}, {"9,15", "12,20"}, {"10,16", "12,21"}, {"11,17", "12,22"}, // Const layers
+        {"6,12", "14,25"},  {"12,23", "14,26"}, // Fake quantize to Conv
+        {"13,24", "14,27"} // biases to Conv
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput(
+            "QuantizationOnWeights", p.inputDimensions[0], p._network_precision)
+        .addLayer("Power", p._network_precision, &power_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}})
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}}})
+        .addLayer("Const", p._network_precision, &const_params, {{}, {weightsConstInputDims}},
+                std::accumulate(weightsConstInputDims.begin(), weightsConstInputDims.end(), 1lu, std::multiplies<size_t>()) * type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{weightsConstInputDims, {1}, {1}, {1}, {1}}, {{weightsConstInputDims}}})
+        .addLayer("Const", p._network_precision, &const_params, {{}, {biasesConvolutionConstDims}}, type_size * conv.out_c, 0)
+        .convolutionLayer(p._network_precision, {{p.inputDimensions[0], weightsConstInputDims, biasesConvolutionConstDims }, {convOutShape}}, conv)
+        .finish(&edges);
+}
+
+std::string ConvolutionAndQuantizeOnWeightsWithoutConstTransformationTestModel::getName() const {
+    return "ConvolutionAndQuantizeOnWeightsWithoutConstTransformationTestModel";
+}
+
+bool ConvolutionAndQuantizeOnWeightsWithoutConstTransformationTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
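+    // Exercise the low precision pipeline with the Const transformation
+    // excluded from the default transformation set.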
+    auto transformationsWithoutConst = getLowPrecisionTransformations(params);
+    transformationsWithoutConst.remove("Const");
+
+    LowPrecisionTransformer transformer(transformationsWithoutConst);
+    transformer.transform(network);
+
+    return true;
+}
+
+void ConvolutionAndQuantizeOnWeightsWithoutConstTransformationTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "Const2"), 63.5f, "custom");
+    fillData(getLayer(network, "Const3"), 127.f, "custom");
+    fillData(getLayer(network, "Const4"), 63.5f, "custom");
+    fillData(getLayer(network, "Const5"), 127.f, "custom");
+
+    fillDataWithInitValue(getLayer(network, "Const7"), "custom", 1.234f);
+
+    fillData(getLayer(network, "Const8"), -1.275f / 2.f, "custom");
+    fillData(getLayer(network, "Const9"), 1.275f, "custom");
+    fillData(getLayer(network, "Const10"), -1.275f / 2.f, "custom");
+    fillData(getLayer(network, "Const11"), 1.275f, "custom");
+
+    fillDataWithInitValue(getLayer(network, "Const13"), "custom", 2.123f);
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_base_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_base_test.cpp
new file mode 100644 (file)
index 0000000..a82af43
--- /dev/null
@@ -0,0 +1,147 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+ConvolutionBaseTestModel::ConvolutionBaseTestModel(const bool addBiasesLayer) : addBiasesLayer(addBiasesLayer) {}
+
+std::string ConvolutionBaseTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    const size_t group = getGroupsCount(p);
+    const size_t inputChannelsCount = p.inputDimensions[0][1];
+    const size_t outputChannelsCount = p.outputDimensions[0][1];
+    CommonTestUtils::conv_common_params conv = { {1, 1}, {3, 3}, {1, 1}, {1, 1}, {1, 1}, "valid", group, outputChannelsCount, false, false };
+    std::vector<size_t> weightsConstInputDims = { outputChannelsCount, inputChannelsCount / group, 3lu, 3lu };
+
+    std::vector<size_t> convOutShape(p.inputDimensions[0].size());
+    getConvOutShape(p.inputDimensions[0], conv, convOutShape);
+
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = { {"levels", "256"} };
+    std::map<std::string, std::string> fake_quantize_params2 = { {"levels", "255"} };
+    std::map<std::string, std::string> power_params = {
+        {"power", "1"}, {"scale", "1"}, {"shift", "0"}
+    };
+
+    std::vector<size_t> biasesConvolutionConstDims = { conv.out_c };
+
+    const std::vector<std::vector<size_t>> convolutionDims = addBiasesLayer ?
+        std::vector<std::vector<size_t>>({p.inputDimensions[0], weightsConstInputDims, biasesConvolutionConstDims }) :
+        std::vector<std::vector<size_t>>({p.inputDimensions[0], weightsConstInputDims });
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, {"1,2", "6,7"}, // Power
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // Const layers
+        {"7,13", "12,18"}, {"8,14", "12,19"}, {"9,15", "12,20"}, {"10,16", "12,21"}, {"11,17", "12,22"}, // Const layers
+        {"6,12", "13,24"},  {"12,23", "13,25"} // Fake quantize to Conv
+    };
+
+    if (addBiasesLayer) {
+        edges.push_back({ "14,28", "13,26" }); // biases to Conv
+    }
+
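+    // Per-channel activation quantization: the interval constants have all
+    // dimensions equal to 1 except the channel dimension, which matches the input.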
+    std::vector<size_t> quantizationParamsDims(p.inputDimensions[0].size(), 1);
+    quantizationParamsDims[1] = inputChannelsCount;
+
+    CommonTestUtils::DefaultNetBuilder builder = CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput(
+        "QuantizationOnWeights", p.inputDimensions[0], p._network_precision)
+        .addLayer("Power", p._network_precision, &power_params, { {p.inputDimensions[0]}, {p.inputDimensions[0]} })
+        .addLayer("Const", p._network_precision, &const_params, { {}, {quantizationParamsDims} }, inputChannelsCount * type_size, "dataInputLowConst")
+        .addLayer("Const", p._network_precision, &const_params, { {}, {quantizationParamsDims} }, inputChannelsCount * type_size, "dataInputHighConst")
+        .addLayer("Const", p._network_precision, &const_params, { {}, {quantizationParamsDims} }, inputChannelsCount * type_size, "dataOutputLowConst")
+        .addLayer("Const", p._network_precision, &const_params, { {}, {quantizationParamsDims} }, inputChannelsCount * type_size, "dataOutputHighConst")
+        .addLayer("FakeQuantize",
+            p._network_precision,
+            &fake_quantize_params,
+            { {p.inputDimensions[0], quantizationParamsDims, quantizationParamsDims, quantizationParamsDims, quantizationParamsDims},
+              {{p.inputDimensions[0]}} },
+            "fakeQuantizeOnActivations")
+        .addLayer("Const", p._network_precision, &const_params, { {}, {weightsConstInputDims} },
+                std::accumulate(weightsConstInputDims.begin(), weightsConstInputDims.end(), 1lu, std::multiplies<size_t>()) * type_size, "weightsConst")
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, "weigthsInputLowConst")
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, "weigthsInputHighConst")
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, "weigthsOutputLowConst")
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, "weigthsOutputHighConst")
+        .addLayer(
+            "FakeQuantize",
+            p._network_precision,
+            &fake_quantize_params,
+            { {weightsConstInputDims, {1}, {1}, {1}, {1}}, {{weightsConstInputDims}} },
+            "fakeQuantizeOnWeights")
+        .convolutionLayer(p._network_precision, { convolutionDims, {convOutShape} }, conv, {}, "Convolution");
+
+    if (addBiasesLayer) {
+        builder.addLayer("Const", p._network_precision, &const_params, { {}, {biasesConvolutionConstDims} }, type_size * conv.out_c, "biasesConst");
+    }
+
+    return builder.finish(&edges);
+}
+
+bool ConvolutionBaseTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer = getLowPrecisionTransformer(params);
+    transformer.transform(network);
+    return true;
+}
+
+void ConvolutionBaseTestModel::resetTransformation(CNNNetwork& network) const {
+    CNNLayerPtr convolution = CNNNetworkHelper::getLayer(network, "Convolution");
+
+    const size_t channelsCount = convolution->GetParamAsUInt("output");
+    const size_t groupsCount = convolution->GetParamAsUInt("group");
+    const size_t filtersCountPerOutputChannel = channelsCount / groupsCount;
+    const size_t kernelH = convolution->GetParamAsUInts("kernel")[0];
+    const size_t kernelW = convolution->GetParamAsUInts("kernel")[1];
+
+    // Const on activations
+    std::vector<float> lowValues(channelsCount);  // zero-initialized low bounds
+    std::vector<float> highValues(channelsCount);
+    if (areScalesOnActivationsDifferent()) {
+        for (size_t inputChannel = 0; inputChannel < highValues.size(); ++inputChannel) {
+            highValues[inputChannel] = 255.f / (1.f + inputChannel);
+        }
+    } else {
+        highValues = std::vector<float>(channelsCount, 255.f);
+    }
+
+    fillData(getLayer(network, "dataInputLowConst"), lowValues, "custom");
+    fillData(getLayer(network, "dataInputHighConst"), highValues, "custom");
+    fillData(getLayer(network, "dataOutputLowConst"), lowValues, "custom");
+    fillData(getLayer(network, "dataOutputHighConst"), highValues, "custom");
+
+    // Const on weights
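+    // Every element of a given filter gets the same value,
+    // outputChannel * filtersCountPerOutputChannel + filter + 1.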
+    std::vector<float> weights(channelsCount * filtersCountPerOutputChannel * kernelH * kernelW);
+    for (size_t outputChannel = 0ul; outputChannel < channelsCount; ++outputChannel) {
+        for (size_t filter = 0ul; filter < filtersCountPerOutputChannel; ++filter) {
+            for (size_t kernel = 0ul; kernel < kernelH * kernelW; ++kernel) {
+                weights[outputChannel * filtersCountPerOutputChannel * kernelH * kernelW + filter * kernelH * kernelW + kernel] =
+                    static_cast<float>(outputChannel * filtersCountPerOutputChannel + filter) + 1.f;
+            }
+        }
+    }
+    fillData(getLayer(network, "weigthsConst"), weights, "custom");
+
+    fillData(getLayer(network, "weigthsInputLowConst"), -128.f / 4.0, "custom");
+    fillData(getLayer(network, "weigthsInputHighConst"), 127.f / 4.0, "custom");
+    fillData(getLayer(network, "weigthsOutputLowConst"), -128.f / 4.0, "custom");
+    fillData(getLayer(network, "weigthsOutputHighConst"), 127.f / 4.0, "custom");
+
+    if (addBiasesLayer) {
+        fillData(getLayer(network, "biasesConst"), 2.f, "custom");
+    }
+}
+
+size_t ConvolutionBaseTestModel::getGroupsCount(SingleLayerTransformationsTestParams& p) const {
+    return 1ul;
+}
+
+bool ConvolutionBaseTestModel::areScalesOnActivationsDifferent() const {
+    return false;
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_depthwise_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_depthwise_test.cpp
new file mode 100644 (file)
index 0000000..1a384ff
--- /dev/null
@@ -0,0 +1,18 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+std::string ConvolutionDepthwiseTestModel::getName() const {
+    return "ConvolutionDepthwiseTestModel";
+}
+
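+// Depthwise convolution: one group per input channel.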
+size_t ConvolutionDepthwiseTestModel::getGroupsCount(SingleLayerTransformationsTestParams& p) const {
+    return p.inputDimensions[0][1];
+}
+
+bool ConvolutionDepthwiseTestModel::areScalesOnActivationsDifferent() const {
+    return true;
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_grouped_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/conv_grouped_test.cpp
new file mode 100644 (file)
index 0000000..6a05d3e
--- /dev/null
@@ -0,0 +1,28 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+std::string ConvolutionGroupedTestModel::getName() const {
+    return "ConvolutionGroupedTestModel";
+}
+
+void ConvolutionGroupedTestModel::initInput(Blob::Ptr input) const {
+    fillDataWithInitValue(input, -1.f);
+}
+
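+// Grouped convolution with 8 channels per group.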
+size_t ConvolutionGroupedTestModel::getGroupsCount(SingleLayerTransformationsTestParams& p) const {
+    const size_t channelsPerGroup = 8ul;
+    const size_t inputChannelsCount = p.inputDimensions[0][1];
+    if ((inputChannelsCount % channelsPerGroup) != 0ul) {
+        THROW_IE_EXCEPTION << "not possible to divide " << inputChannelsCount << " channels to groups";
+    }
+
+    return inputChannelsCount / channelsPerGroup;
+}
+
+bool ConvolutionGroupedTestModel::areScalesOnActivationsDifferent() const {
+    return false;
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/eltwise_broadcast_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/eltwise_broadcast_test.cpp
new file mode 100644 (file)
index 0000000..2ee90b0
--- /dev/null
@@ -0,0 +1,73 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
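+// Eltwise sum where the second operand comes from a Const layer of shape
+// inputDimensions[1], so the addition is exercised with broadcasting.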
+std::string EltwiseBroadcastTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(PrecisionTrait<Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(PrecisionTrait<Precision::FP16>::value_type);
+
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = {
+        {"levels", "256"}
+    };
+    std::map<std::string, std::string> eltwise_params = {
+        {"operation", "sum"}
+    };
+    std::map<std::string, std::string> power_params = {
+        {"power", "1"}, {"scale", "1"}, {"shift", "0"}
+    };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "6,6"}, {"1,1", "11,16"}, // Inputs
+        {"2,2", "6,7"}, {"3,3", "6,8"}, {"4,4", "6,9"}, {"5,5", "6,10"}, // Const layers
+        {"7,12", "11,17"}, {"8,13", "11,18"}, {"9,14", "11,19"}, {"10,15", "11,20"}, // Const layers
+        {"6,11", "12,22"}, {"11,21", "12,23"} // Fake quantize to Convolution
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput(
+            "Eltwise", p.inputDimensions[0], p._network_precision)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {p.inputDimensions[1]}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}}})
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{p.inputDimensions[1], {1}, {1}, {1}, {1}}, {{p.inputDimensions[1]}}})
+        .addLayer("Eltwise", p._network_precision, &eltwise_params, {{p.inputDimensions[0], p.inputDimensions[1]}, {{p.outputDimensions[0]}}}, 0, 0)
+        .finish(&edges);
+}
+
+std::string EltwiseBroadcastTestModel::getName() const {
+    return "EltwiseBroadcastTestModel";
+}
+
+bool EltwiseBroadcastTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer(LowPrecisionTransformer::getAllTransformations(
+        LayerTransformation::Params(params)));
+    transformer.transform(network);
+    return true;
+}
+
+void EltwiseBroadcastTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "Const2"), 255.f / 10.0, "custom");
+    fillData(getLayer(network, "Const3"), 255.f / 4.0, "custom");
+    fillData(getLayer(network, "Const4"), 255.f / 10.0, "custom");
+    fillData(getLayer(network, "Const5"), 255.f / 4.0, "custom");
+
+    fillData(getLayer(network, "Const7"), 255.f / 10.0, "custom");
+    fillData(getLayer(network, "Const8"), 255.f / 2.0, "custom");
+    fillData(getLayer(network, "Const9"), 255.f / 10.0, "custom");
+    fillData(getLayer(network, "Const10"), 255.f / 2.0, "custom");
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/eltwise_fq_with_children_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/eltwise_fq_with_children_test.cpp
new file mode 100644 (file)
index 0000000..7d07b4c
--- /dev/null
@@ -0,0 +1,128 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
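+// fakeQuantize1 has two children: it feeds the Eltwise directly and, through
+// the Pooling layer, the data input of fakeQuantize2 (edge "12,25" -> "10,15"),
+// whose output is the second Eltwise operand.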
+std::string EltwiseFqWithChildrenTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(PrecisionTrait<Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(PrecisionTrait<Precision::FP16>::value_type);
+
+    std::map<std::string, std::string> constParams = {};
+    std::map<std::string, std::string> fakeQuantizeParams = { {"levels", "256"} };
+    std::map<std::string, std::string> eltwiseParams = { {"operation", operation} };
+    std::map<std::string, std::string> poolingParams = { {"kernel", "1,1"}, {"pool-method", "max"}, {"exclude-pad", "false"} };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "5,5"}, {"5,10", "12,24"}, // Inputs
+        {"1,1", "5,6"}, {"2,2", "5,7"}, {"3,3", "5,8"}, {"4,4", "5,9"}, // Const layers
+        {"6,11", "10,16"}, {"7,12", "10,17"}, {"8,13", "10,18"}, {"9,14", "10,19"}, // Const layers
+        {"5,10", "11,21"}, {"10,20", "11,22"}, // Fake quantize to Eltwise
+        {"12,25", "10,15"},
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput("EltwiseTestModel", p.inputDimensions[0], p._network_precision)
+        // 1
+        .addLayer("Const", p._network_precision, &constParams, {{}, {{1}}}, type_size, 0)
+        // 2
+        .addLayer("Const", p._network_precision, &constParams, {{}, {{1}}}, type_size, 0)
+        // 3
+        .addLayer("Const", p._network_precision, &constParams, {{}, {{1}}}, type_size, 0)
+        // 4
+        .addLayer("Const", p._network_precision, &constParams, {{}, {{1}}}, type_size, 0)
+        // 5
+        .addLayer("FakeQuantize", p._network_precision, &fakeQuantizeParams, {{p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}}}, "fakeQuantize1")
+        // 6
+        .addLayer("Const", p._network_precision, &constParams, {{}, {{1}}}, type_size, 0)
+        // 7
+        .addLayer("Const", p._network_precision, &constParams, {{}, {{1}}}, type_size, 0)
+        // 8
+        .addLayer("Const", p._network_precision, &constParams, {{}, {{1}}}, type_size, 0)
+        // 9
+        .addLayer("Const", p._network_precision, &constParams, {{}, {{1}}}, type_size, 0)
+        // 10
+        .addLayer("FakeQuantize", p._network_precision, &fakeQuantizeParams, {{p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}}}, "fakeQuantize2")
+        // 11
+        .addLayer("Eltwise", p._network_precision, &eltwiseParams, {{p.inputDimensions[0], p.inputDimensions[0]}, {{p.inputDimensions[0]}}}, 0, "eltwise")
+
+        // 12
+        .addLayer("Pooling", p._network_precision, &poolingParams, {p.inputDimensions, {p.inputDimensions}}, 0, "pooling")
+        .finish(&edges);
+}
+
+std::string EltwiseFqWithChildrenTestModel::getName() const {
+    return std::string("EltwiseFqWithChildrenTestModel") +
+        (cpuSpecific ? "_cpuSpecific" : "") +
+        "_" + operation +
+        (signedIntervals ? "_signedInterval" : "_notsignedInterval") +
+        (minLevels != 2ul ? ("_minLevels" + std::to_string(minLevels)) : "");
+}
+
+bool EltwiseFqWithChildrenTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    params.updatePrecisions = true;
+    LowPrecisionTransformations transformations = getLowPrecisionTransformations(params);
+    if (!cpuSpecific) {
+        THROW_IE_EXCEPTION << "not CPU/GPU specific Eltwise is not supported";
+    }
+
+    LayerTransformationPtr eltwiseTransformation = transformations.find("Eltwise");
+    eltwiseTransformation->setMinQuantizationLevels(minLevels);
+
+    LowPrecisionTransformer transformer(transformations);
+    transformer.transform(network);
+
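+    // fakeQuantize1 must switch to the quantized precision, while
+    // fakeQuantize2, which is fed by the Pooling branch, must keep the
+    // original input precision.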
+    if (params.quantizeOutputs) {
+        if ((params.quantizedTensorAlignmentOnActivations == LayerTransformation::QuantizedTensorAlignment::UpdateLevel) && (minLevels != 2ul)) {
+            const CNNLayerPtr eltwise = getLayer(network, "eltwise");
+            if (eltwise->type != "Eltwise") {
+                THROW_IE_EXCEPTION << "layer " << eltwise->type << " " << eltwise->name << " was quantized";
+            }
+        }
+
+        if (params.updatePrecisions) {
+            {
+                const CNNLayerPtr fakeQuantize1 = getLayer(network, "fakeQuantize1");
+                const Precision defaultPrecision = signedIntervals ? Precision::I8 : Precision::U8;
+                const Precision expectedPrecision = params.precisionsOnActivations.size() == 1 ? params.precisionsOnActivations[0] : defaultPrecision;
+                if (fakeQuantize1->outData[0]->getPrecision() != expectedPrecision) {
+                    THROW_IE_EXCEPTION << "unexpected precision " << fakeQuantize1->outData[0]->getPrecision() << " for " << fakeQuantize1->type << " " << fakeQuantize1->name;
+                }
+            }
+
+            {
+                const CNNLayerPtr fakeQuantize2 = getLayer(network, "fakeQuantize2");
+                const CNNLayerPtr input = getLayer(network, "Input0");
+                const Precision originalPrecision = input->outData[0]->getTensorDesc().getPrecision();
+                if (fakeQuantize2->outData[0]->getPrecision() != originalPrecision) {
+                    THROW_IE_EXCEPTION << "unexpected precision " << fakeQuantize2->outData[0]->getPrecision() << " for " << fakeQuantize2->type << " " << fakeQuantize2->name;
+                }
+            }
+        }
+    }
+    return true;
+}
+
+void EltwiseFqWithChildrenTestModel::resetTransformation(CNNNetwork& network) const {
+    const float low = signedIntervals ? -128 : 0.f;
+    const float high = signedIntervals ? 127 : 255.f;
+
+    fillData(getLayer(network, "Const1"), low / 4.f, "custom");
+    fillData(getLayer(network, "Const2"), high / 4.f, "custom");
+    fillData(getLayer(network, "Const3"), low / 4.f, "custom");
+    fillData(getLayer(network, "Const4"), high / 4.f, "custom");
+
+    fillData(getLayer(network, "Const6"), low / 2.f, "custom");
+    fillData(getLayer(network, "Const7"), high / 2.f, "custom");
+    fillData(getLayer(network, "Const8"), low / 2.f, "custom");
+    fillData(getLayer(network, "Const9"), high / 2.f, "custom");
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/eltwise_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/eltwise_test.cpp
new file mode 100644 (file)
index 0000000..7b2c5eb
--- /dev/null
@@ -0,0 +1,100 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
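+// Two-input Eltwise with a FakeQuantize on each input; the intervals are
+// signed or unsigned depending on the test flag.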
+std::string EltwiseTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(PrecisionTrait<Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(PrecisionTrait<Precision::FP16>::value_type);
+
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = { {"levels", "256"} };
+    std::map<std::string, std::string> eltwise_params = { {"operation", operation} };
+    std::map<std::string, std::string> power_params = { {"power", "1"}, {"scale", "1"}, {"shift", "0"} };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "6,6"}, {"1,1", "11,16"}, // Inputs
+        {"2,2", "6,7"}, {"3,3", "6,8"}, {"4,4", "6,9"}, {"5,5", "6,10"}, // Const layers
+        {"7,12", "11,17"}, {"8,13", "11,18"}, {"9,14", "11,19"}, {"10,15", "11,20"}, // Const layers
+        {"6,11", "12,22"}, {"11,21", "12,23"} // Fake quantize to Convolution
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput("EltwiseTestModel", p.inputDimensions[0], p._network_precision)
+        .addInputLayer(p._network_precision, p.inputDimensions[1])
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}}})
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{p.inputDimensions[1], {1}, {1}, {1}, {1}}, {{p.inputDimensions[1]}}})
+        .addLayer("Eltwise", p._network_precision, &eltwise_params, {{p.inputDimensions[0], p.inputDimensions[1]}, {{p.inputDimensions[0]}}}, 0, 0)
+        .finish(&edges);
+}
+
+std::string EltwiseTestModel::getName() const {
+    return std::string("EltwiseTestModel") +
+        (cpuSpecific ? "_cpuSpecific" : "") +
+        "_" + operation +
+        (signedIntervals ? "_signedInterval" : "_notsignedInterval") +
+        (minLevels != 2ul ? ("_minLevels" + std::to_string(minLevels)) : "");
+}
+
+bool EltwiseTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformations transformations = getLowPrecisionTransformations(params);
+    if (!cpuSpecific) {
+        THROW_IE_EXCEPTION << "not CPU/GPU specific Eltwise is not supported";
+    }
+
+    LayerTransformationPtr eltwiseTransformation = transformations.find("Eltwise");
+    eltwiseTransformation->setMinQuantizationLevels(minLevels);
+
+    LowPrecisionTransformer transformer(transformations);
+    transformer.transform(network);
+
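+    // With UpdateLevel alignment and minLevels != 2 the Eltwise must stay
+    // unquantized; with updatePrecisions both FakeQuantize outputs must use
+    // the expected integer precision.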
+    if (params.quantizeOutputs) {
+        if ((params.quantizedTensorAlignmentOnActivations == LayerTransformation::QuantizedTensorAlignment::UpdateLevel) && (minLevels != 2ul)) {
+            const CNNLayerPtr eltwise = getLayer(network, "Eltwise12");
+            if (eltwise->type != "Eltwise") {
+                THROW_IE_EXCEPTION << "layer " << eltwise->type << " " << eltwise->name << " was quantized";
+            }
+        }
+
+        if (params.updatePrecisions) {
+            const CNNLayerPtr fakeQuantize1 = getLayer(network, "FakeQuantize6");
+            const CNNLayerPtr fakeQuantize2 = getLayer(network, "FakeQuantize11");
+
+            const Precision expectedPrecision = signedIntervals ? Precision::I8 : Precision::U8;
+            if (fakeQuantize1->outData[0]->getPrecision() != expectedPrecision) {
+                THROW_IE_EXCEPTION << "unexpected precision " << fakeQuantize1->outData[0]->getPrecision() << " for " << fakeQuantize1->type << " " << fakeQuantize1->name;
+            }
+            if (fakeQuantize2->outData[0]->getPrecision() != expectedPrecision) {
+                THROW_IE_EXCEPTION << "unexpected precision " << fakeQuantize2->outData[0]->getPrecision() << " for " << fakeQuantize2->type << " " << fakeQuantize2->name;
+            }
+        }
+    }
+    return true;
+}
+
+void EltwiseTestModel::resetTransformation(CNNNetwork& network) const {
+    const float low = signedIntervals ? -128 : 0.f;
+    const float high = signedIntervals ? 127 : 255.f;
+
+    fillData(getLayer(network, "Const2"), low / 4.f, "custom");
+    fillData(getLayer(network, "Const3"), high / 4.f, "custom");
+    fillData(getLayer(network, "Const4"), low / 4.f, "custom");
+    fillData(getLayer(network, "Const5"), high / 4.f, "custom");
+
+    fillData(getLayer(network, "Const7"), low / 2.f, "custom");
+    fillData(getLayer(network, "Const8"), high / 2.f, "custom");
+    fillData(getLayer(network, "Const9"), low / 2.f, "custom");
+    fillData(getLayer(network, "Const10"), high / 2.f, "custom");
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/eltwise_with_pooling_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/eltwise_with_pooling_test.cpp
new file mode 100644 (file)
index 0000000..8ca0d39
--- /dev/null
@@ -0,0 +1,212 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
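+// Topology: FakeQuantize11 -> Pooling12, which feeds both Convolution15 (whose
+// output drives the Power layer) and Pooling16; Pooling16 and FakeQuantize6
+// are the two Eltwise17 inputs.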
+std::string EltwiseWithPoolingTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(PrecisionTrait<Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(PrecisionTrait<Precision::FP16>::value_type);
+
+    std::map<std::string, std::string> constParams = {};
+    std::map<std::string, std::string> fakeQuantizeParams = { {"levels", "256"} };
+    std::map<std::string, std::string> eltwiseParams = { {"operation", operation} };
+    std::map<std::string, std::string> powerParams = { {"power", "1"}, {"scale", "1"}, {"shift", "0"} };
+    std::map<std::string, std::string> poolingParams = {
+        {"kernel", "1,1"},
+        {"pool-method", "max"},
+        {"exclude-pad", "false"}
+    };
+
+    CommonTestUtils::conv_common_params convolutionParams = { {1, 1}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, "valid", 1, 3, false, false };
+    std::vector<size_t> weightsConstInputDims = { 3lu, 3lu, 1lu, 1lu };
+    std::vector<size_t> biasesConvolutionConstDims = { convolutionParams.out_c };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "11,17"}, {"1,2", "6,7"}, // Inputs
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // Const layers
+        {"7,13", "11,18"}, {"8,14", "11,19"}, {"9,15", "11,20"}, {"10,16", "11,21"}, // Const layers
+        {"6,12", "17,33"}, {"11,22", "12,23"}, // Pooling12
+        {"12,24", "15,27"}, // Pooling12 -> Convolution15
+        {"13,25", "15,28"}, // Const13 -> Convolution15
+        {"14,26", "15,29"}, // Const14 -> Convolution15
+        {"15,30", "1,1"}, // Convolution15 -> Power
+        {"12,24", "16,31"}, // Pooling12 -> Pooling16
+        {"16,32", "17,34"}  // Pooling16 -> FakeQuantize20
+    };
+
+    auto modelBuilder = CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput("EltwiseWithPoolingTestModel", p.inputDimensions[0], p._network_precision)
+        // 1
+        .addLayer("Power", p._network_precision, &powerParams, { {p.inputDimensions[1]}, {p.inputDimensions[1]} })
+        // 2
+        .addLayer("Const", p._network_precision, &constParams, { {}, {{1}} }, type_size, 0)
+        // 3
+        .addLayer("Const", p._network_precision, &constParams, { {}, {{1}} }, type_size, 0)
+        // 4
+        .addLayer("Const", p._network_precision, &constParams, { {}, {{1}} }, type_size, 0)
+        // 5
+        .addLayer("Const", p._network_precision, &constParams, { {}, {{1}} }, type_size, 0)
+        // 6
+        .addLayer("FakeQuantize", p._network_precision, &fakeQuantizeParams, { {p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}} })
+        // 7
+        .addLayer("Const", p._network_precision, &constParams, { {}, {{1}} }, type_size, 0)
+        // 8
+        .addLayer("Const", p._network_precision, &constParams, { {}, {{1}} }, type_size, 0)
+        // 9
+        .addLayer("Const", p._network_precision, &constParams, { {}, {{1}} }, type_size, 0)
+        // 10
+        .addLayer("Const", p._network_precision, &constParams, { {}, {{1}} }, type_size, 0)
+        // 11
+        .addLayer("FakeQuantize", p._network_precision, &fakeQuantizeParams, { {p.inputDimensions[1], {1}, {1}, {1}, {1}}, {{p.inputDimensions[1]}} })
+        // 12
+        .addLayer("Pooling", p._network_precision, &poolingParams, { {p.inputDimensions[1]}, {p.inputDimensions[1]} })
+        // 13
+        .addLayer("Const", p._network_precision, &constParams, { {}, {weightsConstInputDims} },
+            std::accumulate(weightsConstInputDims.begin(), weightsConstInputDims.end(), 1lu, std::multiplies<size_t>()) * type_size)
+        // 14
+        .addLayer("Const", p._network_precision, &constParams, { {}, {biasesConvolutionConstDims} }, type_size * convolutionParams.out_c, 0)
+        // 15
+        .convolutionLayer(p._network_precision, { {p.inputDimensions[0], weightsConstInputDims, biasesConvolutionConstDims }, {p.inputDimensions[0]} }, convolutionParams)
+        // 16
+        .addLayer("Pooling", p._network_precision, &poolingParams, { {p.inputDimensions[1]}, {p.inputDimensions[1]} })
+        // 17
+        .addLayer("Eltwise", p._network_precision, &eltwiseParams, { {p.inputDimensions[0], p.inputDimensions[1]}, {{p.inputDimensions[0]}} }, 0, 0);
+
+    auto modelString = modelBuilder.finish(&edges);
+    return modelString;
+}
+
+std::string EltwiseWithPoolingTestModel::getName() const {
+    return std::string("EltwiseWithPoolingTestModel") +
+        (cpuSpecific ? "_cpuSpecific" : "") +
+        "_" + operation +
+        (signedIntervals ? "_signedInterval" : "_notSignedInterval") +
+        (minLevels != 2ul ? ("_minLevels" + std::to_string(minLevels)) : "");
+}
+
+bool EltwiseWithPoolingTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    if (std::any_of(
+        params.precisionsOnActivations.begin(),
+        params.precisionsOnActivations.end(),
+        [](const Precision precision) { return precision == Precision::U8; })) {
+        params.updatePrecisions = true;
+    }
+
+    LowPrecisionTransformations transformations = getLowPrecisionTransformations(params);
+    if (cpuSpecific) {
+        transformations = transformations.
+            remove("Eltwise").
+            add<EltwiseTransformation>(LayerTransformation::Params(params), "Eltwise");
+    } else {
+        THROW_IE_EXCEPTION << "not CPU/GPU specific Eltwise is not supported";
+    }
+
+    LayerTransformationPtr eltwiseTransformation = transformations.find("Eltwise");
+    eltwiseTransformation->setMinQuantizationLevels(minLevels);
+
+    LowPrecisionTransformer transformer(transformations);
+    transformer.transform(network);
+
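+    // INT8 path: FakeQuantize11 and the Pooling layers must produce U8/I8.
+    // For "sum" the dequantization ScaleShift stays on the Eltwise output with
+    // zero shifts; for "mul"/"prod" it must have been moved away from it.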
+    if (params.quantizeOutputs && params.updatePrecisions) {
+        // INT8 way
+        const CNNLayerPtr fakeQuantize11 = getLayer(network, "FakeQuantize11");
+        if ((fakeQuantize11->outData[0]->getPrecision() != Precision::U8) && (fakeQuantize11->outData[0]->getPrecision() != Precision::I8)) {
+            THROW_IE_EXCEPTION <<
+                "layer " << fakeQuantize11->type << " " << fakeQuantize11->name <<
+                " was not quantized " << fakeQuantize11->outData[0]->getPrecision();
+        }
+
+        const CNNLayerPtr pooling12 = getLayer(network, "Pooling12");
+        if ((pooling12->outData[0]->getPrecision() != Precision::U8) && (pooling12->outData[0]->getPrecision() != Precision::I8)) {
+            THROW_IE_EXCEPTION <<
+                "layer " << pooling12->type << " " << pooling12->name <<
+                " was not quantized " << pooling12->outData[0]->getPrecision();
+        }
+
+        const CNNLayerPtr pooling16 = getLayer(network, "Pooling16");
+        if ((pooling16->outData[0]->getPrecision() != Precision::U8) && (pooling16->outData[0]->getPrecision() != Precision::I8)) {
+            THROW_IE_EXCEPTION <<
+                "layer " << pooling16->type << " " << pooling16->name <<
+                " was not quantized " << pooling16->outData[0]->getPrecision();
+        }
+
+        if (operation == "sum") {
+            const CNNLayerPtr eltwise = getLayer(network, "Eltwise17_original");
+            if (eltwise->type != "Eltwise") {
+                THROW_IE_EXCEPTION << "layer type " << eltwise->type << " " << eltwise->name << " is not correct";
+            }
+
+            if ((eltwise->outData[0]->getPrecision() != Precision::FP32) && (eltwise->outData[0]->getPrecision() != Precision::FP16)) {
+                THROW_IE_EXCEPTION << "layer " << eltwise->type << " " << eltwise->name << " output port precision is not correct";
+            }
+
+            const CNNLayerPtr dequantizationScaleShift = getLayer(network, "Eltwise17");
+            if (dequantizationScaleShift == nullptr) {
+                THROW_IE_EXCEPTION << "dequantization layer was not found";
+            }
+
+            Blob::Ptr shiftsBlob = CNNNetworkHelper::getBlob(dequantizationScaleShift, "biases");
+            const auto shiftsBuffer = CNNNetworkHelper::getFloatData(shiftsBlob);
+            const size_t shiftsBlobSize = shiftsBlob->size();
+            for (size_t i = 0; i < shiftsBlobSize; ++i) {
+                if (shiftsBuffer.get()[i] != 0.f) {
+                    THROW_IE_EXCEPTION << "unexpected shift value " << shiftsBuffer.get()[i] << " for dequantization layer";
+                }
+            }
+        } else if ((operation == "mul") || (operation == "prod")) {
+            const CNNLayerPtr eltwise = getLayer(network, "Eltwise17");
+            if (eltwise->type != "Eltwise") {
+                THROW_IE_EXCEPTION << "layer type " << eltwise->type << " " << eltwise->name << " is not correct";
+            }
+
+            const CNNLayerPtr dequantizationScaleShift = getLayer(network, "Eltwise17_original");
+            if (dequantizationScaleShift != nullptr) {
+                THROW_IE_EXCEPTION
+                    << "dequantization layer " << dequantizationScaleShift->type << " " << dequantizationScaleShift->name
+                    << " has to be absent (moved to full path branch)";
+            }
+        }
+    } else {
+        const CNNLayerPtr eltwise = getLayer(network, "Eltwise17");
+        if (eltwise->type != "Eltwise") {
+            THROW_IE_EXCEPTION << "layer type " << eltwise->type << " " << eltwise->name << " is not correct";
+        }
+
+        if ((eltwise->outData[0]->getPrecision() != Precision::FP32) && (eltwise->outData[0]->getPrecision() != Precision::FP16)) {
+            THROW_IE_EXCEPTION << "layer " << eltwise->type << " " << eltwise->name << " output port precision is not correct";
+        }
+    }
+
+    // FP32 way
+    const CNNLayerPtr fakeQuantize6 = getLayer(network, "FakeQuantize6");
+    if ((fakeQuantize6->outData[0]->getPrecision() != Precision::FP32) && (fakeQuantize6->outData[0]->getPrecision() != Precision::FP16)) {
+        THROW_IE_EXCEPTION << "layer " << fakeQuantize6->type << " " << fakeQuantize6->name << " was quantized";
+    }
+
+    return true;
+}
+
+void EltwiseWithPoolingTestModel::resetTransformation(CNNNetwork& network) const {
+    const float low = signedIntervals ? -128 : 0.f;
+    const float high = signedIntervals ? 127 : 255.f;
+
+    fillData(getLayer(network, "Const2"), low / 4.f, "custom");
+    fillData(getLayer(network, "Const3"), high / 4.f, "custom");
+    fillData(getLayer(network, "Const4"), low / 4.f, "custom");
+    fillData(getLayer(network, "Const5"), high / 4.f, "custom");
+
+    fillData(getLayer(network, "Const7"), low / 2.f, "custom");
+    fillData(getLayer(network, "Const8"), high / 2.f, "custom");
+    fillData(getLayer(network, "Const9"), low / 2.f, "custom");
+    fillData(getLayer(network, "Const10"), high / 2.f, "custom");
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/fake_quantize_and_activation_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/fake_quantize_and_activation_test.cpp
new file mode 100644 (file)
index 0000000..4ecda82
--- /dev/null
@@ -0,0 +1,101 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+#include <vector>
+
+FakeQuantizeAndActivationTestModel::FakeQuantizeAndActivationTestModel(const std::vector<std::pair<float, float>>& intervals) :
+    intervals(intervals) {}
+
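+// Fills the input with a ramp over the first quantization interval, wrapping
+// back to the interval start when the upper bound is exceeded.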
+void FakeQuantizeAndActivationTestModel::initInput(Blob::Ptr input) const {
+    const Precision& precision = input->getTensorDesc().getPrecision();
+    const size_t dataSize = input->size();
+
+    const float step = (intervals[0].second - intervals[0].first) / dataSize;
+    float value = intervals[0].first;
+    for (size_t i = 0ul; i < dataSize; ++i) {
+        if (precision == Precision::FP32) {
+            float* buffer = input->buffer().as<float*>();
+            buffer[i] = value;
+        } else if (precision == Precision::FP16) {
+            short* buffer = input->buffer().as<short*>();
+            buffer[i] = InferenceEngine::PrecisionUtils::f32tof16(value);
+        }
+
+        value += step;
+        if (value > intervals[0].second) {
+            value = intervals[0].first;
+        }
+    }
+}
+
+float FakeQuantizeAndActivationTestModel::getZeroThreshold() const {
+    const float interval = intervals[0].second - intervals[0].first;
+    return interval / (256.f * 1.e3f);
+}
+
+std::string FakeQuantizeAndActivationTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = {{"levels", "256"}};
+    std::map<std::string, std::string> power_params = {{"power", "1"}, {"scale", "1"}, {"shift", "0"}};
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, {"1,2", "6,7"}, // Power
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // Const layers
+        {"6,12", "7,13"}, // Fake quantize to ReLU
+        {"7,14", "8,15"}
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput("FakeQuantizeAndActivationTestModel", p.inputDimensions[0], p._network_precision)
+        // 1
+        .addLayer("Power", p._network_precision, &power_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}})
+        // 2
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 3
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 4
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 5
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 6
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}}})
+        // 7
+        .addLayer("ReLU", p._network_precision, {}, { {p.inputDimensions[0]}, {p.inputDimensions[0]} })
+        // 8
+        .addLayer("Power", p._network_precision, &power_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}})
+        .finish(&edges);
+}
+
+std::string FakeQuantizeAndActivationTestModel::getName() const {
+    return
+        "FakeQuantizeAndActivationTestModel_" +
+        std::to_string(intervals.size()) + "_" +
+        std::to_string(intervals[0].first) + "_" + std::to_string(intervals[0].second);
+}
+
+bool FakeQuantizeAndActivationTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer = getLowPrecisionTransformer(params);
+    transformer.transform(network);
+    return true;
+}
+
+void FakeQuantizeAndActivationTestModel::resetTransformation(CNNNetwork& network) const {
+    std::vector<float> low(intervals.size());
+    std::vector<float> high(intervals.size());
+    for (size_t i = 0ul; i < intervals.size(); ++i) {
+        const std::pair<float, float> interval = intervals[i];
+        low[i] = interval.first;
+        high[i] = interval.second;
+    }
+
+    fillData(getLayer(network, "Const2"), low, "custom");
+    fillData(getLayer(network, "Const3"), high, "custom");
+    fillData(getLayer(network, "Const4"), low, "custom");
+    fillData(getLayer(network, "Const5"), high, "custom");
+}
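
The initInput above fills the blob with a ramp across the first quantization interval and wraps back to the lower bound when the upper bound is exceeded. The same fill pattern in standalone form (interval [-32, 31.75] assumed for illustration):

#include <cstdio>
#include <vector>

int main() {
    const float low = -32.f, high = 31.75f;   // assumed interval
    const size_t dataSize = 10;               // stands in for input->size()
    std::vector<float> buffer(dataSize);

    const float step = (high - low) / dataSize;
    float value = low;
    for (size_t i = 0; i < dataSize; ++i) {
        buffer[i] = value;
        value += step;
        if (value > high) value = low;        // wrap back to the interval start
    }
    for (float v : buffer) std::printf("%g ", v);
    std::printf("\n");
}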
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/fake_quantize_and_activation_with_negative_scales_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/fake_quantize_and_activation_with_negative_scales_test.cpp
new file mode 100644 (file)
index 0000000..54011c3
--- /dev/null
@@ -0,0 +1,82 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+void FakeQuantizeAndActivationWithNegativeScalesTestModel::initInput(Blob::Ptr input) const {
+    const Precision& precision = input->getTensorDesc().getPrecision();
+    const size_t dataSize = input->size();
+
+    float value = -64.0;
+    for (size_t i = 0ul; i < std::min(static_cast<size_t>(256), dataSize); ++i) {
+        if (precision == Precision::FP32) {
+            float* buffer = input->buffer().as<float*>();
+            buffer[i] = value;
+        } else if (precision == Precision::FP16) {
+            short* buffer = input->buffer().as<short*>();
+            buffer[i] = InferenceEngine::PrecisionUtils::f32tof16(value);
+        }
+        value += 1.0;
+    }
+}
+
+std::string FakeQuantizeAndActivationWithNegativeScalesTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = { {"levels", "256"} };
+    std::map<std::string, std::string> power_params = {{"power", "1"}, {"scale", "1"}, {"shift", "0"}};
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, // Input -> Power
+        {"1,2", "6,7"}, // Power -> FakeQuantize
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // Const layers
+        {"6,12", "7,13"}, // FakeQuantize -> ScaleShift
+        {"7,14", "8,15"}, // ScaleShift -> ReLU
+        {"8,16", "9,17"}  // ReLU -> Power
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput("FakeQuantizeAndActivationWithNegativeScalesTestModel", p.inputDimensions[0], p._network_precision)
+        // 1
+        .addLayer("Power", p._network_precision, &power_params, { {p.inputDimensions[0]}, {p.inputDimensions[0]} })
+        // 2
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        // 3
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        // 4
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        // 5
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        // 6
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, { {p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}} })
+        // 7
+        .addLayer("ScaleShift", p._network_precision, {}, {{p.inputDimensions[0]}, {p.inputDimensions[0]}}, p.inputDimensions[0][1] * type_size, p.inputDimensions[0][1] * type_size)
+        // 8
+        .addLayer("ReLU", p._network_precision, {}, {{p.inputDimensions[0]}, {p.inputDimensions[0]}})
+        // 9
+        .addLayer("Power", p._network_precision, &power_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}})
+        .finish(&edges);
+}
+
+std::string FakeQuantizeAndActivationWithNegativeScalesTestModel::getName() const {
+    return "FakeQuantizeAndActivationWithNegativeScalesTestModel";
+}
+
+bool FakeQuantizeAndActivationWithNegativeScalesTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer = getLowPrecisionTransformer(params);
+    transformer.transform(network);
+    return true;
+}
+
+void FakeQuantizeAndActivationWithNegativeScalesTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "Const2"), -128.f / 4.f, "custom");
+    fillData(getLayer(network, "Const3"), 127.f / 4.f, "custom");
+    fillData(getLayer(network, "Const4"), -128.f / 4.f, "custom");
+    fillData(getLayer(network, "Const5"), 127.f / 4.f, "custom");
+
+    fillData(getLayer(network, "ScaleShift7"), -1.f, "weights");
+    fillData(getLayer(network, "ScaleShift7"), 0.f, "biases");
+}
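
The point of the negative ScaleShift7 weights is that a dequantization ScaleShift with a negative scale cannot be swapped with ReLU: ReLU(s*x) differs from s*ReLU(x) when s < 0, so the transformation must leave it in place. A small standalone check of that property:

#include <algorithm>
#include <cstdio>
#include <initializer_list>

int main() {
    const float s = -1.f;                           // negative dequantization scale
    for (float x : {-2.f, 0.5f, 3.f}) {
        const float before = std::max(s * x, 0.f);  // ReLU applied after the scale
        const float after  = s * std::max(x, 0.f);  // scale applied after ReLU
        std::printf("x=%g: ReLU(s*x)=%g, s*ReLU(x)=%g\n", x, before, after);
    }
}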
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/fake_quantize_and_activation_with_negative_slope_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/fake_quantize_and_activation_with_negative_slope_test.cpp
new file mode 100644 (file)
index 0000000..a2cce45
--- /dev/null
@@ -0,0 +1,154 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+void FakeQuantizeAndActivationWithNegativeSlopeTestModel::initInput(Blob::Ptr input) const {
+    const Precision& precision = input->getTensorDesc().getPrecision();
+    const size_t dataSize = input->size();
+
+    float value = -64.0;
+    for (size_t i = 0ul; i < std::min(static_cast<size_t>(256), dataSize); ++i) {
+        if (precision == Precision::FP32) {
+            float* buffer = input->buffer().as<float*>();
+            buffer[i] = value;
+        } else if (precision == Precision::FP16) {
+            short* buffer = input->buffer().as<short*>();
+            buffer[i] = InferenceEngine::PrecisionUtils::f32tof16(value);
+        }
+        value += 1.0;
+    }
+}
+
+std::string FakeQuantizeAndActivationWithNegativeSlopeTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = { {"levels", "256"} };
+    std::map<std::string, std::string> power_params = {{"power", "1"}, {"scale", "1"}, {"shift", "0"}};
+    std::map<std::string, std::string> reluParams = { {"negative_slope", "-1.0"} };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, // Input -> Power
+        {"1,2", "6,7"}, // Power -> FakeQuantize
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // Const layers
+        {"6,12", "7,13"}, // FakeQuantize -> ScaleShift
+        {"7,14", "8,15"}, // ScaleShift -> ReLU
+        {"8,16", "9,17"}  // ReLU -> Power
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput("FakeQuantizeAndActivationWithNegativeSlopeTestModel", p.inputDimensions[0], p._network_precision)
+        // 1
+        .addLayer("Power", p._network_precision, &power_params, { {p.inputDimensions[0]}, {p.inputDimensions[0]} })
+        // 2
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        // 3
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        // 4
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        // 5
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        // 6
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, { {p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}} })
+        // 7
+        .addLayer("ScaleShift", p._network_precision, {}, {{p.inputDimensions[0]}, {p.inputDimensions[0]}}, p.inputDimensions[0][1] * type_size, p.inputDimensions[0][1] * type_size)
+        // 8
+        .addLayer("ReLU", p._network_precision, &reluParams, { {p.inputDimensions[0]}, {p.inputDimensions[0]} })
+        // 9
+        .addLayer("Power", p._network_precision, &power_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}})
+        .finish(&edges);
+}
+
+std::string FakeQuantizeAndActivationWithNegativeSlopeTestModel::getName() const {
+    return "FakeQuantizeAndActivationWithNegativeSlopeTestModel";
+}
+
+bool FakeQuantizeAndActivationWithNegativeSlopeTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer = getLowPrecisionTransformer(params);
+    transformer.transform(network);
+
+    CNNLayerPtr relu = getLayer(network, "ReLU8");
+    if (relu == nullptr) {
+        THROW_IE_EXCEPTION << "layer was not found " << relu->name;
+    }
+
+    const std::vector<CNNLayerPtr> parents = CNNNetworkHelper::getParents(*relu);
+    if (parents.size() != 1) {
+        THROW_IE_EXCEPTION << "unexpected parent layers size " << parents.size();
+    }
+
+    if (parents[0]->name != "FakeQuantize6") {
+        // FQ -> dequantization -> ReLU
+        if (parents[0]->name != "ScaleShift7") {
+            THROW_IE_EXCEPTION << "unexpected parent layer " << parents[0]->name;
+        }
+
+        if (parents[0]->type == "ScaleShift") {
+            CNNLayerPtr dequantizationScaleShift = parents[0];
+            const Blob::Ptr weightsBlob = CNNNetworkHelper::getBlob(dequantizationScaleShift, "weights");
+            auto weights = CNNNetworkHelper::getFloatData(weightsBlob);
+            const std::vector<float> scales = std::vector<float>(weights.get(), weights.get() + weightsBlob->size());
+
+            const Blob::Ptr biasesBlob = CNNNetworkHelper::getBlob(dequantizationScaleShift, "biases");
+            auto biases = CNNNetworkHelper::getFloatData(biasesBlob);
+            const std::vector<float> shifts = std::vector<float>(biases.get(), biases.get() + biasesBlob->size());
+
+            if ((std::all_of(shifts.begin(), shifts.end(), [](float value) { return value == 0.0; })) &&
+                (std::all_of(scales.begin(), scales.end(), [](float value) { return value >= 0.0; }))) {
+                THROW_IE_EXCEPTION << "dequantization " << parents[0]->type << " " << parents[0]->name << " was not moved via " << " " << relu->type << " " << relu->name;
+            }
+        } else if (parents[0]->type == "Convolution") {
+            const CNNLayerPtr convolution = parents[0];
+            const std::vector<CNNLayerPtr> convParents = CNNNetworkHelper::getParents(*convolution);
+
+            const Blob::Ptr weightsBlob = CNNNetworkHelper::getBlob(convParents[1], "custom");
+            if (weightsBlob == nullptr) {
+                THROW_IE_EXCEPTION << "weights are absent";
+            }
+            const std::shared_ptr<float> weights = CNNNetworkHelper::getFloatData(weightsBlob);
+            if (weights == nullptr) {
+                THROW_IE_EXCEPTION << "weights are not received";
+            }
+            const std::vector<float> scales = std::vector<float>(weights.get(), weights.get() + weightsBlob->size());
+
+            if (std::any_of(scales.begin(), scales.end(), [](float value) { return value < 0.0; })) {
+                THROW_IE_EXCEPTION << "dequantization scales are not correct";
+            }
+
+            const Blob::Ptr biasesBlob = CNNNetworkHelper::getBlob(convParents[2], "custom");
+            if (biasesBlob == nullptr) {
+                THROW_IE_EXCEPTION << "biases are absent";
+            }
+            const std::shared_ptr<float> biases = CNNNetworkHelper::getFloatData(biasesBlob);
+            if (biases == nullptr) {
+                THROW_IE_EXCEPTION << "biases are not received";
+            }
+        } else {
+            THROW_IE_EXCEPTION << "unexpected parent layer type " << parents[0]->type;
+        }
+    } else {
+        // FQ -> ReLU -> dequantization or FQ -> ReLU -> Power
+        const std::vector<CNNLayerPtr> children = CNNNetworkHelper::getChildren(*relu);
+        if (children.size() != 1lu) {
+            THROW_IE_EXCEPTION << "unexpected children layers size " << children.size();
+        }
+        if (children[0]->name != "Power9" && children[0]->name != "ReLU8_ScaleShift_Power9") {
+            THROW_IE_EXCEPTION << "Unexpected child layer '" << children[0]->name << "'";
+        }
+    }
+
+    return true;
+}
+
+void FakeQuantizeAndActivationWithNegativeSlopeTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "Const2"), 0.f, "custom");
+    fillData(getLayer(network, "Const3"), 255.f / 8.f, "custom");
+    fillData(getLayer(network, "Const4"), 0.f, "custom");
+    fillData(getLayer(network, "Const5"), 255.f / 8.f, "custom");
+
+    fillData(getLayer(network, "ScaleShift7"), 3.f, "weights");
+    fillData(getLayer(network, "ScaleShift7"), 0.f, "biases");
+}
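
With negative_slope set to -1.0, the ReLU above computes x for x >= 0 and -x otherwise, i.e. |x|. The transform checks rely on a related property: a non-negative scale commutes with a leaky ReLU, so dequantization can only be moved through it when every scale is >= 0. A standalone sketch:

#include <cstdio>
#include <initializer_list>

float leakyRelu(float x, float slope) {
    return x >= 0.f ? x : slope * x;
}

int main() {
    const float slope = -1.f;                 // negative_slope from the layer params
    for (float x : {-3.f, -0.5f, 0.f, 2.f})
        std::printf("lrelu(%g) = %g\n", x, leakyRelu(x, slope));

    // For s >= 0, lrelu(s*x) == s*lrelu(x); the identity fails for s < 0.
    const float s = 3.f;                      // the ScaleShift7 weight used above
    const float x = -2.f;
    std::printf("lrelu(s*x)=%g, s*lrelu(x)=%g\n",
                leakyRelu(s * x, slope), s * leakyRelu(x, slope));
}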
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/fake_quantize_and_scaleshift_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/fake_quantize_and_scaleshift_test.cpp
new file mode 100644 (file)
index 0000000..e7a83e3
--- /dev/null
@@ -0,0 +1,57 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+std::string FakeQuantizeAndScaleShiftTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    std::map<std::string, std::string> scale_shift_params = {};
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = {
+        {"levels", "256"}
+    };
+    std::map<std::string, std::string> power_params = {
+        {"power", "1"}, {"scale", "1"}, {"shift", "0"}
+    };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, {"1,2", "6,7"}, // ScaleShift
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // Const layers
+        {"6,12", "7,13"} // Fake quantize to Power
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput(
+            "DWConvFQ", p.inputDimensions[0], p._network_precision)
+        .addLayer("ScaleShift", p._network_precision, &scale_shift_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}}, p.inputDimensions[0][1] * type_size, p.inputDimensions[0][1] * type_size)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}}})
+        .addLayer("Power", p._network_precision, &power_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}})
+        .finish(&edges);
+}
+
+std::string FakeQuantizeAndScaleShiftTestModel::getName() const {
+    return "FakeQuantizeAndScaleShiftTestModel";
+}
+
+bool FakeQuantizeAndScaleShiftTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer = getLowPrecisionTransformer(params);
+    transformer.transform(network);
+    return true;
+}
+
+void FakeQuantizeAndScaleShiftTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "Const2"), -128.f / 4.f, "custom");
+    fillData(getLayer(network, "Const3"), 127.f / 4.f, "custom");
+    fillData(getLayer(network, "Const4"), -128.f / 4.f, "custom");
+    fillData(getLayer(network, "Const5"), 127.f / 4.f, "custom");
+
+    fillDataWithInitValue(getLayer(network, "ScaleShift1"), "weights", 1.234f);
+    fillDataWithInitValue(getLayer(network, "ScaleShift1"), "biases", 5.678f);
+}
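
ScaleShift1 here is a per-channel affine layer: y[n][c][...] = w[c] * x[n][c][...] + b[c]. A minimal standalone sketch of that semantics (toy 2-channel data; the exact fill pattern of fillDataWithInitValue is not shown in this diff, so uniform values are used):

#include <cstdio>
#include <vector>

int main() {
    const size_t channels = 2, spatial = 3;
    std::vector<float> x = {1, 2, 3,  4, 5, 6};   // [C, HW] flattened
    std::vector<float> w = {1.234f, 1.234f};      // per-channel weights
    std::vector<float> b = {5.678f, 5.678f};      // per-channel biases

    for (size_t c = 0; c < channels; ++c)
        for (size_t i = 0; i < spatial; ++i) {
            const size_t idx = c * spatial + i;
            x[idx] = w[c] * x[idx] + b[c];        // ScaleShift: y = w[c]*x + b[c]
        }
    for (float v : x) std::printf("%g ", v);
    std::printf("\n");
}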
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/fake_quantize_reshape_pooling_test_model_with_constants_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/fake_quantize_reshape_pooling_test_model_with_constants_test.cpp
new file mode 100644 (file)
index 0000000..ec66078
--- /dev/null
@@ -0,0 +1,77 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+void FakeQuantizeReshapePoolingTestModelWithConstants::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "inputLow"), -128.f / 4.f, "custom");
+    fillData(getLayer(network, "inputHigh"), 127.f / 4.f, "custom");
+    fillData(getLayer(network, "outputLow"), -128.f / 4.f, "custom");
+    fillData(getLayer(network, "outputHigh"), 127.f / 4.f, "custom");
+
+    fillDataMy(getLayer(network, "reshapeConst1"), { 0, 1280, 7, 1 }, "custom");
+    fillDataMy(getLayer(network, "reshapeConst2"), { 0, 1280 }, "custom");
+}
+
+std::string FakeQuantizeReshapePoolingTestModelWithConstants::getName() const {
+    return "FakeQuantizeReshapePoolingTestModelWithConstants";
+}
+
+bool FakeQuantizeReshapePoolingTestModelWithConstants::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer = getLowPrecisionTransformer(params);
+    transformer.transform(network);
+    return true;
+}
+
+std::string FakeQuantizeReshapePoolingTestModelWithConstants::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fakeQuantizeParams = {{ "levels", "256" }};
+    std::map<std::string, std::string> power_params = {{"power", "1"}, {"scale", "1"}, {"shift", "0"}};
+    std::map<std::string, std::string> poolingParams = { {"kernel", "7,1"}, { "pool-method", "avg" }, { "strides", "1,1" } };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, // input => inputPower
+        {"1,2", "6,7"}, // inputPower => fakeQuantize
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // Const layers => fakeQuantize
+        {"6,12", "8,14"}, // fakeQuantize => reshape1
+        {"7,13", "8,15"}, // reshapeConst1 => reshape1
+        {"8,16", "9,17"}, // reshape1 => pooling
+        {"9,18", "11,20"}, // pooling => reshape2
+        {"10,19", "11,21"}, // reshapeConst2 => reshape2
+        {"11,22", "12,23"}, // reshape2 => outputPower
+    };
+
+    auto network = CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput(
+        "QuantizationOnWeights", p.inputDimensions[0], p._network_precision)
+        // inputPower: id=1
+        .addLayer("Power", p._network_precision, &power_params, { {p.inputDimensions[0]}, {p.inputDimensions[0]} }, "inputPower")
+        // inputLow: id=2
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, "inputLow")
+        // inputHigh: id=3
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, "inputHigh")
+        // outputLow: id=4
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, "outputLow")
+        // outputHigh: id=5
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, "outputHigh")
+        // fakeQuantize: id=6
+        .addLayer("FakeQuantize", p._network_precision, &fakeQuantizeParams, { {p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}} }, "fakeQuantize")
+        // reshapeConst1: id=7
+        .addLayer("Const", "I32", {}, { {}, {{4}} }, 4 * 4, "reshapeConst1")
+        // reshape1: id=8
+        .addLayer("Reshape", p._network_precision, {}, { {{ 1, 1280, 7 }, {4}}, {{1, 1280, 7, 1}} }, "reshape1")
+        // pooling: id=9
+        .addLayer("Pooling", p._network_precision, &poolingParams, { {{ 1, 1280, 7, 1 }}, {{1, 1280, 1, 1}} }, "pooling")
+        // reshapeConst2: id=10
+        .addLayer("Const", "I32", {}, { {}, {{2}} }, 2 * 4, "reshapeConst2")
+        // reshape2: id=11
+        .addLayer("Reshape", p._network_precision, {}, { {{ 1, 1280, 1, 1 }, {2}}, {{1, 1280 }} }, "reshape2")
+        // outputPower: id=12
+        .addLayer("Power", p._network_precision, &power_params, { {{ 1, 1280 }}, {{1, 1280}} }, "outputPower")
+        .finish(&edges);
+    return network;
+}
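
The I32 reshape constants use the Reshape operation's special values: 0 copies the corresponding dimension from the input, and -1 (used later in this patch) infers the remaining size. A standalone sketch of that output-shape computation, assumed to follow the usual Reshape semantics:

#include <cstdint>
#include <cstdio>
#include <vector>

std::vector<size_t> reshapeDims(const std::vector<size_t>& in,
                                const std::vector<int64_t>& pattern) {
    size_t inTotal = 1, outTotal = 1;
    for (size_t d : in) inTotal *= d;

    std::vector<size_t> out(pattern.size());
    int inferred = -1;
    for (size_t i = 0; i < pattern.size(); ++i) {
        if (pattern[i] == 0) {
            out[i] = in[i];                        // 0: copy the input dim
        } else if (pattern[i] == -1) {
            inferred = static_cast<int>(i);        // -1: infer later
            continue;
        } else {
            out[i] = static_cast<size_t>(pattern[i]);
        }
        outTotal *= out[i];
    }
    if (inferred >= 0) out[inferred] = inTotal / outTotal;  // -1: the rest
    return out;
}

int main() {
    for (size_t d : reshapeDims({1, 1280, 7}, {0, 1280, 7, 1})) std::printf("%zu ", d);
    std::printf("\n");                             // 1 1280 7 1
    for (size_t d : reshapeDims({1, 1280, 1, 1}, {0, 1280})) std::printf("%zu ", d);
    std::printf("\n");                             // 1 1280
}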
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/fake_quantize_reshape_pooling_test_model_without_constants_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/fake_quantize_reshape_pooling_test_model_without_constants_test.cpp
new file mode 100644 (file)
index 0000000..4cf0f36
--- /dev/null
@@ -0,0 +1,68 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+void FakeQuantizeReshapePoolingTestModelWithoutConstants::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "inputLow"), -128.f / 4.f, "custom");
+    fillData(getLayer(network, "inputHigh"), 127.f / 4.f, "custom");
+    fillData(getLayer(network, "outputLow"), -128.f / 4.f, "custom");
+    fillData(getLayer(network, "outputHigh"), 127.f / 4.f, "custom");
+}
+
+std::string FakeQuantizeReshapePoolingTestModelWithoutConstants::getName() const {
+    return "FakeQuantizeReshapePoolingTestModelWithoutConstants";
+}
+
+bool FakeQuantizeReshapePoolingTestModelWithoutConstants::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer = getLowPrecisionTransformer(params);
+    transformer.transform(network);
+    return true;
+}
+
+std::string FakeQuantizeReshapePoolingTestModelWithoutConstants::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fakeQuantizeParams = {{ "levels", "256" }};
+    std::map<std::string, std::string> power_params = {{"power", "1"}, {"scale", "1"}, {"shift", "0"}};
+    std::map<std::string, std::string> poolingParams = { {"kernel", "7,1"}, { "pool-method", "avg" }, { "strides", "1,1" } };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, // input => inputPower
+        {"1,2", "6,7"}, // inputPower => fakeQuantize
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // Const layers => fakeQuantize
+        {"6,12", "7,13"}, // fakeQuantize => reshape1
+        {"7,14", "8,15"}, // reshape1 => pooling
+        {"8,16", "9,17"}, // pooling => reshape2
+        {"9,18", "10,19"}, // reshape2 => outputPower
+    };
+
+    auto network = CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput(
+        "QuantizationOnWeights", p.inputDimensions[0], p._network_precision)
+        // inputPower: id=1
+        .addLayer("Power", p._network_precision, &power_params, { {p.inputDimensions[0]}, {p.inputDimensions[0]} }, "inputPower")
+        // inputLow: id=2
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, "inputLow")
+        // inputHigh: id=3
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, "inputHigh")
+        // outputLow: id=4
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, "outputLow")
+        // outputHigh: id=5
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, "outputHigh")
+        // fakeQuantize: id=6
+        .addLayer("FakeQuantize", p._network_precision, &fakeQuantizeParams, { {p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}} }, "fakeQuantize")
+        // reshape1: id=7
+        .addLayer("Reshape", p._network_precision, {}, { {{ 1, 1280, 7 }}, {{1, 1280, 7, 1}} }, "reshape1")
+        // pooling: id=8
+        .addLayer("Pooling", p._network_precision, &poolingParams, { {{ 1, 1280, 7, 1 }}, {{1, 1280, 1, 1}} }, "pooling")
+        // reshape2: id=9
+        .addLayer("Reshape", p._network_precision, {}, { {{ 1, 1280, 1, 1 }}, {{1, 1280 }} }, "reshape2")
+        // outputPower: id=10
+        .addLayer("Power", p._network_precision, &power_params, { {{ 1, 1280 }}, {{1, 1280}} }, "outputPower")
+        .finish(&edges);
+    return network;
+}
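
The Pooling layer above averages over the full 7x1 spatial extent, collapsing each channel to its mean (1x1280x7x1 -> 1x1280x1x1). A standalone sketch of that reduction, with a toy channel count standing in for 1280:

#include <cstdio>
#include <vector>

int main() {
    const size_t channels = 2, height = 7, width = 1;
    std::vector<float> x(channels * height * width);
    for (size_t i = 0; i < x.size(); ++i) x[i] = static_cast<float>(i);

    // Average pooling with a kernel covering the full spatial extent.
    for (size_t c = 0; c < channels; ++c) {
        float sum = 0.f;
        for (size_t i = 0; i < height * width; ++i)
            sum += x[c * height * width + i];
        std::printf("channel %zu mean = %g\n", c, sum / (height * width));
    }
}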
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/fake_quantize_reshape_test_model_with_constants_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/fake_quantize_reshape_test_model_with_constants_test.cpp
new file mode 100644 (file)
index 0000000..4d50173
--- /dev/null
@@ -0,0 +1,66 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+void FakeQuantizeReshapeTestModelWithConstants::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "inputLow"), -128.f / 4.f, "custom");
+    fillData(getLayer(network, "inputHigh"), 127.f / 4.f, "custom");
+    fillData(getLayer(network, "outputLow"), -128.f / 4.f, "custom");
+    fillData(getLayer(network, "outputHigh"), 127.f / 4.f, "custom");
+
+    fillDataMy(getLayer(network, "reshapeConst"), { 0, -1 }, "custom");
+}
+
+std::string FakeQuantizeReshapeTestModelWithConstants::getName() const {
+    return "FakeQuantizeReshapeTestModelWithConstants";
+}
+
+bool FakeQuantizeReshapeTestModelWithConstants::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer = getLowPrecisionTransformer(params);
+    transformer.transform(network);
+    return true;
+}
+
+std::string FakeQuantizeReshapeTestModelWithConstants::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fakeQuantizeParams = {{ "levels", "256" }};
+    std::map<std::string, std::string> power_params = {{"power", "1"}, {"scale", "1"}, {"shift", "0"}};
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, // input => inputPower
+        {"1,2", "6,7"}, // inputPower => fakeQuantize
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // Const layers => fakeQuantize
+        {"6,12", "8,14"}, // fakeQuantize => reshape1
+        {"7,13", "8,15"}, // reshapeConst1 => reshape1
+        {"8,16", "9,17"}, // reshape => outputPower
+    };
+
+    auto network = CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput(
+        "QuantizationOnWeights", p.inputDimensions[0], p._network_precision)
+        // inputPower: id=1
+        .addLayer("Power", p._network_precision, &power_params, { {p.inputDimensions[0]}, {p.inputDimensions[0]} }, "inputPower")
+        // inputLow: id=2
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, "inputLow")
+        // inputHigh: id=3
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, "inputHigh")
+        // outputLow: id=4
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, "outputLow")
+        // outputHigh: id=5
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, "outputHigh")
+        // fakeQuantize: id=6
+        .addLayer("FakeQuantize", p._network_precision, &fakeQuantizeParams, { {p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}} }, "fakeQuantize")
+        // reshapeConst: id=7
+        .addLayer("Const", "I32", {}, { {}, {{2}} }, 2 * 4, "reshapeConst")
+        // reshape: id=8
+        .addLayer("Reshape", p._network_precision, {}, { {{ 1, 256, 6, 6 }, {2}}, {{1, 9216}} }, "reshape")
+        // outputPower: id=9
+        .addLayer("Power", p._network_precision, &power_params, { {{ 1, 9216 }}, {{1, 9216}} }, "outputPower")
+        .finish(&edges);
+    return network;
+}
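
The interval used throughout these tests, [-128/4, 127/4] = [-32, 31.75] with 256 levels, corresponds to a quantization step of exactly 0.25, i.e. the INT8 grid scaled by 1/4. A quick standalone check of that arithmetic:

#include <cstdio>

int main() {
    const float low = -128.f / 4.f, high = 127.f / 4.f;
    const int levels = 256;
    const float step = (high - low) / (levels - 1);   // quantization step
    std::printf("interval [%g, %g], step %g\n", low, high, step);  // step 0.25
    // Every representable value is low + k*step for k in [0, 255]:
    std::printf("k=0 -> %g, k=128 -> %g, k=255 -> %g\n",
                low, low + 128 * step, low + 255 * step);
}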
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/fc_and_scaleshifts_on_activations_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/fc_and_scaleshifts_on_activations_test.cpp
new file mode 100644 (file)
index 0000000..89d32e9
--- /dev/null
@@ -0,0 +1,52 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+std::string FullyConnectedAndScaleShiftsOnActivationsTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    std::vector<size_t> const_1_dims = {1000, 2048};
+    std::vector<size_t> const_2_dims = {1000};
+    std::map<std::string, std::string> scale_shift_params = {};
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fc_params = {
+        { "out-size", "1000" }
+    };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, {"1,2", "4,5"}, // ScaleShift
+        {"2,3", "4,6"}, {"3,4", "4,7"}, // Const layers
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput(
+            "FCandScaleShift", p.inputDimensions[0], p._network_precision)
+        .addLayer("ScaleShift", p._network_precision, &scale_shift_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}}, p.inputDimensions[0][1] * type_size, p.inputDimensions[0][1] * type_size)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {const_1_dims}},
+                std::accumulate(const_1_dims.begin(), const_1_dims.end(), 1lu, std::multiplies<size_t>()) * type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {const_2_dims}},
+                std::accumulate(const_2_dims.begin(), const_2_dims.end(), 1lu, std::multiplies<size_t>()) * type_size, 0)
+        .addLayer("FullyConnected", p._network_precision, &fc_params, {{p.inputDimensions[0], const_1_dims, const_2_dims}, {{1, 1000}}})
+        .finish(&edges);
+}
+
+std::string FullyConnectedAndScaleShiftsOnActivationsTestModel::getName() const {
+    return "FullyConnectedAndScaleShiftsOnActivationsTestModel";
+}
+
+bool FullyConnectedAndScaleShiftsOnActivationsTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer = getLowPrecisionTransformer(params);
+    transformer.transform(network);
+    return true;
+}
+
+void FullyConnectedAndScaleShiftsOnActivationsTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "ScaleShift1"), 0.4f, "weights");
+    fillData(getLayer(network, "ScaleShift1"), 0.3f, "biases");
+
+    fillDataWithInitValue(getLayer(network, "Const2"), "custom", 0.2f);
+    fillDataWithInitValue(getLayer(network, "Const3"), "custom", 0.3f);
+}
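
FullyConnected with out-size 1000 multiplies the flattened input by a [1000, 2048] weight matrix and adds a per-output bias, y = W*x + b. A toy standalone sketch of that computation at reduced size:

#include <cstdio>
#include <vector>

int main() {
    const size_t inSize = 4, outSize = 3;             // stands in for 2048 / 1000
    std::vector<float> x(inSize, 1.f);
    std::vector<float> W(outSize * inSize, 0.2f);     // row-major [outSize, inSize]
    std::vector<float> b(outSize, 0.3f);

    for (size_t o = 0; o < outSize; ++o) {
        float acc = b[o];
        for (size_t i = 0; i < inSize; ++i)
            acc += W[o * inSize + i] * x[i];          // y = W*x + b
        std::printf("y[%zu] = %g\n", o, acc);
    }
}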
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/fq_as_output.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/fq_as_output.cpp
new file mode 100644 (file)
index 0000000..7e22b47
--- /dev/null
@@ -0,0 +1,26 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+std::string FakeQuantizeAsOutputTest::getName() const {
+    return "FakeQuantizeAsOutputTest";
+}
+
+bool FakeQuantizeAsOutputTest::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    network.addOutput("FakeQuantize12");
+
+    LowPrecisionTransformer transformer(LowPrecisionTransformer::getAllTransformations(params));
+    transformer.transform(network);
+
+    const auto fq = network.getLayerByName("FakeQuantize12");
+    if (fq == nullptr)
+        THROW_IE_EXCEPTION << "Layer 'FakeQuantize12' should not be transformed";
+
+    return true;
+}
+
+std::unordered_set<std::string> FakeQuantizeAsOutputTest::getNotTransformedLayers() const {
+    return { "Convolution14" };
+}
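
The test hinges on the transformer leaving layers registered as network outputs untouched. A toy sketch of that guard (hypothetical names; the real transformer's bookkeeping is more involved):

#include <cstdio>
#include <string>
#include <unordered_set>
#include <vector>

int main() {
    const std::vector<std::string> layers = {"FakeQuantize12", "Convolution14", "Power15"};
    const std::unordered_set<std::string> networkOutputs = {"FakeQuantize12"};

    for (const auto& name : layers) {
        if (networkOutputs.count(name)) {
            std::printf("%s: kept as-is (network output)\n", name.c_str());
            continue;                          // never fuse or remove output layers
        }
        std::printf("%s: eligible for transformation\n", name.c_str());
    }
}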
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/fq_with_multioutputs.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/fq_with_multioutputs.cpp
new file mode 100644 (file)
index 0000000..5305ce8
--- /dev/null
@@ -0,0 +1,91 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
+std::string FakeQuantizeWithMultiOutputsTest::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(PrecisionTrait<Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(PrecisionTrait<Precision::FP16>::value_type);
+
+    CommonTestUtils::conv_common_params conv =
+            { {1, 1}, {3, 3}, {0, 0}, {0, 0}, {1, 1}, "valid", 1, 32, false, false };
+    std::vector<size_t> convOutShape(p.inputDimensions[0].size());
+    getConvOutShape(p.inputDimensions[0], conv, convOutShape);
+
+    std::vector<size_t> weightsConstInputDims = { 32lu, 32lu, 3lu, 3lu };
+    std::vector<size_t> biasesConvolutionConstDims = { conv.out_c };
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = {
+        {"levels", "256"}
+    };
+    std::map<std::string, std::string> power_params = {
+        {"power", "1"}, {"scale", "1"}, {"shift", "0"}
+    };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, {"1,2", "6,7"}, // Power
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // Const layers
+        {"7,13", "12,18"}, {"8,14", "12,19"}, {"9,15", "12,20"}, {"10,16", "12,21"}, {"11,17", "12,22"}, // Const layers
+        {"6,12", "14,25"},  {"12,23", "14,26"}, // Fake quantize to Conv1
+        {"13,24", "14,27"}, // biases to Conv
+        {"14,28", "15,29"}, // Conv to Power1
+        {"12,23", "16,31"} // FQ to Power2
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput(
+            "QuantizationOnWeights", p.inputDimensions[0], p._network_precision)
+        .addLayer("Power", p._network_precision, &power_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}})
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}}})
+        .addLayer("Const", p._network_precision, &const_params, {{}, {weightsConstInputDims}},
+                std::accumulate(weightsConstInputDims.begin(), weightsConstInputDims.end(), 1lu, std::multiplies<size_t>()) * type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{weightsConstInputDims, {1}, {1}, {1}, {1}}, {{weightsConstInputDims}}})
+        .addLayer("Const", p._network_precision, &const_params, {{}, {biasesConvolutionConstDims}}, type_size * conv.out_c, 0)
+        .convolutionLayer(p._network_precision, {{p.inputDimensions[0], weightsConstInputDims, biasesConvolutionConstDims }, {convOutShape}}, conv)
+        .addLayer("Power", p._network_precision, &power_params, {{convOutShape}, {convOutShape}})
+        .addLayer("Power", p._network_precision, &power_params, {{weightsConstInputDims}, {weightsConstInputDims}})
+        .finish(&edges);
+}
+
+std::string FakeQuantizeWithMultiOutputsTest::getName() const {
+    return "FakeQuantizeWithMultiOutputsTest";
+}
+
+bool FakeQuantizeWithMultiOutputsTest::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer(LowPrecisionTransformer::getAllTransformations(params));
+    transformer.transform(network);
+
+    return true;
+}
+
+std::unordered_set<std::string> FakeQuantizeWithMultiOutputsTest::getNotTransformedLayers() const {
+    return { "Convolution14" };
+}
+
+void FakeQuantizeWithMultiOutputsTest::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "Const2"), 0.0, "custom");
+    fillData(getLayer(network, "Const3"), 127.5, "custom");
+    fillData(getLayer(network, "Const4"), 0.0, "custom");
+    fillData(getLayer(network, "Const5"), 127.5, "custom");
+
+    fillData(getLayer(network, "Const7"), 3.0, "custom");
+
+    fillData(getLayer(network, "Const8"), -1.275 / 2.0, "custom");
+    fillData(getLayer(network, "Const9"), 1.275, "custom");
+    fillData(getLayer(network, "Const10"), -1.275 / 2.0, "custom");
+    fillData(getLayer(network, "Const11"), 1.275, "custom");
+
+    fillData(getLayer(network, "Const13"), 5.0, "custom");
+}
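
The weight interval set above, [-1.275/2, 1.275] = [-0.6375, 1.275], is asymmetric; with 256 levels the step is 1.9125/255 = 0.0075, and zero falls exactly on the grid. A standalone check of that arithmetic:

#include <cstdio>

int main() {
    const float low = -1.275f / 2.f, high = 1.275f;
    const int levels = 256;
    const float step = (high - low) / (levels - 1);
    std::printf("interval [%g, %g], step %g\n", low, high, step);   // 0.0075
    // Zero sits exactly on the grid: (0 - low) / step is an integer.
    std::printf("zero level = %g\n", (0.f - low) / step);           // 85
}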
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/fq_with_two_scale_shifts_as_output.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/fq_with_two_scale_shifts_as_output.cpp
new file mode 100644 (file)
index 0000000..adde055
--- /dev/null
@@ -0,0 +1,67 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+
+std::string FakeQuantizeWithTwoScaleShiftsAsOutput::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(PrecisionTrait<Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(PrecisionTrait<Precision::FP16>::value_type);
+
+    std::map<std::string, std::string> scale_shift_params = {};
+
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = {
+        {"levels", "256"}
+    };
+    std::map<std::string, std::string> power_params = {
+        {"power", "1"}, {"scale", "1"}, {"shift", "0"}
+    };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "5,5"}, // input -> fq
+        {"1,1", "5,6"}, {"2,2", "5,7"}, {"3,3", "5,8"}, {"4,4", "5,9"}, // Const layers
+        {"5,10", "6,11"}, {"5,10", "7,13"}, // FQ -> SS
+        {"6,12", "8,15"}, {"7,14", "9,17"} // SS -> Power
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput(
+            "FakeQuantizeWithTwoScaleShiftsAsOutput", p.inputDimensions[0], p._network_precision)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, "inputLow")
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, "inputHigh")
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, "outputLow")
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, "outputHigh")
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}}})
+        .addLayer("ScaleShift", p._network_precision, &scale_shift_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}}, p.inputDimensions[0][1] * type_size, p.inputDimensions[0][1] * type_size)
+        .addLayer("ScaleShift", p._network_precision, &scale_shift_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}}, p.inputDimensions[0][1] * type_size, p.inputDimensions[0][1] * type_size)
+        .addLayer("Power", p._network_precision, &power_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}})
+        .addLayer("Power", p._network_precision, &power_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}})
+        .finish(&edges);
+}
+
+std::string FakeQuantizeWithTwoScaleShiftsAsOutput::getName() const {
+    return "FakeQuantizeWithTwoScaleShiftsAsOutput";
+}
+
+bool FakeQuantizeWithTwoScaleShiftsAsOutput::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer(LowPrecisionTransformer::getAllTransformations(params));
+    transformer.transform(network);
+
+    return true;
+}
+
+void FakeQuantizeWithTwoScaleShiftsAsOutput::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "inputLow"), 0.f, "custom");
+    fillData(getLayer(network, "inputHigh"), 5.f, "custom");
+    fillData(getLayer(network, "outputLow"), 0.f, "custom");
+    fillData(getLayer(network, "outputHigh"), 5.f, "custom");
+
+    fillData(getLayer(network, "ScaleShift6"), 3.f, "weights");
+    fillData(getLayer(network, "ScaleShift6"), 3.f, "biases");
+    fillData(getLayer(network, "ScaleShift7"), 1.5f, "weights");
+    fillData(getLayer(network, "ScaleShift7"), 1.5f, "biases");
+}
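
One way a low-precision transformer can handle a ScaleShift that directly consumes a FakeQuantize is to fold the affine op into the FakeQuantize output interval: y = w*FQ(x) + b just remaps [outLow, outHigh] to [w*outLow + b, w*outHigh + b]. A standalone sketch with the two branches filled above (an illustration of the idea, not the transformer's actual code):

#include <cstdio>

int main() {
    const float outLow = 0.f, outHigh = 5.f;   // FQ output interval set in resetTransformation
    struct Branch { const char* name; float w, b; };
    const Branch branches[] = {{"ScaleShift6", 3.f, 3.f}, {"ScaleShift7", 1.5f, 1.5f}};

    for (const Branch& ss : branches) {
        // Folding the affine op into the interval: new bounds = w*old + b.
        std::printf("%s: [%g, %g] -> [%g, %g]\n", ss.name,
                    outLow, outHigh, ss.w * outLow + ss.b, ss.w * outHigh + ss.b);
    }
}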
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/fully_connected_base_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/fully_connected_base_test.cpp
new file mode 100644 (file)
index 0000000..aede2ce
--- /dev/null
@@ -0,0 +1,154 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+FullyConnectedBaseTestModel::FullyConnectedBaseTestModel(const bool addBiasesLayer) : addBiasesLayer(addBiasesLayer) {}
+
+std::string FullyConnectedBaseTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    const size_t inputChannelsCount = p.inputDimensions[0][1];
+    const size_t outputChannelsCount = p.outputDimensions[0][1];
+    std::vector<size_t> weightsConstInputDims = { outputChannelsCount, inputChannelsCount };
+
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = { {"levels", "256"} };
+    std::map<std::string, std::string> fake_quantize_params2 = { {"levels", "255"} };
+    std::map<std::string, std::string> power_params = { {"power", "1"}, {"scale", "1"}, {"shift", "0"} };
+    std::map<std::string, std::string> poolingParams = { {"kernel", "112,112"}, {"pool-method", "max"} };
+    std::map<std::string, std::string> reshapeParams = { };
+    std::map<std::string, std::string> fullyConnectedParams = { {"out-size", std::to_string(p.outputDimensions[0][1])} };
+
+    std::vector<size_t> biasesConstDims = { p.outputDimensions[0][1] };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, {"1,2", "6,7"}, // Power
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // Const layers
+        {"6,12", "7,13"},  // FakeQuantize to Pooling
+        {"7,14", "8,15"},  // Pooling to Reshape
+        {"8,16", "15,28"},  // Reshape to FullyConnected
+        {"9,17", "14,22"}, {"10,18", "14,23"}, {"11,19", "14,24"}, {"12,20", "14,25"}, {"13,21", "14,26"}, // Const layers
+        {"14,27", "15,29"}
+    };
+
+    if (addBiasesLayer) {
+        edges.push_back({ "16,32", "15,30" }); // biases to Conv
+    }
+
+    const std::vector<std::vector<size_t>> fullyConnectedDims = addBiasesLayer ?
+        std::vector<std::vector<size_t>>({ {p.inputDimensions[0][0], p.inputDimensions[0][1]}, weightsConstInputDims, biasesConstDims }) :
+        std::vector<std::vector<size_t>>({ {p.inputDimensions[0][0], p.inputDimensions[0][1]}, weightsConstInputDims });
+
+    std::vector<size_t> quantizationParamsDims(p.inputDimensions[0].size(), 1);
+    quantizationParamsDims[1] = inputChannelsCount;
+
+    CommonTestUtils::DefaultNetBuilder builder = CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput(
+        "FullyConnectedBaseTestModel", p.inputDimensions[0], p._network_precision)
+        // 1
+        .addLayer("Power", p._network_precision, &power_params, { {p.inputDimensions[0]}, {p.inputDimensions[0]} })
+        // 2
+        .addLayer("Const", p._network_precision, &const_params, { {}, {quantizationParamsDims} }, inputChannelsCount * type_size, "dataInputLowConst")
+        // 3
+        .addLayer("Const", p._network_precision, &const_params, { {}, {quantizationParamsDims} }, inputChannelsCount * type_size, "dataInputHighConst")
+        // 4
+        .addLayer("Const", p._network_precision, &const_params, { {}, {quantizationParamsDims} }, inputChannelsCount * type_size, "dataOutputLowConst")
+        // 5
+        .addLayer("Const", p._network_precision, &const_params, { {}, {quantizationParamsDims} }, inputChannelsCount * type_size, "dataOutputHighConst")
+        // 6
+        .addLayer("FakeQuantize",
+            p._network_precision,
+            &fake_quantize_params,
+            { {p.inputDimensions[0], quantizationParamsDims, quantizationParamsDims, quantizationParamsDims, quantizationParamsDims}, {{p.inputDimensions[0]}} },
+            "fakeQuantize")
+        // 7
+        .addLayer("Pooling", p._network_precision, &poolingParams, { {p.inputDimensions[0]}, {{1, 32, 1, 1}} }, "pooling")
+        // 8
+        .addLayer("Reshape", p._network_precision, &reshapeParams, { {{1, 32, 1, 1}}, {{1, 32}} }, "reshape")
+        // 9
+        .addLayer("Const", p._network_precision, &const_params, { {}, {weightsConstInputDims} },
+            std::accumulate(weightsConstInputDims.begin(), weightsConstInputDims.end(), 1lu, std::multiplies<size_t>()) * type_size, "weigthsConst")
+        // 10
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, "weigthsInputLowConst")
+        // 11
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, "weigthsInputHighConst")
+        // 12
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, "weigthsOutputLowConst")
+        // 13
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, "weigthsOutputHighConst")
+        // 14
+        .addLayer(
+            "FakeQuantize",
+            p._network_precision,
+            &fake_quantize_params,
+            { {weightsConstInputDims, {1}, {1}, {1}, {1}}, {{weightsConstInputDims}} },
+            "fakeQuantizeOnWeights")
+        // 15
+        .addLayer("FullyConnected", p._network_precision, &fullyConnectedParams, { fullyConnectedDims, {p.outputDimensions[0]} }, "fullyConnected");
+
+    if (addBiasesLayer) {
+        // 16
+        builder.addLayer("Const", p._network_precision, &const_params, { {}, {biasesConstDims} }, type_size * biasesConstDims[0], "biasesConst");
+    }
+
+    return builder.finish(&edges);
+}
+
+bool FullyConnectedBaseTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer(LowPrecisionTransformer::getAllTransformations(params));
+    transformer.transform(network);
+    return true;
+}
+
+void FullyConnectedBaseTestModel::resetTransformation(CNNNetwork& network) const {
+    CNNLayerPtr fakeQuantize = CNNNetworkHelper::getLayer(network, "fakeQuantize");
+    const size_t inputChannels = fakeQuantize->outData[0]->getTensorDesc().getDims()[1];
+
+    CNNLayerPtr fullyConnected = CNNNetworkHelper::getLayer(network, "fullyConnected");
+    const size_t outputChannels = fullyConnected->outData[0]->getTensorDesc().getDims()[1];
+
+    // Const on activations
+    std::vector<float> lowValues(inputChannels, 1.0);  // to have shifts
+    std::vector<float> highValues(inputChannels);
+    if (areScalesOnActivationsDifferent()) {
+        for (size_t inputChannel = 0; inputChannel < highValues.size(); ++inputChannel) {
+            highValues[inputChannel] = static_cast<float>(inputChannel);
+        }
+    } else {
+        highValues = std::vector<float>(inputChannels, 255.f);
+    }
+
+    fillData(getLayer(network, "dataInputLowConst"), lowValues, "custom");
+    fillData(getLayer(network, "dataInputHighConst"), highValues, "custom");
+    fillData(getLayer(network, "dataOutputLowConst"), lowValues, "custom");
+    fillData(getLayer(network, "dataOutputHighConst"), highValues, "custom");
+
+    // Const on weights
+    std::vector<float> weights(outputChannels * inputChannels);
+    for (size_t outputChannel = 0ul; outputChannel < outputChannels; ++outputChannel) {
+        for (size_t inputChannel = 0ul; inputChannel < inputChannels; ++inputChannel) {
+            weights[outputChannel * inputChannels + inputChannel] = inputChannel;
+        }
+    }
+    fillData(getLayer(network, "weigthsConst"), weights, "custom");
+
+    fillData(getLayer(network, "weigthsInputLowConst"), -128.f, "custom");
+    fillData(getLayer(network, "weigthsInputHighConst"), 127.f, "custom");
+    fillData(getLayer(network, "weigthsOutputLowConst"), -128.f, "custom");
+    fillData(getLayer(network, "weigthsOutputHighConst"), 127.f, "custom");
+
+    if (addBiasesLayer) {
+        std::vector<float> biases(outputChannels);
+        for (size_t i = 0ul; i < outputChannels; ++i) {
+            biases[i] = static_cast<float>(i);
+        }
+        fillData(getLayer(network, "biasesConst"), biases, "custom");
+    }
+}
+
+bool FullyConnectedBaseTestModel::areScalesOnActivationsDifferent() const {
+    return false;
+}
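
With weights[oc*inC + ic] = ic as filled above, every FullyConnected output channel computes the same weighted sum over the input, differing only by the per-channel bias i. A standalone check of what resetTransformation sets up, at toy sizes:

#include <cstdio>
#include <vector>

int main() {
    const size_t inC = 4, outC = 3;
    std::vector<float> x = {1.f, 1.f, 1.f, 1.f};

    for (size_t oc = 0; oc < outC; ++oc) {
        float acc = static_cast<float>(oc);            // biases[i] = i
        for (size_t ic = 0; ic < inC; ++ic)
            acc += static_cast<float>(ic) * x[ic];     // weights = ic, as filled above
        std::printf("y[%zu] = %g\n", oc, acc);         // 6 + oc for all-ones input
    }
}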
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/low_precision_transformer_single_layer_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/low_precision_transformer_single_layer_tests.hpp
new file mode 100644 (file)
index 0000000..2464694
--- /dev/null
@@ -0,0 +1,1868 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <gtest/gtest.h>
+#include <string>
+#include <unordered_map>
+
+#include <ie_core.hpp>
+#include "cpp_interfaces/impl/ie_plugin_internal.hpp"
+
+#include "common/low_precision_tests_utils.hpp"
+#include "low_precision_transformations/transformer.hpp"
+#include "low_precision_transformations/convolution.hpp"
+#include "low_precision_transformations/network_helper.hpp"
+#include "low_precision_transformations/eltwise.hpp"
+
+#include "tests_common.hpp"
+#include "ir_gen_helper.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace InferenceEngine::details;
+using namespace single_layer_tests;
+
+inline void fillDataMy(CNNLayerPtr layer, const std::vector<int>& values, const std::string& blobName = "") {
+    if (layer == nullptr) {
+        THROW_IE_EXCEPTION << "layer is nullptr";
+    }
+    if (blobName.empty() && (layer->blobs.size() != 1)) {
+        THROW_IE_EXCEPTION << "layer has several blobs, but no blob name was specified";
+    }
+
+    Blob::Ptr blob = blobName.empty() ? layer->blobs.begin()->second : layer->blobs[blobName];
+    if (blob->size() != values.size()) {
+        THROW_IE_EXCEPTION << "values size " << values.size() << " does not match blob size " << blob->size();
+    }
+
+    int* buffer = blob->buffer().as<int*>();
+    for (size_t i = 0; i < blob->size(); i++) {
+        buffer[i] = values[i];
+    }
+}
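As used further down in this header, the helper writes small integer constants; for instance, the FullyConnected and GEMM models fill the two-element I32 reshape constant with {1, -1} (keep the batch dimension, flatten the rest):

    // Usage as in the models below: write the I32 reshape constant {1, -1}.
    fillDataMy(getLayer(network, "reshapeConst"), { 1, -1 });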
+
+/**
+ * @brief Base class for test models.
+ */
+class SingleLayerTransformationsTestParams;
+
+class SingleLayerTestModel {
+public:
+    typedef std::shared_ptr<SingleLayerTestModel> Ptr;
+
+    LowPrecisionTransformations getLowPrecisionTransformations(const LayerTransformation::Params& params) const;
+    LowPrecisionTransformer getLowPrecisionTransformer(const LayerTransformation::Params& params) const;
+
+    virtual std::string getModel(SingleLayerTransformationsTestParams& p) const = 0;
+    virtual std::string getName() const = 0;
+
+    virtual void initInput(Blob::Ptr input) const {}
+    virtual float getZeroThreshold() const {
+        return 1e-7f;
+    }
+    virtual bool transform(CNNNetwork& network, LayerTransformation::Params& params) const = 0;
+    virtual void resetTransformation(CNNNetwork& network) const = 0;
+    virtual std::unordered_set<std::string> getNotTransformedLayers() const {
+        return {};
+    }
+
+    virtual float getThreshold(const std::string& device_name, const Precision precision, LayerTransformation::Params& params) const {
+        return precision == Precision::FP16 ? 0.0005f : 0.0003f;
+    }
+
+protected:
+    // TODO: pass as parameter: 22403
+    const std::string device_name = "CPU";
+};
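To make the contract concrete, a hypothetical minimal implementation would look like the sketch below (MinimalTestModel is illustrative, not part of the suite); every concrete model that follows fits this shape:

    // Hypothetical sketch of the SingleLayerTestModel contract.
    class MinimalTestModel : public SingleLayerTestModel {
    public:
        std::string getName() const override { return "MinimalTestModel"; }
        std::string getModel(SingleLayerTransformationsTestParams& p) const override {
            return "";  // a real model returns an IR XML string, as the models below do
        }
        bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override {
            LowPrecisionTransformer transformer(LowPrecisionTransformer::getAllTransformations(params));
            transformer.transform(network);
            return true;  // true tells the harness to validate the transformed network
        }
        void resetTransformation(CNNNetwork& network) const override {}
    };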
+
+class SingleLayerTransformationsTestParams {
+public:
+    SingleLayerTransformationsTestParams(
+        const std::string& name,
+        SingleLayerTestModel::Ptr model,
+        const std::vector<std::vector<size_t>>& inputDimensions,
+        const std::vector<std::vector<size_t>>& outputDimensions,
+        const std::string& network_precision = "FP32") :
+        device_name(name),
+        model(model),
+        inputDimensions(inputDimensions),
+        outputDimensions(outputDimensions),
+        _network_precision(network_precision) {}
+
+    const std::string device_name;
+    SingleLayerTestModel::Ptr model;
+    const std::vector<std::vector<size_t>> inputDimensions;
+    const std::vector<std::vector<size_t>> outputDimensions;
+    std::string _network_precision;
+
+    static std::string getLowPrecisionTransformerSingleLayerTestName(testing::TestParamInfo<SingleLayerTransformationsTestParams> p) {
+        return p.param.model->getName();
+    }
+};
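These params are consumed by a parameterized gtest fixture elsewhere in the suite; a sketch of how the name generator above would typically be wired up (the fixture name, model choice, and dimensions here are assumptions):

    // Sketch, assuming a TEST_P fixture named SingleLayerTransformationsTest;
    // the model and dimensions are illustrative.
    INSTANTIATE_TEST_CASE_P(
        SingleLayerTransformations,
        SingleLayerTransformationsTest,
        ::testing::Values(SingleLayerTransformationsTestParams(
            "CPU",
            SingleLayerTestModel::Ptr(new PoolingTestModel()),
            { { 1, 32, 149, 149 } },    // input dimensions
            { { 1, 32, 74, 74 } })),    // output dimensions (assumed)
        SingleLayerTransformationsTestParams::getLowPrecisionTransformerSingleLayerTestName);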
+
+class FullyConnectedAndScaleShiftsOnActivationsTestModel : public SingleLayerTestModel {
+public:
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+};
+
+class ResampleTestModel : public SingleLayerTestModel {
+public:
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+};
+
+class ConvolutionAndQuantizeOnActivationsAndWeightsBaseTestModel : public SingleLayerTestModel {
+public:
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+};
+
+class ConvolutionAndQuantizeOnSignedActivationsAndWeightsPositiveTestModel : public ConvolutionAndQuantizeOnActivationsAndWeightsBaseTestModel {
+public:
+    void resetTransformation(CNNNetwork& network) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+};
+
+class ConvolutionAndQuantizeOnSignedActivationsAndWeightsNegativeTestModel : public ConvolutionAndQuantizeOnActivationsAndWeightsBaseTestModel {
+public:
+    void resetTransformation(CNNNetwork& network) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+};
+
+class ConvolutionAndQuantizeOnUnsignedActivationsAndWeightsTestModel : public ConvolutionAndQuantizeOnActivationsAndWeightsBaseTestModel {
+public:
+    void resetTransformation(CNNNetwork& network) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+};
+
+class ConvolutionAndQuantizeOnSignedActivationsAndInvertedWeightsTestModel : public ConvolutionAndQuantizeOnActivationsAndWeightsBaseTestModel {
+public:
+    void resetTransformation(CNNNetwork& network) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+};
+
+class FakeQuantizeReshapePoolingTestModelWithConstants : public SingleLayerTestModel {
+public:
+    void resetTransformation(CNNNetwork& network) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+};
+
+class FakeQuantizeReshapePoolingTestModelWithoutConstants : public SingleLayerTestModel {
+public:
+    void resetTransformation(CNNNetwork& network) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+};
+
+class FakeQuantizeReshapeTestModelWithConstants : public SingleLayerTestModel {
+public:
+    void resetTransformation(CNNNetwork& network) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+};
+
+class ScaleShiftToConvolutionTestModel : public SingleLayerTestModel {
+public:
+    void resetTransformation(CNNNetwork& network) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+};
+
+class ScaleShiftToConvolutionAfterNotConcatIgnoreTestModel : public SingleLayerTestModel {
+public:
+    void resetTransformation(CNNNetwork& network) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+};
+
+class ScaleShiftToConvolutionAfterFakeQuantizeIgnoreTestModel : public SingleLayerTestModel {
+public:
+    void resetTransformation(CNNNetwork& network) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+};
+
+class ScaleShiftToConvolutionAfterConcatTestModel : public SingleLayerTestModel {
+public:
+    ScaleShiftToConvolutionAfterConcatTestModel(const bool scaleShiftIsOutput);
+    void resetTransformation(CNNNetwork& network) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+
+private:
+    const bool scaleShiftIsOutput;
+};
+
+class FullyConnectedAndQuantizeTestModel : public SingleLayerTestModel {
+public:
+    void resetTransformation(CNNNetwork& network) const override {
+        fillData(getLayer(network, "dataConstInputLow"), 63.5, "custom");
+        fillData(getLayer(network, "dataConstInputHigh"), 127.0, "custom");
+        fillData(getLayer(network, "dataConstOutputLow"), 63.5, "custom");
+        fillData(getLayer(network, "dataConstOutputHigh"), 127.0, "custom");
+
+        //fillData(getLayer(network, "weightsConstInput"), 3.0, "custom");
+        fillDataWithInitValue(getLayer(network, "weightsConstInput"), "custom", 1.234);
+
+        fillData(getLayer(network, "weightsConstInputLow"), -1.275 / 2.0, "custom");
+        fillData(getLayer(network, "weightsConstInputHigh"), 1.275, "custom");
+        fillData(getLayer(network, "weightsConstOutputLow"), -1.275 / 2.0, "custom");
+        fillData(getLayer(network, "weightsConstOutputHigh"), 1.275, "custom");
+
+        //fillData(getLayer(network, "biasesConvolutionConst"), 5.0, "custom");
+        fillDataWithInitValue(getLayer(network, "biasesConvolutionConst"), "custom", 2.123);
+
+        fillDataMy(getLayer(network, "reshapeConst"), { 1, -1 });
+    }
+
+    std::string getName() const override {
+        return "FullyConnectedAndQuantizeTestModel";
+    }
+
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override {
+        LowPrecisionTransformer transformer(LowPrecisionTransformer::getAllTransformations(params));
+        transformer.transform(network);
+
+        const std::vector<CNNLayerPtr> layers = CNNNetSortTopologically(network);
+
+        const CNNLayerPtr convolution = layers[layers.size() - 2];
+        if ((convolution->type != "FullyConnected") || (convolution->name != "fullyconnected_original")) {
+            THROW_IE_EXCEPTION << "unexpected layer type '" << convolution->type << "' or name '" << convolution->name << "'";
+        }
+
+        const CNNLayerPtr dequantizationScaleShift = layers[layers.size() - 1];
+        if ((dequantizationScaleShift->type != "ScaleShift") || (dequantizationScaleShift->name != "fullyconnected")) {
+            THROW_IE_EXCEPTION << "unexpected layer type '" << dequantizationScaleShift->type << "' or name '" << dequantizationScaleShift->name << "'";
+        }
+
+        return true;
+    }
+
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override {
+        std::string layers = layersTemplate;
+        auto inputSizes = p.inputDimensions.at(0);
+        auto inBatch = inputSizes.at(0);
+        auto inChannel = inputSizes.at(1);
+        auto inX = inputSizes.at(2);
+        auto inY = inputSizes.at(3);
+
+        REPLACE_WITH_NUM(layers, "IN_BATCH", inBatch);
+        REPLACE_WITH_NUM(layers, "IN_CHANNEL", inChannel);
+        REPLACE_WITH_NUM(layers, "IN_X", inX);
+        REPLACE_WITH_NUM(layers, "IN_Y", inY);
+        REPLACE_WITH_NUM(layers, "RESHAPED_CH_X_Y", inChannel * inX * inY);
+
+        auto outputSizes = p.outputDimensions.at(0);
+        auto outBatch = outputSizes.at(0);
+        auto outChannel = outputSizes.at(1);
+        REPLACE_WITH_NUM(layers, "OUT_BATCH", outBatch);
+        REPLACE_WITH_NUM(layers, "OUT_CHANNEL", outChannel);
+
+        size_t totalOffset = 0;
+
+        REPLACE_WITH_NUM(layers, "DATA_CONST_INPUT_LOW_OFFSET", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "DATA_CONST_INPUT_HIGH_OFFSET", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "DATA_CONST_OUTPUT_LOW_OFFSET", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "DATA_CONST_OUTPUT_HIGH_OFFSET", totalOffset);
+        totalOffset += 4;
+
+        REPLACE_WITH_NUM(layers, "WEIGHTS_CONST_INPUT_OFFSET", totalOffset);
+        totalOffset += inChannel * outChannel * 4;
+        REPLACE_WITH_NUM(layers, "WEIGHTS_CONST_INPUT_LOW_OFFSET", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "WEIGHTS_CONST_INPUT_HIGH_OFFSET", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "WEIGHTS_CONST_OUTPUT_LOW_OFFSET", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "WEIGHTS_CONST_OUTPUT_HIGH_OFFSET", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "RESHAPE_CONST_OFFSET", totalOffset);
+        totalOffset += 8;
+        REPLACE_WITH_NUM(layers, "FULLYCONNECTED_BIASES_CONST_OFFSET", totalOffset);
+        totalOffset += 128;
+
+        const std::string model = IRTemplateGenerator::getIRTemplate(
+            "TransformationsTest",
+            p.inputDimensions,
+            "FP32",
+            layers,
+            edgesTemplate,
+            6);
+
+        return model;
+    }
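Note the offset bookkeeping above: every scalar FP32 constant advances totalOffset by 4 bytes, while the weights blob advances by inChannel * outChannel * 4 bytes. The template below hard-codes size="4096" for weightsConstInput and size="128" for the biases, which matches a presumed 32x32 FP32 weight matrix (32 * 32 * 4 = 4096) and 32 FP32 biases (32 * 4 = 128); with other dimensions the REPLACE_WITH_NUM offsets would still stay consistent, but those two fixed size attributes would not.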
+
+private:
+    const std::string layersTemplate = R"V0G0N(
+        <layer name="inputPower" type="Power" precision="FP32" id="1">
+            <power_data power="1" scale="1" shift="0"/>
+            <input>
+                               <port id="0">
+                                       <dim>IN_BATCH</dim>
+                                       <dim>IN_CHANNEL</dim>
+                                       <dim>IN_X</dim>
+                                       <dim>IN_Y</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>IN_BATCH</dim>
+                                       <dim>IN_CHANNEL</dim>
+                                       <dim>IN_X</dim>
+                                       <dim>IN_Y</dim>
+                               </port>
+                       </output>
+        </layer>
+
+
+        <layer id="9" name="dataConstInputLow" precision="FP32" type="Const">
+                       <output>
+                               <port id="1"/>
+                       </output>
+                       <blobs>
+                               <custom offset="DATA_CONST_INPUT_LOW_OFFSET" size="4"/>
+                       </blobs>
+               </layer>
+               <layer id="10" name="dataConstInputHigh" precision="FP32" type="Const">
+                       <output>
+                               <port id="1"/>
+                       </output>
+                       <blobs>
+                               <custom offset="DATA_CONST_INPUT_HIGH_OFFSET" size="4"/>
+                       </blobs>
+               </layer>
+               <layer id="11" name="dataConstOutputLow" precision="FP32" type="Const">
+                       <output>
+                               <port id="1"/>
+                       </output>
+                       <blobs>
+                               <custom offset="DATA_CONST_OUTPUT_LOW_OFFSET" size="4"/>
+                       </blobs>
+               </layer>
+               <layer id="12" name="dataConstOutputHigh" precision="FP32" type="Const">
+                       <output>
+                               <port id="1"/>
+                       </output>
+                       <blobs>
+                               <custom offset="DATA_CONST_OUTPUT_HIGH_OFFSET" size="4"/>
+                       </blobs>
+               </layer>
+               <layer id="13" name="dataFakeQuantize" precision="FP32" type="FakeQuantize">
+                       <data levels="256"/>
+                       <input>
+                               <port id="0">
+                                       <dim>IN_BATCH</dim>
+                                       <dim>IN_CHANNEL</dim>
+                                       <dim>IN_X</dim>
+                                       <dim>IN_Y</dim>
+                               </port>
+                               <port id="1"/>
+                               <port id="2"/>
+                               <port id="3"/>
+                               <port id="4"/>
+                       </input>
+                       <output>
+                               <port id="5">
+                                       <dim>IN_BATCH</dim>
+                                       <dim>IN_CHANNEL</dim>
+                                       <dim>IN_X</dim>
+                                       <dim>IN_Y</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="14" name="weightsConstInput" precision="FP32" type="Const">
+                       <output>
+                               <port id="1">
+                                       <dim>OUT_CHANNEL</dim>
+                                       <dim>IN_CHANNEL</dim>
+                               </port>
+                       </output>
+                       <blobs>
+                               <custom offset="WEIGHTS_CONST_INPUT_OFFSET" size="4096"/>
+                       </blobs>
+               </layer>
+               <layer id="15" name="weightsConstInputLow" precision="FP32" type="Const">
+                       <output>
+                               <port id="1"/>
+                       </output>
+                       <blobs>
+                               <custom offset="WEIGHTS_CONST_INPUT_LOW_OFFSET" size="4"/>
+                       </blobs>
+               </layer>
+               <layer id="16" name="weightsConstInputHigh" precision="FP32" type="Const">
+                       <output>
+                               <port id="1"/>
+                       </output>
+                       <blobs>
+                               <custom offset="WEIGHTS_CONST_INPUT_HIGH_OFFSET" size="4"/>
+                       </blobs>
+               </layer>
+               <layer id="17" name="weightsConstOutputLow" precision="FP32" type="Const">
+                       <output>
+                               <port id="1"/>
+                       </output>
+                       <blobs>
+                               <custom offset="WEIGHTS_CONST_OUTPUT_LOW_OFFSET" size="4"/>
+                       </blobs>
+               </layer>
+               <layer id="18" name="weightsConstOutputHigh" precision="FP32" type="Const">
+                       <output>
+                               <port id="1"/>
+                       </output>
+                       <blobs>
+                               <custom offset="WEIGHTS_CONST_OUTPUT_HIGH_OFFSET" size="4"/>
+                       </blobs>
+               </layer>
+               <layer id="19" name="weightsFakeQuantize" precision="FP32" type="FakeQuantize">
+                       <data levels="256"/>
+                       <input>
+                               <port id="0">
+                                       <dim>OUT_CHANNEL</dim>
+                                       <dim>IN_CHANNEL</dim>
+                               </port>
+                               <port id="1"/>
+                               <port id="2"/>
+                               <port id="3"/>
+                               <port id="4"/>
+                       </input>
+                       <output>
+                               <port id="5">
+                                       <dim>OUT_CHANNEL</dim>
+                                       <dim>IN_CHANNEL</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="20" name="biasesConvolutionConst" precision="FP32" type="Const">
+                       <output>
+                               <port id="1">
+                                       <dim>OUT_CHANNEL</dim>
+                               </port>
+                       </output>
+                       <blobs>
+                               <custom offset="FULLYCONNECTED_BIASES_CONST_OFFSET" size="128"/>
+                       </blobs>
+               </layer>
+        <layer id="211" name="reshapeConst" precision="I32" type="Const">
+                       <output>
+                               <port id="1">
+                                       <dim>2</dim>
+                               </port>
+                       </output>
+            <blobs>
+                               <custom offset="RESHAPE_CONST_OFFSET" size="8"/>
+                       </blobs>
+               </layer>
+        <layer id="21" name="reshape" precision="FP32" type="Reshape">
+                       <input>
+                               <port id="0">
+                                       <dim>IN_BATCH</dim>
+                                       <dim>IN_CHANNEL</dim>
+                                       <dim>IN_X</dim>
+                                       <dim>IN_Y</dim>
+                               </port>
+                               <port id="1">
+                                       <dim>2</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="2">
+                                       <dim>IN_BATCH</dim>
+                                       <dim>RESHAPED_CH_X_Y</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="22" name="fullyconnected" precision="FP32" type="FullyConnected">
+                       <data out-size="OUT_CHANNEL"/>
+                       <input>
+                               <port id="0">
+                                       <dim>IN_BATCH</dim>
+                                       <dim>RESHAPED_CH_X_Y</dim>
+                               </port>
+                               <port id="1">
+                                       <dim>OUT_CHANNEL</dim>
+                                       <dim>IN_CHANNEL</dim>
+                               </port>
+                               <port id="2">
+                                       <dim>OUT_CHANNEL</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="3">
+                                       <dim>OUT_BATCH</dim>
+                                       <dim>OUT_CHANNEL</dim>
+                               </port>
+                       </output>
+               </layer>
+        )V0G0N";
+
+    const std::string edgesTemplate = R"V0G0N(
+        <edge from-layer="0"  from-port="0" to-layer="1" to-port="0"/>
+        <edge from-layer="1"  from-port="1" to-layer="13" to-port="0"/>
+
+        <!-- data FakeQuantize -->
+        <edge from-layer="9"  from-port="1" to-layer="13" to-port="1"/>
+        <edge from-layer="10"  from-port="1" to-layer="13" to-port="2"/>
+        <edge from-layer="11"  from-port="1" to-layer="13" to-port="3"/>
+        <edge from-layer="12"  from-port="1" to-layer="13" to-port="4"/>
+
+        <!-- weights FakeQuantize -->
+        <edge from-layer="14"  from-port="1" to-layer="19" to-port="0"/>
+        <edge from-layer="15"  from-port="1" to-layer="19" to-port="1"/>
+        <edge from-layer="16"  from-port="1" to-layer="19" to-port="2"/>
+        <edge from-layer="17" from-port="1" to-layer="19" to-port="3"/>
+        <edge from-layer="18" from-port="1" to-layer="19" to-port="4"/>
+
+        <edge from-layer="13" from-port="5" to-layer="21" to-port="0"/>
+        <edge from-layer="211" from-port="1" to-layer="21" to-port="1"/>
+        <edge from-layer="21" from-port="2" to-layer="22" to-port="0"/>
+
+        <!-- FullyConnected -->
+        <edge from-layer="21" from-port="2" to-layer="22" to-port="0"/>
+        <edge from-layer="19" from-port="5" to-layer="22" to-port="1"/>
+        <edge from-layer="20" from-port="1" to-layer="22" to-port="2"/>
+        )V0G0N";
+};
+
+class GemmAndQuantizeTestModel : public SingleLayerTestModel {
+public:
+    void resetTransformation(CNNNetwork& network) const override {
+        fillData(getLayer(network, "dataConstInputLow"), 63.5, "custom");
+        fillData(getLayer(network, "dataConstInputHigh"), 127.0, "custom");
+        fillData(getLayer(network, "dataConstOutputLow"), 63.5, "custom");
+        fillData(getLayer(network, "dataConstOutputHigh"), 127.0, "custom");
+
+        //fillData(getLayer(network, "weightsConstInput"), 3.0, "custom");
+        fillDataWithInitValue(getLayer(network, "weightsConstInput"), "custom", 1.234);
+
+        fillData(getLayer(network, "weightsConstInputLow"), -1.275 / 2.0, "custom");
+        fillData(getLayer(network, "weightsConstInputHigh"), 1.275, "custom");
+        fillData(getLayer(network, "weightsConstOutputLow"), -1.275 / 2.0, "custom");
+        fillData(getLayer(network, "weightsConstOutputHigh"), 1.275, "custom");
+
+        fillDataMy(getLayer(network, "reshapeConst"), { 1, -1 });
+    }
+
+    std::string getName() const override {
+        return "GemmAndQuantizeTestModel";
+    }
+
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override {
+        LowPrecisionTransformer transformer(LowPrecisionTransformer::getAllTransformations(params));
+        transformer.transform(network);
+
+        const std::vector<CNNLayerPtr> layers = CNNNetSortTopologically(network);
+
+        const CNNLayerPtr convolution = layers[layers.size() - 2];
+        if ((convolution->type != "GEMM") || (convolution->name != "gemm_original")) {
+            THROW_IE_EXCEPTION << "unexpected layer type '" << convolution->type << "' or name '" << convolution->name << "'";
+        }
+
+        const CNNLayerPtr dequantizationScaleShift = layers[layers.size() - 1];
+        if ((dequantizationScaleShift->type != "ScaleShift") || (dequantizationScaleShift->name != "gemm")) {
+            THROW_IE_EXCEPTION << "unexpected layer type '" << dequantizationScaleShift->type << "' or name '" << dequantizationScaleShift->name << "'";
+        }
+
+        return true;
+    }
+
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override {
+        std::string layers = layersTemplate;
+        size_t totalOffset = 0;
+
+        REPLACE_WITH_NUM(layers, "DATA_CONST_INPUT_LOW_OFFSET", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "DATA_CONST_INPUT_HIGH_OFFSET", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "DATA_CONST_OUTPUT_LOW_OFFSET", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "DATA_CONST_OUTPUT_HIGH_OFFSET", totalOffset);
+        totalOffset += 4;
+
+        REPLACE_WITH_NUM(layers, "WEIGHTS_CONST_INPUT_OFFSET", totalOffset);
+        totalOffset += 32 * 32 * 4;
+        REPLACE_WITH_NUM(layers, "WEIGHTS_CONST_INPUT_LOW_OFFSET", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "WEIGHTS_CONST_INPUT_HIGH_OFFSET", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "WEIGHTS_CONST_OUTPUT_LOW_OFFSET", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "WEIGHTS_CONST_OUTPUT_HIGH_OFFSET", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "RESHAPE_CONST_OFFSET", totalOffset);
+        totalOffset += 8;
+
+        const std::string model = IRTemplateGenerator::getIRTemplate(
+            "TransformationsTest",
+            { 1, 32, 149, 149 },
+            "FP32",
+            layers,
+            edgesTemplate,
+            6);
+
+        return model;
+    }
+
+private:
+    const std::string layersTemplate = R"V0G0N(
+        <layer name="inputPower" type="Power" precision="FP32" id="1">
+            <power_data power="1" scale="1" shift="0"/>
+            <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>32</dim>
+                                       <dim>149</dim>
+                                       <dim>149</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>1</dim>
+                                       <dim>32</dim>
+                                       <dim>149</dim>
+                                       <dim>149</dim>
+                               </port>
+                       </output>
+        </layer>
+
+
+        <layer id="9" name="dataConstInputLow" precision="FP32" type="Const">
+                       <output>
+                               <port id="1"/>
+                       </output>
+                       <blobs>
+                               <custom offset="DATA_CONST_INPUT_LOW_OFFSET" size="4"/>
+                       </blobs>
+               </layer>
+               <layer id="10" name="dataConstInputHigh" precision="FP32" type="Const">
+                       <output>
+                               <port id="1"/>
+                       </output>
+                       <blobs>
+                               <custom offset="DATA_CONST_INPUT_HIGH_OFFSET" size="4"/>
+                       </blobs>
+               </layer>
+               <layer id="11" name="dataConstOutputLow" precision="FP32" type="Const">
+                       <output>
+                               <port id="1"/>
+                       </output>
+                       <blobs>
+                               <custom offset="DATA_CONST_OUTPUT_LOW_OFFSET" size="4"/>
+                       </blobs>
+               </layer>
+               <layer id="12" name="dataConstOutputHigh" precision="FP32" type="Const">
+                       <output>
+                               <port id="1"/>
+                       </output>
+                       <blobs>
+                               <custom offset="DATA_CONST_OUTPUT_HIGH_OFFSET" size="4"/>
+                       </blobs>
+               </layer>
+               <layer id="13" name="dataFakeQuantize" precision="FP32" type="FakeQuantize">
+                       <data levels="256"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>32</dim>
+                                       <dim>149</dim>
+                                       <dim>149</dim>
+                               </port>
+                               <port id="1"/>
+                               <port id="2"/>
+                               <port id="3"/>
+                               <port id="4"/>
+                       </input>
+                       <output>
+                               <port id="5">
+                                       <dim>1</dim>
+                                       <dim>32</dim>
+                                       <dim>149</dim>
+                                       <dim>149</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="14" name="weightsConstInput" precision="FP32" type="Const">
+                       <output>
+                               <port id="1">
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+                       <blobs>
+                               <custom offset="WEIGHTS_CONST_INPUT_OFFSET" size="4096"/>
+                       </blobs>
+               </layer>
+               <layer id="15" name="weightsConstInputLow" precision="FP32" type="Const">
+                       <output>
+                               <port id="1"/>
+                       </output>
+                       <blobs>
+                               <custom offset="WEIGHTS_CONST_INPUT_LOW_OFFSET" size="4"/>
+                       </blobs>
+               </layer>
+               <layer id="16" name="weightsConstInputHigh" precision="FP32" type="Const">
+                       <output>
+                               <port id="1"/>
+                       </output>
+                       <blobs>
+                               <custom offset="WEIGHTS_CONST_INPUT_HIGH_OFFSET" size="4"/>
+                       </blobs>
+               </layer>
+               <layer id="17" name="weightsConstOutputLow" precision="FP32" type="Const">
+                       <output>
+                               <port id="1"/>
+                       </output>
+                       <blobs>
+                               <custom offset="WEIGHTS_CONST_OUTPUT_LOW_OFFSET" size="4"/>
+                       </blobs>
+               </layer>
+               <layer id="18" name="weightsConstOutputHigh" precision="FP32" type="Const">
+                       <output>
+                               <port id="1"/>
+                       </output>
+                       <blobs>
+                               <custom offset="WEIGHTS_CONST_OUTPUT_HIGH_OFFSET" size="4"/>
+                       </blobs>
+               </layer>
+               <layer id="19" name="weightsFakeQuantize" precision="FP32" type="FakeQuantize">
+                       <data levels="256"/>
+                       <input>
+                               <port id="0">
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                               <port id="1"/>
+                               <port id="2"/>
+                               <port id="3"/>
+                               <port id="4"/>
+                       </input>
+                       <output>
+                               <port id="5">
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="211" name="reshapeConst" precision="I32" type="Const">
+                       <output>
+                               <port id="1">
+                                       <dim>2</dim>
+                               </port>
+                       </output>
+            <blobs>
+                               <custom offset="RESHAPE_CONST_OFFSET" size="8"/>
+                       </blobs>
+               </layer>
+        <layer id="21" name="reshape" precision="FP32" type="Reshape">
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>32</dim>
+                                       <dim>149</dim>
+                                       <dim>149</dim>
+                               </port>
+                               <port id="1">
+                                       <dim>2</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="2">
+                                       <dim>1</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="22" name="gemm" precision="FP32" type="GEMM">
+                       <data transpose_a="0" transpose_b="1"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>32</dim>
+                               </port>
+                               <port id="1">
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="3">
+                                       <dim>1</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+               </layer>
+        )V0G0N";
+
+    const std::string edgesTemplate = R"V0G0N(
+        <edge from-layer="0"  from-port="0" to-layer="1" to-port="0"/>
+        <edge from-layer="1"  from-port="1" to-layer="13" to-port="0"/>
+
+        <!-- data FakeQuantize -->
+        <edge from-layer="9"  from-port="1" to-layer="13" to-port="1"/>
+        <edge from-layer="10"  from-port="1" to-layer="13" to-port="2"/>
+        <edge from-layer="11"  from-port="1" to-layer="13" to-port="3"/>
+        <edge from-layer="12"  from-port="1" to-layer="13" to-port="4"/>
+
+        <!-- weights FakeQuantize -->
+        <edge from-layer="14"  from-port="1" to-layer="19" to-port="0"/>
+        <edge from-layer="15"  from-port="1" to-layer="19" to-port="1"/>
+        <edge from-layer="16"  from-port="1" to-layer="19" to-port="2"/>
+        <edge from-layer="17" from-port="1" to-layer="19" to-port="3"/>
+        <edge from-layer="18" from-port="1" to-layer="19" to-port="4"/>
+
+        <edge from-layer="13" from-port="5" to-layer="21" to-port="0"/>
+        <edge from-layer="211" from-port="1" to-layer="21" to-port="1"/>
+        <edge from-layer="21" from-port="2" to-layer="22" to-port="0"/>
+
+        <!-- FullyConnected -->
+        <edge from-layer="21" from-port="2" to-layer="22" to-port="0"/>
+        <edge from-layer="19" from-port="5" to-layer="22" to-port="1"/>
+        )V0G0N";
+};
+
+class PoolingTestModel : public SingleLayerTestModel {
+public:
+    void resetTransformation(CNNNetwork& network) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+};
+
+class ConvolutionAndQuantizeOnWeightsWithMultiOutputIntervalsTestModel : public SingleLayerTestModel {
+public:
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+};
+
+class ConvolutionAndQuantizeOnWeightsWithoutConstTransformationTestModel : public SingleLayerTestModel {
+public:
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+};
+
+// Base test class to manually quantize weights and biases
+class QuantizationOnWeightsTestModel : public SingleLayerTestModel {
+public:
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    std::unordered_set<std::string> getNotTransformedLayers() const override;
+    void resetTransformation(CNNNetwork& network) const override;
+};
+
+class QuantizationOnInvertedWeightsTestModel : public SingleLayerTestModel {
+public:
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    std::unordered_set<std::string> getNotTransformedLayers() const override;
+    void resetTransformation(CNNNetwork& network) const override;
+};
+
+class FakeQuantizeAsOutputTest : public QuantizationOnWeightsTestModel {
+public:
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    std::unordered_set<std::string> getNotTransformedLayers() const override;
+};
+
+class FakeQuantizeWithMultiOutputsTest : public SingleLayerTestModel {
+public:
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    std::unordered_set<std::string> getNotTransformedLayers() const override;
+    void resetTransformation(CNNNetwork& network) const override;
+};
+
+class FakeQuantizeWithTwoScaleShiftsAsOutput : public SingleLayerTestModel {
+public:
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+};
+
+class ConvolutionAndPoolingAndQuantizeOnActivationsTestModel : public SingleLayerTestModel {
+public:
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+};
+
+class ConvolutionAndQuantizeOnActivationsTestModel : public SingleLayerTestModel {
+public:
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+};
+
+class ConvolutionAndDequantizationScaleShiftsOnActivationsTestModel : public SingleLayerTestModel {
+public:
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+};
+
+// base test type for FullyConnected test
+class FullyConnectedBaseTestModel : public SingleLayerTestModel {
+public:
+    FullyConnectedBaseTestModel(const bool addBiasesLayer = true);
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+protected:
+    virtual bool areScalesOnActivationsDifferent() const;
+    const bool addBiasesLayer;
+};
+
+// base test type for convolution test
+class ConvolutionBaseTestModel : public SingleLayerTestModel {
+public:
+    ConvolutionBaseTestModel(const bool addBiasesLayer = true);
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+protected:
+    virtual size_t getGroupsCount(SingleLayerTransformationsTestParams& p) const;
+    virtual bool areScalesOnActivationsDifferent() const;
+    const bool addBiasesLayer;
+};
+
+class ConvolutionDepthwiseTestModel : public ConvolutionBaseTestModel {
+public:
+    std::string getName() const override;
+protected:
+    size_t getGroupsCount(SingleLayerTransformationsTestParams& p) const override;
+    bool areScalesOnActivationsDifferent() const override;
+};
+
+class ConvolutionGroupedTestModel : public ConvolutionBaseTestModel {
+public:
+    std::string getName() const override;
+    void initInput(Blob::Ptr input) const override;
+protected:
+    size_t getGroupsCount(SingleLayerTransformationsTestParams& p) const override;
+    bool areScalesOnActivationsDifferent() const override;
+};
+
+class UpdateBiasesConvolutionTestModel : public ConvolutionBaseTestModel {
+public:
+    UpdateBiasesConvolutionTestModel(const bool addBiasesLayer = false);
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void initInput(Blob::Ptr input) const override;
+};
+
+class UpdateBiasesFullyConnectedTestModel : public FullyConnectedBaseTestModel {
+public:
+    UpdateBiasesFullyConnectedTestModel(const bool addBiasesLayer = false);
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void initInput(Blob::Ptr input) const override;
+};
+
+class EltwiseTestModel : public SingleLayerTestModel {
+public:
+    EltwiseTestModel(
+        const bool cpuSpecific,
+        const std::string& operation,
+        const bool signedIntervals,
+        const size_t minLevels = 2ul,
+        const bool addPooling = true) :
+        SingleLayerTestModel(),
+        cpuSpecific(cpuSpecific),
+        operation(operation),
+        signedIntervals(signedIntervals),
+        minLevels(minLevels),
+        addPooling(addPooling) {}
+
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+
+private:
+    const bool cpuSpecific;
+    const std::string operation;
+    const bool signedIntervals;
+    const size_t minLevels;
+    const bool addPooling;
+};
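For illustration, instantiating one of these models follows directly from the constructor; the argument values below are hypothetical:

    // Hypothetical construction: a CPU-specific signed "sum" eltwise model,
    // with the default minLevels (2) and pooling enabled.
    SingleLayerTestModel::Ptr model = std::make_shared<EltwiseTestModel>(true, "sum", true);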
+
+class EltwiseFqWithChildrenTestModel : public SingleLayerTestModel {
+public:
+    EltwiseFqWithChildrenTestModel(
+        const bool cpuSpecific,
+        const std::string& operation,
+        const bool signedIntervals,
+        const size_t minLevels = 2ul,
+        const bool addPooling = true) :
+        SingleLayerTestModel(),
+        cpuSpecific(cpuSpecific),
+        operation(operation),
+        signedIntervals(signedIntervals),
+        minLevels(minLevels),
+        addPooling(addPooling) {}
+
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+
+private:
+    const bool cpuSpecific;
+    const std::string operation;
+    const bool signedIntervals;
+    const size_t minLevels;
+    const bool addPooling;
+};
+
+class EltwiseWithPoolingTestModel : public SingleLayerTestModel {
+public:
+    EltwiseWithPoolingTestModel(
+        const bool cpuSpecific,
+        const std::string& operation,
+        const bool signedIntervals,
+        const size_t minLevels = 2ul) :
+        SingleLayerTestModel(),
+        cpuSpecific(cpuSpecific),
+        operation(operation),
+        signedIntervals(signedIntervals),
+        minLevels(minLevels) {}
+
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+
+private:
+    const bool cpuSpecific;
+    const std::string operation;
+    const bool signedIntervals;
+    const size_t minLevels;
+};
+
+class EltwiseBroadcastTestModel : public SingleLayerTestModel {
+public:
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+};
+
+class EltwiseCpuTestModel : public SingleLayerTestModel {
+public:
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override {
+        std::string layers = layersTemplate;
+        // TODO: the offsets below are hard-coded for the 1x3x299x299 input and 3x3x3x3 weights
+
+        size_t totalOffset = 0;
+
+        REPLACE_WITH_NUM(layers, "DATA_CONST_INPUT_LOW_OFFSET_1", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "DATA_CONST_INPUT_HIGHT_OFFSET_1", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "DATA_CONST_OUTPUT_LOW_OFFSET_1", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "DATA_CONST_OUTPUT_HIGH_OFFSET_1", totalOffset);
+        totalOffset += 4;
+
+        REPLACE_WITH_NUM(layers, "DATA_CONST_INPUT_LOW_OFFSET_3", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "DATA_CONST_INPUT_HIGHT_OFFSET_3", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "DATA_CONST_OUTPUT_LOW_OFFSET_3", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "DATA_CONST_OUTPUT_HIGH_OFFSET_3", totalOffset);
+        totalOffset += 4;
+
+        REPLACE_WITH_NUM(layers, "WEIGHTS_CONST_INPUT_OFFSET", totalOffset);
+        totalOffset += 3 * 3 * 3 * 3 * 4;
+        REPLACE_WITH_NUM(layers, "WEIGHTS_CONST_INPUT_SIZE", 3 * 3 * 3 * 3 * 4);
+
+        REPLACE_WITH_NUM(layers, "WEIGHTS_CONST_INPUT_LOW_OFFSET", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "WEIGHTS_CONST_INPUT_HIGHT_OFFSET", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "WEIGHTS_CONST_OUTPUT_LOW_OFFSET", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "WEIGHTS_CONST_OUTPUT_HIGH_OFFSET", totalOffset);
+        totalOffset += 4;
+
+        REPLACE_WITH_NUM(layers, "BIASES_CONST_OFFSET", totalOffset);
+        totalOffset += 3 * 4;
+        REPLACE_WITH_NUM(layers, "BIASES_CONST_SIZE", 3 * 4);
+
+        REPLACE_WITH_NUM(layers, "DATA_CONST_INPUT_LOW_OFFSET_4", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "DATA_CONST_INPUT_HIGHT_OFFSET_4", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "DATA_CONST_OUTPUT_LOW_OFFSET_4", totalOffset);
+        totalOffset += 4;
+        REPLACE_WITH_NUM(layers, "DATA_CONST_OUTPUT_HIGH_OFFSET_4", totalOffset);
+        totalOffset += 4;
+
+        REPLACE_WITH_NUM(layers, "DEQUANTIZE_SCALESHIFT_WEIGHTS_OFFSET", totalOffset);
+        totalOffset += 12;
+        REPLACE_WITH_NUM(layers, "DEQUANTIZE_SCALESHIFT_BIASES_OFFSET", totalOffset);
+        totalOffset += 12;
+
+        const std::string model = IRTemplateGenerator::getIRTemplate(
+            "TransformationsTest",
+            { 1, 3, 299, 299 },
+            "FP32",
+            layers,
+            edgesTemplate,
+            6);
+
+        return model;
+    }
+
+    std::string getName() const override {
+        return "EltwiseCpuTestModel";
+    }
+
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override {
+        LowPrecisionTransformer transformer = getLowPrecisionTransformer(params);
+        transformer.transform(network);
+
+        // TODO: interval validation is not implemented yet, so skip it by returning false
+        return false;
+    }
+
+    void resetTransformation(CNNNetwork& network) const override {
+        fillData(getLayer(network, "branch1/dataConstInputLow1"), 255.0 / 200.0, "custom");
+        fillData(getLayer(network, "branch1/dataConstInputHigh1"), 255.0 / 100.0, "custom");
+        fillData(getLayer(network, "branch1/dataConstOutputLow1"), 255.0 / 200.0, "custom");
+        fillData(getLayer(network, "branch1/dataConstOutputHigh1"), 255.0 / 100.0, "custom");
+
+        fillData(getLayer(network, "branch2/dataConstInputLow3"), 255.0 / 200.0, "custom");
+        fillData(getLayer(network, "branch2/dataConstInputHigh3"), 255.0 / 100.0, "custom");
+        fillData(getLayer(network, "branch2/dataConstOutputLow3"), 255.0 / 200.0, "custom");
+        fillData(getLayer(network, "branch2/dataConstOutputHigh3"), 255.0 / 100.0, "custom");
+
+        fillData(getLayer(network, "branch2/weightsConstInput"), 0.0, "custom");
+        fillData(getLayer(network, "branch2/weightsConstInputLow"), 0.0, "custom");
+        fillData(getLayer(network, "branch2/weightsConstInputHigh"), 255.0 / 200.0, "custom");
+        fillData(getLayer(network, "branch2/weightsConstOutputLow"), 0.0, "custom");
+        fillData(getLayer(network, "branch2/weightsConstOutputHigh"), 255.0 / 200.0, "custom");
+
+        fillData(getLayer(network, "branch2/biasesConst"), { 1.0, 2.0, 3.0 });
+
+        fillData(getLayer(network, "branch2/dataConstInputLow4"), 255.0 / 800.0, "custom");
+        fillData(getLayer(network, "branch2/dataConstInputHigh4"), 255.0 / 400.0, "custom");
+        fillData(getLayer(network, "branch2/dataConstOutputLow4"), 255.0 / 800.0, "custom");
+        fillData(getLayer(network, "branch2/dataConstOutputHigh4"), 255.0 / 400.0, "custom");
+    }
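(For reference: 255.0 / 200.0 = 1.275 and 255.0 / 100.0 = 2.55, so both branches quantize their activations over [1.275, 2.55]; the post-convolution ranges on branch2 use [255.0 / 800.0, 255.0 / 400.0] = [0.31875, 0.6375], exactly a quarter of that interval.)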
+
+private:
+    const std::string layersTemplate = R"V0G0N(
+        <layer name="branch1/dataConstInputLow1" type="Const" precision="FP32" id="102">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="DATA_CONST_INPUT_LOW_OFFSET_1" size="4"/>
+            </blobs>
+        </layer>
+        <layer name="branch1/dataConstInputHigh1" type="Const" precision="FP32" id="103">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="DATA_CONST_INPUT_HIGHT_OFFSET_1" size="4"/>
+            </blobs>
+        </layer>
+
+        <layer name="branch1/dataConstOutputLow1" type="Const" precision="FP32" id="104">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="DATA_CONST_OUTPUT_LOW_OFFSET_1" size="4"/>
+            </blobs>
+        </layer>
+        <layer name="branch1/dataConstOutputHigh1" type="Const" precision="FP32" id="105">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="DATA_CONST_OUTPUT_HIGH_OFFSET_1" size="4"/>
+            </blobs>
+        </layer>
+
+        <layer name="branch1/dataFakeQuantize1" type="FakeQuantize" precision="FP32" id="106">
+            <data levels="256" />
+            <input>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>299</dim>
+                    <dim>299</dim>
+                </port>
+                <port id="1">
+                    <dim>1</dim>
+                </port>
+                <port id="2">
+                    <dim>1</dim>
+                </port>
+                <port id="3">
+                    <dim>1</dim>
+                </port>
+                <port id="4">
+                    <dim>1</dim>
+                </port>
+            </input>
+            <output>
+                <port id="5">
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>299</dim>
+                    <dim>299</dim>
+                </port>
+            </output>
+        </layer>
+
+        <layer name="branch2/dataConstInputLow3" type="Const" precision="FP32" id="207">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="DATA_CONST_INPUT_LOW_OFFSET_3" size="4"/>
+            </blobs>
+        </layer>
+        <layer name="branch2/dataConstInputHigh3" type="Const" precision="FP32" id="208">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="DATA_CONST_INPUT_HIGHT_OFFSET_3" size="4"/>
+            </blobs>
+        </layer>
+
+        <layer name="branch2/dataConstOutputLow3" type="Const" precision="FP32" id="209">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="DATA_CONST_OUTPUT_LOW_OFFSET_3" size="4"/>
+            </blobs>
+        </layer>
+        <layer name="branch2/dataConstOutputHigh3" type="Const" precision="FP32" id="210">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="DATA_CONST_OUTPUT_HIGH_OFFSET_3" size="4"/>
+            </blobs>
+        </layer>
+
+
+        <layer name="branch2/dataFakeQuantize3" type="FakeQuantize" precision="FP32" id="211">
+            <data levels="256" />
+            <input>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>299</dim>
+                    <dim>299</dim>
+                </port>
+                <port id="1">
+                    <dim>1</dim>
+                </port>
+                <port id="2">
+                    <dim>1</dim>
+                </port>
+                <port id="3">
+                    <dim>1</dim>
+                </port>
+                <port id="4">
+                    <dim>1</dim>
+                </port>
+            </input>
+            <output>
+                <port id="5">
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>299</dim>
+                    <dim>299</dim>
+                </port>
+            </output>
+        </layer>
+
+        <layer name="branch2/weightsConstInput" type="Const" precision="FP32" id="212">
+            <output>
+                <port id="0">
+                    <dim>3</dim>
+                    <dim>3</dim>
+                    <dim>3</dim>
+                    <dim>3</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="WEIGHTS_CONST_INPUT_OFFSET" size="WEIGHTS_CONST_INPUT_SIZE"/>
+            </blobs>
+        </layer>
+        <layer name="branch2/weightsConstInputLow" type="Const" precision="FP32" id="213">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="WEIGHTS_CONST_INPUT_LOW_OFFSET" size="4"/>
+            </blobs>
+        </layer>
+        <layer name="branch2/weightsConstInputHigh" type="Const" precision="FP32" id="214">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="WEIGHTS_CONST_INPUT_HIGHT_OFFSET" size="4"/>
+            </blobs>
+        </layer>
+
+        <layer name="branch2/weightsConstOutputLow" type="Const" precision="FP32" id="215">
+            <output>
+            <port id="0">
+                <dim>1</dim>
+            </port>
+            </output>
+            <blobs>
+                <custom offset="WEIGHTS_CONST_OUTPUT_LOW_OFFSET" size="4"/>
+            </blobs>
+        </layer>
+        <layer name="branch2/weightsConstOutputHigh" type="Const" precision="FP32" id="216">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="WEIGHTS_CONST_OUTPUT_HIGH_OFFSET" size="4"/>
+            </blobs>
+        </layer>
+
+        <layer name="branch2/weightsFakeQuantize" type="FakeQuantize" precision="FP32" id="218">
+            <data levels="256" />
+            <input>
+                <port id="0">
+                    <dim>3</dim>
+                    <dim>3</dim>
+                    <dim>3</dim>
+                    <dim>3</dim>
+                </port>
+                <port id="1">
+                    <dim>1</dim>
+                </port>
+                <port id="2">
+                    <dim>1</dim>
+                </port>
+                <port id="3">
+                    <dim>1</dim>
+                </port>
+                <port id="4">
+                    <dim>1</dim>
+                </port>
+            </input>
+            <output>
+                <port id="5">
+                    <dim>3</dim>
+                    <dim>3</dim>
+                    <dim>3</dim>
+                    <dim>3</dim>
+                </port>
+            </output>
+        </layer>
+
+        <layer name="branch2/biasesConst" type="Const" precision="FP32" id="219">
+            <output>
+                <port id="0">
+                    <dim>3</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="BIASES_CONST_OFFSET" size="BIASES_CONST_SIZE"/>
+            </blobs>
+        </layer>
+
+        <layer name="branch2/convolution" precision="FP32" type="Convolution" id="220">
+                       <data dilations="1,1" group="1" kernel="3,3" output="3" pads_begin="1,1" pads_end="1,1" strides="1,1"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>299</dim>
+                                       <dim>299</dim>
+                               </port>
+                               <port id="1">
+                                       <dim>3</dim>
+                                       <dim>3</dim>
+                                       <dim>3</dim>
+                                       <dim>3</dim>
+                               </port>
+                               <port id="2">
+                                       <dim>3</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="3">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>299</dim>
+                                       <dim>299</dim>
+                               </port>
+                       </output>
+               </layer>
+
+        <layer name="branch2/dataConstInputLow4" type="Const" precision="FP32" id="222">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="DATA_CONST_INPUT_LOW_OFFSET_4" size="4"/>
+            </blobs>
+        </layer>
+        <layer name="branch2/dataConstInputHigh4" type="Const" precision="FP32" id="223">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="DATA_CONST_INPUT_HIGHT_OFFSET_4" size="4"/>
+            </blobs>
+        </layer>
+
+        <layer name="branch2/dataConstOutputLow4" type="Const" precision="FP32" id="224">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="DATA_CONST_OUTPUT_LOW_OFFSET_4" size="4"/>
+            </blobs>
+        </layer>
+        <layer name="branch2/dataConstOutputHigh4" type="Const" precision="FP32" id="225">
+            <output>
+                <port id="0">
+                    <dim>1</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="DATA_CONST_OUTPUT_HIGH_OFFSET_4" size="4"/>
+            </blobs>
+        </layer>
+
+        <layer name="branch2/dataFakeQuantize4" type="FakeQuantize" precision="FP32" id="226">
+            <data levels="256" />
+            <input>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>299</dim>
+                    <dim>299</dim>
+                </port>
+                <port id="1">
+                    <dim>1</dim>
+                </port>
+                <port id="2">
+                    <dim>1</dim>
+                </port>
+                <port id="3">
+                    <dim>1</dim>
+                </port>
+                <port id="4">
+                    <dim>1</dim>
+                </port>
+            </input>
+            <output>
+                <port id="5">
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>299</dim>
+                    <dim>299</dim>
+                </port>
+            </output>
+        </layer>
+
+        <layer name="branch2/eltwise" type="Eltwise" precision="FP32" id="227">
+            <data operation="sum"/>
+            <input>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>299</dim>
+                    <dim>299</dim>
+                </port>
+                <port id="1">
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>299</dim>
+                    <dim>299</dim>
+                </port>
+            </input>
+            <output>
+                <port id="2">
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>299</dim>
+                    <dim>299</dim>
+                </port>
+            </output>
+        </layer>
+
+        <layer name="outputPower" type="Power" precision="FP32" id="300">
+            <power_data power="1" scale="1" shift="0"/>
+            <input>
+                <port id="0">
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>299</dim>
+                    <dim>299</dim>
+                </port>
+            </input>
+            <output>
+                <port id="1">
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>299</dim>
+                    <dim>299</dim>
+                </port>
+            </output>
+        </layer>
+
+        )V0G0N";
+
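+    // Wiring: branch 1 feeds FakeQuantize (id 106), whose output goes both to branch 2
+    // (FakeQuantize 211 -> Convolution 220 -> FakeQuantize 226) and directly to Eltwise 227;
+    // the Eltwise sum is consumed by the output Power layer (id 300).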
+    const std::string edgesTemplate = R"V0G0N(
+        <!-- branch 1 -->
+
+        <edge from-layer="0" from-port="0" to-layer="106" to-port="0"/>
+        <edge from-layer="102" from-port="0" to-layer="106" to-port="1"/>
+        <edge from-layer="103" from-port="0" to-layer="106" to-port="2"/>
+        <edge from-layer="104" from-port="0" to-layer="106" to-port="3"/>
+        <edge from-layer="105" from-port="0" to-layer="106" to-port="4"/>
+        <edge from-layer="106" from-port="5" to-layer="211" to-port="0"/>
+        <edge from-layer="106" from-port="5" to-layer="227" to-port="0"/>
+
+        <!-- branch 2 -->
+
+        <!-- FakeQuantize on activations -->
+        <edge from-layer="207" from-port="0" to-layer="211" to-port="1"/>
+        <edge from-layer="208" from-port="0" to-layer="211" to-port="2"/>
+        <edge from-layer="209" from-port="0" to-layer="211" to-port="3"/>
+        <edge from-layer="210" from-port="0" to-layer="211" to-port="4"/>
+        <edge from-layer="211" from-port="5" to-layer="220" to-port="0"/>
+
+        <!-- FakeQuantize on weights -->
+        <edge from-layer="212" from-port="0" to-layer="218" to-port="0"/>
+        <edge from-layer="213" from-port="0" to-layer="218" to-port="1"/>
+        <edge from-layer="214" from-port="0" to-layer="218" to-port="2"/>
+        <edge from-layer="215" from-port="0" to-layer="218" to-port="3"/>
+        <edge from-layer="216" from-port="0" to-layer="218" to-port="4"/>
+        <edge from-layer="218" from-port="5" to-layer="220" to-port="1"/>
+
+        <!-- Const on biases -->
+        <edge from-layer="219" from-port="0" to-layer="220" to-port="2"/>
+
+        <!-- Convolution -->
+        <edge from-layer="220" from-port="3" to-layer="226" to-port="0"/>
+
+        <!-- FakeQuantize on activations -->
+        <edge from-layer="222" from-port="0" to-layer="226" to-port="1"/>
+        <edge from-layer="223" from-port="0" to-layer="226" to-port="2"/>
+        <edge from-layer="224" from-port="0" to-layer="226" to-port="3"/>
+        <edge from-layer="225" from-port="0" to-layer="226" to-port="4"/>
+        <edge from-layer="226" from-port="5" to-layer="227" to-port="1"/>
+
+        <!-- Eltwise -->
+        <edge from-layer="227" from-port="2" to-layer="300" to-port="0"/>
+        )V0G0N";
+
+    const std::map<std::string, std::vector<size_t>> dimensions = {
+        { "in1", { 299, 299, 3, 1 } },
+        { "in2", { 299, 299, 3, 1 } }
+    };
+};
+
+class ConcatTestModel : public SingleLayerTestModel {
+public:
+    ConcatTestModel(
+        const bool signedIntervals,
+        const bool symmetricInterval = true,
+        const bool multiChannel = true,
+        const std::vector<size_t>& constInputDimentions = { 1 });
+
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+    float getThreshold(const std::string& device_name, const Precision precision, LayerTransformation::Params& params) const override;
+private:
+    const bool signedIntervals;
+    const bool symmetricInterval;
+    const bool multiChannel;
+    const std::vector<size_t> constInputDimentions;
+};
+
+class ConcatMultiChannelTestModel : public SingleLayerTestModel {
+public:
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+};
+
+// TODO: remove, not used
+class ConcatMultiBranchTestModel : public SingleLayerTestModel {
+public:
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+
+    const static std::string layersTemplate;
+private:
+
+    const std::string edgesTemplate = R"V0G0N(
+        <!-- branch 1 -->
+
+        <edge from-layer="0" from-port="0" to-layer="106" to-port="0"/>
+        <edge from-layer="102" from-port="0" to-layer="106" to-port="1"/>
+        <edge from-layer="103" from-port="0" to-layer="106" to-port="2"/>
+        <edge from-layer="104" from-port="0" to-layer="106" to-port="3"/>
+        <edge from-layer="105" from-port="0" to-layer="106" to-port="4"/>
+        <edge from-layer="106" from-port="5" to-layer="113" to-port="0"/>
+
+        <edge from-layer="1" from-port="0" to-layer="111" to-port="0"/>
+        <edge from-layer="107" from-port="0" to-layer="111" to-port="1"/>
+        <edge from-layer="108" from-port="0" to-layer="111" to-port="2"/>
+        <edge from-layer="109" from-port="0" to-layer="111" to-port="3"/>
+        <edge from-layer="110" from-port="0" to-layer="111" to-port="4"/>
+        <edge from-layer="111" from-port="5" to-layer="113" to-port="1"/>
+
+        <edge from-layer="113" from-port="2" to-layer="227" to-port="0"/>
+
+        <!-- branch 2 -->
+
+        <!-- FakeQuantize on activations -->
+        <edge from-layer="113" from-port="2" to-layer="211" to-port="0"/>
+        <edge from-layer="207" from-port="0" to-layer="211" to-port="1"/>
+        <edge from-layer="208" from-port="0" to-layer="211" to-port="2"/>
+        <edge from-layer="209" from-port="0" to-layer="211" to-port="3"/>
+        <edge from-layer="210" from-port="0" to-layer="211" to-port="4"/>
+        <edge from-layer="211" from-port="5" to-layer="220" to-port="0"/>
+
+        <!-- FakeQuantize on weights -->
+        <edge from-layer="212" from-port="0" to-layer="218" to-port="0"/>
+        <edge from-layer="213" from-port="0" to-layer="218" to-port="1"/>
+        <edge from-layer="214" from-port="0" to-layer="218" to-port="2"/>
+        <edge from-layer="215" from-port="0" to-layer="218" to-port="3"/>
+        <edge from-layer="216" from-port="0" to-layer="218" to-port="4"/>
+        <edge from-layer="218" from-port="5" to-layer="220" to-port="1"/>
+
+        <!-- Const on biases -->
+        <edge from-layer="219" from-port="0" to-layer="220" to-port="2"/>
+
+        <!-- Convolution -->
+        <edge from-layer="220" from-port="3" to-layer="226" to-port="0"/>
+
+        <!-- FakeQuantize on activations -->
+        <edge from-layer="222" from-port="0" to-layer="226" to-port="1"/>
+        <edge from-layer="223" from-port="0" to-layer="226" to-port="2"/>
+        <edge from-layer="224" from-port="0" to-layer="226" to-port="3"/>
+        <edge from-layer="225" from-port="0" to-layer="226" to-port="4"/>
+        <edge from-layer="226" from-port="5" to-layer="227" to-port="1"/>
+
+        <!-- Concat -->
+        <edge from-layer="227" from-port="2" to-layer="300" to-port="0"/>
+        )V0G0N";
+
+    const std::map<std::string, std::vector<size_t>> dimensions = {
+        { "in1", { 299, 299, 3, 1 } },
+        { "in2", { 299, 299, 3, 1 } }
+    };
+};
+
+class FakeQuantizeAndScaleShiftTestModel : public SingleLayerTestModel {
+public:
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+};
+
+class FakeQuantizeAndActivationTestModel : public SingleLayerTestModel {
+public:
+    FakeQuantizeAndActivationTestModel(const std::vector<std::pair<float, float>>& intervals);
+    void initInput(Blob::Ptr input) const override;
+    float getZeroThreshold() const override;
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+
+private:
+    const std::vector<std::pair<float, float>> intervals;
+};
+
+class ScaleShiftAndFakeQuantizeTestModel : public SingleLayerTestModel {
+public:
+    void initInput(Blob::Ptr input) const override;
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+};
+
+class FakeQuantizeAndActivationWithNegativeScalesTestModel : public SingleLayerTestModel {
+public:
+    void initInput(Blob::Ptr input) const override;
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+};
+
+class FakeQuantizeAndActivationWithNegativeSlopeTestModel : public SingleLayerTestModel {
+public:
+    void initInput(Blob::Ptr input) const override;
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+};
+
+class ConvolutionAndDequantizationScaleShiftAndQuantizeOnActivationsTestModel : public SingleLayerTestModel {
+public:
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+};
+
+class MvnTestModel : public SingleLayerTestModel {
+public:
+    MvnTestModel(const size_t acrossChannels, const size_t normalizeVariance);
+    void initInput(Blob::Ptr input) const override;
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+
+private:
+    const size_t acrossChannels;
+    const size_t normalizeVariance;
+};
+
+class PrecisionSelectionMultibranchPreservedTestModel : public SingleLayerTestModel {
+public:
+    PrecisionSelectionMultibranchPreservedTestModel(const bool signedIntervalOnActivation);
+    void initInput(Blob::Ptr input) const override;
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+
+private:
+    const size_t acrossChannels;
+    const size_t normalizeVariance;
+    const bool signedIntervalOnActivation;
+};
+
+class PrecisionSelectionMultibranchNotPreservedTestModel : public SingleLayerTestModel {
+public:
+    PrecisionSelectionMultibranchNotPreservedTestModel(const bool signedIntervalOnActivation);
+    void initInput(Blob::Ptr input) const override;
+    std::string getModel(SingleLayerTransformationsTestParams& p) const override;
+    std::string getName() const override;
+    bool transform(CNNNetwork& network, LayerTransformation::Params& params) const override;
+    void resetTransformation(CNNNetwork& network) const override;
+
+private:
+    const size_t acrossChannels;
+    const size_t normalizeVariance;
+    const bool signedIntervalOnActivation;
+};
+
+class SingleLayerTransformationsTest : public TestsCommon, public WithParamInterface<SingleLayerTransformationsTestParams> {
+    TBlob<uint8_t>::Ptr generateWeights(const CNNNetwork& network);
+    void checkNetworkWithFakeQuantize(const CNNNetwork& network);
+    void checkNetworkWithQuantize(const CNNNetwork& network);
+    CNNNetwork createNetwork();
+    std::unordered_map<std::string, InferenceEngine::Blob::Ptr> infer(
+            CNNNetwork& network,
+            std::unordered_map<std::string, Blob::Ptr>& inputBlobs,
+            Core& plugin,
+            const std::string& device_name,
+            ExecutableNetwork& executableNetwork,
+            InferRequest& inferRequest);
+
+protected:
+    static void compareInDetails(
+        InferenceEngine::Blob &res,
+        InferenceEngine::Blob &ref,
+        const size_t maxDifferenceCounts,
+        float max_diff = 0.01f);
+    virtual void SetUp();
+};
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/mvn_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/mvn_test.cpp
new file mode 100644 (file)
index 0000000..3a9506e
--- /dev/null
@@ -0,0 +1,80 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+void MvnTestModel::initInput(Blob::Ptr input) const {
+    const size_t dataSize = input->size();
+    std::shared_ptr<float> floatPtr(new float[dataSize], std::default_delete<float[]>());
+
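+    // Fill the input with a repeating 0..255 ramp so the MVN statistics are deterministic.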
+    float value = 0.f;
+    for (size_t i = 0ul; i < dataSize; ++i) {
+        floatPtr.get()[i] = value;
+        value += 1.f;
+        if (value > 255.f) {
+            value = 0.f;
+        }
+    }
+
+    CNNNetworkHelper::fillBlobByFP32(input, floatPtr.get());
+}
+
+MvnTestModel::MvnTestModel(const size_t acrossChannels, const size_t normalizeVariance) :
+    acrossChannels(acrossChannels),
+    normalizeVariance(normalizeVariance) {}
+
+std::string MvnTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16") {
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+    }
+
+    std::map<std::string, std::string> power_params = {{"power", "1"}, {"scale", "1"}, {"shift", "0"}};
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = {{"levels", "256"}};
+    std::map<std::string, std::string> mvn_params = {
+        {"eps", "0.001"},
+        {"across_channels", std::to_string(acrossChannels)},
+        {"normalize_variance", std::to_string(normalizeVariance)}
+    };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, {"1,2", "6,7"}, // power
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // const
+        {"6,12", "7,13"}, {"7,14", "8,15"} // pool, power
+    };
+
+    const std::vector<size_t> dimensions = p.outputDimensions[0];
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput("MvnTestModel", dimensions, p._network_precision)
+        .addLayer("Power", p._network_precision, &power_params, {{dimensions}, {dimensions}})
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{dimensions, {1}, {1}, {1}, {1}}, {{dimensions}}})
+        .addLayer("MVN", p._network_precision, &mvn_params, { {dimensions}, {dimensions} })
+        .addLayer("Power", p._network_precision, &power_params, {{dimensions}, {dimensions}})
+        .finish(&edges);
+}
+
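+// Applies the default set of low precision transformations to the network in place.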
+bool MvnTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer(LowPrecisionTransformer::getAllTransformations(params));
+    transformer.transform(network);
+    return true;
+}
+
+std::string MvnTestModel::getName() const {
+    return
+        "MvnTestModel" +
+        (acrossChannels == 1ul ? std::string("_AcrossChannels") : "") +
+        (normalizeVariance == 1ul ? std::string("_NormalizeVariance") : "");
+}
+
+void MvnTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "Const2"), 0.f, "custom");
+    fillData(getLayer(network, "Const3"), 255.f / 2.f, "custom");
+    fillData(getLayer(network, "Const4"), 0.f, "custom");
+    fillData(getLayer(network, "Const5"), 255.f / 2.f, "custom");
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/pooling_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/pooling_test.cpp
new file mode 100644 (file)
index 0000000..3decd08
--- /dev/null
@@ -0,0 +1,71 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+std::string PoolingTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    CommonTestUtils::pool_common_params pooling = { {1, 1}, {1, 1}, {0, 0}, {0, 0}, "valid", false, true };
+    std::vector<size_t> poolOutShape(p.inputDimensions[0].size());
+    getPoolOutShape(p.inputDimensions[0], pooling, poolOutShape);
+
+    std::map<std::string, std::string> power_params = {
+        {"power", "1"}, {"scale", "1"}, {"shift", "0"}
+    };
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = {
+        {"levels", "256"}
+    };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, {"1,2", "6,7"}, // power
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // const
+        {"6,12", "7,13"}, {"7,14", "8,15"} // pool, power
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput(
+            "PoolingTestModel", p.inputDimensions[0], p._network_precision)
+        .addLayer("Power", p._network_precision, &power_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}})
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}}})
+        .poolingLayer(p._network_precision, {{p.inputDimensions[0]}, {poolOutShape}}, pooling)
+        .addLayer("Power", p._network_precision, &power_params, {{poolOutShape}, {poolOutShape}})
+        .finish(&edges);
+}
+
+void PoolingTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "Const2"), 63.5, "custom");
+    fillData(getLayer(network, "Const3"), 127.0, "custom");
+    fillData(getLayer(network, "Const4"), 63.5, "custom");
+    fillData(getLayer(network, "Const5"), 127.0, "custom");
+}
+
+std::string PoolingTestModel::getName() const {
+    return "PoolingTestModel";
+}
+
+bool PoolingTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer(LowPrecisionTransformer::getAllTransformations(params));
+    transformer.transform(network);
+
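+    // Pooling is a precision-preserved operation: when updatePrecisions is set, both the
+    // FakeQuantize output and the Pooling output are expected to switch to U8.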
+    const Precision precision = params.updatePrecisions ? Precision(Precision::U8) : network.getPrecision();
+
+    CNNLayerPtr fakeQuantize = getLayer(network, "FakeQuantize6");
+    if (fakeQuantize->outData[0]->getPrecision() != precision) {
+        THROW_IE_EXCEPTION << fakeQuantize->name << " precision " << precision << " is not correct";
+    }
+
+    CNNLayerPtr pooling = getLayer(network, "Pooling7");
+    if (pooling->outData[0]->getPrecision() != precision) {
+        THROW_IE_EXCEPTION << pooling->name << " precision " << precision << " is not correct";
+    }
+
+    return true;
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/precision_selection_multibranch_not_preserved.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/precision_selection_multibranch_not_preserved.cpp
new file mode 100644 (file)
index 0000000..edde971
--- /dev/null
@@ -0,0 +1,204 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+#include "low_precision_transformations/fully_connected.hpp"
+
+void PrecisionSelectionMultibranchNotPreservedTestModel::initInput(Blob::Ptr input) const {
+    fillData(input, 2.f);
+    return;
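+    // The ramp-based initialization below is currently unreachable; the test uses a constant fill.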
+
+    const size_t dataSize = input->size();
+    std::shared_ptr<float> floatPtr(new float[dataSize], std::default_delete<float[]>());
+
+    const float lowValue = signedIntervalOnActivation ? -128.f : 0.f;
+    const float highValue = signedIntervalOnActivation ? 127.f : 255.f;
+
+    float value = lowValue;
+    for (size_t i = 0ul; i < dataSize; ++i) {
+        floatPtr.get()[i] = value;
+        value += 1.f;
+        if (value > highValue) {
+            value = lowValue;
+        }
+    }
+
+    CNNNetworkHelper::fillBlobByFP32(input, floatPtr.get());
+}
+
+PrecisionSelectionMultibranchNotPreservedTestModel::PrecisionSelectionMultibranchNotPreservedTestModel(const bool signedIntervalOnActivation) :
+    acrossChannels(0),
+    normalizeVariance(0),
+    signedIntervalOnActivation(signedIntervalOnActivation) {}
+
+std::string PrecisionSelectionMultibranchNotPreservedTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    CommonTestUtils::conv_common_params conv =
+            { {1, 1}, {3, 3}, {0, 0}, {0, 0}, {1, 1}, "valid", 1, 32, false, false };
+    std::vector<size_t> convOutShape(p.inputDimensions[0].size());
+    getConvOutShape(p.inputDimensions[0], conv, convOutShape);
+
+    std::vector<size_t> weightsConstInputDims = { 32lu, 32lu, 3lu, 3lu };
+    std::vector<size_t> biasesConvolutionConstDims = { conv.out_c };
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = {
+        {"levels", "256"}
+    };
+    std::map<std::string, std::string> power_params = { {"power", "1"}, {"scale", "1"}, {"shift", "0"}};
+    std::map<std::string, std::string> poolingParams = {
+        {"kernel", "1,1"},
+        {"pool-method", "max"},
+        {"exclude-pad", "false"}
+    };
+    const std::vector<size_t> dimensions = p.outputDimensions[0];
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, {"1,2", "6,7"}, // Power
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // Const layers
+        {"7,13", "12,18"}, {"8,14", "12,19"}, {"9,15", "12,20"}, {"10,16", "12,21"}, {"11,17", "12,22"}, // Const layers
+        {"6,12", "14,25"},  {"12,23", "14,26"}, // Fake quantize to Conv
+        {"13,24", "14,27"}, // biases to Conv
+        {"6,12", "15,29"} // Fake quantize to Pooling
+        //{"14,28", "15,29"} // Fake quantize to Power
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput("QuantizationOnWeights", p.inputDimensions[0], p._network_precision)
+        // 1
+        .addLayer("Power", p._network_precision, &power_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}})
+        // 2
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 3
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 4
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 5
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 6
+        .addLayer(
+            "FakeQuantize",
+            p._network_precision,
+            &fake_quantize_params,
+            {{p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}}},
+            "fakeQuantize")
+        // 7
+        .addLayer("Const", p._network_precision, &const_params, {{}, {weightsConstInputDims}},
+            std::accumulate(weightsConstInputDims.begin(), weightsConstInputDims.end(), 1lu, std::multiplies<size_t>()) * type_size)
+        // 8
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 9
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 10
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 11
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 12
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{weightsConstInputDims, {1}, {1}, {1}, {1}}, {{weightsConstInputDims}}})
+        // 13
+        .addLayer("Const", p._network_precision, &const_params, {{}, {biasesConvolutionConstDims}}, type_size * conv.out_c, 0)
+        // 14
+        .convolutionLayer(
+            p._network_precision,
+            { {p.inputDimensions[0], weightsConstInputDims, biasesConvolutionConstDims },
+            {convOutShape} }, conv, {}, "convolution")
+        // 15
+        .addLayer("Pooling", p._network_precision, &poolingParams, { {dimensions}, {dimensions} })
+        .finish(&edges);
+}
+
+void PrecisionSelectionMultibranchNotPreservedTestModel::resetTransformation(CNNNetwork& network) const {
+    if (signedIntervalOnActivation) {
+        fillData(getLayer(network, "Const2"), -128.f / 4.f, "custom");
+        fillData(getLayer(network, "Const3"), 127.f / 4.f, "custom");
+        fillData(getLayer(network, "Const4"), -128.f / 4.f, "custom");
+        fillData(getLayer(network, "Const5"), 127.f / 4.f, "custom");
+    } else {
+        fillData(getLayer(network, "Const2"), 0.f, "custom");
+        fillData(getLayer(network, "Const3"), 255.f / 4.f, "custom");
+        fillData(getLayer(network, "Const4"), 0.f, "custom");
+        fillData(getLayer(network, "Const5"), 255.f / 4.f, "custom");
+    }
+
+    fillDataWithInitValue(getLayer(network, "Const7"), "custom", 2.f);
+
+    fillData(getLayer(network, "Const8"), -128.f / 4.f, "custom");
+    fillData(getLayer(network, "Const9"), 127.f / 4.f, "custom");
+    fillData(getLayer(network, "Const10"), -128.f / 4.f, "custom");
+    fillData(getLayer(network, "Const11"), 127.f / 4.f, "custom");
+
+    fillDataWithInitValue(getLayer(network, "Const13"), "custom", 1.f);
+}
+
+std::string PrecisionSelectionMultibranchNotPreservedTestModel::getName() const {
+    return std::string("PrecisionSelectionMultibranchNotPreservedTestModel") + (signedIntervalOnActivation ? "_Signed" : "_Unsigned");
+}
+
+bool PrecisionSelectionMultibranchNotPreservedTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    params.weightsToConst = true;
+    params.updatePrecisions = true;
+
+    LowPrecisionTransformer transformer = getLowPrecisionTransformer(params);
+    transformer.transform(network);
+
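+    // When U8 is allowed on activations, the FakeQuantize output must become U8 and the
+    // convolution must be quantized (followed by a dequantization ScaleShift); otherwise
+    // activations stay in FP16/FP32 and the convolution must remain unquantized.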
+    const CNNLayerPtr fakeQuantize = CNNNetworkHelper::getLayer(network, "fakeQuantize");
+    const Precision actualPrecision = fakeQuantize->outData[0]->getTensorDesc().getPrecision();
+
+    if (std::any_of(
+        params.precisionsOnActivations.begin(),
+        params.precisionsOnActivations.end(),
+        [&](const Precision precision) { return precision == Precision::U8; })) {
+        if (params.quantizeOutputs) {
+            if (actualPrecision != Precision::U8) {
+                THROW_IE_EXCEPTION << "expected precision " << Precision::U8 << ", actual " << actualPrecision << "";
+            }
+
+            // The convolution has to be quantized: after the transformation its output
+            // dequantization is represented by a ScaleShift layer.
+            CNNLayerPtr scaleShift = CNNNetworkHelper::getLayer(network, "convolution");
+            if (scaleShift->type != "ScaleShift") {
+                THROW_IE_EXCEPTION << "unexpected last output dequantization layer type " << scaleShift->type << " " << scaleShift->name;
+            }
+
+            if (params.updateBiases) {
+                const Blob::Ptr shiftsBlob = CNNNetworkHelper::getBlob(scaleShfit, "biases");
+                std::shared_ptr<float> shiftsBuffer = CNNNetworkHelper::getFloatData(shiftsBlob);
+                for (size_t i = 0ul; i < shiftsBlob->size(); ++i) {
+                    if (shiftsBuffer.get()[i] != 0.0) {
+                        THROW_IE_EXCEPTION << "unexpected dequantization shift value";
+                    }
+                }
+            }
+        }
+
+        return true;
+    } else {
+        if ((actualPrecision != Precision::FP16) && (actualPrecision != Precision::FP32)) {
+            THROW_IE_EXCEPTION << "unexpected precision " << actualPrecision << "";
+        }
+
+        // The convolution cannot be quantized and must remain a Convolution layer.
+        CNNLayerPtr convolution = CNNNetworkHelper::getLayer(network, "convolution");
+        if (convolution->type != "Convolution") {
+            THROW_IE_EXCEPTION << "unexpected last output dequantization layer type " << convolution->type << " " << convolution->name;
+        }
+
+        const std::vector<CNNLayerPtr> parents = CNNNetworkHelper::getParents(*convolution);
+        if (parents.size() != 3ul) {
+            THROW_IE_EXCEPTION << "unexpected parents count " << parents.size();
+        }
+
+        if (parents[0]->type != "FakeQuantize") {
+            THROW_IE_EXCEPTION << "unexpected parents type " << parents[0]->type;
+        }
+
+        return false;
+    }
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/precision_selection_multibranch_preserved.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/precision_selection_multibranch_preserved.cpp
new file mode 100644 (file)
index 0000000..83eeec4
--- /dev/null
@@ -0,0 +1,131 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+#include "low_precision_transformations/fully_connected.hpp"
+
+void PrecisionSelectionMultibranchPreservedTestModel::initInput(Blob::Ptr input) const {
+    fillData(input, 2.f);
+    return;
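+    // The ramp-based initialization below is unreachable; a constant fill is used instead.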
+
+    const size_t dataSize = input->size();
+    std::shared_ptr<float> floatPtr(new float[dataSize], std::default_delete<float[]>());
+
+    const float lowValue = signedIntervalOnActivation ? -128.f : 0.f;
+    const float highValue = signedIntervalOnActivation ? 127.f : 255.f;
+
+    float value = lowValue;
+    for (size_t i = 0ul; i < dataSize; ++i) {
+        floatPtr.get()[i] = value;
+        value += 1.f;
+        if (value > highValue) {
+            value = lowValue;
+        }
+    }
+
+    CNNNetworkHelper::fillBlobByFP32(input, floatPtr.get());
+}
+
+PrecisionSelectionMultibranchPreservedTestModel::PrecisionSelectionMultibranchPreservedTestModel(const bool signedIntervalOnActivation) :
+    acrossChannels(0),
+    normalizeVariance(0),
+    signedIntervalOnActivation(signedIntervalOnActivation) {}
+
+std::string PrecisionSelectionMultibranchPreservedTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    CommonTestUtils::conv_common_params conv =
+            { {1, 1}, {3, 3}, {0, 0}, {0, 0}, {1, 1}, "valid", 1, 32, false, false };
+    std::vector<size_t> convOutShape(p.inputDimensions[0].size());
+    CommonTestUtils::getConvOutShape(p.inputDimensions[0], conv, convOutShape);
+
+    std::vector<size_t> weightsConstInputDims = { 32lu, 32lu, 3lu, 3lu };
+    std::vector<size_t> biasesConvolutionConstDims = { conv.out_c };
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = {
+        {"levels", "256"}
+    };
+    std::map<std::string, std::string> power_params = { {"power", "1"}, {"scale", "1"}, {"shift", "0"}};
+    std::map<std::string, std::string> poolingParams = {
+        {"kernel", "1,1"},
+        {"pool-method", "max"},
+        {"exclude-pad", "false"}
+    };
+    const std::vector<size_t> dimensions = p.outputDimensions[0];
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, {"1,2", "6,7"}, // Power
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // Const layers
+        {"6,12", "7,13"},  // Fake quantize to Pooling7
+        {"6,12", "8,15"}   // Fake quantize to Pooling8
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput("QuantizationOnWeights", p.inputDimensions[0], p._network_precision)
+        // 1
+        .addLayer("Power", p._network_precision, &power_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}})
+        // 2
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 3
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 4
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 5
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 6
+        .addLayer(
+            "FakeQuantize",
+            p._network_precision,
+            &fake_quantize_params,
+            {{p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}}},
+            "fakeQuantize")
+        // 7
+        .addLayer("Pooling", p._network_precision, &poolingParams, { {dimensions}, {dimensions} })
+        // 8
+        .addLayer("Pooling", p._network_precision, &poolingParams, { {dimensions}, {dimensions} })
+        // 9
+        .finish(&edges);
+}
+
+void PrecisionSelectionMultibranchPreservedTestModel::resetTransformation(CNNNetwork& network) const {
+    if (signedIntervalOnActivation) {
+        fillData(getLayer(network, "Const2"), -128.f / 4.f, "custom");
+        fillData(getLayer(network, "Const3"), 127.f / 4.f, "custom");
+        fillData(getLayer(network, "Const4"), -128.f / 4.f, "custom");
+        fillData(getLayer(network, "Const5"), 127.f / 4.f, "custom");
+    } else {
+        fillData(getLayer(network, "Const2"), 0.f, "custom");
+        fillData(getLayer(network, "Const3"), 255.f / 4.f, "custom");
+        fillData(getLayer(network, "Const4"), 0.f, "custom");
+        fillData(getLayer(network, "Const5"), 255.f / 4.f, "custom");
+    }
+}
+
+std::string PrecisionSelectionMultibranchPreservedTestModel::getName() const {
+    return std::string("PrecisionSelectionMultibranchPreservedTestModel") + (signedIntervalOnActivation ? "_Signed" : "_Unsigned");
+}
+
+bool PrecisionSelectionMultibranchPreservedTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    params.updatePrecisions = true;
+
+    LowPrecisionTransformer transformer = getLowPrecisionTransformer(params);
+    transformer.transform(network);
+
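+    // Pooling preserves activation precision, so the FakeQuantize output is expected to keep
+    // the precision chosen for activations: I8 for a signed interval, U8 otherwise.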
+    if (params.quantizeOutputs && params.updatePrecisions) {
+        Precision expectedPrecision;
+        if (params.precisionsOnActivations.size() == 1ul) {
+            expectedPrecision = params.precisionsOnActivations[0];
+        } else {
+            expectedPrecision = signedIntervalOnActivation ? Precision::I8 : Precision::U8;
+        }
+        const CNNLayerPtr fakeQuantize = CNNNetworkHelper::getLayer(network, "fakeQuantize");
+        const Precision actualPrecision = fakeQuantize->outData[0]->getTensorDesc().getPrecision();
+        if (actualPrecision != expectedPrecision) {
+            THROW_IE_EXCEPTION << "expected precision " << expectedPrecision << ", actual " << actualPrecision << "";
+        }
+    }
+
+    return true;
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/quantization_on_inverted_weights_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/quantization_on_inverted_weights_test.cpp
new file mode 100644 (file)
index 0000000..5a7e37f
--- /dev/null
@@ -0,0 +1,111 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+std::string QuantizationOnInvertedWeightsTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    CommonTestUtils::conv_common_params conv =
+            { {1, 1}, {3, 3}, {0, 0}, {0, 0}, {1, 1}, "valid", 1, 32, false, false };
+    std::vector<size_t> convOutShape(p.inputDimensions[0].size());
+    getConvOutShape(p.inputDimensions[0], conv, convOutShape);
+
+    std::vector<size_t> weightsConstInputDims = { 32lu, 32lu, 3lu, 3lu };
+    std::vector<size_t> biasesConvolutionConstDims = { conv.out_c };
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = {
+        {"levels", "256"}
+    };
+    std::map<std::string, std::string> power_params = {
+        {"power", "1"}, {"scale", "1"}, {"shift", "0"}
+    };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, {"1,2", "6,7"}, // Power
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // Const layers
+        {"7,13", "12,18"}, {"8,14", "12,19"}, {"9,15", "12,20"}, {"10,16", "12,21"}, {"11,17", "12,22"}, // Const layers
+        {"6,12", "14,25"},  {"12,23", "14,26"}, // Fake quantize to Conv
+        {"13,24", "14,27"}, // biases to Conv
+        {"14,28", "15,29"} // Conv to Power
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput(
+            "QuantizationOnWeights", p.inputDimensions[0], p._network_precision)
+        .addLayer("Power", p._network_precision, &power_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}})
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}}})
+        .addLayer("Const", p._network_precision, &const_params, {{}, {weightsConstInputDims}},
+                std::accumulate(weightsConstInputDims.begin(), weightsConstInputDims.end(), 1lu, std::multiplies<size_t>()) * type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{weightsConstInputDims, {1}, {1}, {1}, {1}}, {{weightsConstInputDims}}})
+        .addLayer("Const", p._network_precision, &const_params, {{}, {biasesConvolutionConstDims}}, type_size * conv.out_c, 0)
+        .convolutionLayer(p._network_precision, {{p.inputDimensions[0], weightsConstInputDims, biasesConvolutionConstDims }, {convOutShape}}, conv)
+        .addLayer("Power", p._network_precision, &power_params, {{convOutShape}, {convOutShape}})
+        .finish(&edges);
+}
+
+std::string QuantizationOnInvertedWeightsTestModel::getName() const {
+    return "QuantizationOnInvertedWeightsTestModel";
+}
+
+bool QuantizationOnInvertedWeightsTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
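+    // Fold the weight FakeQuantize manually: bake the quantized weights and biases into the
+    // convolution, then remove the now-unused constant and FakeQuantize layers. The
+    // LowPrecisionTransformer itself is not run in this test.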
+    CNNLayerPtr weightsFakeQuantize = network.getLayerByName("FakeQuantize12");
+    Blob::Ptr weights = CNNNetworkHelper::quantizeWeights(*weightsFakeQuantize, false);
+
+    CNNLayerPtr biasesConvolutionConst = network.getLayerByName("Const13");
+    Blob::Ptr biases = getBlob(biasesConvolutionConst, "custom");
+
+    CNNLayerPtr convolution = network.getLayerByName("Convolution14");
+    convolution->blobs.emplace("weights", weights);
+    convolution->blobs.emplace("biases", biases);
+
+    WeightableLayer* weightableLayer = dynamic_cast<WeightableLayer*>(convolution.get());
+    if (weightableLayer == nullptr) {
+        THROW_IE_EXCEPTION << "layer " << convolution->name << " is not weightable";
+    }
+    weightableLayer->_weights = weights;
+    weightableLayer->_biases = biases;
+
+    CNNLayerPtr weightsConstInput = network.getLayerByName("Const7");
+    CNNNetworkHelper::removeLayer(network, weightsConstInput);
+    CNNLayerPtr weightsConstInputLow = network.getLayerByName("Const8");
+    CNNNetworkHelper::removeLayer(network, weightsConstInputLow);
+    CNNLayerPtr weightsConstInputHigh = network.getLayerByName("Const9");
+    CNNNetworkHelper::removeLayer(network, weightsConstInputHigh);
+    CNNLayerPtr weightsConstOutputLow = network.getLayerByName("Const10");
+    CNNNetworkHelper::removeLayer(network, weightsConstOutputLow);
+    CNNLayerPtr weightsConstOutputHigh = network.getLayerByName("Const11");
+    CNNNetworkHelper::removeLayer(network, weightsConstOutputHigh);
+
+    CNNNetworkHelper::removeLayer(network, weightsFakeQuantize);
+    CNNNetworkHelper::removeLayer(network, biasesConvolutionConst);
+
+    return false;
+}
+
+std::unordered_set<std::string> QuantizationOnInvertedWeightsTestModel::getNotTransformedLayers() const {
+    return { "dataFakeQuantize" };
+}
+
+void QuantizationOnInvertedWeightsTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "Const2"), 0.0, "custom");
+    fillData(getLayer(network, "Const3"), 127.5, "custom");
+    fillData(getLayer(network, "Const4"), 0.0, "custom");
+    fillData(getLayer(network, "Const5"), 127.5, "custom");
+
+    fillData(getLayer(network, "Const7"), 3.0, "custom");
+
+    fillData(getLayer(network, "Const8"), 1.278 / 2.0, "custom");
+    fillData(getLayer(network, "Const9"), -1.27, "custom");
+    fillData(getLayer(network, "Const10"), 1.278 / 2.0, "custom");
+    fillData(getLayer(network, "Const11"), -1.27, "custom");
+
+    fillData(getLayer(network, "Const13"), 5.0, "custom");
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/quantization_on_weights_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/quantization_on_weights_test.cpp
new file mode 100644 (file)
index 0000000..92f47d6
--- /dev/null
@@ -0,0 +1,111 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+std::string QuantizationOnWeightsTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    CommonTestUtils::conv_common_params conv =
+            { {1, 1}, {3, 3}, {0, 0}, {0, 0}, {1, 1}, "valid", 1, 32, false, false };
+    std::vector<size_t> convOutShape(p.inputDimensions[0].size());
+    getConvOutShape(p.inputDimensions[0], conv, convOutShape);
+
+    std::vector<size_t> weightsConstInputDims = { 32lu, 32lu, 3lu, 3lu };
+    std::vector<size_t> biasesConvolutionConstDims = { conv.out_c };
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = {
+        {"levels", "256"}
+    };
+    std::map<std::string, std::string> power_params = {
+        {"power", "1"}, {"scale", "1"}, {"shift", "0"}
+    };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, {"1,2", "6,7"}, // Power
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // Const layers
+        {"7,13", "12,18"}, {"8,14", "12,19"}, {"9,15", "12,20"}, {"10,16", "12,21"}, {"11,17", "12,22"}, // Const layers
+        {"6,12", "14,25"},  {"12,23", "14,26"}, // Fake quantize to Conv
+        {"13,24", "14,27"}, // biases to Conv
+        {"14,28", "15,29"} // Conv to Power
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput(
+            "QuantizationOnWeights", p.inputDimensions[0], p._network_precision)
+        .addLayer("Power", p._network_precision, &power_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}})
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}}})
+        .addLayer("Const", p._network_precision, &const_params, {{}, {weightsConstInputDims}},
+                std::accumulate(weightsConstInputDims.begin(), weightsConstInputDims.end(), 1lu, std::multiplies<size_t>()) * type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{weightsConstInputDims, {1}, {1}, {1}, {1}}, {{weightsConstInputDims}}})
+        .addLayer("Const", p._network_precision, &const_params, {{}, {biasesConvolutionConstDims}}, type_size * conv.out_c, 0)
+        .convolutionLayer(p._network_precision, {{p.inputDimensions[0], weightsConstInputDims, biasesConvolutionConstDims }, {convOutShape}}, conv)
+        .addLayer("Power", p._network_precision, &power_params, {{convOutShape}, {convOutShape}})
+        .finish(&edges);
+}
+
+std::string QuantizationOnWeightsTestModel::getName() const {
+    return "QuantizationOnWeightsTestModel";
+}
+
+bool QuantizationOnWeightsTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    CNNLayerPtr weightsFakeQuantize = network.getLayerByName("FakeQuantize12");
+    Blob::Ptr weights = CNNNetworkHelper::quantizeWeights(*weightsFakeQuantize, false);
+
+    CNNLayerPtr biasesConvolutionConst = network.getLayerByName("Const13");
+    Blob::Ptr biases = getBlob(biasesConvolutionConst, "custom");
+
+    CNNLayerPtr convolution = network.getLayerByName("Convolution14");
+    convolution->blobs.emplace("weights", weights);
+    convolution->blobs.emplace("biases", biases);
+
+    WeightableLayer* weightableLayer = dynamic_cast<WeightableLayer*>(convolution.get());
+    weightableLayer->_weights = weights;
+    weightableLayer->_biases = biases;
+
+    CNNLayerPtr weightsConstInput = network.getLayerByName("Const7");
+    CNNNetworkHelper::removeLayer(network, weightsConstInput);
+    CNNLayerPtr weightsConstInputLow = network.getLayerByName("Const8");
+    CNNNetworkHelper::removeLayer(network, weightsConstInputLow);
+    CNNLayerPtr weightsConstInputHigh = network.getLayerByName("Const9");
+    CNNNetworkHelper::removeLayer(network, weightsConstInputHigh);
+    CNNLayerPtr weightsConstOutputLow = network.getLayerByName("Const10");
+    CNNNetworkHelper::removeLayer(network, weightsConstOutputLow);
+    CNNLayerPtr weightsConstOutputHigh = network.getLayerByName("Const11");
+    CNNNetworkHelper::removeLayer(network, weightsConstOutputHigh);
+
+    CNNNetworkHelper::removeLayer(network, weightsFakeQuantize);
+    CNNNetworkHelper::removeLayer(network, biasesConvolutionConst);
+
+    return false;
+}
+
+std::unordered_set<std::string> QuantizationOnWeightsTestModel::getNotTransformedLayers() const {
+    return { "dataFakeQuantize" };
+}
+
+void QuantizationOnWeightsTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "Const2"), 0.0, "custom");
+    fillData(getLayer(network, "Const3"), 127.5, "custom");
+    fillData(getLayer(network, "Const4"), 0.0, "custom");
+    fillData(getLayer(network, "Const5"), 127.5, "custom");
+
+    fillData(getLayer(network, "Const7"), 3.0, "custom");
+
+    fillData(getLayer(network, "Const8"), -1.275 / 2.0, "custom");
+    fillData(getLayer(network, "Const9"), 1.275, "custom");
+    fillData(getLayer(network, "Const10"), -1.275 / 2.0, "custom");
+    fillData(getLayer(network, "Const11"), 1.275, "custom");
+
+    fillData(getLayer(network, "Const13"), 5.0, "custom");
+}
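
For reference, the 256-level FakeQuantize that transform() folds into the convolution weights follows the standard clamp/round/rescale formula. A minimal per-value sketch, assuming the usual OpenVINO FakeQuantize semantics (the helper name is illustrative, not part of the test sources):

    #include <algorithm>
    #include <cmath>

    // y = round((clamp(x) - in_lo) / step), mapped back onto [out_lo, out_hi]
    float fake_quantize(float x, float in_lo, float in_hi,
                        float out_lo, float out_hi, int levels = 256) {
        x = std::min(std::max(x, in_lo), in_hi);               // clamp to input range
        const float step = (in_hi - in_lo) / (levels - 1);     // quantization step
        const float q = std::round((x - in_lo) / step);        // integer level index
        return out_lo + q * (out_hi - out_lo) / (levels - 1);  // rescale to output range
    }
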
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/resample_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/resample_test.cpp
new file mode 100644 (file)
index 0000000..5430206
--- /dev/null
@@ -0,0 +1,67 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+std::string ResampleTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    CommonTestUtils::conv_common_params conv = { {1, 1}, {3, 3}, {0, 0}, {0, 0}, {1, 1}, "valid", 1, 32, false, false };
+    std::vector<size_t> convOutShape(p.inputDimensions[0].size());
+    getConvOutShape(p.inputDimensions[0], conv, convOutShape);
+
+    std::vector<size_t> weightsConstInputDims = { 32lu, 32lu, 3lu, 3lu };
+    std::vector<size_t> biasesConvolutionConstDims = { conv.out_c };
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = {
+        {"levels", "256"}
+    };
+    std::map<std::string, std::string> power_params = {
+        {"power", "1"}, {"scale", "1"}, {"shift", "0"}
+    };
+
+    std::map<std::string, std::string> resampleParams = {
+        {"antialias", "0"}, {"factor", "2"}, {"type", "caffe.ResampleParameter.NEAREST"}
+    };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "5,5"}, // Power
+        {"1,1", "5,6"}, {"2,2", "5,7"}, {"3,3", "5,8"}, {"4,4", "5,9"}, // Const layers
+        {"5,10", "6,11"}
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput("ResampleTestModel", p.inputDimensions[0], p._network_precision)
+        // 1
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 2
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 3
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 4
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 5
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}}})
+        // 6
+        .addLayer("Resample", p._network_precision, &resampleParams, {{p.inputDimensions[0]}, {{p.inputDimensions[0]}}})
+        .finish(&edges);
+}
+
+std::string ResampleTestModel::getName() const {
+    return "ResampleTestModel";
+}
+
+bool ResampleTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer(LowPrecisionTransformer::getAllTransformations(params));
+    transformer.transform(network);
+    return true;
+}
+
+void ResampleTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "Const1"), -128.0 / 20.0, "custom");
+    fillData(getLayer(network, "Const2"), 127.0 / 20.0, "custom");
+    fillData(getLayer(network, "Const3"), -128.0 / 20.0, "custom");
+    fillData(getLayer(network, "Const4"), 127.0 / 20.0, "custom");
+}
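
The ranges filled above pin the tensor to a signed 8-bit grid with scale 1/20, which is what lets the low precision pipeline carry the dequantization scale through the precision-preserving nearest-neighbor Resample. A sketch of the arithmetic under that assumed interpretation:

    const float s  = 1.0f / 20.0f;           // assumed quantization scale
    const float lo = -128.0f * s;            // matches -128.0 / 20.0 filled into Const1/Const3
    const float hi =  127.0f * s;            // matches  127.0 / 20.0 filled into Const2/Const4
    const float step = (hi - lo) / 255.0f;   // distance between the 256 levels; equals s
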
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/scaleshift_and_fake_quantize_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/scaleshift_and_fake_quantize_test.cpp
new file mode 100644 (file)
index 0000000..a0d8baf
--- /dev/null
@@ -0,0 +1,79 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+void ScaleShiftAndFakeQuantizeTestModel::initInput(Blob::Ptr input) const {
+    const Precision& precision = input->getTensorDesc().getPrecision();
+    const size_t dataSize = input->size();
+
+    std::vector<float> data(input->size(), 4.0);
+    float value = -64.0;
+    for (size_t i = 0ul; i < std::min(static_cast<size_t>(256), dataSize); ++i) {
+        if (precision == Precision::FP32) {
+            float* buffer = input->buffer().as<float*>();
+            buffer[i] = value;
+        } else if (precision == Precision::FP16) {
+            short* buffer = input->buffer().as<short*>();
+            buffer[i] = InferenceEngine::PrecisionUtils::f32tof16(value);
+        }
+        value += 1.0;
+    }
+}
+
+std::string ScaleShiftAndFakeQuantizeTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = {{"levels", "256"}};
+    std::map<std::string, std::string> power_params = {{"power", "1"}, {"scale", "1"}, {"shift", "0"}};
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, {"1,2", "6,7"}, // Power
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // Const layers
+        {"6,12", "7,13"}, // Fake quantize to ScaleShift
+        {"7,14", "8,15"}
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput("ScaleShiftAndFakeQuantizeTestModel", p.inputDimensions[0], p._network_precision)
+        // 1
+        .addLayer("Power", p._network_precision, &power_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}})
+        // 2
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 3
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 4
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 5
+        .addLayer("Const", p._network_precision, &const_params, {{}, {{1}}}, type_size, 0)
+        // 6
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, {{p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}}})
+        // 7
+        .addLayer("ScaleShift", p._network_precision, {}, { {p.inputDimensions[0]}, {p.inputDimensions[0]} }, 3 * type_size, 3 * type_size)
+        // 8
+        .addLayer("Power", p._network_precision, &power_params, {{p.inputDimensions[0]}, {p.inputDimensions[0]}})
+        .finish(&edges);
+}
+
+std::string ScaleShiftAndFakeQuantizeTestModel::getName() const {
+    return "ScaleShiftAndFakeQuantizeTestModel";
+}
+
+bool ScaleShiftAndFakeQuantizeTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer(LowPrecisionTransformer::getAllTransformations(params));
+    transformer.transform(network);
+    return true;
+}
+
+void ScaleShiftAndFakeQuantizeTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "Const2"), -128.f / 4.f, "custom");
+    fillData(getLayer(network, "Const3"), 127.f / 4.f, "custom");
+    fillData(getLayer(network, "Const4"), -128.f / 4.f, "custom");
+    fillData(getLayer(network, "Const5"), 127.f / 4.f, "custom");
+
+    fillData(getLayer(network, "ScaleShift7"), 1.0, "weights");
+    fillData(getLayer(network, "ScaleShift7"), 0.0, "biases");
+}
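
With the weights set to 1 and the biases to 0, the ScaleShift between FakeQuantize and the output Power is an exact per-channel identity, so fusing it must not change results. A reference sketch of ScaleShift semantics as assumed here (buffer names are illustrative):

    // y[c][i] = w[c] * x[c][i] + b[c] over a CHW-like layout
    for (size_t c = 0; c < channels; ++c)
        for (size_t i = 0; i < spatial; ++i)
            dst[c * spatial + i] = w[c] * src[c * spatial + i] + b[c];
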
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/scaleshift_to_conv_after_concat_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/scaleshift_to_conv_after_concat_test.cpp
new file mode 100644 (file)
index 0000000..0a2f19a
--- /dev/null
@@ -0,0 +1,150 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+#include "low_precision_transformations/scaleshift_to_convolution.hpp"
+
+ScaleShiftToConvolutionAfterConcatTestModel::ScaleShiftToConvolutionAfterConcatTestModel(const bool scaleShiftIsOutput) :
+    scaleShiftIsOutput(scaleShiftIsOutput) {}
+
+std::string ScaleShiftToConvolutionAfterConcatTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+//    ASSERT_EQ(2, p.inputDimensions.size());
+    size_t type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    if (p._network_precision == "FP16")
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+
+    const size_t axis = 1; // should be passed in via the 'p' argument
+
+    std::vector<size_t> concat_out_dims = p.inputDimensions[0];
+    concat_out_dims[axis] += p.inputDimensions[1][axis];
+
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = {
+        {"levels", "256"}
+    };
+    std::map<std::string, std::string> concat_params = {
+        {"axis", "1"}
+    };
+    std::map<std::string, std::string> power_params = {
+        {"power", "1"}, {"scale", "1"}, {"shift", "0"}
+    };
+
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "10,10"}, {"1,1", "11,16"}, // Inputs to FakeQuantize
+        {"2,2", "10,11"}, {"3,3", "10,12"}, {"4,4", "10,13"}, {"5,5", "10,14"}, // Const layers
+        {"6,6", "11,17"}, {"7,7", "11,18"}, {"8,8", "11,19"}, {"9,9", "11,20"}, // Const layers
+        {"10,15", "12,22"}, {"11,21", "12,23"}, // FakeQuantize to Concat
+        {"12,24", "13,25"} // Concat to ScaleShift
+    };
+
+    if (!scaleShiftIsOutput) {
+        edges.push_back({ "13,26", "14,27" });
+    }
+
+    auto layers = CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput("ScaleShiftToConvolutionAfterConcatTestModel", p.inputDimensions[0], p._network_precision)
+        .addInputLayer(p._network_precision, p.inputDimensions[1])
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, { {p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}} })
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, { {p.inputDimensions[1], {1}, {1}, {1}, {1}}, {{p.inputDimensions[1]}} })
+        .addLayer("Concat", p._network_precision, &concat_params, { {p.inputDimensions[0], p.inputDimensions[1]}, { concat_out_dims } })
+        .addLayer("ScaleShift", p._network_precision, {}, { {p.outputDimensions[0]}, {p.outputDimensions[0]} }, p.outputDimensions[0][1] * type_size, p.outputDimensions[0][1] * type_size);
+
+    if (!scaleShiftIsOutput) {
+        layers.addLayer("Power", p._network_precision, &power_params, { {p.outputDimensions[0]}, {p.outputDimensions[0]} });
+    }
+
+    return layers.finish(&edges);
+}
+
+std::string ScaleShiftToConvolutionAfterConcatTestModel::getName() const {
+    return std::string("ScaleShiftToConvolutionAfterConcatTestModel") +
+        (scaleShiftIsOutput ? "_scaleShiftIsOutput" : "_scaleShiftIsNotOutput");
+}
+
+bool ScaleShiftToConvolutionAfterConcatTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    if (std::any_of(
+        params.precisionsOnActivations.begin(),
+        params.precisionsOnActivations.end(),
+        [](const Precision precision) { return precision == Precision::U8; })) {
+        params.updatePrecisions = true;
+    }
+
+    LowPrecisionTransformer transformer(LowPrecisionTransformer::getAllTransformations(params).
+        addCleanup<ScaleShiftToConvolutionTransformation>(
+            LayerTransformation::Params(params).setPrecisionsOnActivations({ Precision::U8 }),
+            "ScaleShift"));
+
+    transformer.transform(network);
+
+    if (scaleShiftIsOutput || (!params.updatePrecisions)) {
+        CNNLayerPtr scaleShift = CNNNetworkHelper::getLayer(network, "ScaleShift13");
+        if (scaleShift->type != "ScaleShift") {
+            THROW_IE_EXCEPTION << "unexpected layer type " << scaleShift->type << " '" << scaleShift->name << "'";
+        }
+    } else {
+        CNNLayerPtr convolution = CNNNetworkHelper::getLayer(network, "ScaleShift13");
+        if (convolution->type != "Convolution") {
+            THROW_IE_EXCEPTION << "unexpected layer type " << convolution->type << " '" << convolution->name << "'";
+        }
+
+        if (CNNNetworkHelper::getInputChannelsCount(*convolution) != CNNNetworkHelper::getOutputChannelsCount(*convolution)) {
+            THROW_IE_EXCEPTION <<
+                "input channels count " << CNNNetworkHelper::getInputChannelsCount(*convolution) <<
+                " is not not equal output channels count " << CNNNetworkHelper::getOutputChannelsCount(*convolution);
+        }
+
+        const std::vector<CNNLayerPtr> parents = CNNNetworkHelper::getParents(*convolution);
+
+        const Blob::Ptr weightsBlob = CNNNetworkHelper::getBlob(parents[1], "custom");
+        if (weightsBlob == nullptr) {
+            THROW_IE_EXCEPTION << "weights are absent";
+        }
+        if (weightsBlob->getTensorDesc().getPrecision() != Precision::FP16) {
+            const std::shared_ptr<float> weightsData = CNNNetworkHelper::getFloatData(weightsBlob);
+            if (weightsData == nullptr) {
+                THROW_IE_EXCEPTION << "weights are not received";
+            }
+            const float* weights = weightsData.get();
+            size_t notZeroWeightsValues = 0ul;
+            for (size_t i = 0ul; i < weightsBlob->size(); ++i) {
+                if (weights[i] != 0.f) {
+                    notZeroWeightsValues++;
+                }
+            }
+            if (notZeroWeightsValues != CNNNetworkHelper::getOutputChannelsCount(*convolution)) {
+                THROW_IE_EXCEPTION << "unexpected weights not zero values " << notZeroWeightsValues;
+            }
+        }
+
+        const Blob::Ptr biasesBlob = CNNNetworkHelper::getBlob(parents[2], "custom");
+        if (biasesBlob == nullptr) {
+            THROW_IE_EXCEPTION << "biases are absent";
+        }
+        const std::shared_ptr<float> biases = CNNNetworkHelper::getFloatData(biasesBlob);
+        if (biases == nullptr) {
+            THROW_IE_EXCEPTION << "biases are not received";
+        }
+    }
+
+    return true;
+}
+
+void ScaleShiftToConvolutionAfterConcatTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "Const2"), 0.0, "custom");
+    fillData(getLayer(network, "Const3"), 255.0 / 10.0, "custom");
+    fillData(getLayer(network, "Const4"), 0.0, "custom");
+    fillData(getLayer(network, "Const5"), 255.0 / 10.0, "custom");
+
+    fillData(getLayer(network, "Const6"), -255.0 / 400.0, "custom");
+    fillData(getLayer(network, "Const7"), 255.0 / 200.0, "custom");
+    fillData(getLayer(network, "Const8"), -255.0 / 400.0, "custom");
+    fillData(getLayer(network, "Const9"), 255.0 / 200.0, "custom");
+}
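
The non-zero-weights check above encodes what the ScaleShiftToConvolution cleanup is expected to produce: a 1x1 Convolution whose OIHW weight tensor is diagonal, one non-zero value per output channel, with the ScaleShift shifts moved into the convolution biases. An illustrative construction, not taken from the transformation sources:

    // C input channels -> C output channels, 1x1 kernel: C*C weight values
    std::vector<float> weights(C * C, 0.f);
    for (size_t c = 0; c < C; ++c)
        weights[c * C + c] = scaleShiftWeights[c];   // diagonal carries the scales
    std::vector<float> biases(scaleShiftBiases, scaleShiftBiases + C);
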
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/scaleshift_to_conv_after_fakequantize_ignore_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/scaleshift_to_conv_after_fakequantize_ignore_test.cpp
new file mode 100644 (file)
index 0000000..053e5a4
--- /dev/null
@@ -0,0 +1,72 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+void ScaleShiftToConvolutionAfterFakeQuantizeIgnoreTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "Const2"), 0.f, "custom");
+    fillData(getLayer(network, "Const3"), 255.f / 8.f, "custom");
+    fillData(getLayer(network, "Const4"), 0.f, "custom");
+    fillData(getLayer(network, "Const5"), 255.f / 8.f, "custom");
+
+    fillData(getLayer(network, "ScaleShift7"), 3.f, "weights");
+    fillData(getLayer(network, "ScaleShift7"), 0.f, "biases");
+}
+
+std::string ScaleShiftToConvolutionAfterFakeQuantizeIgnoreTestModel::getName() const {
+    return "ScaleShiftToConvolutionAfterFakeQuantizeIgnoreTestModel";
+}
+
+bool ScaleShiftToConvolutionAfterFakeQuantizeIgnoreTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer(LowPrecisionTransformer::getAllTransformations(params));
+    transformer.transform(network);
+
+    CNNLayerPtr scaleShift = CNNNetworkHelper::getLayer(network, "ScaleShift7");
+    if (scaleShift != nullptr) {
+        THROW_IE_EXCEPTION << "unexpected layer " << scaleShift->type << " '" << scaleShift->name << "'";
+    }
+
+    return true;
+}
+
+std::string ScaleShiftToConvolutionAfterFakeQuantizeIgnoreTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size;
+    if (p._network_precision == "FP16") {
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+    } else if (p._network_precision == "FP32") {
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    } else {
+        THROW_IE_EXCEPTION << "unexpected network precision " << p._network_precision;
+    }
+
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = { {"levels", "256"} };
+    std::map<std::string, std::string> power_params = { {"power", "1"}, {"scale", "1"}, {"shift", "0"} };
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, // Input -> Power
+        {"1,2", "6,7"}, // Power -> FakeQuantize
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // Const layers
+        {"6,12", "7,13"}, // FakeQuantize -> ScaleShift
+        {"7,14", "8,15"}, // FakeQuantize -> ScaleShift
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput("ScaleShiftToConvolutionAfterFakeQuantizeIgnoreTestModel", p.inputDimensions[0], p._network_precision)
+        // 1
+        .addLayer("Power", p._network_precision, &power_params, { {p.inputDimensions[0]}, {p.inputDimensions[0]} })
+        // 2
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        // 3
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        // 4
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        // 5
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        // 6
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, { {p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}} })
+        // 7
+        .addLayer("ScaleShift", p._network_precision, {}, { {p.inputDimensions[0]}, {p.inputDimensions[0]} }, p.inputDimensions[0][1] * type_size, p.outputDimensions[0][1] * type_size)
+        // 8
+        .addLayer("Power", p._network_precision, &power_params, { {p.inputDimensions[0]}, {p.inputDimensions[0]} })
+        .finish(&edges);
+}
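
Here the ScaleShift directly follows the FakeQuantize, so the check expects the layer to disappear entirely rather than become a convolution. The assumed fusion rule is that a per-tensor ScaleShift(w, b) after FakeQuantize is absorbed by rescaling the output range:

    // assumed fusion of ScaleShift(w, b) into the preceding FakeQuantize:
    //   out_lo' = w * out_lo + b
    //   out_hi' = w * out_hi + b
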
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/scaleshift_to_conv_after_not_concat_ignore_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/scaleshift_to_conv_after_not_concat_ignore_test.cpp
new file mode 100644 (file)
index 0000000..f8a54e8
--- /dev/null
@@ -0,0 +1,72 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+
+void ScaleShiftToConvolutionAfterNotConcatIgnoreTestModel::resetTransformation(CNNNetwork& network) const {
+    fillData(getLayer(network, "Const2"), 0.f, "custom");
+    fillData(getLayer(network, "Const3"), 255.f / 8.f, "custom");
+    fillData(getLayer(network, "Const4"), 0.f, "custom");
+    fillData(getLayer(network, "Const5"), 255.f / 8.f, "custom");
+
+    fillData(getLayer(network, "ScaleShift8"), 3.f, "weights");
+    fillData(getLayer(network, "ScaleShift8"), 0.f, "biases");
+}
+
+std::string ScaleShiftToConvolutionAfterNotConcatIgnoreTestModel::getName() const {
+    return "ScaleShiftToConvolutionAfterNotConcatIgnoreTestModel";
+}
+
+bool ScaleShiftToConvolutionAfterNotConcatIgnoreTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer(LowPrecisionTransformer::getAllTransformations(params));
+    transformer.transform(network);
+
+    CNNLayerPtr scaleShift = CNNNetworkHelper::getLayer(network, "ScaleShift8");
+    if (scaleShift->type != "ScaleShift") {
+        THROW_IE_EXCEPTION << "unexpected layer type " << scaleShift->type << " '" << scaleShift->name << "'";
+    }
+
+    return true;
+}
+
+std::string ScaleShiftToConvolutionAfterNotConcatIgnoreTestModel::getModel(SingleLayerTransformationsTestParams& p) const {
+    size_t type_size;
+    if (p._network_precision == "FP16") {
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP16>::value_type);
+    } else if (p._network_precision == "FP32") {
+        type_size = sizeof(InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type);
+    } else {
+        THROW_IE_EXCEPTION << "unexpected network precision " << p._network_precision;
+    }
+
+    std::map<std::string, std::string> const_params = {};
+    std::map<std::string, std::string> fake_quantize_params = { {"levels", "256"} };
+    std::map<std::string, std::string> power_params = { {"power", "1"}, {"scale", "1"}, {"shift", "0"} };
+    std::vector<std::pair<std::string, std::string>> edges = {
+        {"0,0", "1,1"}, // Input -> Power
+        {"1,2", "6,7"}, // Power -> FakeQuantize
+        {"2,3", "6,8"}, {"3,4", "6,9"}, {"4,5", "6,10"}, {"5,6", "6,11"}, // Const layers
+        {"6,12", "7,13"}, // FakeQuantize -> ReLU
+        {"7,14", "8,15"}, // ReLU -> ScaleShift
+    };
+
+    return CommonTestUtils::DefaultNetBuilder::buildNetworkWithOneInput("ScaleShiftToConvolutionAfterNotConcatIgnoreTestModel", p.inputDimensions[0], p._network_precision)
+        // 1
+        .addLayer("Power", p._network_precision, &power_params, { {p.inputDimensions[0]}, {p.inputDimensions[0]} })
+        // 2
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        // 3
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        // 4
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        // 5
+        .addLayer("Const", p._network_precision, &const_params, { {}, {{1}} }, type_size, 0)
+        // 6
+        .addLayer("FakeQuantize", p._network_precision, &fake_quantize_params, { {p.inputDimensions[0], {1}, {1}, {1}, {1}}, {{p.inputDimensions[0]}} })
+        // 7
+        .addLayer("ReLU", p._network_precision, {}, { {p.inputDimensions[0]}, {p.inputDimensions[0]} })
+        // 8
+        .addLayer("ScaleShift", p._network_precision, {}, { {p.inputDimensions[0]}, {p.inputDimensions[0]} }, p.inputDimensions[0][1] * type_size, p.outputDimensions[0][1] * type_size)
+        .finish(&edges);
+}
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/single_layer_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/single_layer_test.cpp
new file mode 100644 (file)
index 0000000..623654d
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+#include "low_precision_transformations/convolution.hpp"
+#include "low_precision_transformations/fully_connected.hpp"
+#include "low_precision_transformations/scaleshift_to_convolution.hpp"
+
+LowPrecisionTransformations SingleLayerTestModel::getLowPrecisionTransformations(const LayerTransformation::Params& params) const {
+    if (device_name == "CPU") {
+        return LowPrecisionTransformer::getAllTransformations(params).
+            add<ConvolutionTransformation>(LayerTransformation::Params(params).setPrecisionsOnActivations({ Precision::U8 }), "Convolution").
+            addCleanup<ScaleShiftToConvolutionTransformation>(
+                LayerTransformation::Params(params).setPrecisionsOnActivations({ Precision::U8 }),
+                "ScaleShift");
+    } else if (device_name == "GPU") {
+        return LowPrecisionTransformer::getAllTransformations(params);
+    } else {
+        THROW_IE_EXCEPTION << "unknown plugin " << device_name;
+    }
+}
+
+LowPrecisionTransformer SingleLayerTestModel::getLowPrecisionTransformer(const LayerTransformation::Params& params) const {
+    LowPrecisionTransformer transformer(getLowPrecisionTransformations(params));
+    return transformer;
+}
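
A derived test model would consume these helpers roughly as follows (usage sketch; model, params and network stand for objects a concrete test already has in scope):

    LowPrecisionTransformer transformer = model.getLowPrecisionTransformer(params);
    transformer.transform(network);   // CPU gets the extended pipeline, GPU the stock one
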
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/single_layer_transformations_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/single_layer_transformations_test.cpp
new file mode 100644 (file)
index 0000000..c234284
--- /dev/null
@@ -0,0 +1,329 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
+#include "common/validation.hpp"
+#include "tests_common_func.hpp"
+#include <cpp/ie_cnn_net_reader.h>
+
+TBlob<uint8_t>::Ptr SingleLayerTransformationsTest::generateWeights(const CNNNetwork& network) {
+    std::vector<Blob::Ptr> blobs;
+    const auto net_precision = network.getPrecision();
+
+    std::vector<CNNLayerPtr> sortedLayers = CNNNetSortTopologically(network);
+    for (CNNLayerPtr layer : sortedLayers) {
+        auto weightableLayer = std::dynamic_pointer_cast<WeightableLayer>(layer);
+        const std::string& type = layer->type;
+        if ((weightableLayer == nullptr) && !CaselessEq<std::string>()(type, "Const")) {
+            continue;
+        }
+
+        size_t blobSize = 0lu;
+        if (CaselessEq<std::string>()(type, "Convolution")) {
+            const size_t kernelSize = CNNNetworkHelper::getKernelSize(*layer);
+            const size_t inputChannelsCount = CNNNetworkHelper::getInputChannelsCount(*layer);
+            const size_t outputChannelsCount = CNNNetworkHelper::getOutputChannelsCount(*layer);
+            blobSize = kernelSize * inputChannelsCount * outputChannelsCount;
+        } else if (CaselessEq<std::string>()(type, "Const")) {
+            const std::vector<size_t>& dims = layer->outData[0]->getDims();
+            blobSize = std::accumulate(dims.begin(), dims.end(), 1lu, std::multiplies<size_t>());
+        } else if (CaselessEq<std::string>()(type, "ScaleShift")) {
+            blobSize = 2 * layer->outData[0]->getDims()[1]; // weights and biases
+        }
+
+        Blob::Ptr weights = CNNNetworkHelper::makeNewBlobPtr({ net_precision, { blobSize }, C });
+        weights->allocate();
+        fillDataWithInitValue(weights, 1.23f);
+        blobs.push_back(weights);
+
+        if (CaselessEq<std::string>()(type, "Convolution")) {
+            Blob::Ptr bias = CNNNetworkHelper::makeNewBlobPtr({ net_precision, { CNNNetworkHelper::getOutputChannelsCount(*layer) }, C });
+            bias->allocate();
+            fillDataWithInitValue(bias, 3.21f);
+            blobs.push_back(bias);
+        }
+    }
+    size_t totalSize = 0lu;
+    for (auto& blob : blobs) totalSize += (blob->byteSize());
+
+    TBlob<uint8_t>::Ptr modelBlob = make_shared_blob<uint8_t>({ Precision::U8, { totalSize }, C });
+    modelBlob->allocate();
+    uint8_t* modelBlobBuffer = modelBlob->buffer().as<uint8_t *>();
+    for (Blob::Ptr blob : blobs) {
+        memcpy(modelBlobBuffer, blob->buffer().as<uint8_t *>(), blob->byteSize());
+        modelBlobBuffer += blob->byteSize();
+    }
+
+    return modelBlob;
+}
+
+// TODO: not completed
+void SingleLayerTransformationsTest::checkNetworkWithFakeQuantize(const CNNNetwork& network) {
+    size_t total_size_in_bytes = 0;
+    std::vector<Blob::Ptr> blob_to_model;
+
+    std::vector<CNNLayerPtr> sortedLayers = CNNNetSortTopologically(network);
+    for (CNNLayerPtr layer : sortedLayers) {
+        if ((layer->type != "Convolution") && (layer->type != "Const")) {
+            continue;
+        }
+    }
+}
+
+// TODO: not completed
+void SingleLayerTransformationsTest::checkNetworkWithQuantize(const CNNNetwork& network) {
+    size_t total_size_in_bytes = 0;
+    std::vector<Blob::Ptr> blob_to_model;
+
+    std::vector<CNNLayerPtr> sortedLayers = CNNNetSortTopologically(network);
+    for (CNNLayerPtr layer : sortedLayers) {
+        if ((layer->type != "Convolution") && (layer->type != "Const")) {
+            continue;
+        }
+    }
+}
+
+//void SingleLayerTransformationsTest::sortBlobs(CNNLayer& layer) {
+//    auto it = layer.blobs.begin();
+//    if (it == layer.blobs.end()) {
+//        THROW_IE_EXCEPTION << "there is no blobs";
+//    }
+
+//    const auto size = it->second->size();
+//    const auto byteSize = it->second->byteSize();
+//    if ((it->second->size() != 2) || (it->second->byteSize() != 16)) {
+//        THROW_IE_EXCEPTION << "not supported - under development";
+//    }
+
+//    float* buffer = it->second->buffer().as<float*>();
+//    if (buffer[0] > buffer[1]) {
+//        const float tmp = buffer[0];
+//        buffer[0] = buffer[1];
+//        buffer[1] = tmp;
+//    }
+//}
+
+CNNNetwork SingleLayerTransformationsTest::createNetwork() {
+    SingleLayerTransformationsTestParams p = ::testing::WithParamInterface<SingleLayerTransformationsTestParams>::GetParam();
+    std::string model = p.model->getModel(p);
+
+    Core reader;
+    auto weights_fake = make_shared_blob<uint8_t>(TensorDesc(Precision::U8,
+            SizeVector({std::numeric_limits<uint32_t>::max()/2}), Layout::C));
+    weights_fake->allocate();
+    CNNNetwork network = reader.ReadNetwork(model, weights_fake);
+
+    auto modelBlob = generateWeights(network);
+    return reader.ReadNetwork(model, modelBlob);
+}
+
+std::unordered_map<std::string, InferenceEngine::Blob::Ptr> SingleLayerTransformationsTest::infer(
+        CNNNetwork& network,
+        std::unordered_map<std::string, Blob::Ptr>& inputBlobs,
+        Core & core,
+        const std::string & device_name,
+        ExecutableNetwork & executableNetwork,
+        InferRequest & inferRequest) {
+    const SingleLayerTransformationsTestParams p = ::testing::WithParamInterface<SingleLayerTransformationsTestParams>::GetParam();
+
+    std::map<std::string, std::string> config;
+    config.emplace(PluginConfigInternalParams::KEY_LP_TRANSFORMS_MODE, PluginConfigParams::NO);
+    //config.emplace(PluginConfigParams::KEY_DUMP_EXEC_GRAPH_AS_DOT, "SingleLayerTransformationsTest");
+
+    executableNetwork = core.LoadNetwork(network, device_name, config);
+    inferRequest = executableNetwork.CreateInferRequest();
+
+    for (auto& item : inputBlobs) {
+        inferRequest.SetBlob(item.first.c_str(), item.second);
+    }
+
+    inferRequest.Infer();
+
+    const std::map<std::string, DataPtr> outputsInfo = network.getOutputsInfo();
+    std::unordered_map<std::string, InferenceEngine::Blob::Ptr> outputs_blob_map;
+    for (auto& info : outputsInfo) {
+        Blob::Ptr output_blob = inferRequest.GetBlob(info.first.c_str());
+        outputs_blob_map.insert({info.first, output_blob});
+    }
+
+    return outputs_blob_map;
+}
+
+void SingleLayerTransformationsTest::compareInDetails(
+        InferenceEngine::Blob &res,
+        InferenceEngine::Blob &ref,
+        const size_t maxDifferenceCounts,
+        float max_diff) {
+    float *res_ptr = res.buffer().as<float*>();
+    size_t res_size = res.size();
+
+    float *ref_ptr = ref.buffer().as<float*>();
+    size_t ref_size = ref.size();
+
+    ASSERT_EQ(res_size, ref_size);
+
+    size_t differenceCount = 0;
+    std::stringstream log;
+    for (size_t i = 0; i < ref_size; i++) {
+        const float difference = fabs((res_ptr[i] - ref_ptr[i]) / ref_ptr[i]) * 100.0;
+        if ((difference >= max_diff) && (fabs(res_ptr[i] - ref_ptr[i]) > 0.0003)) {
+            log << "i=" << i << ": " << res_ptr[i] << " VS " << ref_ptr[i] << ": " << difference << "%, " << fabs(res_ptr[i] - ref_ptr[i]) << std::endl;
+
+            differenceCount++;
+            if (differenceCount > maxDifferenceCounts) {
+                std::cout << log.str();
+                std::cout << differenceCount << " differences are detected" << std::endl;
+                ASSERT_TRUE(difference < max_diff);
+                break;
+            }
+        }
+    }
+}
+
+void SingleLayerTransformationsTest::SetUp() {
+    try {
+        const SingleLayerTransformationsTestParams p = ::testing::WithParamInterface<SingleLayerTransformationsTestParams>::GetParam();
+        // TODO: ONNX enabling
+        CNNNetwork network = createNetwork();
+
+        const auto inputsInfo = network.getInputsInfo();
+        std::unordered_map<std::string, Blob::Ptr> inputBlobs;
+        for (auto& inputInfo : inputsInfo) {
+            const TensorDesc& desc = inputInfo.second->getTensorDesc();
+            Blob::Ptr input = CNNNetworkHelper::makeNewBlobPtr(desc);
+            input->allocate();
+
+            fillData(input, 4.f);
+            p.model->initInput(input);
+
+            inputBlobs.insert(std::pair<std::string, Blob::Ptr>(inputInfo.first, input));
+        }
+
+        p.model->resetTransformation(network);
+
+        //network.serialize(
+        //    p.model->getName() + "_original.xml",
+        //    p.model->getName() + "_original.bin");
+
+        Core core;
+        ExecutableNetwork executableNetwork;
+        InferRequest inferRequest;
+        const auto originalOutputMap = infer(network, inputBlobs, core, 
+                p.device_name, executableNetwork, inferRequest);
+
+        const std::vector<bool> updatePrecisionsValues = { false };
+        const std::vector<bool> quantizeOutputsValues = { true, false };
+        const std::vector<bool> weightsToConstValues = { true, false };
+        const std::vector<LayerTransformation::QuantizedTensorAlignment> quantizedTensorAlignmentOnActivationsValues = {
+            LayerTransformation::QuantizedTensorAlignment::None,
+            LayerTransformation::QuantizedTensorAlignment::UpdateLevel
+        };
+        const std::vector<LayerTransformation::QuantizedTensorAlignment> quantizedTensorAlignmentOnWeightsValues = {
+            LayerTransformation::QuantizedTensorAlignment::None,
+            //LayerTransformation::QuantizedTensorAlignment::Mixed
+        };
+        const std::vector<bool> roundQuantizedValues = { false, true };
+        const std::vector<bool> updateBiasesValues = { true, false };
+        const std::vector<bool> supportAsymmetricQuantizationValues = { true /*, false*/ };
+        const std::vector<std::vector<Precision>> precisionOnActivationsValues = {
+            { Precision::I8 },
+            { Precision::I8, Precision::U8 },
+            { Precision::U8 },
+            { Precision::U8, Precision::I8 }
+        };
+        const std::vector<std::vector<Precision>> precisionOnWeightsValues = { { Precision::I8 } };
+
+        for (const bool updatePrecision : updatePrecisionsValues) {
+            for (const bool quantizeOutputs : quantizeOutputsValues) {
+                for (const bool weightsToConst : weightsToConstValues) {
+                    for (const LayerTransformation::QuantizedTensorAlignment quantizedTensorAlignmentOnActivations : quantizedTensorAlignmentOnActivationsValues) {
+                        for (const LayerTransformation::QuantizedTensorAlignment quantizedTensorAlignmentOnWeights : quantizedTensorAlignmentOnWeightsValues) {
+                            for (const bool roundQuantizedValue : roundQuantizedValues) {
+                                for (const bool updateBiases : updateBiasesValues) {
+                                    for (const bool supportAsymmetricQuantization : supportAsymmetricQuantizationValues) {
+                                        for (const std::vector<Precision> precisionOnActivations : precisionOnActivationsValues) {
+                                            for (const std::vector<Precision> precisionOnWeights : precisionOnWeightsValues) {
+                                                network = createNetwork();
+
+                                                p.model->resetTransformation(network);
+                                                auto param = LayerTransformation::Params(
+                                                    updatePrecision,
+                                                    quantizeOutputs,
+                                                    weightsToConst,
+                                                    quantizedTensorAlignmentOnActivations,
+                                                    quantizedTensorAlignmentOnWeights,
+                                                    roundQuantizedValue,
+                                                    updateBiases,
+                                                    supportAsymmetricQuantization,
+                                                    precisionOnActivations,
+                                                    precisionOnWeights);
+
+                                                const bool validate = p.model->transform(network, param);
+
+#ifdef DISPLAY_RESULTS
+                                                // TODO: separate each usecase to standalone parameterized test
+                                                std::cout << std::endl <<
+                                                    "\tupdatePrecision=" << (param.updatePrecisions ? "true" : "false") << std::endl <<
+                                                    "\tquantizeOutputs=" << (param.quantizeOutputs ? "true" : "false") << std::endl <<
+                                                    "\tweightsToConst=" << (param.weightsToConst ? "true" : "false") << std::endl <<
+                                                    "\tquantizedTensorAlignmentOnActivations=" << param.quantizedTensorAlignmentOnActivations << std::endl <<
+                                                    "\tquantizedTensorAlignmentOnWeights=" << param.quantizedTensorAlignmentOnWeights << std::endl <<
+                                                    "\troundQuantizedValues: " << (param.roundQuantizedValues ? "true" : "false") << std::endl <<
+                                                    "\tupdateBiases: " << (param.updateBiases ? "true" : "false") << std::endl <<
+                                                    "\tsupportAsymmetricQuantization: " << (param.supportAsymmetricQuantization ? "true" : "false") << std::endl <<
+                                                    "\tprecisionsOnActivations: " << param.precisionsOnActivations << std::endl <<
+                                                    "\tprecisionsOnWeights: " << param.precisionsOnWeights << std::endl <<
+                                                    "\tnetworkPrecision=" << p._network_precision << std::endl;
+#endif
+
+                                                //network.serialize(
+                                                //    p.model->getName() + "_transformed.xml",
+                                                //    p.model->getName() + "_transformed.bin");
+
+                                                if (validate) {
+                                                    LowPrecisionTransformationValidation::validate(
+                                                        network,
+                                                        param,
+                                                        p.model->getNotTransformedLayers());
+                                                }
+
+                                                ExecutableNetwork executableNetworkTransformed;
+                                                InferRequest inferRequestTransformed;
+                                                const auto transformedOutput = infer(network, inputBlobs, core, p.device_name, executableNetworkTransformed, inferRequestTransformed);
+
+                                                //compareInDetails(originalOutputMap, *transformedOutput, 70, 0.5);
+                                                auto net_precision = network.getPrecision();
+                                                for (auto& originalOutput : originalOutputMap) {
+                                                    const auto& name = originalOutput.first;
+                                                    const auto outSize = originalOutput.second->size();
+
+                                                    auto transformed = CNNNetworkHelper::getFloatData(transformedOutput.find(name)->second);
+                                                    auto original = CNNNetworkHelper::getFloatData(originalOutput.second);
+
+                                                    const float threshold = p.model->getThreshold(p.device_name, net_precision, param);
+                                                    const float zeroThreshold = p.model->getZeroThreshold();
+                                                    // const float threshold = net_precision == Precision::FP16 ? 0.0005f : 0.0003f;
+                                                    relative_compare(
+                                                        transformed.get(),
+                                                        original.get(),
+                                                        outSize,
+                                                        threshold,
+                                                        updatePrecision ? "failed with precisions" : "failed without precisions",
+                                                        zeroThreshold);
+                                                }
+                                            }
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    } catch (const InferenceEngine::details::InferenceEngineException &e) {
+        FAIL() << e.what();
+    }
+}
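
The nested loops in SetUp() sweep every combination of the value lists declared above; with the lists as written that is 128 configurations, each of which rebuilds, transforms and re-infers the network. A one-line sanity check of the arithmetic:

    const size_t combinations = 1 * 2 * 2 * 2 * 1 * 2 * 2 * 1 * 4 * 1;   // == 128
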
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/update_biases_convolution_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/update_biases_convolution_test.cpp
new file mode 100644 (file)
index 0000000..85b1180
--- /dev/null
@@ -0,0 +1,54 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+#include "low_precision_transformations/fake_quantize.hpp"
+#include "low_precision_transformations/convolution.hpp"
+
+UpdateBiasesConvolutionTestModel::UpdateBiasesConvolutionTestModel(const bool addBiasesLayer) : ConvolutionBaseTestModel(addBiasesLayer) {}
+
+std::string UpdateBiasesConvolutionTestModel::getName() const {
+    return std::string("UpdateBiasesConvolutionTestModel") +
+        (addBiasesLayer ? "" : "_withoutBiases");
+}
+
+void UpdateBiasesConvolutionTestModel::initInput(Blob::Ptr input) const {
+    fillDataWithInitValue(input, -1.f);
+}
+
+bool UpdateBiasesConvolutionTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    params.supportAsymmetricQuantization = false;
+
+    LowPrecisionTransformer transformer = getLowPrecisionTransformer(params);
+    transformer.transform(network);
+
+    if (std::any_of(
+        params.precisionsOnActivations.begin(),
+        params.precisionsOnActivations.end(),
+        [](const Precision precision) { return precision == Precision::U8; }) &&
+        params.quantizeOutputs) {
+        const CNNLayerPtr dequantizationLayer = getLayer(network, "Convolution");
+        if (dequantizationLayer->type != "ScaleShift") {
+            THROW_IE_EXCEPTION << "was not quantized";
+        }
+
+        const Blob::Ptr biases = CNNNetworkHelper::getBiases(*dequantizationLayer);
+        const std::shared_ptr<float> biasesData = CNNNetworkHelper::getFloatData(biases);
+        if (params.updateBiases) {
+            for (size_t i = 0ul; i < biases->size(); ++i) {
+                if (biasesData.get()[i] != 0.f) {
+                    THROW_IE_EXCEPTION << "biases value is not zero";
+                }
+            }
+
+            //CNNLayerPtr convolution = dequantizationLayer->insData[0].lock()->getCreatorLayer().lock();
+            //CNNLayerPtr convolutionBiases = CNNNetworkHelper::getParent(*convolution, 2);
+            //if (convolutionBiases == nullptr) {
+            //    THROW_IE_EXCEPTION << "biases const layer was not added";
+            //}
+        }
+    }
+
+    return true;
+}
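
The zero-bias check encodes an assumed model of the rewrite: the dequantization after the convolution computes y = s * q + b, and with updateBiases enabled the shift b is folded into the layer's own biases, leaving an all-zero biases blob on the dequantization ScaleShift:

    // before: y = s * q + b
    // after:  y = s * (q + b / s)   // b/s absorbed into the convolution biases
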
diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/update_biases_fully_connected_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/update_biases_fully_connected_test.cpp
new file mode 100644 (file)
index 0000000..8787de2
--- /dev/null
@@ -0,0 +1,57 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "low_precision_transformer_single_layer_tests.hpp"
+#include "low_precision_transformations/fake_quantize.hpp"
+#include "low_precision_transformations/convolution.hpp"
+#include "low_precision_transformations/fully_connected.hpp"
+#include "low_precision_transformations/scaleshift_to_convolution.hpp"
+
+UpdateBiasesFullyConnectedTestModel::UpdateBiasesFullyConnectedTestModel(const bool addBiasesLayer) : FullyConnectedBaseTestModel(addBiasesLayer) {}
+
+std::string UpdateBiasesFullyConnectedTestModel::getName() const {
+    return std::string("UpdateBiasesFullyConnectedTestModel") +
+        (addBiasesLayer ? "WithBiases" : "WithoutBiases");
+}
+
+void UpdateBiasesFullyConnectedTestModel::initInput(Blob::Ptr input) const {
+    fillDataWithInitValue(input, -1.f);
+}
+
+bool UpdateBiasesFullyConnectedTestModel::transform(CNNNetwork& network, LayerTransformation::Params& params) const {
+    // TODO: use getLowPrecisionTransformer(params) instead
+    LowPrecisionTransformer transformer(LowPrecisionTransformer::getAllTransformations(params).
+        add<FullyConnectedTransformation>(LayerTransformation::Params(params).setSupportAsymmetricQuantization(false), "FullyConnected").
+        add<ConvolutionTransformation>(LayerTransformation::Params(params).setPrecisionsOnActivations({ Precision::U8 }), "Convolution").
+        addCleanup<ScaleShiftToConvolutionTransformation>(
+            LayerTransformation::Params(params).setPrecisionsOnActivations({ Precision::U8 }),
+            "ScaleShift"));
+
+    transformer.transform(network);
+
+    if (params.quantizeOutputs) {
+        const CNNLayerPtr dequantizationLayer = getLayer(network, "fullyConnected");
+        if (dequantizationLayer->type != "ScaleShift") {
+            THROW_IE_EXCEPTION << "was not quantized";
+        }
+
+        const Blob::Ptr biases = CNNNetworkHelper::getBiases(*dequantizationLayer);
+        const std::shared_ptr<float> biasesData = CNNNetworkHelper::getFloatData(biases);
+        if (params.updateBiases) {
+            for (size_t i = 0ul; i < biases->size(); ++i) {
+                if (biasesData.get()[i] != 0.f) {
+                    THROW_IE_EXCEPTION << "biases value is not zero";
+                }
+            }
+        } else {
+            for (size_t i = 0ul; i < biases->size(); ++i) {
+                if (biasesData.get()[i] == 0.f) {
+                    THROW_IE_EXCEPTION << "biases value is zero";
+                }
+            }
+        }
+    }
+
+    return true;
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/CMakeLists.txt b/inference-engine/tests_deprecated/functional/vpu/CMakeLists.txt
new file mode 100644 (file)
index 0000000..8e6ada3
--- /dev/null
@@ -0,0 +1,82 @@
+# Copyright (C) 2018-2020 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+set(VPU_DEPENDENCIES
+    vpu_copy_firmware)
+
+if (ENABLE_CLDNN)
+    list(APPEND VPU_DEPENDENCIES
+            clDNNPlugin
+            HeteroPlugin)
+endif()
+
+include(add_ie_target)
+
+addIeTarget(
+    NAME myriadTestData
+    DEVELOPER_PACKAGE
+    TYPE STATIC
+    ROOT ${CMAKE_CURRENT_SOURCE_DIR}/test_data
+    LINK_LIBRARIES
+        ieTestHelpers
+    DEFINES
+        INSTANTIATE_TESTS=1
+    PUBLIC
+        DATA_PATH=\"${DATA_PATH}\"
+        MODELS_PATH=\"${MODELS_PATH}\"
+)
+
+addIeTarget(
+    NAME VPUCommonTests
+    DEVELOPER_PACKAGE
+    TYPE STATIC
+    ROOT ${CMAKE_CURRENT_SOURCE_DIR}/common
+    ADDITIONAL_SOURCE_DIRS
+        ${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instance
+        ${CMAKE_CURRENT_SOURCE_DIR}/vpu_base
+    INCLUDES
+        ${CMAKE_CURRENT_SOURCE_DIR}/vpu_base
+        ${CMAKE_CURRENT_SOURCE_DIR}/common/regression/helpers
+    LINK_LIBRARIES
+        IESharedTests
+        inference_engine_ir_readers
+        vpu_graph_transformer
+        vpu_custom_kernels
+    DEFINES
+        INSTANTIATE_TESTS=1
+    EXPORT_DEPENDENCIES
+        vpu_custom_kernels
+)
+
+target_include_directories(VPUCommonTests INTERFACE
+    ${CMAKE_CURRENT_SOURCE_DIR}/vpu_base
+    ${CMAKE_CURRENT_SOURCE_DIR}/common/regression/helpers
+    $<TARGET_PROPERTY:vpu_graph_transformer,INTERFACE_INCLUDE_DIRECTORIES>
+    )
+
+ie_developer_export_targets(vpu_custom_kernels)
+addIeTargetTest(
+    NAME MyriadFunctionalTests
+    ROOT ${CMAKE_CURRENT_SOURCE_DIR}/myriad_tests
+    ADDITIONAL_SOURCE_DIRS
+        ${CMAKE_CURRENT_SOURCE_DIR}/graph_transformer
+    INCLUDES
+        ${IE_MAIN_SOURCE_DIR}/include/vpu
+        ${IE_MAIN_SOURCE_DIR}/src/vpu/graph_transformer/include
+        ${CMAKE_CURRENT_BINARY_DIR}
+    LINK_LIBRARIES
+        IESharedTests
+        vpu_custom_kernels
+        inference_engine_ir_readers
+    LINK_LIBRARIES_WHOLE_ARCHIVE
+        myriadTestData
+        VPUCommonTests
+    DEPENDENCIES
+        ${VPU_DEPENDENCIES}
+        myriadPlugin
+    LABELS  # Must be the last parameter. Reason: see description comment for addIeTargetTest().
+        VPU
+        MYRIAD
+    )
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/bbox_util.cpp b/inference-engine/tests_deprecated/functional/vpu/common/bbox_util.cpp
new file mode 100644 (file)
index 0000000..094be4d
--- /dev/null
@@ -0,0 +1,996 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <algorithm>
+#include <csignal>
+#include <ctime>
+#include <map>
+#include <string>
+#include <utility>
+#include <vector>
+#include <numeric>
+#include <cfloat>
+
+#include "bbox_util.h"
+
+static const float  PI = 3.14159265358979323846;
+
+static const float  ORIENT_MAX = PI;
+static const float  ORIENT_MIN = -PI;
+static const float  ORIENT_RNG = (ORIENT_MAX-ORIENT_MIN);
+
+template<typename Dtype>
+Dtype get_orient_by_bin_index(Dtype bin_index, int num_orient_bins) {
+    Dtype bin_size = (Dtype)ORIENT_RNG / (Dtype)num_orient_bins;
+    Dtype orient = (Dtype)ORIENT_MIN + (bin_index+1)*bin_size;
+
+    return orient;
+}
+
+// Explicit instantiation.
+template float get_orient_by_bin_index(float bin_index, int num_orient_bins);
+template double get_orient_by_bin_index(double bin_index, int num_orient_bins);
+
+float get_orient_by_class_index(int class_index, int num_orient_classes) {
+    switch (class_index) {
+        case 0:
+            return -0.5f * PI;
+        case 1:
+            return 0.5f * PI;
+        default:
+            return 0;
+    }
+}
+
+template<typename Dtype, typename ArrayDtype>
+Dtype get_orientation_impl(const ArrayDtype& bin_vals, bool interpolate_orientation) {
+    int max_index = -1;
+
+    Dtype max_score = 0;
+    int num_bins = bin_vals.size();
+    for (int i=0; i < num_bins; i++) {
+        if (bin_vals[i] > max_score) {
+            max_score = bin_vals[i];
+            max_index = i;
+        }
+    }
+
+    if (num_bins == 3) {
+        Dtype orient = get_orient_by_class_index(max_index, num_bins);
+        return orient;
+    }
+
+    Dtype bin_index = 0;
+    if (interpolate_orientation) {
+        // refine the peak with a parabolic fit through the two neighboring bins
+        int left_index = ((max_index-1)+num_bins)%num_bins;
+        int right_index = ((max_index+1))%num_bins;
+
+        Dtype left_val = bin_vals[left_index];
+        Dtype right_val = bin_vals[right_index];
+        Dtype x2 = (Dtype)((right_val - left_val)/(2*(left_val+right_val-2*max_score)));
+        bin_index = (Dtype)max_index - x2;
+    } else {
+        bin_index = (Dtype)max_index;
+    }
+
+    return get_orient_by_bin_index(bin_index, num_bins);
+}
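+
+// Illustrative walk-through of the interpolated branch (values are made up):
+// with 8 bins and scores {0.05, 0.20, 0.60, 0.30, 0.05, 0, 0, 0}, the peak is
+// at index 2, so left_val = 0.20, right_val = 0.30 and
+//
+//   x2 = (0.30 - 0.20) / (2 * (0.20 + 0.30 - 2 * 0.60)) ~= -0.071
+//   bin_index ~= 2 - (-0.071) = 2.071
+//
+// which get_orient_by_bin_index() then maps into [ORIENT_MIN, ORIENT_MAX].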
+
+float get_orientation(const std::vector<float>& bin_vals, bool interpolate_orientation) {
+    return get_orientation_impl<float, std::vector<float> >(bin_vals, interpolate_orientation);
+}
+
+template<typename Dtype>
+Dtype get_orientation(const ArrayWrapper<Dtype>& bin_vals, bool interpolate_orientation) {
+    return get_orientation_impl<Dtype, ArrayWrapper<Dtype> >(bin_vals, interpolate_orientation);
+}
+
+// Explicit instantiation.
+template float get_orientation(const ArrayWrapper<float>& bin_vals, bool interpolate_orientation);
+template double get_orientation(const ArrayWrapper<double>& bin_vals, bool interpolate_orientation);
+
+bool SortBBoxAscend(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2) {
+    return bbox1.score < bbox2.score;
+}
+
+bool SortBBoxDescend(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2) {
+    return bbox1.score > bbox2.score;
+}
+
+bool SortBBoxIndexPairDescend(const std::pair<NormalizedBBox, int>& bbox1, const std::pair<NormalizedBBox, int>& bbox2) {
+    return SortBBoxDescend(bbox1.first, bbox2.first);
+}
+
+template <typename T>
+bool SortDetectionResultPairAscend(const std::pair<DetectionResult, T>& pair1,
+                                   const std::pair<DetectionResult, T>& pair2) {
+    return pair1.first.mScore < pair2.first.mScore;
+}
+
+template <typename T>
+bool SortDetectionResultPairDescend(const std::pair<DetectionResult, T>& pair1,
+                                    const std::pair<DetectionResult, T>& pair2) {
+    return pair1.first.mScore > pair2.first.mScore;
+}
+
+template <typename T>
+bool SortScorePairAscend(const std::pair<float, T>& pair1,
+                         const std::pair<float, T>& pair2) {
+    return pair1.first < pair2.first;
+}
+
+// Explicit instantiation.
+template bool SortScorePairAscend(const std::pair<float, int>& pair1,
+                                  const std::pair<float, int>& pair2);
+template bool SortScorePairAscend(const std::pair<float, std::pair<int, int> >& pair1,
+                                  const std::pair<float, std::pair<int, int> >& pair2);
+
+template <typename T>
+bool SortScorePairDescend(const std::pair<float, T>& pair1,
+                          const std::pair<float, T>& pair2) {
+    return pair1.first > pair2.first;
+}
+
+template <typename T>
+bool SortScorePairDescendStable(const std::pair<float, T>& pair1,
+                                const std::pair<float, T>& pair2) {
+    if (pair1.first > pair2.first) return true;
+    if (pair1.first < pair2.first) return false;
+    return pair1.second < pair2.second;
+}
+
+template <typename T>
+struct ScoresIndexedComparator {
+    explicit ScoresIndexedComparator(const std::vector<T>& scores) : _scores(scores) {}
+
+    bool operator()(int idx1, int idx2) {
+        if (_scores[idx1] > _scores[idx2]) return true;
+        if (_scores[idx1] < _scores[idx2]) return false;
+        return idx1 < idx2;
+    }
+
+    const std::vector<T>& _scores;
+};
+
+
+// Explicit instantiation.
+template bool SortDetectionResultPairDescend(const std::pair<DetectionResult, int>& pair1,
+                                             const std::pair<DetectionResult, int>& pair2);
+template bool SortDetectionResultPairDescend(const std::pair<DetectionResult, std::pair<int, int> >& pair1,
+                                             const std::pair<DetectionResult, std::pair<int, int> >& pair2);
+
+template bool SortScorePairDescend(const std::pair<float, int>& pair1,
+                                   const std::pair<float, int>& pair2);
+template bool SortScorePairDescend(const std::pair<float, std::pair<int, int> >& pair1,
+                                   const std::pair<float, std::pair<int, int> >& pair2);
+
+
+template bool SortScorePairDescendStable(const std::pair<float, int>& pair1,
+                                         const std::pair<float, int>& pair2);
+template bool SortScorePairDescendStable(const std::pair<float, std::pair<int, int> >& pair1,
+                                         const std::pair<float, std::pair<int, int> >& pair2);
+
+NormalizedBBox UnitBBox() {
+    NormalizedBBox unit_bbox;
+    unit_bbox.set_xmin(0.);
+    unit_bbox.set_ymin(0.);
+    unit_bbox.set_xmax(1.);
+    unit_bbox.set_ymax(1.);
+    return unit_bbox;
+}
+
+template<typename BBoxType>
+void IntersectBBox_impl(const BBoxType& bbox1, const BBoxType& bbox2, BBoxType* intersect_bbox) {
+    if (bbox2.xmin() > bbox1.xmax() || bbox2.xmax() < bbox1.xmin() ||
+        bbox2.ymin() > bbox1.ymax() || bbox2.ymax() < bbox1.ymin()) {
+        // Return [0, 0, 0, 0] if there is no intersection.
+        intersect_bbox->set_xmin(0);
+        intersect_bbox->set_ymin(0);
+        intersect_bbox->set_xmax(0);
+        intersect_bbox->set_ymax(0);
+    } else {
+        intersect_bbox->set_xmin(std::max(bbox1.xmin(), bbox2.xmin()));
+        intersect_bbox->set_ymin(std::max(bbox1.ymin(), bbox2.ymin()));
+        intersect_bbox->set_xmax(std::min(bbox1.xmax(), bbox2.xmax()));
+        intersect_bbox->set_ymax(std::min(bbox1.ymax(), bbox2.ymax()));
+    }
+}
+
+void IntersectBBox(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2, NormalizedBBox* intersect_bbox) {
+    IntersectBBox_impl(bbox1, bbox2, intersect_bbox);
+}
+
+template<typename Dtype>
+void IntersectBBox(const BBox<Dtype>& bbox1, const BBox<Dtype>& bbox2, BBox<Dtype>* intersect_bbox) {
+    IntersectBBox_impl(bbox1, bbox2, intersect_bbox);
+}
+
+// Explicit instantiation.
+template void IntersectBBox(const BBox<float>& bbox1, const BBox<float>& bbox2,
+                            BBox<float>* intersect_bbox);
+template void IntersectBBox(const BBox<double>& bbox1, const BBox<double>& bbox2,
+                            BBox<double>* intersect_bbox);
+
+
+template<typename Dtype, typename BBoxType>
+Dtype BBoxSize_impl(const BBoxType& bbox, const bool normalized) {
+    if (bbox.xmax() < bbox.xmin() || bbox.ymax() < bbox.ymin()) {
+        // If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0.
+        return 0;
+    }
+    Dtype width = bbox.xmax() - bbox.xmin();
+    Dtype height = bbox.ymax() - bbox.ymin();
+    if (normalized) {
+        return width * height;
+    } else {
+        // Bbox is not within range [0, 1]: pixel coordinates, so +1 makes width/height inclusive.
+        return (width + 1) * (height + 1);
+    }
+}
+
+float BBoxSize(const NormalizedBBox& bbox, const bool normalized) {
+    return BBoxSize_impl<float, NormalizedBBox>(bbox, normalized);
+}
+
+template<typename Dtype>
+Dtype BBoxSize(const BBox<Dtype>& bbox, const bool normalized) {
+    return BBoxSize_impl<Dtype, BBox<Dtype> >(bbox, normalized);
+}
+
+// Explicit instantiation.
+template float BBoxSize(const BBox<float>& bbox, const bool normalized);
+template double BBoxSize(const BBox<double>& bbox, const bool normalized);
+
+void ClipBBox(const NormalizedBBox& bbox, NormalizedBBox* clip_bbox) {
+    clip_bbox->set_xmin(std::max(std::min(bbox.xmin(), 1.f), 0.f));
+    clip_bbox->set_ymin(std::max(std::min(bbox.ymin(), 1.f), 0.f));
+    clip_bbox->set_xmax(std::max(std::min(bbox.xmax(), 1.f), 0.f));
+    clip_bbox->set_ymax(std::max(std::min(bbox.ymax(), 1.f), 0.f));
+
+    clip_bbox->set_size(BBoxSize(*clip_bbox));
+    clip_bbox->difficult = bbox.difficult;
+}
+
+template<typename Dtype>
+void ClipBBox(const BBox<Dtype>& bbox, BBox<Dtype>* clip_bbox) {
+    clip_bbox->set_xmin(std::max<Dtype>(std::min<Dtype>(bbox.xmin(), 1.0), 0.0));
+    clip_bbox->set_ymin(std::max<Dtype>(std::min<Dtype>(bbox.ymin(), 1.0), 0.0));
+    clip_bbox->set_xmax(std::max<Dtype>(std::min<Dtype>(bbox.xmax(), 1.0), 0.0));
+    clip_bbox->set_ymax(std::max<Dtype>(std::min<Dtype>(bbox.ymax(), 1.0), 0.0));
+}
+
+// Explicit instantiation.
+template void ClipBBox(const BBox<float>& bbox, BBox<float>* clip_bbox);
+template void ClipBBox(const BBox<double>& bbox, BBox<double>* clip_bbox);
+
+void ScaleBBox(const NormalizedBBox& bbox, const int height, const int width,
+               NormalizedBBox* scale_bbox) {
+    scale_bbox->set_xmin(bbox.xmin() * width);
+    scale_bbox->set_ymin(bbox.ymin() * height);
+    scale_bbox->set_xmax(bbox.xmax() * width);
+    scale_bbox->set_ymax(bbox.ymax() * height);
+
+    bool normalized = !(width > 1 || height > 1);
+
+    scale_bbox->set_size(BBoxSize(*scale_bbox, normalized));
+    scale_bbox->difficult = bbox.difficult;
+}
+
+template<typename Dtype>
+void ScaleBBox(const BBox<Dtype>& bbox, const int height, const int width,
+               BBox<Dtype>* scale_bbox) {
+    scale_bbox->set_xmin(bbox.xmin() * width);
+    scale_bbox->set_ymin(bbox.ymin() * height);
+    scale_bbox->set_xmax(bbox.xmax() * width);
+    scale_bbox->set_ymax(bbox.ymax() * height);
+}
+
+// Explicit instantiation.
+template void ScaleBBox(const BBox<float>& bbox, const int height, const int width,
+                        BBox<float>* scale_bbox);
+template void ScaleBBox(const BBox<double>& bbox, const int height, const int width,
+                        BBox<double>* scale_bbox);
+
+void LocateBBox(const NormalizedBBox& src_bbox, const NormalizedBBox& bbox, NormalizedBBox* loc_bbox) {
+    float src_width = src_bbox.xmax() - src_bbox.xmin();
+    float src_height = src_bbox.ymax() - src_bbox.ymin();
+    loc_bbox->set_xmin(src_bbox.xmin() + bbox.xmin() * src_width);
+    loc_bbox->set_ymin(src_bbox.ymin() + bbox.ymin() * src_height);
+    loc_bbox->set_xmax(src_bbox.xmin() + bbox.xmax() * src_width);
+    loc_bbox->set_ymax(src_bbox.ymin() + bbox.ymax() * src_height);
+    loc_bbox->difficult = bbox.difficult;
+    loc_bbox->orientation = src_bbox.orientation;
+}
+
+
+bool ProjectBBox_GetRatio(const NormalizedBBox& src_bbox, const NormalizedBBox& bbox, NormalizedBBox* proj_bbox, float& ratio) {
+    if (bbox.xmin() >= src_bbox.xmax() || bbox.xmax() <= src_bbox.xmin() ||
+        bbox.ymin() >= src_bbox.ymax() || bbox.ymax() <= src_bbox.ymin() ||
+        bbox.xmax() <= bbox.xmin() || bbox.ymax() <= bbox.ymin()) {
+        return false;
+    }
+
+    float src_width = src_bbox.xmax() - src_bbox.xmin();
+    float src_height = src_bbox.ymax() - src_bbox.ymin();
+
+    proj_bbox->set_xmin((bbox.xmin() - src_bbox.xmin()) / src_width);
+    proj_bbox->set_ymin((bbox.ymin() - src_bbox.ymin()) / src_height);
+    proj_bbox->set_xmax((bbox.xmax() - src_bbox.xmin()) / src_width);
+    proj_bbox->set_ymax((bbox.ymax() - src_bbox.ymin()) / src_height);
+    proj_bbox->difficult = bbox.difficult;
+    proj_bbox->orientation = bbox.orientation;
+
+    float area_before_clipping = BBoxSize(*proj_bbox);
+    ClipBBox(*proj_bbox, proj_bbox);
+
+    float area_after_clipping = BBoxSize(*proj_bbox);
+    ratio = area_after_clipping/area_before_clipping;
+
+    return area_after_clipping > 0;
+}
+
+
+bool ProjectBBox(const NormalizedBBox& src_bbox, const NormalizedBBox& bbox,
+                 NormalizedBBox* proj_bbox) {
+    float ratio = 0;
+    return ProjectBBox_GetRatio(src_bbox, bbox, proj_bbox, ratio);
+}
+
+template<typename Dtype, typename BBoxType>
+Dtype JaccardOverlap_impl(const BBoxType& bbox1, const BBoxType& bbox2,
+                          const bool normalized) {
+    BBoxType intersect_bbox;
+    IntersectBBox(bbox1, bbox2, &intersect_bbox);
+    Dtype intersect_width, intersect_height;
+    if (normalized) {
+        intersect_width = intersect_bbox.xmax() - intersect_bbox.xmin();
+        intersect_height = intersect_bbox.ymax() - intersect_bbox.ymin();
+    } else {
+        intersect_width = intersect_bbox.xmax() - intersect_bbox.xmin() + 1;
+        intersect_height = intersect_bbox.ymax() - intersect_bbox.ymin() + 1;
+    }
+    if (intersect_width > 0 && intersect_height > 0) {
+        Dtype intersect_size = intersect_width * intersect_height;
+        Dtype bbox1_size = BBoxSize(bbox1);
+        Dtype bbox2_size = BBoxSize(bbox2);
+        return intersect_size / (bbox1_size + bbox2_size - intersect_size);
+    } else {
+        return 0.;
+    }
+}
+
+float JaccardOverlap(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2,
+                     const bool normalized) {
+    return JaccardOverlap_impl<float, NormalizedBBox>(bbox1, bbox2, normalized);
+}
+
+template<typename Dtype>
+Dtype JaccardOverlap(const BBox<Dtype>& bbox1, const BBox<Dtype>& bbox2,
+                     const bool normalized) {
+    return JaccardOverlap_impl<Dtype, BBox<Dtype> >(bbox1, bbox2, normalized);
+}
+
+// Explicit instantiation.
+template float JaccardOverlap(const BBox<float>& bbox1, const BBox<float>& bbox2,
+                              const bool normalized);
+template double JaccardOverlap(const BBox<double>& bbox1, const BBox<double>& bbox2,
+                               const bool normalized);
+
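+// Worked example with made-up normalized boxes: a = [0, 0, 0.5, 0.5] and
+// b = [0.25, 0.25, 0.75, 0.75] intersect in [0.25, 0.25, 0.5, 0.5] (area
+// 0.0625); each box has area 0.25, so
+//
+//   BBox<float> a{{0.f, 0.f, 0.5f, 0.5f}};
+//   BBox<float> b{{0.25f, 0.25f, 0.75f, 0.75f}};
+//   float iou = JaccardOverlap(a, b);  // 0.0625 / (0.25 + 0.25 - 0.0625) = 1/7
+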
+float BBoxCoverage(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2) {
+    NormalizedBBox intersect_bbox;
+    IntersectBBox(bbox1, bbox2, &intersect_bbox);
+    float intersect_size = BBoxSize(intersect_bbox);
+    float bbox1_size = BBoxSize(bbox1);
+    if (intersect_size > 0 && bbox1_size > 0) {
+        return intersect_size / bbox1_size;
+    } else {
+        return 0.;
+    }
+}
+
+void DecodeBBox(const NormalizedBBox& prior_bbox, const float* prior_variance,
+                const CodeType code_type, const bool variance_encoded_in_target,
+                const NormalizedBBox& bbox, NormalizedBBox* decode_bbox) {
+    if (code_type == CodeType::CORNER) {
+        if (variance_encoded_in_target) {
+            // variance is encoded in target, we simply need to add the offset
+            // predictions.
+            decode_bbox->set_xmin(prior_bbox.xmin() + bbox.xmin());
+            decode_bbox->set_ymin(prior_bbox.ymin() + bbox.ymin());
+            decode_bbox->set_xmax(prior_bbox.xmax() + bbox.xmax());
+            decode_bbox->set_ymax(prior_bbox.ymax() + bbox.ymax());
+        } else {
+            // variance is encoded in bbox, we need to scale the offset accordingly.
+            decode_bbox->set_xmin(
+                                  prior_bbox.xmin() + prior_variance[0] * bbox.xmin());
+            decode_bbox->set_ymin(
+                                  prior_bbox.ymin() + prior_variance[1] * bbox.ymin());
+            decode_bbox->set_xmax(
+                                  prior_bbox.xmax() + prior_variance[2] * bbox.xmax());
+            decode_bbox->set_ymax(
+                                  prior_bbox.ymax() + prior_variance[3] * bbox.ymax());
+        }
+    } else if (code_type == CodeType::CENTER_SIZE) {
+        float prior_width = prior_bbox.xmax() - prior_bbox.xmin();
+        float prior_height = prior_bbox.ymax() - prior_bbox.ymin();
+        float prior_center_x = (prior_bbox.xmin() + prior_bbox.xmax()) / 2.;
+        float prior_center_y = (prior_bbox.ymin() + prior_bbox.ymax()) / 2.;
+
+        float decode_bbox_center_x, decode_bbox_center_y;
+        float decode_bbox_width, decode_bbox_height;
+
+        if (variance_encoded_in_target) {
+            // variance is encoded in target, we simply need to restore the offset
+            // predictions.
+            decode_bbox_center_x = bbox.xmin() * prior_width + prior_center_x;
+            decode_bbox_center_y = bbox.ymin() * prior_height + prior_center_y;
+            decode_bbox_width = exp(bbox.xmax()) * prior_width;
+            decode_bbox_height = exp(bbox.ymax()) * prior_height;
+        } else {
+            // variance is encoded in bbox, we need to scale the offset accordingly.
+            decode_bbox_center_x =
+            prior_variance[0] * bbox.xmin() * prior_width + prior_center_x;
+            decode_bbox_center_y =
+            prior_variance[1] * bbox.ymin() * prior_height + prior_center_y;
+            decode_bbox_width =
+            exp(prior_variance[2] * bbox.xmax()) * prior_width;
+            decode_bbox_height =
+            exp(prior_variance[3] * bbox.ymax()) * prior_height;
+        }
+
+        decode_bbox->set_xmin(decode_bbox_center_x - decode_bbox_width / 2.);
+        decode_bbox->set_ymin(decode_bbox_center_y - decode_bbox_height / 2.);
+        decode_bbox->set_xmax(decode_bbox_center_x + decode_bbox_width / 2.);
+        decode_bbox->set_ymax(decode_bbox_center_y + decode_bbox_height / 2.);
+    }
+
+    decode_bbox->set_size(BBoxSize(*decode_bbox));
+}
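+
+// The CENTER_SIZE branch above follows the usual SSD decoding convention,
+// with (pcx, pcy, pw, ph) taken from the prior box, the predicted offsets
+// (tx, ty, tw, th) stored in bbox as (xmin, ymin, xmax, ymax), and
+// var[i] == 1 when variance_encoded_in_target is true:
+//
+//   cx = var[0] * tx * pw + pcx        w = exp(var[2] * tw) * pw
+//   cy = var[1] * ty * ph + pcy        h = exp(var[3] * th) * ph
+//
+// The decoded corners are then cx -/+ w/2 and cy -/+ h/2.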
+
+void DecodeBBoxes(const std::vector<NormalizedBBox>& prior_bboxes,
+                  const std::vector<float>& prior_variances,
+                  const CodeType code_type, const bool variance_encoded_in_target,
+                  const std::vector<NormalizedBBox>& bboxes,
+                  std::vector<NormalizedBBox>* decode_bboxes) {
+    int num_bboxes = prior_bboxes.size();
+
+    decode_bboxes->clear();
+    decode_bboxes->resize(num_bboxes);
+
+    for (int i = 0; i < num_bboxes; ++i) {
+        NormalizedBBox decode_bbox;
+        DecodeBBox(prior_bboxes[i], &prior_variances[i * 4], code_type,
+                   variance_encoded_in_target, bboxes[i], &decode_bbox);
+        (*decode_bboxes)[i] = decode_bbox;
+    }
+}
+
+void DecodeBBoxesAll(const std::vector<LabelBBox>& all_loc_preds,
+                     const std::vector<NormalizedBBox>& prior_bboxes,
+                     const std::vector<float>& prior_variances,
+                     const int num, const bool share_location,
+                     const int num_loc_classes, const int background_label_id,
+                     const CodeType code_type, const bool variance_encoded_in_target,
+                     std::vector<LabelBBox>* all_decode_bboxes) {
+    all_decode_bboxes->clear();
+    all_decode_bboxes->resize(num);
+
+    for (int i = 0; i < num; ++i) {
+        // Decode predictions into bboxes.
+        LabelBBox& decode_bboxes = (*all_decode_bboxes)[i];
+        for (int c = 0; c < num_loc_classes; ++c) {
+            int label = share_location ? -1 : c;
+
+            if (label == background_label_id) {
+                // Ignore background class.
+                continue;
+            }
+
+            if (all_loc_preds[i].find(label) == all_loc_preds[i].end()) {
+                continue;
+            }
+
+            const std::vector<NormalizedBBox>& label_loc_preds = all_loc_preds[i].find(label)->second;
+
+            DecodeBBoxes(prior_bboxes, prior_variances,
+                         code_type, variance_encoded_in_target,
+                         label_loc_preds, &(decode_bboxes[label]));
+        }
+    }
+}
+
+template <typename Dtype>
+void GetLocPredictions(const Dtype* loc_data, const int num,
+                       const int num_preds_per_class, const int num_loc_classes,
+                       const bool share_location, std::vector<LabelBBox>* loc_preds) {
+    loc_preds->clear();
+    loc_preds->resize(num);
+
+    for (int i = 0; i < num; ++i) {
+        LabelBBox& label_bbox = (*loc_preds)[i];
+
+        for (int c = 0; c < num_loc_classes; ++c) {
+            std::vector<NormalizedBBox>& bbox_vec = (share_location ? label_bbox[-1] : label_bbox[c]);
+            bbox_vec.resize(num_preds_per_class);
+
+            for (int p = 0; p < num_preds_per_class; ++p) {
+                int start_idx = p * num_loc_classes * 4;
+                bbox_vec[p].set_xmin(loc_data[start_idx + c * 4]);
+                bbox_vec[p].set_ymin(loc_data[start_idx + c * 4 + 1]);
+                bbox_vec[p].set_xmax(loc_data[start_idx + c * 4 + 2]);
+                bbox_vec[p].set_ymax(loc_data[start_idx + c * 4 + 3]);
+            }
+        }
+        loc_data += num_preds_per_class * num_loc_classes * 4;
+    }
+}
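+
+// Indexing example with made-up sizes: for num_preds_per_class = 2,
+// num_loc_classes = 3 and share_location = false, each image occupies
+// 2 * 3 * 4 = 24 consecutive values, and the box for prediction p = 1,
+// class c = 2 starts at loc_data[1 * 3 * 4 + 2 * 4] = loc_data[20]
+// (xmin), followed by ymin, xmax, ymax at offsets 21..23.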
+
+template <typename Dtype>
+void GetLocPredictions(const Dtype* loc_data, const int num,
+                       const int num_preds_per_class, const int num_loc_classes,
+                       const bool share_location, std::vector<std::map<int, BBoxArrayWrapper<Dtype> > >* loc_preds) {
+    loc_preds->clear();
+    loc_preds->resize(num);
+
+    const BBox<Dtype>* bbox_data = reinterpret_cast<const BBox<Dtype>*>(loc_data);
+
+    for (int i = 0; i < num; ++i) {
+        std::map<int, BBoxArrayWrapper<Dtype> > & label_bbox = (*loc_preds)[i];
+
+        if (share_location) {
+            label_bbox[-1] = BBoxArrayWrapper<Dtype>(&bbox_data[i * num_preds_per_class * num_loc_classes], num_preds_per_class);
+        } else {
+            for (int c = 0; c < num_loc_classes; ++c) {
+                label_bbox[c] = BBoxArrayWrapper<Dtype>(&bbox_data[(i * num_loc_classes + c) * num_preds_per_class], num_preds_per_class);
+            }
+        }
+    }
+}
+
+// Explicit instantiation.
+template void GetLocPredictions(const float* loc_data, const int num,
+                                const int num_preds_per_class, const int num_loc_classes,
+                                const bool share_location, std::vector<LabelBBox>* loc_preds);
+template void GetLocPredictions(const double* loc_data, const int num,
+                                const int num_preds_per_class, const int num_loc_classes,
+                                const bool share_location, std::vector<LabelBBox>* loc_preds);
+
+template void GetLocPredictions(const float* loc_data, const int num,
+                                const int num_preds_per_class, const int num_loc_classes,
+                                const bool share_location, std::vector<std::map<int, BBoxArrayWrapper<float>>>* loc_preds);
+template void GetLocPredictions(const double* loc_data, const int num,
+                                const int num_preds_per_class, const int num_loc_classes,
+                                const bool share_location, std::vector<std::map<int, BBoxArrayWrapper<double>>>* loc_preds);
+
+template <typename Dtype>
+void GetOrientPredictions(const Dtype* orient_data, const int num, std::vector<float>* orient_preds) {
+    orient_preds->clear();
+    orient_preds->resize(num);
+
+    for (int i = 0; i < num; ++i) {
+        (*orient_preds)[i] = orient_data[i];
+    }
+}
+
+// Explicit instantiation.
+template void GetOrientPredictions(const float* orient_data, const int num,
+                                   std::vector<float>* orient_preds);
+template void GetOrientPredictions(const double* orient_data, const int num,
+                                   std::vector<float>* orient_preds);
+
+template <typename Dtype>
+void GetOrientationScores(const Dtype* orient_data, const int num,
+                          const int num_priors, const int num_orient_classes,
+                          std::vector<std::vector<std::vector<float> > >* orient_preds) {
+    orient_preds->clear();
+    orient_preds->resize(num);
+    for (int i = 0; i < num; ++i) {
+        std::vector<std::vector<float> >& orient_scores = (*orient_preds)[i];
+
+        orient_scores.resize(num_priors);
+        for (int p = 0; p < num_priors; ++p) {
+            int start_idx = p * num_orient_classes;
+            orient_scores[p].resize(num_orient_classes);
+            for (int c = 0; c < num_orient_classes; ++c) {
+                orient_scores[p][c] = orient_data[start_idx + c];
+            }
+        }
+        orient_data += num_priors * num_orient_classes;
+    }
+}
+
+template <typename Dtype>
+void GetOrientationScores(const Dtype* orient_data, const int num,
+                          const int num_priors, const int num_orient_classes,
+                          std::vector<std::vector<ArrayWrapper<Dtype> > >* orient_preds) {
+    orient_preds->clear();
+    orient_preds->resize(num);
+    for (int i = 0; i < num; ++i) {
+        std::vector<ArrayWrapper<Dtype> >& orient_scores = (*orient_preds)[i];
+        orient_scores.resize(num_priors);
+        for (int p = 0; p < num_priors; ++p) {
+            int start_idx = p * num_orient_classes;
+            orient_scores[p] = ArrayWrapper<Dtype>(&orient_data[start_idx], num_orient_classes);
+        }
+        orient_data += num_priors * num_orient_classes;
+    }
+}
+
+// Explicit instantiation.
+template void GetOrientationScores(const float* orient_data, const int num,
+                                   const int num_priors, const int num_orient_classes,
+                                   std::vector<std::vector<std::vector<float>>>* orient_preds);
+template void GetOrientationScores(const double* orient_data, const int num,
+                                   const int num_priors, const int num_orient_classes,
+                                   std::vector<std::vector<std::vector<float>>>* orient_preds);
+
+template void GetOrientationScores(const float* orient_data, const int num,
+                                   const int num_priors, const int num_orient_classes,
+                                   std::vector<std::vector<ArrayWrapper<float>>>* orient_preds);
+template void GetOrientationScores(const double* orient_data, const int num,
+                                   const int num_priors, const int num_orient_classes,
+                                   std::vector<std::vector<ArrayWrapper<double>>>* orient_preds);
+
+template <typename Dtype>
+void GetConfidenceScores(const Dtype* conf_data, const int num,
+                         const int num_preds_per_class, const int num_classes,
+                         std::vector<std::map<int, std::vector<Dtype> > >* conf_preds) {
+    bool class_major = false;
+    GetConfidenceScores(conf_data, num, num_preds_per_class, num_classes, class_major, conf_preds);
+}
+
+template <typename Dtype>
+void GetConfidenceScores(const Dtype* conf_data, const int num,
+                         const int num_preds_per_class, const int num_classes,
+                         const bool class_major, std::vector<std::map<int, std::vector<Dtype>>>* conf_preds) {
+    conf_preds->clear();
+    conf_preds->resize(num);
+
+    for (int i = 0; i < num; ++i) {
+        std::map<int, std::vector<Dtype> >& label_scores = (*conf_preds)[i];
+
+        if (class_major) {
+            for (int c = 0; c < num_classes; ++c) {
+                label_scores[c].assign(conf_data, conf_data + num_preds_per_class);
+                conf_data += num_preds_per_class;
+            }
+        } else {
+            for (int p = 0; p < num_preds_per_class; ++p) {
+                int start_idx = p * num_classes;
+                for (int c = 0; c < num_classes; ++c) {
+                    label_scores[c].push_back(conf_data[start_idx + c]);
+                }
+            }
+            conf_data += num_preds_per_class * num_classes;
+        }
+    }
+}
+
+// Explicit instantiation.
+template void GetConfidenceScores(const float* conf_data, const int num,
+                                  const int num_preds_per_class, const int num_classes,
+                                  std::vector<std::map<int, std::vector<float> > >* conf_preds);
+template void GetConfidenceScores(const double* conf_data, const int num,
+                                  const int num_preds_per_class, const int num_classes,
+                                  std::vector<std::map<int, std::vector<double> > >* conf_preds);
+template void GetConfidenceScores(const float* conf_data, const int num,
+                                  const int num_preds_per_class, const int num_classes,
+                                  const bool class_major, std::vector<std::map<int, std::vector<float> > >* conf_preds);
+template void GetConfidenceScores(const double* conf_data, const int num,
+                                  const int num_preds_per_class, const int num_classes,
+                                  const bool class_major, std::vector<std::map<int, std::vector<double>>>* conf_preds);
+
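+// Layout example with made-up sizes: for num_preds_per_class = 2 and
+// num_classes = 3 each image occupies 6 scores. With class_major == true they
+// are grouped as [c0p0 c0p1 | c1p0 c1p1 | c2p0 c2p1]; otherwise as
+// [p0c0 p0c1 p0c2 | p1c0 p1c1 p1c2]. Either way label_scores[c][p] ends up
+// holding the score of prediction p for class c.
+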
+template <typename Dtype>
+void GetMaxConfidenceScores(const Dtype* conf_data, const int num,
+                            const int num_preds_per_class, const int num_classes,
+                            const int background_label_id, const ConfLossType loss_type,
+                            std::vector<std::vector<float> >* all_max_scores) {
+    all_max_scores->clear();
+    for (int image_index_in_batch = 0; image_index_in_batch < num; ++image_index_in_batch) {
+        std::vector<float> max_scores;
+        for (int prior_box_index = 0; prior_box_index < num_preds_per_class; ++prior_box_index) {
+            int start_idx = prior_box_index * num_classes;
+            Dtype maxval = -FLT_MAX;
+            Dtype maxval_pos = -FLT_MAX;
+            for (int label_index = 0; label_index < num_classes; ++label_index) {
+                maxval = std::max<Dtype>(conf_data[start_idx + label_index], maxval);
+                if (label_index != background_label_id) {
+                    // Find maximum scores for positive classes.
+                    maxval_pos = std::max<Dtype>(conf_data[start_idx + label_index], maxval_pos);
+                }
+            }
+
+
+            if (loss_type == ConfLossType::SOFTMAX) {
+                // Compute softmax probability.
+                Dtype sum = 0.;
+                for (int label_index = 0; label_index < num_classes; ++label_index) {
+                    sum += std::exp(conf_data[start_idx + label_index] - maxval);
+                }
+                maxval_pos = std::exp(maxval_pos - maxval) / sum;
+            } else if (loss_type == ConfLossType::HINGE) {
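+                // HINGE: keep the raw maximum positive score as-is.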
+            } else if (loss_type == ConfLossType::LOGISTIC) {
+                maxval_pos = 1. / (1. + exp(-maxval_pos));
+            }
+
+            max_scores.push_back(maxval_pos);
+        }
+        conf_data += num_preds_per_class * num_classes;
+        all_max_scores->push_back(max_scores);
+    }
+}
+
+// Explicit instantiation.
+template void GetMaxConfidenceScores(const float* conf_data, const int num,
+                                     const int num_preds_per_class, const int num_classes,
+                                     const int background_label_id, const ConfLossType loss_type,
+                                     std::vector<std::vector<float> >* all_max_scores);
+template void GetMaxConfidenceScores(const double* conf_data, const int num,
+                                     const int num_preds_per_class, const int num_classes,
+                                     const int background_label_id, const ConfLossType loss_type,
+                                     std::vector<std::vector<float> >* all_max_scores);
+
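+// Illustrative softmax step (made-up scores): for one prior with raw scores
+// {background: 2.0, class1: 1.0, class2: 0.5}, maxval = 2.0 and
+// maxval_pos = 1.0, so
+//
+//   sum = exp(0) + exp(-1.0) + exp(-1.5) ~= 1.591
+//   maxval_pos = exp(1.0 - 2.0) / sum ~= 0.231
+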
+template <typename Dtype>
+void GetPriorBBoxes(const Dtype* prior_data, const int num_priors,
+                    std::vector<NormalizedBBox>& prior_bboxes,
+                    std::vector<float>& prior_variances) {
+    for (int i = 0; i < num_priors; ++i) {
+        int start_idx = i * 4;
+        NormalizedBBox bbox;
+        bbox.set_xmin(prior_data[start_idx]);
+        bbox.set_ymin(prior_data[start_idx + 1]);
+        bbox.set_xmax(prior_data[start_idx + 2]);
+        bbox.set_ymax(prior_data[start_idx + 3]);
+        float bbox_size = BBoxSize(bbox);
+        bbox.set_size(bbox_size);
+        prior_bboxes[i] = bbox;
+    }
+
+    for (int i = 0; i < num_priors; ++i) {
+        int start_idx = (num_priors + i) * 4;
+
+        prior_variances[i * 4 + 0] = prior_data[start_idx + 0];
+        prior_variances[i * 4 + 1] = prior_data[start_idx + 1];
+        prior_variances[i * 4 + 2] = prior_data[start_idx + 2];
+        prior_variances[i * 4 + 3] = prior_data[start_idx + 3];
+    }
+}
+
+// Explicit instantiation.
+template void GetPriorBBoxes(const float* prior_data, const int num_priors,
+                             std::vector<NormalizedBBox>& prior_bboxes,
+                             std::vector<float>& prior_variances);
+template void GetPriorBBoxes(const double* prior_data, const int num_priors,
+                             std::vector<NormalizedBBox>& prior_bboxes,
+                             std::vector<float>& prior_variances);
+
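+// Layout example: prior_data is a 1 x 2 x num_priors*4 blob, i.e. all box
+// corners first, then all variance quadruples. With num_priors = 2:
+//
+//   [x0min y0min x0max y0max  x1min y1min x1max y1max |
+//    v0_0  v0_1  v0_2  v0_3   v1_0  v1_1  v1_2  v1_3]
+//
+// so the variances of prior i start at prior_data[(num_priors + i) * 4].
+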
+template<typename Dtype>
+void GetTopKScoreIndex(const std::vector<Dtype>& scores, const std::vector<int>& indices,
+                       const int top_k, std::vector<std::pair<Dtype, int>>* score_index_vec) {
+    int num_output_scores = (top_k == -1 ? scores.size() : std::min<int>(top_k, scores.size()));
+
+    std::vector<int> sorted_indices(num_output_scores);
+
+    std::partial_sort_copy(indices.begin(), indices.end(),
+                           sorted_indices.begin(), sorted_indices.end(),
+                           ScoresIndexedComparator<Dtype>(scores));
+
+    score_index_vec->reserve(num_output_scores);
+
+    for (int i = 0; i < num_output_scores; i++) {
+        int idx = sorted_indices[i];
+        (*score_index_vec).push_back(std::make_pair(scores[idx], idx));
+    }
+}
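+
+// Usage sketch with made-up scores:
+//
+//   std::vector<float> scores = {0.1f, 0.9f, 0.4f};
+//   std::vector<int> indices = {0, 1, 2};
+//   std::vector<std::pair<float, int>> top;
+//   GetTopKScoreIndex(scores, indices, /*top_k=*/2, &top);
+//   // top == {{0.9f, 1}, {0.4f, 2}}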
+
+// Explicit instantiation.
+template void GetTopKScoreIndex(const std::vector<float>& scores, const std::vector<int>& indices,
+                                const int top_k, std::vector<std::pair<float, int> >* score_index_vec);
+template void GetTopKScoreIndex(const std::vector<double>& scores, const std::vector<int>& indices,
+                                const int top_k, std::vector<std::pair<double, int> >* score_index_vec);
+
+template<typename Dtype>
+void GetMaxScoreIndex(const std::vector<Dtype>& scores, const float threshold,
+                      const int top_k, std::vector<std::pair<Dtype, int> >* score_index_vec) {
+    std::vector<int> indices(scores.size());
+    for (int i = 0; i < scores.size(); ++i) {
+        indices[i] = i;
+    }
+
+    GetTopKScoreIndex(scores, indices, top_k, score_index_vec);
+
+    // Trim entries whose score is less than or equal to the threshold, if any.
+    for (int i = 0; i < score_index_vec->size(); i++) {
+        Dtype score = (*score_index_vec)[i].first;
+        if (score <= threshold) {
+            score_index_vec->resize(i);
+            break;
+        }
+    }
+}
+
+// Explicit instantiation.
+template void GetMaxScoreIndex(const std::vector<float>& scores, const float threshold,
+                               const int top_k, std::vector<std::pair<float, int> >* score_index_vec);
+template void GetMaxScoreIndex(const std::vector<double>& scores, const float threshold,
+                               const int top_k, std::vector<std::pair<double, int> >* score_index_vec);
+
+void ApplyNMS(const std::vector<NormalizedBBox>& bboxes, const std::vector<float>& scores,
+              const float threshold, const int top_k, const bool reuse_overlaps,
+              std::map<int, std::map<int, float> >* overlaps, std::vector<int>* indices) {
+    std::vector<int> idx;
+
+    for (int  i = 0; i < scores.size(); i++) {
+        idx.push_back(i);
+    }
+
+    std::vector<std::pair<float, int> > score_index_vec;
+
+    GetTopKScoreIndex(scores, idx, top_k, &score_index_vec);
+
+    // Do nms.
+    indices->clear();
+
+    while (score_index_vec.size() != 0) {
+        // Get the current highest score box.
+        int best_idx = score_index_vec.front().second;
+        const NormalizedBBox& best_bbox = bboxes[best_idx];
+        if (BBoxSize(best_bbox) < 1e-5) {
+            // Erase small box.
+            score_index_vec.erase(score_index_vec.begin());
+            continue;
+        }
+
+        indices->push_back(best_idx);
+        // Erase the best box.
+        score_index_vec.erase(score_index_vec.begin());
+
+        if (top_k > -1 && indices->size() >= top_k) {
+            // Stop if finding enough bboxes for nms.
+            break;
+        }
+
+        // Compute overlap between best_bbox and other remaining bboxes.
+        // Remove a bbox if the overlap with best_bbox is larger than nms_threshold.
+        for (std::vector<std::pair<float, int> >::iterator it = score_index_vec.begin();
+             it != score_index_vec.end(); ) {
+            int cur_idx = it->second;
+            const NormalizedBBox& cur_bbox = bboxes[cur_idx];
+            if (BBoxSize(cur_bbox) < 1e-5) {
+                // Erase small box.
+                it = score_index_vec.erase(it);
+                continue;
+            }
+            float cur_overlap = 0.;
+            if (reuse_overlaps) {
+                if (overlaps->find(best_idx) != overlaps->end() &&
+                    overlaps->find(best_idx)->second.find(cur_idx) !=
+                    (*overlaps)[best_idx].end()) {
+                    // Use the computed overlap.
+                    cur_overlap = (*overlaps)[best_idx][cur_idx];
+                } else if (overlaps->find(cur_idx) != overlaps->end() &&
+                           overlaps->find(cur_idx)->second.find(best_idx) !=
+                           (*overlaps)[cur_idx].end()) {
+                    // Use the computed overlap.
+                    cur_overlap = (*overlaps)[cur_idx][best_idx];
+                } else {
+                    cur_overlap = JaccardOverlap(best_bbox, cur_bbox);
+                    // Store the overlap for future use.
+                    (*overlaps)[best_idx][cur_idx] = cur_overlap;
+                }
+            } else {
+                cur_overlap = JaccardOverlap(best_bbox, cur_bbox);
+            }
+
+            // Remove it if necessary
+            if (cur_overlap > threshold) {
+                it = score_index_vec.erase(it);
+            } else {
+                ++it;
+            }
+        }
+    }
+}
+
+void ApplyNMS(const bool* overlapped, const int num, std::vector<int>* indices) {
+    std::vector<int> index_vec;
+
+    for (int i = 0; i < num; i++) {
+        index_vec.push_back(i);
+    }
+
+    // Do nms.
+    indices->clear();
+
+    while (index_vec.size() != 0) {
+        // Get the current highest score box.
+        int best_idx = index_vec.front();
+        indices->push_back(best_idx);
+        // Erase the best box.
+        index_vec.erase(index_vec.begin());
+
+        for (std::vector<int>::iterator it = index_vec.begin(); it != index_vec.end();) {
+            int cur_idx = *it;
+
+            // Remove it if necessary
+            if (overlapped[best_idx * num + cur_idx]) {
+                it = index_vec.erase(it);
+            } else {
+                ++it;
+            }
+        }
+    }
+}
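+
+// The precomputed variant above treats `overlapped` as a row-major num x num
+// boolean matrix where overlapped[i * num + j] marks boxes i and j as
+// mutually suppressing. Sketch with made-up data:
+//
+//   bool overlapped[4] = {false, true, true, false};  // boxes 0 and 1 overlap
+//   std::vector<int> kept;
+//   ApplyNMS(overlapped, 2, &kept);  // kept == {0}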
+
+template<typename Dtype, typename ArrayOfBBox>
+void ApplyNMSFast_impl(const ArrayOfBBox& bboxes,
+                       const std::vector<Dtype>& scores, const float score_threshold,
+                       const float nms_threshold, const int top_k, std::vector<int>* indices) {
+    std::vector<std::pair<Dtype, int> > score_index_vec;
+
+    GetMaxScoreIndex(scores, score_threshold, top_k, &score_index_vec);
+
+    indices->clear();
+
+    while (score_index_vec.size() != 0) {
+        const int idx = score_index_vec.front().second;
+        bool keep = true;
+
+        for (int k = 0; k < indices->size(); ++k) {
+            if (keep) {
+                const int kept_idx = (*indices)[k];
+                float overlap = JaccardOverlap(bboxes[idx], bboxes[kept_idx]);
+                keep = overlap <= nms_threshold;
+            } else {
+                break;
+            }
+        }
+        if (keep) {
+            indices->push_back(idx);
+        }
+
+        score_index_vec.erase(score_index_vec.begin());
+    }
+}
+
+template<typename Dtype>
+void ApplyNMSFast(const std::vector<NormalizedBBox>& bboxes,
+                  const std::vector<Dtype>& scores, const float score_threshold,
+                  const float nms_threshold, const int top_k, std::vector<int>* indices) {
+    ApplyNMSFast_impl<Dtype, std::vector<NormalizedBBox>>(bboxes, scores, score_threshold,
+                                                           nms_threshold, top_k, indices);
+}
+
+template<typename Dtype>
+void ApplyNMSFast(const BBoxArrayWrapper<Dtype>& bboxes,
+                  const std::vector<Dtype>& scores, const float score_threshold,
+                  const float nms_threshold, const int top_k, std::vector<int>* indices) {
+    ApplyNMSFast_impl<Dtype, BBoxArrayWrapper<Dtype>>(
+                                                      bboxes, scores, score_threshold,
+                                                      nms_threshold, top_k, indices);
+}
+
+// Explicit instantiation.
+template void ApplyNMSFast(const std::vector<NormalizedBBox>& bboxes,
+                           const std::vector<float>& scores, const float score_threshold,
+                           const float nms_threshold, const int top_k, std::vector<int>* indices);
+
+template void ApplyNMSFast(const std::vector<NormalizedBBox>& bboxes,
+                           const std::vector<double>& scores, const float score_threshold,
+                           const float nms_threshold, const int top_k, std::vector<int>* indices);
+
+template void ApplyNMSFast(const BBoxArrayWrapper<float>& bboxes,
+                           const std::vector<float>& scores, const float score_threshold,
+                           const float nms_threshold, const int top_k, std::vector<int>* indices);
+
+template void ApplyNMSFast(const BBoxArrayWrapper<double>& bboxes,
+                           const std::vector<double>& scores, const float score_threshold,
+                           const float nms_threshold, const int top_k, std::vector<int>* indices);
+
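+// End-to-end usage sketch for the fast path (made-up boxes and scores):
+//
+//   std::vector<NormalizedBBox> boxes(2);
+//   boxes[0].set_xmin(0.f);  boxes[0].set_ymin(0.f);
+//   boxes[0].set_xmax(.5f);  boxes[0].set_ymax(.5f);
+//   boxes[1] = boxes[0];  // duplicate box, IoU == 1
+//   std::vector<float> scores = {0.9f, 0.8f};
+//   std::vector<int> kept;
+//   ApplyNMSFast(boxes, scores, /*score_threshold=*/0.5f,
+//                /*nms_threshold=*/0.45f, /*top_k=*/-1, &kept);
+//   // kept == {0}: the duplicate is suppressed.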
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/bbox_util.h b/inference-engine/tests_deprecated/functional/vpu/common/bbox_util.h
new file mode 100644 (file)
index 0000000..0181cec
--- /dev/null
@@ -0,0 +1,247 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <stdint.h>
+#include <cmath>  // for std::fabs and std::signbit
+#include <map>
+#include <string>
+#include <utility>
+#include <vector>
+
+enum CodeType {
+    CORNER = 1,
+    CENTER_SIZE = 2,
+};
+
+enum EmitType {
+    CENTER = 0,
+    MIN_OVERLAP = 1,
+};
+
+enum ConfLossType {
+    SOFTMAX = 0,
+    LOGISTIC = 1,
+    HINGE = 2,
+    PIECEWISE_LINEAR = 3,
+};
+
+struct NormalizedBBox {
+public:
+    NormalizedBBox() {
+        reliable_location = true;
+        orientation = -10;
+    }
+
+    void set_xmin(const float& v) { _xmin = v; }
+    void set_ymin(const float& v) { _ymin = v; }
+    void set_xmax(const float& v) { _xmax = v; }
+    void set_ymax(const float& v) { _ymax = v; }
+
+    void set_size(const float& v) { _size = v; }
+
+    const float& xmin() const { return _xmin; }
+    const float& ymin() const { return _ymin; }
+    const float& xmax() const { return _xmax; }
+    const float& ymax() const { return _ymax; }
+
+    float _xmin = 0;
+    float _ymin = 0;
+    float _xmax = 0;
+    float _ymax = 0;
+
+    int label = 0;
+    bool difficult = false;
+    float score = 0;
+    float _size = 0;
+    bool reliable_location = false;
+    float orientation = 0;
+};
+
+typedef std::map<int, std::vector<NormalizedBBox> > LabelBBox;
+
+template <typename Dtype>
+struct BBox {
+  Dtype data[4];
+  Dtype& xmin() { return data[0]; }
+  Dtype& ymin() { return data[1]; }
+  Dtype& xmax() { return data[2]; }
+  Dtype& ymax() { return data[3]; }
+
+  const Dtype& xmin() const { return data[0]; }
+  const Dtype& ymin() const { return data[1]; }
+  const Dtype& xmax() const { return data[2]; }
+  const Dtype& ymax() const { return data[3]; }
+
+  void set_xmin(const Dtype& v) { xmin() = v; }
+  void set_ymin(const Dtype& v) { ymin() = v; }
+  void set_xmax(const Dtype& v) { xmax() = v; }
+  void set_ymax(const Dtype& v) { ymax() = v; }
+};
+
+template <typename T>
+struct ArrayWrapper {
+    ArrayWrapper() : _data(NULL), _count(0) {}
+    ArrayWrapper(const T* data, int count) : _data(data), _count(count) {}
+    ArrayWrapper(const ArrayWrapper& other) : _data(other._data), _count(other._count) {}
+    size_t size() const { return _count; }
+    size_t sizeb() const { return _count * sizeof(T); }
+
+    inline const T& operator[](int i) const {
+        return _data[i];
+    }
+
+    const T* _data;
+    int _count;
+};
+
+template <typename Dtype>
+struct BBoxArrayWrapper : public ArrayWrapper<BBox<Dtype> > {
+    BBoxArrayWrapper() {}
+    BBoxArrayWrapper(const BBox<Dtype>* data, int count) : ArrayWrapper<BBox<Dtype> >(data, count) {}
+    BBoxArrayWrapper(const BBoxArrayWrapper& other) : ArrayWrapper<BBox<Dtype> >(other) {}
+};
+
+class DetectionResult {
+public:
+    explicit DetectionResult(float score = 0, float odiff = -10.f) : mScore(score), mOrientationDiff(odiff) {}
+    float mScore;
+    float mOrientationDiff;
+};
+
+// Compute the Jaccard (intersection over union, IoU) overlap between two bboxes.
+float JaccardOverlap(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2,
+                     const bool normalized = true);
+
+template<typename Dtype>
+Dtype JaccardOverlap(const BBox<Dtype>& bbox1, const BBox<Dtype>& bbox2,
+                     const bool normalized = true);
+
+// Compute bbox size.
+float BBoxSize(const NormalizedBBox& bbox, const bool normalized = true);
+
+template<typename Dtype>
+Dtype BBoxSize(const BBox<Dtype>& bbox, const bool normalized = true);
+
+// Get location predictions from loc_data.
+//    loc_data: num x num_preds_per_class * num_loc_classes * 4 blob.
+//    num: the number of images.
+//    num_preds_per_class: number of predictions per class.
+//    num_loc_classes: number of location classes. It is 1 if share_location is
+//      true; otherwise it is equal to the number of classes to predict.
+//    share_location: if true, all classes share the same location prediction.
+//    loc_preds: stores the location prediction, where each item contains
+//      location prediction for an image.
+template <typename Dtype>
+void GetLocPredictions(const Dtype* loc_data, const int num,
+                       const int num_preds_per_class, const int num_loc_classes,
+                       const bool share_location, std::vector<LabelBBox>* loc_preds);
+
+template <typename Dtype>
+void GetLocPredictions(const Dtype* loc_data, const int num,
+                       const int num_preds_per_class, const int num_loc_classes,
+                       const bool share_location, std::vector<std::map<int, BBoxArrayWrapper<Dtype>>>* loc_preds);
+
+// Get confidence predictions from conf_data.
+//    conf_data: num x num_preds_per_class * num_classes blob.
+//    num: the number of images.
+//    num_preds_per_class: number of predictions per class.
+//    num_classes: number of classes.
+//    conf_scores: stores the confidence prediction, where each item contains
+//      confidence prediction for an image.
+template <typename Dtype>
+void GetConfidenceScores(const Dtype* conf_data, const int num,
+                         const int num_preds_per_class, const int num_classes,
+                         std::vector<std::map<int, std::vector<Dtype> > >* conf_scores);
+
+// Get confidence predictions from conf_data.
+//    conf_data: num x num_preds_per_class * num_classes blob.
+//    num: the number of images.
+//    num_preds_per_class: number of predictions per class.
+//    num_classes: number of classes.
+//    class_major: if true, data layout is
+//      num x num_classes x num_preds_per_class; otherwise, data layout is
+//      num x num_preds_per_class * num_classes.
+//    conf_scores: stores the confidence prediction, where each item contains
+//      confidence prediction for an image.
+template <typename Dtype>
+void GetConfidenceScores(const Dtype* conf_data, const int num,
+                         const int num_preds_per_class, const int num_classes,
+                         const bool class_major, std::vector<std::map<int, std::vector<Dtype>>>* conf_scores);
+
+// Get prior bounding boxes from prior_data.
+//    prior_data: 1 x 2 x num_priors * 4 x 1 blob.
+//    num_priors: number of priors.
+//    prior_bboxes: stores all the prior bboxes in the format of NormalizedBBox.
+//    prior_variances: stores all the variances needed by prior bboxes.
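+//      Both vectors must be sized by the caller (num_priors and num_priors * 4 entries).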
+template <typename Dtype>
+void GetPriorBBoxes(const Dtype* prior_data, const int num_priors,
+                    std::vector<NormalizedBBox>& prior_bboxes,
+                    std::vector<float>& prior_variances);
+
+// Get orientation predictions from orient_data.
+//    orient_data: num x num_priors * num_orient_classes blob.
+//    num: the number of images.
+//    num_priors: number of priors.
+//    num_orient_classes: number of orientation classes (bins).
+//    orient_preds: stores the orientation probability predictions, where each item contains
+//      predictions for an image.
+template <typename Dtype>
+void GetOrientationScores(const Dtype* orient_data, const int num,
+                          const int num_priors, const int num_orient_classes,
+                          std::vector<std::vector<std::vector<float>>>* orient_preds);
+
+template <typename Dtype>
+void GetOrientationScores(const Dtype* orient_data, const int num,
+                          const int num_priors, const int num_orient_classes,
+                          std::vector<std::vector<ArrayWrapper<Dtype>>>* orient_preds);
+
+// Decode all bboxes in a batch.
+void DecodeBBoxesAll(const std::vector<LabelBBox>& all_loc_preds,
+                     const std::vector<NormalizedBBox>& prior_bboxes,
+                     const std::vector<float>& prior_variances,
+                     const int num, const bool share_location,
+                     const int num_loc_classes, const int background_label_id,
+                     const CodeType code_type, const bool variance_encoded_in_target,
+                     std::vector<LabelBBox>* all_decode_bboxes);
+
+// Do non-maximum suppression given bboxes and scores.
+// Inspired by Piotr Dollar's NMS implementation in EdgeBox.
+// https://goo.gl/jV3JYS
+//    bboxes: a set of bounding boxes.
+//    scores: a set of corresponding confidences.
+//    score_threshold: a threshold used to filter detection results.
+//    nms_threshold: a threshold used in non maximum suppression.
+//    top_k: if not -1, keep at most top_k picked indices.
+//    indices: the kept indices of bboxes after nms.
+template<typename Dtype>
+void ApplyNMSFast(const std::vector<NormalizedBBox>& bboxes,
+                  const std::vector<Dtype>& scores, const float score_threshold,
+                  const float nms_threshold, const int top_k, std::vector<int>* indices);
+
+template<typename Dtype>
+void ApplyNMSFast(const BBoxArrayWrapper<Dtype>& bboxes,
+                  const std::vector<Dtype>& scores, const float score_threshold,
+                  const float nms_threshold, const int top_k, std::vector<int>* indices);
+
+// Function used to sort std::pair<float, T>, stored in an STL container (e.g. vector),
+// in descending order based on the score (first) value.
+template <typename T>
+bool SortScorePairDescend(const std::pair<float, T>& pair1,
+                          const std::pair<float, T>& pair2);
+
+template <typename T>
+bool SortScorePairDescendStable(const std::pair<float, T>& pair1,
+                                const std::pair<float, T>& pair2);
+
+// Clip the NormalizedBBox such that the range for each corner is [0, 1].
+void ClipBBox(const NormalizedBBox& bbox, NormalizedBBox* clip_bbox);
+
+template<typename Dtype>
+void ClipBBox(const BBox<Dtype>& bbox, BBox<Dtype>* clip_bbox);
+
+float get_orientation(const std::vector<float>& bin_vals, bool interpolate_orientation);
+template<typename Dtype>
+Dtype get_orientation(const ArrayWrapper<Dtype>& bin_vals, bool interpolate_orientation);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/blob_reader_tests.cpp b/inference-engine/tests_deprecated/functional/vpu/common/blob_reader_tests.cpp
new file mode 100644 (file)
index 0000000..97a7c88
--- /dev/null
@@ -0,0 +1,202 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <tests_common.hpp>
+#include <memory>
+
+#include <ie_common.h>
+#include <cpp/ie_cnn_network.h>
+
+#include <vpu/blob_reader.hpp>
+#include <vpu/graph_transformer.hpp>
+#include <vpu/utils/logger.hpp>
+
+#include <myriad_plugin/myriad_config.h>
+#include <ngraph/op/util/attr_types.hpp>
+#include <ngraph_functions/subgraph_builders.hpp>
+
+using namespace ::testing;
+using namespace vpu;
+using namespace InferenceEngine;
+
+class VPUBlobReaderHeaderTests: public TestsCommon, public testing::WithParamInterface<std::vector<size_t>> {
+private:
+    std::vector<size_t> inputShape;
+
+public:
+    size_t getElemSizeByPrecision(Precision precision) {
+        size_t elemSize = 0;
+        switch (precision) {
+        case Precision::U8:
+            elemSize = 1;
+            break;
+        case Precision::FP16:
+            elemSize = 2;
+            break;
+        case Precision::FP32:
+            elemSize = 4;
+            break;
+        default:
+            throw std::runtime_error(std::string("unsupported precision: ") + precision.name());
+        }
+
+        return elemSize;
+    }
+
+    void SetUp() override {
+        auto fn_ptr = ngraph::builder::subgraph::makeSplitConvConcat();
+        ASSERT_NO_THROW(_network = InferenceEngine::CNNNetwork(fn_ptr));
+
+        CompilationConfig compileConfig;
+        auto log = std::make_shared<Logger>("GraphCompiler", LogLevel::None, consoleOutput());
+        _compiledGraph = compileNetwork(_network, Platform::MYRIAD_X, compileConfig, log);
+    }
+
+    CNNNetwork _network;
+    CompiledGraph::Ptr _compiledGraph;
+};
+
+TEST_P(VPUBlobReaderHeaderTests, canReadCorrectMagicNumber) {
+    BlobReader blobReader;
+    ASSERT_NO_THROW(blobReader.parse(_compiledGraph->blob));
+
+    ASSERT_EQ(BLOB_MAGIC_NUMBER, blobReader.getMagicNumber());
+}
+
+TEST_P(VPUBlobReaderHeaderTests, canReadCorrectStageCount) {
+    BlobReader blobReader;
+    ASSERT_NO_THROW(blobReader.parse(_compiledGraph->blob));
+
+    ASSERT_EQ(_compiledGraph->numActiveStages, blobReader.getStageCount());
+}
+
+TEST_P(VPUBlobReaderHeaderTests, canReadCorrectBlobVersion) {
+    BlobReader blobReader;
+    ASSERT_NO_THROW(blobReader.parse(_compiledGraph->blob));
+
+    ASSERT_EQ(BLOB_VERSION_MAJOR, blobReader.getVersionMajor());
+    ASSERT_EQ(BLOB_VERSION_MINOR, blobReader.getVersionMinor());
+}
+
+using VPUBlobReaderInputTests = VPUBlobReaderHeaderTests;
+
+TEST_P(VPUBlobReaderInputTests, areEqualTotalInputSizeFromBlobAndCalculatedFromInputDesc) {
+    BlobReader blobReader;
+    ASSERT_NO_THROW(blobReader.parse(_compiledGraph->blob));
+
+    size_t inputTotalSize = 0;
+    for (const auto &input : blobReader.getNetworkInputs()) {
+        auto dims = input.second->getTensorDesc().getDims();
+        auto precision = input.second->getPrecision();
+
+        inputTotalSize += std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<size_t>()) * getElemSizeByPrecision(precision);
+    }
+
+    auto inputInfo = blobReader.getInputInfo();
+    ASSERT_GE(inputInfo.totalSize, inputTotalSize);
+}
+
+TEST_P(VPUBlobReaderInputTests, canGetCorrectInputDimsFromImportedNetwork) {
+    BlobReader blobReader;
+    ASSERT_NO_THROW(blobReader.parse(_compiledGraph->blob));
+
+    auto parsedNetworkInputs = blobReader.getNetworkInputs();
+    auto expectedNetworkInputs = _network.getInputsInfo();
+
+    for (auto&& actual : parsedNetworkInputs) {
+        auto actualDims = actual.second->getTensorDesc().getDims();
+        size_t actualTotalSize = std::accumulate(actualDims.begin(), actualDims.end(), 1, std::multiplies<size_t>());
+
+        ASSERT_TRUE(expectedNetworkInputs.count(actual.first) > 0);
+        auto expectedDims = expectedNetworkInputs[actual.first]->getTensorDesc().getDims();
+        size_t expectedTotalSize = std::accumulate(expectedDims.begin(), expectedDims.end(), 1, std::multiplies<size_t>());
+
+        ASSERT_EQ(actualTotalSize, expectedTotalSize);
+    }
+}
+
+TEST_P(VPUBlobReaderInputTests, canGetCorrectInputNamesFromImportedNetwork) {
+    BlobReader blobReader;
+    ASSERT_NO_THROW(blobReader.parse(_compiledGraph->blob));
+
+    auto parsedNetworkInputs   = blobReader.getNetworkInputs();
+    auto expectedNetworkInputs = _network.getInputsInfo();
+
+    for (auto&& actual : parsedNetworkInputs) {
+        ASSERT_TRUE(expectedNetworkInputs.count(actual.first) > 0);
+    }
+
+    for (auto&& expected : expectedNetworkInputs) {
+        ASSERT_TRUE(parsedNetworkInputs.count(expected.first) > 0);
+    }
+}
+
+using VPUBlobReaderOutputTests = VPUBlobReaderHeaderTests;
+
+TEST_P(VPUBlobReaderOutputTests, areEqualTotalOutputSizeFromBlobAndCalculatedFromOutputDesc) {
+    BlobReader blobReader;
+    ASSERT_NO_THROW(blobReader.parse(_compiledGraph->blob));
+
+    size_t outputTotalSize = 0;
+    for (const auto &output : blobReader.getNetworkOutputs()) {
+        auto dims = output.second->getDims();
+        auto precision = output.second->getPrecision();
+
+        outputTotalSize += std::accumulate(dims.begin(), dims.end(), size_t{1}, std::multiplies<size_t>()) * getElemSizeByPrecision(precision);
+    }
+
+    auto outputInfo = blobReader.getOutputInfo();
+    ASSERT_GE(outputInfo.totalSize, outputTotalSize);
+}
+
+TEST_P(VPUBlobReaderOutputTests, canGetCorrectOutputDimsFromImportedNetwork) {
+    SetUp();
+    BlobReader blobReader;
+    ASSERT_NO_THROW(blobReader.parse(_compiledGraph->blob));
+
+    auto parsedNetworkOutputs = blobReader.getNetworkOutputs();
+    auto expectedNetworkOutputs = _network.getOutputsInfo();
+
+    for (auto&& actual : parsedNetworkOutputs) {
+        auto actualDims = actual.second->getDims();
+        size_t actualTotalSize = std::accumulate(actualDims.begin(), actualDims.end(), size_t{1}, std::multiplies<size_t>());
+
+        ASSERT_TRUE(expectedNetworkOutputs.count(actual.first) > 0);
+        auto expectedDims = expectedNetworkOutputs[actual.first]->getDims();
+        size_t expectedTotalSize = std::accumulate(expectedDims.begin(), expectedDims.end(), size_t{1}, std::multiplies<size_t>());
+
+        ASSERT_EQ(actualTotalSize, expectedTotalSize);
+    }
+}
+
+TEST_P(VPUBlobReaderOutputTests, canGetCorrectOutputNamesFromImportedNetwork) {
+    SetUp();
+    BlobReader blobReader;
+    ASSERT_NO_THROW(blobReader.parse(_compiledGraph->blob));
+
+    auto parsedNetworkOutputs   = blobReader.getNetworkOutputs();
+    auto expectedNetworkOutputs = _network.getOutputsInfo();
+
+    for (auto&& actual : parsedNetworkOutputs) {
+        ASSERT_TRUE(expectedNetworkOutputs.count(actual.first) > 0);
+    }
+
+    for (auto&& expected : expectedNetworkOutputs) {
+        ASSERT_TRUE(parsedNetworkOutputs.count(expected.first) > 0);
+    }
+}
+
+
+const std::vector<size_t> inputShape = {1, 4, 10, 10};
+
+INSTANTIATE_TEST_CASE_P(myriadBlobReader_nightly, VPUBlobReaderHeaderTests, ::testing::Values(inputShape));
+INSTANTIATE_TEST_CASE_P(myriadBlobReader_nightly, VPUBlobReaderInputTests, ::testing::Values(inputShape));
+INSTANTIATE_TEST_CASE_P(myriadBlobReader_nightly, VPUBlobReaderOutputTests, ::testing::Values(inputShape));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_CTCDecoder_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_CTCDecoder_test.cpp
new file mode 100644 (file)
index 0000000..49fa679
--- /dev/null
@@ -0,0 +1,10 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_CTCDecoder_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(myriad, myriadCTCDecoderLayerTests_nightly,
+        ::testing::Combine(
+        ::testing::Values(true, false),
+        ::testing::ValuesIn(s_DimsConfig)));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_CTCDecoder_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_CTCDecoder_test.hpp
new file mode 100644 (file)
index 0000000..d047eee
--- /dev/null
@@ -0,0 +1,154 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <cmath>
+#include "myriad_layers_tests.hpp"
+
+using namespace InferenceEngine;
+
+#define ERROR_BOUND 0.2f
+
+struct dims_config {
+    SizeVector src_dims;
+    SizeVector seq_ind_dims;
+    SizeVector dst_dims;
+    std::string custom_config;
+};
+
+PRETTY_PARAM(hwAcceleration, std::string);
+PRETTY_PARAM(dimsConfig, dims_config);
+
+typedef myriadLayerTestBaseWithParam<std::tuple<bool, dims_config>> myriadCTCDecoderLayerTests_nightly;
+
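+// Reference implementation of greedy CTC decoding (mirrors the Caffe
+// CTCGreedyDecoder): take the argmax class at every timestep, drop the blank
+// class (the last one) and merged repeats, and pad the rest of the output with -1.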
+void refCTCDecoder(const Blob::Ptr src, const Blob::Ptr seq_ind, Blob::Ptr dst) {
+    ie_fp16 *src_data = static_cast<ie_fp16*>(src->buffer());
+    ie_fp16 *src_seq_inp = static_cast<ie_fp16*>(seq_ind->buffer());
+    ie_fp16 *output_sequences = static_cast<ie_fp16*>(dst->buffer());
+    ASSERT_NE(src_data, nullptr);
+    ASSERT_NE(src_seq_inp, nullptr);
+    ASSERT_NE(output_sequences, nullptr);
+
+    const auto& dims = src->getTensorDesc().getDims();
+    size_t in_width      = dims[dims.size() - 1];
+    size_t in_height     = dims[dims.size() - 2];
+    size_t in_channels   = dims[dims.size() - 3];
+
+    size_t T_ = in_channels;
+    size_t N_ = in_height;
+    size_t C_ = in_width;
+
+    std::vector<int> seq_ind_data(T_);
+    seq_ind_data[0] = 0;
+    for (size_t i = 1; i < T_; i++) {
+        seq_ind_data[i] = static_cast<int>(PrecisionUtils::f16tof32(src_seq_inp[i]));
+    }
+
+    // Fill output_sequences with -1
+    for (size_t ii = 0; ii < T_; ii++) {
+        output_sequences[ii] = PrecisionUtils::f32tof16(-1.0f);
+    }
+    size_t output_index = 0;
+
+    // Caffe impl
+    for(size_t n = 0; n < N_; ++n) {
+        int prev_class_idx = -1;
+
+        for (size_t t = 0; /* check at end */; ++t) {
+            // get maximum probability and its index
+            int max_class_idx = 0;
+            ie_fp16* probs = src_data + t * C_;
+            ie_fp16 max_prob = probs[0];
+            ++probs;
+
+            for (size_t c = 1; c < C_; ++c, ++probs) {
+                // Compare as FP32: raw FP16 bit patterns only order correctly for non-negative values.
+                if (PrecisionUtils::f16tof32(*probs) > PrecisionUtils::f16tof32(max_prob)) {
+                    max_class_idx = static_cast<int>(c);
+                    max_prob = *probs;
+                }
+            }
+
+            // The blank symbol is the last class (C_ - 1); repeats are always
+            // merged (merge_repeated == true in the Caffe reference).
+            if (max_class_idx < (int)C_ - 1 && max_class_idx != prev_class_idx) {
+                output_sequences[output_index] = PrecisionUtils::f32tof16((float)max_class_idx);
+                output_index++;
+            }
+
+            prev_class_idx = max_class_idx;
+
+            // Stop at the last timestep or when the sequence indicator drops to zero.
+            if (t + 1 == T_ || seq_ind_data[t + 1] == 0) {
+                break;
+            }
+        }
+    }
+}
+
+TEST_P(myriadCTCDecoderLayerTests_nightly, CTCGreedyDecoder) {
+    bool HWConfigValue = std::get<0>(GetParam());
+    dims_config dimsConfig = std::get<1>(GetParam());
+
+    if (!dimsConfig.custom_config.empty() && !CheckMyriadX()) {
+        GTEST_SKIP() << "Custom layers are not supported on MYRIAD2";
+    }
+
+    _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = dimsConfig.custom_config;
+
+    IN_OUT_desc inputTensors;
+    IN_OUT_desc outputTensors;
+
+    inputTensors.resize(2);
+    outputTensors.resize(1);
+
+    inputTensors[0] = dimsConfig.src_dims;
+    inputTensors[1] = dimsConfig.seq_ind_dims;
+    outputTensors[0] = dimsConfig.dst_dims;
+
+    SetInputTensors(inputTensors);
+    SetOutputTensors(outputTensors);
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("CTCGreedyDecoder"), NetworkInitParams().useHWOpt(HWConfigValue)));
+
+    auto iter = _inputMap.begin();
+    auto first_input = iter->first;
+    ++iter;
+    auto second_input = iter->first;
+
+    Blob::Ptr data;
+    auto dataBlob = _inputMap[first_input];
+
+    auto seqIndBlob = _inputMap[second_input];
+    uint16_t *blobRawSeqFp16 = seqIndBlob->buffer().as<uint16_t *>();
+    size_t count = seqIndBlob->size();
+    blobRawSeqFp16[0] = PrecisionUtils::f32tof16(0.0f);
+    for (size_t indx = 1; indx < count; ++indx) {
+        blobRawSeqFp16[indx] = PrecisionUtils::f32tof16(1.0f);
+    }
+
+    std::string inputTensorBinary = TestDataHelpers::get_data_path() + "/vpu/InputGreedyDecoderMyriadCHW.bin";
+    ASSERT_TRUE(fromBinaryFile(inputTensorBinary, dataBlob));
+
+    ASSERT_TRUE(Infer());
+
+    auto outputBlob = _outputMap.begin()->second;
+
+    _refBlob = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, outputBlob->getTensorDesc().getDims(), ANY));
+    _refBlob->allocate();
+
+    refCTCDecoder(dataBlob, seqIndBlob, _refBlob);
+
+    CompareCommonAbsolute(outputBlob, _refBlob, 0.0);
+}
+
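+// Each entry: {src_dims, seq_ind_dims, dst_dims, custom_config}.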
+static std::vector<dims_config> s_DimsConfig = {
+    {{88, 1, 71}, {88,  1}, {1, 88, 1, 1}, ""},
+#ifdef VPU_HAS_CUSTOM_KERNELS
+    {{88, 1, 71}, {88,  1}, {1, 88, 1, 1}, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+#endif
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_batch_normalization_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_batch_normalization_test.cpp
new file mode 100644 (file)
index 0000000..9739f14
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_batch_normalization_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        accuracy, myriadLayersTestsBatchNormalization_nightly,
+        ::testing::Values(
+                bn_test_params{{1, 1, 16, 8}, 0.001f},
+                bn_test_params{{1, 4, 8, 16}, 0.00001f},
+                bn_test_params{{1, 44, 88, 16}, 0.003f},
+                bn_test_params{{1, 16, 32, 32}, 0.00005f},
+                bn_test_params{{1, 512, 7, 7}, 0.0000096f}));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_batch_normalization_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_batch_normalization_test.hpp
new file mode 100644 (file)
index 0000000..8028d60
--- /dev/null
@@ -0,0 +1,92 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+
+#define ERROR_BOUND (1.e-2f)
+#define WEIGHTS_BOUND (12.f)
+
+using namespace InferenceEngine;
+
+struct bn_test_params {
+    tensor_test_params in;
+    float epsilon;
+    friend std::ostream& operator<<(std::ostream& os, bn_test_params const& tst)
+    {
+        return os << tst.in
+                  << ", epsilon=" << tst.epsilon;
+    }
+};
+
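+// Reference batch normalization: the weights buffer holds per-channel variances
+// followed by per-channel means, which are folded into a scale 1/sqrt(var + eps)
+// and a bias -mean/sqrt(var + eps), i.e. dst = (src - mean) / sqrt(var + eps).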
+void ref_batch_normalization(const InferenceEngine::Blob::Ptr src,
+                      const uint16_t *weights, const size_t weightsSize,
+                      InferenceEngine::Blob::Ptr dst, float eps)
+{
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(weights, nullptr);
+    ASSERT_NE(dst, nullptr);
+
+    const auto& src_dims = src->getTensorDesc().getDims();
+
+    size_t IW = src_dims[3];
+    size_t IH = src_dims[2];
+    size_t IC = src_dims[1];
+
+    std::vector<float> new_weights(IC);
+    std::vector<float> new_bias(IC);
+
+    const uint16_t *src_data = src->buffer();
+    const uint16_t *weights_data = weights;
+    const uint16_t *bias_data = weights_data + IC;
+    uint16_t *dst_data = dst->buffer();
+    for (size_t ic = 0; ic < IC; ic++) {
+        float val = PrecisionUtils::f16tof32(weights_data[ic]) + eps;
+        val = 1.0f / std::sqrt(val);
+        new_weights[ic] = val;
+        new_bias[ic] = -val * PrecisionUtils::f16tof32(bias_data[ic]);
+    }
+    for (size_t ic = 0; ic < IC; ic++) {
+        float val = new_bias[ic];
+        for (size_t kh = 0; kh < IH; kh++) {
+            for (size_t  kw = 0; kw < IW; kw++) {
+                size_t iidx = ic + kw * IC + kh * IC * IW;
+                float res = val + PrecisionUtils::f16tof32(src_data[iidx]) * new_weights[ic];
+                dst_data[iidx] = PrecisionUtils::f32tof16(res);
+            }
+        }
+    }
+}
+
+class myriadLayersTestsBatchNormalization_nightly: public myriadLayersTests_nightly,
+                           public testing::WithParamInterface<bn_test_params> {
+};
+
+TEST_P(myriadLayersTestsBatchNormalization_nightly, TestsBatchNorm)
+{
+    bn_test_params p = ::testing::WithParamInterface<bn_test_params>::GetParam();
+    size_t sz_weights = p.in.c;
+    size_t sz_bias = p.in.c;
+    size_t sz = sz_weights + sz_bias;
+    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(GenWeights(sz, -WEIGHTS_BOUND, WEIGHTS_BOUND));
+    uint16_t* weights = weights_ptr->data().as<uint16_t*>();
+    for (size_t i = 0; i < sz_weights; ++i) {
+        /* weights hold variances, so all gains must be >= 0 */
+        weights[i] = PrecisionUtils::f32tof16(std::fabs(PrecisionUtils::f16tof32(weights[i])));
+    }
+    IN_OUT_desc inpt = {{p.in.n, p.in.c, p.in.h, p.in.w}};
+    SetInputTensors(inpt);
+    SetOutputTensors(inpt);
+    std::map<std::string, std::string> params;
+    params["epsilon"] = std::to_string(p.epsilon);
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("BatchNormalization")
+                                        .params(params)
+                                        .weights(sz_weights)
+                                        .biases(sz_bias),
+                                        {},
+                                        weights_ptr));
+    ASSERT_TRUE(Infer());
+    ref_batch_normalization(_inputMap.begin()->second, weights, sz, _refBlob, p.epsilon);
+    CompareCommonWithNorm(_outputMap.begin()->second, _refBlob, ERROR_BOUND);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_bias_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_bias_test.cpp
new file mode 100644 (file)
index 0000000..f8521f8
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_bias_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsBias_nightly,
+        ::testing::ValuesIn(s_biasDims)
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_bias_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_bias_test.hpp
new file mode 100644 (file)
index 0000000..61abb88
--- /dev/null
@@ -0,0 +1,121 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu/model/data_desc.hpp"
+#include "myriad_layers_tests.hpp"
+
+#define ERROR_BOUND (1.e-3f)
+
+using namespace InferenceEngine;
+
+namespace {
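+    // Advances the multi-dimensional index 'in' inside the shape 'out' in
+    // row-major (odometer) order; returns false once every position was visited.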
+    bool iter(SizeVector& in, SizeVector& out) {
+        bool flag = true;
+        for (size_t t = 0; t < out.size(); t++) {
+            size_t i = out.size() - 1 - t;
+            if (in[i] < out[i] - 1) {
+                in[i]++;
+                break;
+            } else {
+                if (i == 0) {
+                    flag = false;
+                    break;
+                }
+                in[i] = 0;
+            }
+        }
+        return flag;
+    }
+
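+    // Computes the linear row-major offset of the index 'in' within the shape 'out'.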
+    int calcOffset(SizeVector& in, SizeVector& out) {
+        int offset = in.back();
+        for (int i = static_cast<int>(in.size()) - 2; i >= 0; i--) {
+            int mul = in[i];
+            for (size_t j = i + 1; j < out.size(); j++)
+                mul *= out[j];
+            offset += mul;
+        }
+        return offset;
+    }
+}
+
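+// Reference bias: broadcasts the per-channel bias blob 'src2' over 'src1' and
+// writes the sum to 'dst'; NCHW/NHWC use explicit index math, any other layout
+// falls back to a generic dimension walk over the helpers above.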
+void ref_bias(const InferenceEngine::Blob::Ptr src1,
+              const InferenceEngine::Blob::Ptr src2,
+              InferenceEngine::Blob::Ptr dst) {
+    ASSERT_NE(src1, nullptr);
+    ASSERT_NE(src2, nullptr);
+    ASSERT_NE(dst, nullptr);
+
+    SizeVector in_size;
+    SizeVector out_size;
+    in_size = src1->getTensorDesc().getDims();
+    out_size = dst->getTensorDesc().getDims();
+    Layout layout = src1->getTensorDesc().getLayout();
+    const uint16_t *src_data = src1->buffer();
+    const uint16_t *bias_data = src2->buffer();
+    uint16_t *dst_data = dst->buffer();
+
+    // TODO: investigate this case
+    if (layout == NCHW || layout == NHWC) {
+        size_t N1 = out_size[0];
+        size_t C1 = out_size[1];
+        size_t H1 = out_size[2];
+        size_t W1 = out_size[3];
+        for (size_t n = 0; n < N1; n++) {
+            for (size_t c = 0; c < C1; c++) {
+                float val = PrecisionUtils::f16tof32(bias_data[c]);
+                for (size_t h = 0; h < H1; h++) {
+                    for (size_t w = 0; w < W1; w++) {
+                        size_t iidx = layout == NCHW ?
+                                           w + h * W1 + c * W1 * H1 + n * W1 * H1 * C1 :
+                                           c + w * C1 + h * C1 * W1 + n * W1 * H1 * C1;
+                        float res = val + PrecisionUtils::f16tof32(src_data[iidx]);
+                        dst_data[iidx] = PrecisionUtils::f32tof16(res);
+                    }
+                }
+            }
+        }
+    } else {
+        int dims = out_size.size();
+        int dimC = dimToIeInd(vpu::Dim::C, dims);
+        SizeVector curr_size(dims);
+        do {
+            float val = PrecisionUtils::f16tof32(bias_data[curr_size[dimC]]);
+            float res = val + PrecisionUtils::f16tof32(src_data[calcOffset(curr_size, in_size)]);
+            dst_data[calcOffset(curr_size, out_size)] = PrecisionUtils::f32tof16(res);
+        } while(iter(curr_size, out_size));
+    }
+}
+
+class myriadLayersTestsBias_nightly: public myriadLayersTests_nightly,
+                             public testing::WithParamInterface<InferenceEngine::SizeVector> {
+};
+
+TEST_P(myriadLayersTestsBias_nightly, TestsBias) {
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+    auto input_dim = GetParam();
+    InferenceEngine::SizeVector input_dim1;
+    auto dims = input_dim.size();
+    int dimC = dimToIeInd(vpu::Dim::C, dims);
+    input_dim1.push_back(input_dim[dimC]);
+    SetInputTensors({input_dim, input_dim1});
+    SetOutputTensors({input_dim});
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Bias")));
+
+    ASSERT_TRUE(Infer());
+    ASSERT_EQ(_inputMap.size(), 2);
+    ASSERT_EQ(_outputMap.size(), 1);
+    auto iter = _inputMap.begin();
+    auto first_input = iter->second;
+    ++iter;
+    auto second_input = iter->second;
+    ref_bias(first_input, second_input, _refBlob);
+    CompareCommonAbsolute(_outputMap.begin()->second, _refBlob, ERROR_BOUND);
+}
+
+static std::vector<InferenceEngine::SizeVector> s_biasDims = {
+    {4, 10, 8, 4, 4},
+    {10, 8, 4, 4},
+    {32, 8, 16}
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_blob_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_blob_test.cpp
new file mode 100644 (file)
index 0000000..f663e73
--- /dev/null
@@ -0,0 +1,418 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <functional_test_utils/precision_utils.hpp>
+#include <ngraph_functions/subgraph_builders.hpp>
+#include "myriad_layers_tests.hpp"
+#include "vpu_tests_config.hpp"
+
+using namespace InferenceEngine;
+using namespace ::testing;
+
+typedef myriadLayerTestBaseWithParam<std::string> myriadBlobTests_nightly;
+
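+// Reads an entire binary file into a byte vector; used below to compare exported blobs byte-for-byte.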
+std::vector<char> readBinFile(const std::string& filename)
+{
+    std::ifstream file(filename, std::ios::binary);
+    file.unsetf(std::ios::skipws);
+
+    std::streampos fileSize;
+
+    file.seekg(0, std::ios::end);
+    fileSize = file.tellg();
+    file.seekg(0, std::ios::beg);
+
+    std::vector<char> vec;
+    vec.reserve(fileSize);
+
+    vec.insert(vec.begin(),
+        std::istream_iterator<char>(file),
+        std::istream_iterator<char>());
+
+    return vec;
+}
+
+TEST_P(myriadBlobTests_nightly, CanGetSameBlobsOnSameIR) {
+    std::string HWConfigValue = GetParam();
+
+    auto fnPtr = ngraph::builder::subgraph::makeSplitConvConcat();
+    ASSERT_NO_THROW(_cnnNetwork = CNNNetwork(fnPtr));
+
+    const size_t countBlobsToDump = 3;
+    std::vector<std::string> filenames(countBlobsToDump);
+    for (size_t i = 0; i < countBlobsToDump; i++) {
+
+        StatusCode st;
+        ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, _cnnNetwork,
+            {{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), HWConfigValue}}, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        std::stringstream modelFilenameStream;
+        modelFilenameStream << "spltConvConcat" << i << ".blob";
+        filenames[i] = modelFilenameStream.str();
+        ASSERT_NO_THROW(_exeNetwork->Export(modelFilenameStream.str(), nullptr));
+    }
+
+    for (size_t i = 0; i < filenames.size() - 1; i++) {
+        std::vector<char> blob1 = readBinFile(filenames[i]);
+        std::vector<char> blob2 = readBinFile(filenames[i + 1]);
+        ASSERT_TRUE(blob1 == blob2);
+    }
+
+    for (const auto& filename : filenames) {
+        std::remove(filename.c_str());
+    }
+}
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadBlobTests_nightly,
+    ::testing::Values(CONFIG_VALUE(YES), CONFIG_VALUE(NO))
+);
+
+using myriadBlobExportTests_nightly = myriadLayersTests_nightly;
+
+
+TEST_F(myriadBlobExportTests_nightly, CanNotDoImportOnNonExistFile)
+{
+    InferenceEngine::IExecutableNetwork::Ptr importedNetworkPtr;
+    ASSERT_EQ(StatusCode::NETWORK_NOT_READ, _vpuPluginPtr->ImportNetwork(importedNetworkPtr, "I_dont_exist.blob", {}, nullptr));
+}
+
+TEST_F(myriadBlobExportTests_nightly, CanInferImportedNetworkOnExportedBlob)
+{
+    auto fnPtr = ngraph::builder::subgraph::makeSplitConvConcat();
+    ASSERT_NO_THROW(_cnnNetwork = CNNNetwork(fnPtr));
+
+    ASSERT_EQ(StatusCode::OK, _vpuPluginPtr->LoadNetwork(_exeNetwork, _cnnNetwork, { }, &_resp)) << _resp.msg;
+    std::stringstream modelFilenameStream;
+    modelFilenameStream << "SplitConvConcat" << ".blob";
+    ASSERT_EQ(StatusCode::OK, _exeNetwork->Export(modelFilenameStream.str(), &_resp)) << _resp.msg;
+
+    InferenceEngine::IExecutableNetwork::Ptr importedNetworkPtr;
+    ASSERT_EQ(StatusCode::OK, _vpuPluginPtr->ImportNetwork(importedNetworkPtr, modelFilenameStream.str(), {}, &_resp)) << _resp.msg;
+    InferenceEngine::IInferRequest::Ptr inferRequest;
+    ASSERT_EQ(StatusCode::OK, importedNetworkPtr->CreateInferRequest(inferRequest, &_resp)) << _resp.msg;
+
+    ASSERT_EQ(StatusCode::OK, inferRequest->Infer(&_resp)) << _resp.msg;
+}
+
+TEST_F(myriadBlobExportTests_nightly, CanGetPerfCountsImportedNetwork)
+{
+    auto fnPtr = ngraph::builder::subgraph::makeSplitConvConcat();
+    ASSERT_NO_THROW(_cnnNetwork = CNNNetwork(fnPtr));
+
+    ASSERT_EQ(StatusCode::OK, _vpuPluginPtr->LoadNetwork(_exeNetwork, _cnnNetwork, {}, &_resp)) << _resp.msg;
+    std::stringstream modelFilenameStream;
+    modelFilenameStream << "splitConvConcat" << ".blob";
+    ASSERT_EQ(StatusCode::OK, _exeNetwork->Export(modelFilenameStream.str(), &_resp)) << _resp.msg;
+
+    std::map<std::string, std::string> config = { {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)} };
+    InferenceEngine::IExecutableNetwork::Ptr importedNetworkPtr;
+    ASSERT_EQ(StatusCode::OK, _vpuPluginPtr->ImportNetwork(importedNetworkPtr, modelFilenameStream.str(), config, &_resp)) << _resp.msg;
+    InferenceEngine::IInferRequest::Ptr inferRequest;
+    ASSERT_EQ(StatusCode::OK, importedNetworkPtr->CreateInferRequest(inferRequest, &_resp)) << _resp.msg;
+
+    ASSERT_EQ(StatusCode::OK, inferRequest->Infer(&_resp)) << _resp.msg;
+    std::map<std::string, InferenceEngineProfileInfo> perfCounts;
+    ASSERT_EQ(StatusCode::OK, inferRequest->GetPerformanceCounts(perfCounts, &_resp)) << _resp.msg;
+
+    ASSERT_NE(0, perfCounts.size());
+    for (const auto &perfInfoElem : perfCounts) {
+        InferenceEngineProfileInfo perfInfo = perfInfoElem.second;
+        ASSERT_EQ(perfInfo.status, InferenceEngineProfileInfo::LayerStatus::EXECUTED);
+        ASSERT_STREQ(perfInfo.exec_type, "UNKNOWN");
+        ASSERT_STREQ(perfInfo.layer_type, "UNKNOWN");
+        ASSERT_NE(perfInfo.realTime_uSec, 0);
+    }
+}
+
+class myriadConfigsWithBlobImportTests_nightly: public myriadLayersTests_nightly {
+protected:
+    // cout is redirected into this stream so that the tests below can
+    // inspect the warnings printed while a network is being imported
+    std::stringstream redirectCoutStream;
+
+    void SetUp() override {
+        myriadLayersTests_nightly::SetUp();
+        backup = std::cout.rdbuf();
+        std::cout.rdbuf(redirectCoutStream.rdbuf());
+    }
+
+    void TearDown() override {
+        myriadLayersTests_nightly::TearDown();
+        std::cout.rdbuf(backup);
+        std::cout << redirectCoutStream.str();
+    }
+
+private:
+    std::streambuf *backup;
+};
+
+
+TEST_F(myriadConfigsWithBlobImportTests_nightly, TryingToSetCompileOptionPrintsWarning)
+{
+    auto fnPtr = ngraph::builder::subgraph::makeSplitConvConcat();
+    ASSERT_NO_THROW(_cnnNetwork = CNNNetwork(fnPtr));
+
+    ASSERT_EQ(StatusCode::OK, _vpuPluginPtr->LoadNetwork(_exeNetwork, _cnnNetwork, {}, &_resp)) << _resp.msg;
+    std::stringstream modelFilenameStream;
+    modelFilenameStream << "splitConvConcat" << ".blob";
+    ASSERT_EQ(StatusCode::OK, _exeNetwork->Export(modelFilenameStream.str(), &_resp)) << _resp.msg;
+
+
+    std::map<std::string, std::string> config = { {VPU_CONFIG_KEY(COPY_OPTIMIZATION), CONFIG_VALUE(YES)},
+                                                  {VPU_CONFIG_KEY(IGNORE_UNKNOWN_LAYERS), CONFIG_VALUE(YES)},
+                                                  {VPU_CONFIG_KEY(NONE_LAYERS), CONFIG_VALUE(YES)},
+                                                  {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)},
+                                                  {VPU_CONFIG_KEY(NUMBER_OF_SHAVES), std::to_string(10)},
+                                                  {VPU_CONFIG_KEY(NUMBER_OF_CMX_SLICES), std::to_string(10)} };
+
+    IE_SUPPRESS_DEPRECATED_START
+    config[VPU_CONFIG_KEY(INPUT_NORM)] = std::to_string(1.f);
+    config[VPU_CONFIG_KEY(INPUT_BIAS)] = std::to_string(1.f);
+    IE_SUPPRESS_DEPRECATED_END
+
+    InferenceEngine::IExecutableNetwork::Ptr importedNetworkPtr;
+    ASSERT_EQ(StatusCode::OK, _vpuPluginPtr->ImportNetwork(importedNetworkPtr, modelFilenameStream.str(), config, &_resp)) << _resp.msg;
+
+    std::string content = redirectCoutStream.str();
+    for (auto &&elem : config) {
+        std::stringstream expectedMsgStream;
+        expectedMsgStream << "[Warning][VPU][Config] " << elem.first;
+        std::string msg = expectedMsgStream.str();
+        ASSERT_TRUE(content.find(msg) != std::string::npos) << msg;
+    }
+}
+
+TEST_F(myriadConfigsWithBlobImportTests_nightly, TryingToSetRuntimeOptionDoesNotPrintWarning)
+{
+    auto fnPtr = ngraph::builder::subgraph::makeSplitConvConcat();
+    ASSERT_NO_THROW(_cnnNetwork = CNNNetwork(fnPtr));
+
+    ASSERT_EQ(StatusCode::OK, _vpuPluginPtr->LoadNetwork(_exeNetwork, _cnnNetwork, {}, &_resp)) << _resp.msg;
+    std::stringstream modelFilenameStream;
+    modelFilenameStream << "splitConvConcat" << ".blob";
+    ASSERT_EQ(StatusCode::OK, _exeNetwork->Export(modelFilenameStream.str(), &_resp)) << _resp.msg;
+
+    std::map<std::string, std::string> config = { {CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS), CONFIG_VALUE(YES)},
+                                                  {CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_INFO)},
+                                                  {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)},
+                                                  {VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), CONFIG_VALUE(YES)} };
+    if (vpu::tests::deviceForceReset()) {
+        config.insert({VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(NO)});
+        config.insert({VPU_CONFIG_KEY(PLATFORM), VPU_CONFIG_VALUE(2480)});
+    }
+
+    InferenceEngine::IExecutableNetwork::Ptr importedNetworkPtr;
+    ASSERT_EQ(StatusCode::OK, _vpuPluginPtr->ImportNetwork(importedNetworkPtr, modelFilenameStream.str(), config, &_resp)) << _resp.msg;
+
+    std::string content = redirectCoutStream.str();
+    for (auto &&elem : config) {
+        std::stringstream expectedMsgStream;
+        expectedMsgStream << "Warning:" << elem.first;
+        std::string msg = expectedMsgStream.str();
+        ASSERT_EQ(content.find(msg), std::string::npos);
+    }
+}
+
+
+using myriadBlobExportAccuracyDifferentCountInAndOutTests_nightly = myriadLayerTestBaseWithParam<std::vector<size_t>>;
+
+TEST_F(myriadBlobExportAccuracyDifferentCountInAndOutTests_nightly, IsResultOfImportedAndGeneratedModelSame)
+{
+    SetSeed(DEFAULT_SEED_VALUE);
+
+    auto fnPtr = ngraph::builder::subgraph::makeSplitConvConcat();
+    ASSERT_NO_THROW(_cnnNetwork = CNNNetwork(fnPtr));
+
+    InferenceEngine::IExecutableNetwork::Ptr originalExeNetworkPtr;
+    ASSERT_EQ(StatusCode::OK, _vpuPluginPtr->LoadNetwork(originalExeNetworkPtr, _cnnNetwork, { }, &_resp)) << _resp.msg;
+
+    ConstInputsDataMap originalInputsInfo;
+    ASSERT_EQ(StatusCode::OK, originalExeNetworkPtr->GetInputsInfo(originalInputsInfo, &_resp)) << _resp.msg;
+
+    InferenceEngine::IInferRequest::Ptr originalInferRequest;
+    ASSERT_EQ(StatusCode::OK, originalExeNetworkPtr->CreateInferRequest(originalInferRequest, &_resp)) << _resp.msg;
+
+    std::vector<Blob::Ptr> inputBlobs(originalInputsInfo.size());
+    auto inputBlobsIt = inputBlobs.begin();
+    for (const auto &inputInfo : originalInputsInfo) {
+        ASSERT_EQ(StatusCode::OK, originalInferRequest->GetBlob(inputInfo.first.c_str(), *inputBlobsIt, &_resp)) << _resp.msg;
+        GenRandomData(*inputBlobsIt);
+        inputBlobsIt++;
+    }
+
+    ASSERT_EQ(StatusCode::OK, originalInferRequest->Infer(&_resp)) << _resp.msg;
+
+    ConstOutputsDataMap originalOutputsInfo;
+    ASSERT_EQ(StatusCode::OK, originalExeNetworkPtr->GetOutputsInfo(originalOutputsInfo, &_resp)) << _resp.msg;
+
+    std::vector<Blob::Ptr> originalOutputBlobs(originalOutputsInfo.size());
+    auto outputBlobsIt = originalOutputBlobs.begin();
+    for (const auto &outputInfo : originalOutputsInfo) {
+        ASSERT_EQ(StatusCode::OK, originalInferRequest->GetBlob(outputInfo.first.c_str(), *outputBlobsIt, &_resp)) << _resp.msg;
+        outputBlobsIt++;
+    }
+
+    std::stringstream modelFilenameStream;
+    modelFilenameStream << "exportedModel" << ".blob";
+    ASSERT_EQ(StatusCode::OK, originalExeNetworkPtr->Export(modelFilenameStream.str(), &_resp)) << _resp.msg;
+
+    InferenceEngine::IExecutableNetwork::Ptr importedNetworkPtr;
+    ASSERT_EQ(StatusCode::OK, _vpuPluginPtr->ImportNetwork(importedNetworkPtr, modelFilenameStream.str(), {}, &_resp)) << _resp.msg;
+    InferenceEngine::IInferRequest::Ptr importedInferRequest;
+    ASSERT_EQ(StatusCode::OK, importedNetworkPtr->CreateInferRequest(importedInferRequest, &_resp)) << _resp.msg;
+
+    ConstInputsDataMap importedInputsInfo;
+    ASSERT_EQ(StatusCode::OK, importedNetworkPtr->GetInputsInfo(importedInputsInfo, &_resp)) << _resp.msg;
+
+    inputBlobsIt = inputBlobs.begin();
+    for (const auto &inputInfo : importedInputsInfo) {
+        ASSERT_EQ(StatusCode::OK, importedInferRequest->SetBlob(inputInfo.first.c_str(), *inputBlobsIt, &_resp)) << _resp.msg;
+        inputBlobsIt++;
+    }
+
+    ASSERT_EQ(StatusCode::OK, importedInferRequest->Infer(&_resp)) << _resp.msg;
+
+    ConstOutputsDataMap importedOutputsInfo;
+    ASSERT_EQ(StatusCode::OK, importedNetworkPtr->GetOutputsInfo(importedOutputsInfo, &_resp)) << _resp.msg;
+
+    outputBlobsIt = originalOutputBlobs.begin();
+    for (const auto &outputInfo : importedOutputsInfo) {
+        Blob::Ptr importedOutputBlobPtr;
+        ASSERT_EQ(StatusCode::OK, importedInferRequest->GetBlob(outputInfo.first.c_str(), importedOutputBlobPtr, &_resp)) << _resp.msg;
+
+        CompareCommonAbsolute(importedOutputBlobPtr, *outputBlobsIt, 0.f);
+        outputBlobsIt++;
+    }
+}
+
+
+using myriadBlobExportAccuracyDifferentPrecisionOfInAndOutTests_nightly = myriadLayerTestBaseWithParam<std::tuple<InferenceEngine::Precision, InferenceEngine::Precision>>;
+
+TEST_P(myriadBlobExportAccuracyDifferentPrecisionOfInAndOutTests_nightly, IsResultOfImportedAndGeneratedModelSame)
+{
+    SetSeed(DEFAULT_SEED_VALUE);
+    InferenceEngine::Precision inputPrecision = std::get<0>(GetParam());
+    InferenceEngine::Precision outputPrecision = std::get<1>(GetParam());
+
+    auto fnPtr = ngraph::builder::subgraph::makeSplitConvConcat();
+    ASSERT_NO_THROW(_cnnNetwork = CNNNetwork(fnPtr));
+
+    const auto& network = _cnnNetwork;
+    InputsDataMap inputsInfo = network.getInputsInfo();
+    ASSERT_EQ(inputsInfo.size(), 1);
+    auto inputInfo = inputsInfo.begin();
+    ASSERT_NO_THROW(inputInfo->second->setPrecision(inputPrecision));
+
+    OutputsDataMap outputsInfo = network.getOutputsInfo();
+    ASSERT_EQ(outputsInfo.size(), 1);
+    auto outputInfo = outputsInfo.begin();
+    ASSERT_NO_THROW(outputInfo->second->setPrecision(outputPrecision));
+
+    InferenceEngine::IExecutableNetwork::Ptr originalExeNetworkPtr;
+    ASSERT_EQ(StatusCode::OK, _vpuPluginPtr->LoadNetwork(originalExeNetworkPtr, network, { }, &_resp)) << _resp.msg;
+
+    InferenceEngine::IInferRequest::Ptr originalInferRequest;
+    ASSERT_EQ(StatusCode::OK, originalExeNetworkPtr->CreateInferRequest(originalInferRequest, &_resp)) << _resp.msg;
+
+    Blob::Ptr inputBlobPtr;
+    ASSERT_EQ(StatusCode::OK, originalInferRequest->GetBlob(inputInfo->first.c_str(), inputBlobPtr, &_resp)) << _resp.msg;
+    GenRandomData(inputBlobPtr);
+
+    ASSERT_EQ(StatusCode::OK, originalInferRequest->Infer(&_resp)) << _resp.msg;
+
+    Blob::Ptr outputBlobPtr;
+    ASSERT_EQ(StatusCode::OK, originalInferRequest->GetBlob(outputInfo->first.c_str(), outputBlobPtr, &_resp)) << _resp.msg;
+
+    std::stringstream modelFilenameStream;
+    modelFilenameStream << "exportedModel" << ".blob";
+    ASSERT_EQ(StatusCode::OK, originalExeNetworkPtr->Export(modelFilenameStream.str(), &_resp)) << _resp.msg;
+
+    InferenceEngine::IExecutableNetwork::Ptr importedNetworkPtr;
+    ASSERT_EQ(StatusCode::OK, _vpuPluginPtr->ImportNetwork(importedNetworkPtr, modelFilenameStream.str(), {}, &_resp)) << _resp.msg;
+    InferenceEngine::IInferRequest::Ptr importedInferRequest;
+    ASSERT_EQ(StatusCode::OK, importedNetworkPtr->CreateInferRequest(importedInferRequest, &_resp)) << _resp.msg;
+
+    ConstInputsDataMap importedInputsInfo;
+    ASSERT_EQ(StatusCode::OK, importedNetworkPtr->GetInputsInfo(importedInputsInfo, &_resp)) << _resp.msg;
+    ASSERT_EQ(importedInputsInfo.size(), 1);
+    auto importedInputInfo = importedInputsInfo.begin();
+
+    ASSERT_EQ(StatusCode::OK, importedInferRequest->SetBlob(importedInputInfo->first.c_str(), inputBlobPtr, &_resp)) << _resp.msg;
+
+    ASSERT_EQ(StatusCode::OK, importedInferRequest->Infer(&_resp)) << _resp.msg;
+
+    ConstOutputsDataMap importedOutputsInfo;
+    ASSERT_EQ(StatusCode::OK, importedNetworkPtr->GetOutputsInfo(importedOutputsInfo, &_resp)) << _resp.msg;
+    ASSERT_EQ(importedOutputsInfo.size(), 1);
+    auto importedOutputInfo = importedOutputsInfo.begin();
+
+    Blob::Ptr importedOutputBlobPtr;
+    ASSERT_EQ(StatusCode::OK, importedInferRequest->GetBlob(importedOutputInfo->first.c_str(), importedOutputBlobPtr, &_resp)) << _resp.msg;
+
+    CompareCommonAbsolute(importedOutputBlobPtr, outputBlobPtr, 0.f);
+}
+
+using myriadExtraTests_nightly = myriadLayersTests_nightly;
+
+TEST_F(myriadExtraTests_nightly, ThereIsNoSegfaultOnZeroConvolutionWeights) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    tensor_test_params input_dims = { 1, 3, 25, 25 };
+    param_size kernel = { 3, 3 };
+    param_size stride = { 1, 1 };
+    param_size pad = { 1, 1 };
+    size_t out_channels = 3;
+    size_t group = 1;
+    param_size dilation_factor = { 1, 1 };
+
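+    // Convolution output size: floor((in + 2*pad - dilation*(kernel - 1) - 1) / stride) + 1,
+    // with the trailing +1 folded into the dividend as '+ stride'.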
+    size_t out_w = (input_dims.w + 2 * pad.x - dilation_factor.x * (kernel.x - 1) - 1 + stride.x) / stride.x;
+    size_t out_h = (input_dims.h + 2 * pad.y - dilation_factor.y * (kernel.y - 1) - 1 + stride.y) / stride.y;
+
+    tensor_test_params output_dims = { 1, out_channels, out_h, out_w };
+
+    SetInputTensor(input_dims);
+    SetOutputTensor(output_dims);
+
+    size_t num_weights = kernel.x * kernel.y * (input_dims.c / group) * output_dims.c;
+    size_t num_bias = output_dims.c;
+
+    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr =
+        InferenceEngine::TBlob<uint8_t>::Ptr(GenWeights(num_weights + num_bias));
+    ie_fp16* weights = weights_ptr->data().as<ie_fp16*>();
+    // zero all weights and biases
+    for (size_t i = 0; i < num_weights + num_bias; i++) {
+        weights[i] = 0;
+    }
+
+    std::map<std::string, std::string> layer_params = {
+        { "kernel-x", std::to_string(kernel.x) }
+        ,{ "kernel-y", std::to_string(kernel.y) }
+        ,{ "stride-x", std::to_string(stride.x) }
+        ,{ "stride-y", std::to_string(stride.y) }
+        ,{ "pad-x", std::to_string(pad.x) }
+        ,{ "pad-y", std::to_string(pad.y) }
+        ,{ "output", std::to_string(out_channels) }
+        ,{ "group", std::to_string(group) }
+        ,{ "dilation-x", std::to_string(dilation_factor.x) }
+        ,{ "dilation-y", std::to_string(dilation_factor.y) }
+    };
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Convolution")
+                                        .params(layer_params)
+                                        .weights(num_weights)
+                                        .biases(num_bias),
+                                        NetworkInitParams().useHWOpt(true),
+                                        weights_ptr));
+}
+
+static const std::vector<InferenceEngine::Precision> inputPrecisions = {InferenceEngine::Precision::U8, InferenceEngine::Precision::FP16,
+                                                                        InferenceEngine::Precision::FP32};
+
+static const std::vector<InferenceEngine::Precision> outputPrecisions = {InferenceEngine::Precision::FP16, InferenceEngine::Precision::FP32};
+
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadBlobExportAccuracyDifferentPrecisionOfInAndOutTests_nightly,
+                        ::testing::Combine(::testing::ValuesIn(inputPrecisions), ::testing::ValuesIn(outputPrecisions)));
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_clamp_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_clamp_test.cpp
new file mode 100644 (file)
index 0000000..b848f2d
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_clamp_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsClampParams_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_clampTensors),
+        ::testing::ValuesIn(s_clampParams))
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_clamp_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_clamp_test.hpp
new file mode 100644 (file)
index 0000000..306fe68
--- /dev/null
@@ -0,0 +1,57 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+
+#define ERROR_BOUND (.1f)
+
+using namespace InferenceEngine;
+
+struct clamp_test_params {
+    float min;
+    float max;
+    friend std::ostream& operator<<(std::ostream& os, clamp_test_params const& tst)
+    {
+        return os << " min=" << tst.min
+                  << ", max=" << tst.max;
+    }
+};
+
+typedef myriadLayerTestBaseWithParam<std::tuple<SizeVector, clamp_test_params>> myriadLayersTestsClampParams_nightly;
+
+TEST_P(myriadLayersTestsClampParams_nightly, TestsClamp) {
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+    auto param = GetParam();
+    SizeVector tensor = std::get<0>(param);
+    clamp_test_params p = std::get<1>(param);
+
+    std::map<std::string, std::string> params;
+    params["min"] = std::to_string(p.min);
+    params["max"] = std::to_string(p.max);
+
+    SetInputTensors({tensor});
+    SetOutputTensors({tensor});
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Clamp").params(params)));
+    /* input data preparation */
+    SetFirstInputToRange(-100.f, 100.f);
+    ASSERT_TRUE(Infer());
+
+    /* output check */
+    auto outputBlob = _outputMap[_outputsInfo.begin()->first];
+    auto inputBlob  = _inputMap[_inputsInfo.begin()->first];
+
+    ref_Clamp(inputBlob, _refBlob, p.min, p.max);
+
+    CompareCommonAbsolute(outputBlob, _refBlob, ERROR_BOUND);
+}
+
+static std::vector<SizeVector> s_clampTensors = {
+    {{1, 3, 10, 15}},
+    {{5, 6, 2, 3, 10, 15}},
+};
+
+static std::vector<clamp_test_params> s_clampParams = {
+    {0.f, 6.0f},
+    {-10.f, 17.0f}
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_concat_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_concat_test.cpp
new file mode 100644 (file)
index 0000000..2a385cf
--- /dev/null
@@ -0,0 +1,198 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_concat_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsConcat_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_concatCores),
+        ::testing::ValuesIn(s_axis),
+        ::testing::ValuesIn(s_concatInputs),
+        ::testing::ValuesIn(s_dimension),
+        ::testing::ValuesIn(s_batch)),
+                        getTestCaseName
+);
+
+TEST_F(myriadLayersTestsConcat_nightly, ConcatAfterNormalize) {
+    const std::string model = R"V0G0N(
+        <Net name="ConcatAfterNormalize" version="2" batch="1">
+            <layers>
+                <layer name="input" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>128</dim>
+                            <dim>128</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="normalize1" type="Normalize" precision="FP16" id="2">
+                    <data across_spatial="0" channel_shared="1" eps="9.99999993922529e-09"/>
+                    <input>
+                        <port id="2">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>128</dim>
+                            <dim>128</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="3">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>128</dim>
+                            <dim>128</dim>
+                        </port>
+                    </output>
+                    <weights offset="0" size="2"/>
+                </layer>
+                <layer name="normalize2" type="Normalize" precision="FP16" id="3">
+                    <data across_spatial="0" channel_shared="1" eps="9.99999993922529e-09"/>
+                    <input>
+                        <port id="4">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>128</dim>
+                            <dim>128</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="5">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>128</dim>
+                            <dim>128</dim>
+                        </port>
+                    </output>
+                    <weights offset="2" size="2"/>
+                </layer>
+                <layer name="copy1" type="Copy" precision="FP16" id="4">
+                    <input>
+                        <port id="6">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>128</dim>
+                            <dim>128</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="7">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>128</dim>
+                            <dim>128</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="copy2" type="Copy" precision="FP16" id="5">
+                    <input>
+                        <port id="8">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>128</dim>
+                            <dim>128</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="9">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>128</dim>
+                            <dim>128</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="concat" type="Concat" precision="FP16" id="6">
+                    <concat_data axis="1"/>
+                    <input>
+                        <port id="10">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>128</dim>
+                            <dim>128</dim>
+                        </port>
+                        <port id="11">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>128</dim>
+                            <dim>128</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="12">
+                            <dim>1</dim>
+                            <dim>6</dim>
+                            <dim>128</dim>
+                            <dim>128</dim>
+                        </port>
+                    </output>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="1" to-layer="2" to-port="2"/>
+                <edge from-layer="1" from-port="1" to-layer="3" to-port="4"/>
+                <edge from-layer="2" from-port="3" to-layer="4" to-port="6"/>
+                <edge from-layer="3" from-port="5" to-layer="5" to-port="8"/>
+                <edge from-layer="2" from-port="3" to-layer="6" to-port="10"/>
+                <edge from-layer="3" from-port="5" to-layer="6" to-port="11"/>
+            </edges>
+        </Net>
+    )V0G0N";
+
+    TBlob<uint8_t>::Ptr weights(GenWeights(4 / sizeof(ie_fp16)));
+
+    // Parse model
+    InferenceEngine::Core ie;
+    auto network = ie.ReadNetwork(model, weights);
+
+    auto inputsInfo = network.getInputsInfo();
+    inputsInfo["input"]->setPrecision(Precision::FP16);
+    inputsInfo["input"]->setLayout(Layout::NHWC);
+
+    auto outputsInfo = network.getOutputsInfo();
+    outputsInfo["concat"]->setPrecision(Precision::FP16);
+    outputsInfo["concat"]->setLayout(Layout::NHWC);
+    outputsInfo["copy1"]->setPrecision(Precision::FP16);
+    outputsInfo["copy1"]->setLayout(Layout::NHWC);
+    outputsInfo["copy2"]->setPrecision(Precision::FP16);
+    outputsInfo["copy2"]->setLayout(Layout::NHWC);
+
+    // Load network
+    StatusCode st;
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, {}, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+    // Create InferRequest
+    InferenceEngine::IInferRequest::Ptr inferRequest;
+    ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    // Generate input blob
+    InferenceEngine::Blob::Ptr inputBlob;
+    ASSERT_NO_THROW(st = inferRequest->GetBlob("input", inputBlob, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    GenRandomData(inputBlob);
+
+    // Get output blob
+    InferenceEngine::Blob::Ptr output;
+    ASSERT_NO_THROW(st = inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NO_THROW(st = inferRequest->GetBlob("concat", output, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    // Get blobs which are input to Concat
+    InferenceEngine::Blob::Ptr norm1;
+    InferenceEngine::Blob::Ptr norm2;
+    ASSERT_NO_THROW(st = inferRequest->GetBlob("copy1", norm1, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NO_THROW(st = inferRequest->GetBlob("copy2", norm2, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    InferenceEngine::BlobMap normMap;
+    normMap["normalize1"] = norm1;
+    normMap["normalize2"] = norm2;
+    CheckOutput(normMap, output, 2);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_concat_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_concat_test.hpp
new file mode 100644 (file)
index 0000000..d25918b
--- /dev/null
@@ -0,0 +1,179 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+
+using namespace InferenceEngine;
+
+using myriadConcatTestParams = std::tuple<InferenceEngine::SizeVector, int32_t, InferenceEngine::SizeVector, int32_t, int32_t >;
+typedef myriadLayerTestBaseWithParam<myriadConcatTestParams> myriadLayersTestsConcat_nightly;
+
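+// Verifies a concatenation element-wise: walks every input blob (in BlobMap name
+// order) and checks that its data shows up in 'actual' at the running offset
+// along the concatenation axis.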
+void CheckOutput(const InferenceEngine::BlobMap& input, InferenceEngine::Blob::Ptr actual, int32_t axis) {
+    int32_t OW = 1;
+    int32_t OH = 1;
+    int32_t OC = 1;
+    int32_t ON = 1;
+
+    get_ndims(actual, OW, OH, OC, ON);
+
+    int32_t OFFSET[3] = {};
+    auto actual_data = actual->buffer().as<const uint16_t*>();
+    int input_idx = 0;
+    int n_checks = 0;
+
+    for (auto inputElem : input) {
+        int32_t INP[4] = {};
+        get_ndims(inputElem.second, INP[0], INP[1], INP[2], INP[3]);
+        auto src_data = inputElem.second->buffer().as<const uint16_t*>();
+        size_t output_size = OW * OH * OC;
+        size_t input_size = INP[0] * INP[1] * INP[2];
+        for (int32_t n = 0; n < INP[3]; ++n) {
+            for (int32_t h = 0; h < INP[1]; ++h) {
+                for (int32_t w = 0; w < INP[0]; ++w) {
+                    for (int32_t c = 0; c < INP[2]; ++c) {
+                        n_checks++;
+                        size_t oodx = c + OFFSET[2] + OC * ((w + OFFSET[0]) + (h + OFFSET[1]) * OW)  +  n * output_size;
+                        size_t iidx = c + INP[2] * (w + h * INP[0]) + n * input_size;
+                        ASSERT_EQ(actual_data[oodx], src_data[iidx])
+                                    << "at: input=" << input_idx << " n=" << n << " c=" << c << " h=" << h << " w=" << w
+                                    << ", actual data : " << PrecisionUtils::f16tof32(actual_data[oodx])
+                                    << " reference data " << PrecisionUtils::f16tof32(src_data[iidx]);
+                    }
+                }
+            }
+        }
+        OFFSET[axis] += INP[axis];
+        input_idx++;
+    }
+    ASSERT_NE(n_checks, 0);
+}
+
+TEST_P(myriadLayersTestsConcat_nightly, Concat) {
+    auto param   = GetParam();
+    auto core    = std::get<0>(param);
+    auto axis    = std::get<1>(param);
+    auto shifts  = std::get<2>(param);
+    auto numDims = std::get<3>(param);
+    auto batch   = std::get<4>(param);
+
+    ASSERT_EQ(core.size(), 2);
+    axis %= numDims;
+    IN_OUT_desc dims;
+    IN_OUT_desc output(1);
+    output[0].resize(numDims);
+
+    int32_t channelsSum = 0;
+    uint32_t offset0 = 0;
+    uint32_t offset1 = 0;
+    int32_t shifted_axis = numDims - 1 - axis;
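+    // Build the input/output descriptors: every entry of 'shifts' contributes one
+    // input slice along the (rank-adjusted) concat axis; lower-rank cases are
+    // padded with a leading batch dimension.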
+    switch (numDims) {
+        case 4:
+            offset0 = 1 + ((axis) % 3);
+            offset1 = 1 + ((axis + 1) % 3);
+            for (auto elem : shifts) {
+                InferenceEngine::SizeVector newSlice(numDims);
+                newSlice[0] = batch;
+                newSlice[axis] = elem;
+                newSlice[offset0] = core[0];
+                newSlice[offset1] = core[1];
+                channelsSum += elem;
+                dims.push_back(newSlice);
+            }
+            output[0][0] = batch;
+            output[0][offset1] = core[1];
+            break;
+        case 2:
+            shifted_axis = (batch == 1 ? 2 : 3) - 1 - axis;
+            offset0 = 1 + ((axis + 1) % (numDims));
+            axis++;
+            for (auto elem : shifts) {
+                InferenceEngine::SizeVector newSlice(batch == 1 ? 3 : 4, 1);
+                newSlice[0] = batch;
+                newSlice[axis] = elem;
+                newSlice[offset0] = core[0];
+                dims.push_back(newSlice);
+                channelsSum += elem;
+            }
+            output[0].resize(batch == 1 ? 3 : 4, 1);
+            output[0][0] = batch;
+            break;
+        case 1:
+            offset0 = 1 + ((axis + 1) % (numDims));
+            axis++;
+            for (auto elem : shifts) {
+                InferenceEngine::SizeVector newSlice(numDims + 1);
+                newSlice[0] = batch;
+                newSlice[axis] = elem;
+                channelsSum += elem;
+                dims.push_back(newSlice);
+            }
+            output[0].resize(numDims + 1);
+            output[0][0] = batch;
+            break;
+        default:
+            FAIL() << "Unsupported tensor dimension.";
+    }
+    output[0][axis] = channelsSum;
+    if (numDims > 1) {
+        output[0][offset0] = core[0];
+    }
+
+    SetInputTensors(dims);
+    SetOutputTensors(output);
+    std::map<std::string, std::string> params;
+    params["axis"] = std::to_string(axis);
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Concat").params(params), NetworkInitParams().layoutPreference(vpu::LayoutPreference::ChannelMinor)));
+    ASSERT_TRUE(Infer());
+    auto dst = _outputMap.begin()->second;
+    CheckOutput(_inputMap, dst, shifted_axis);
+}
+
+static std::vector<int32_t> s_axis = {
+    1, 2, 3
+};
+
+static std::vector<int32_t> s_dimension = {
+    1, 2, 4
+};
+
+static std::vector<int32_t> s_batch = {
+    1, 8
+};
+
+static std::vector<InferenceEngine::SizeVector> s_concatCores = {
+    {{8, 4}, { 8, 16}, {8, 8}}
+};
+
+static std::vector<InferenceEngine::SizeVector> s_concatInputs = {
+    {{1,}, {1, 2, 4}, {1, 2, 3, 4, 5}, {2, 4}}
+};
+
+template<class T>
+std::ostream& operator<<(std::ostream& os, const std::vector<T>& vector_of_elements) {
+    os << "{";
+    size_t idx = 0;
+    for (const auto& element : vector_of_elements) {
+        os << element;
+        if (++idx != vector_of_elements.size()) {
+            os << ",";
+        }
+    }
+    os << "}";
+    return os;
+}
+
+// Builds a human-readable test case name for gtest from the test parameters.
+std::string getTestCaseName(testing::TestParamInfo<myriadConcatTestParams> param) {
+    auto core    = std::get<0>(param.param);
+    auto axis    = std::get<1>(param.param);
+    auto shifts  = std::get<2>(param.param);
+    auto numDims = std::get<3>(param.param);
+    auto batch   = std::get<4>(param.param);
+
+    std::stringstream ss;
+    ss << "core=" << core << "/axis=" << axis << "/shifts=" << shifts
+       << "/numDims=" << (numDims != 4 ? numDims + 1 : numDims) << "/batch=" << batch;
+    return ss.str();
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_conv_nd_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_conv_nd_test.cpp
new file mode 100644 (file)
index 0000000..75e3f27
--- /dev/null
@@ -0,0 +1,249 @@
+// Copyright (C) 2019-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_conv_nd_test.hpp"
+
+using namespace testing;
+
+//----------------------------------------------------------------------
+//
+// 3D, tricky input size, kernel shape, pads, strides, and dilations
+//
+//----------------------------------------------------------------------
+
+INSTANTIATE_TEST_CASE_P(tricky_ncdhw_userpad, myriadLayersConvNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 19, 65, 47}),
+        Values(KernelShape {1, 3, 5}),
+        Values(PadsBegin {0, 1, 1}),
+        Values(PadsEnd {0, 1, 3}),
+        Values(AutoPad("")),
+        Values(Strides {1, 2, 3}),
+        Values(Dilations {3, 2, 1}),
+        Values(OutputChannels(16)),
+        Values(Groups(1))
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(tricky_ncdhw_autopad, myriadLayersConvNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 19, 65, 47}),
+        Values(KernelShape {1, 3, 5}),
+        Values(PadsBegin {}),
+        Values(PadsEnd {}),
+        Values(AutoPad("valid"),
+               AutoPad("same_lower"),
+               AutoPad("same_upper")),
+        Values(Strides {1, 2, 3}),
+        Values(Dilations {3, 2, 1}),
+        Values(OutputChannels(16)),
+        Values(Groups(1))
+    )
+);
+
+//----------------------------------------------------------------------
+//
+// 3D, simple input size, kernel shape, pads, strides, and dilations
+//
+//----------------------------------------------------------------------
+
+INSTANTIATE_TEST_CASE_P(simple_ncdhw_userpad, myriadLayersConvNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 20, 64, 48}),
+        Values(KernelShape {3, 3, 3}),
+        Values(PadsBegin {1, 1, 1}),
+        Values(PadsEnd {1, 1, 1}),
+        Values(AutoPad("")),
+        Values(Strides {2, 2, 2}),
+        Values(Dilations {1, 1, 1}),
+        Values(OutputChannels(16)),
+        Values(Groups(1))
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(simple_ncdhw_autopad, myriadLayersConvNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 20, 64, 48}),
+        Values(KernelShape {3, 3, 3}),
+        Values(PadsBegin {}),
+        Values(PadsEnd {}),
+        Values(AutoPad("valid"),
+               AutoPad("same_lower"),
+               AutoPad("same_upper")),
+        Values(Strides {2, 2, 2}),
+        Values(Dilations {1, 1, 1}),
+        Values(OutputChannels(16)),
+        Values(Groups(1))
+    )
+);
+
+//----------------------------------------------------------------------
+//
+// 2D, tricky input size, kernel shape, pads, strides, and dilations
+//
+//----------------------------------------------------------------------
+
+INSTANTIATE_TEST_CASE_P(tricky_nchw_userpad, myriadLayersConvNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 65, 47}),
+        Values(KernelShape {1, 3}),
+        Values(PadsBegin {0, 0}),
+        Values(PadsEnd {0, 2}),
+        Values(AutoPad("")),
+        Values(Strides {1, 2}),
+        Values(Dilations {2, 1}),
+        Values(OutputChannels(16)),
+        Values(Groups(1))
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(tricky_nchw_autopad, myriadLayersConvNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 65, 47}),
+        Values(KernelShape {1, 3}),
+        Values(PadsBegin {}),
+        Values(PadsEnd {}),
+        Values(AutoPad("valid"),
+               AutoPad("same_lower"),
+               AutoPad("same_upper")),
+        Values(Strides {1, 2}),
+        Values(Dilations {2, 1}),
+        Values(OutputChannels(16)),
+        Values(Groups(1))
+    )
+);
+
+//----------------------------------------------------------------------
+//
+// 2D, simple input size, kernel shape, pads, strides, and dilations
+//
+//----------------------------------------------------------------------
+
+INSTANTIATE_TEST_CASE_P(simple_nchw_userpad, myriadLayersConvNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 64, 48}),
+        Values(KernelShape {3, 3}),
+        Values(PadsBegin {1, 1}),
+        Values(PadsEnd {1, 1}),
+        Values(AutoPad("")),
+        Values(Strides {2, 2}),
+        Values(Dilations {1, 1}),
+        Values(OutputChannels(16)),
+        Values(Groups(1))
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(simple_nchw_autopad, myriadLayersConvNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 64, 48}),
+        Values(KernelShape {3, 3}),
+        Values(PadsBegin {}),
+        Values(PadsEnd {}),
+        Values(AutoPad("valid"),
+               AutoPad("same_lower"),
+               AutoPad("same_upper")),
+        Values(Strides {2, 2}),
+        Values(Dilations {1, 1}),
+        Values(OutputChannels(16)),
+        Values(Groups(1))
+    )
+);
+
+//----------------------------------------------------------------------
+//
+// Test cases from the I3D network
+//
+//----------------------------------------------------------------------
+
+// NB: requires 1GB of RAM on device (e.g. ma2085 board)
+// Stress test: large image with large depth, large kernel
+INSTANTIATE_TEST_CASE_P(i3d_id6, myriadLayersConvNDTest_nightly,
+                        Combine(
+                                Values(InputShape {1, 3, 79, 224, 224}),
+                                Values(KernelShape {7, 7, 7}),
+                                Values(PadsBegin {}),
+                                Values(PadsEnd {}),
+                                Values(AutoPad("same_upper")),
+                                Values(Strides {2, 2, 2}),
+                                Values(Dilations {1, 1, 1}),
+                                Values(OutputChannels(64)),
+                                Values(Groups(1))));
+
+// Like the `i3d_id6` test but with a smaller image (small enough to fit on Myriad X)
+INSTANTIATE_TEST_CASE_P(i3d_id6_shrink, myriadLayersConvNDTest_nightly,
+                        Combine(
+                                Values(InputShape {1, 3, 39, 112, 112}),
+                                Values(KernelShape {7, 7, 7}),
+                                Values(PadsBegin {}),
+                                Values(PadsEnd {}),
+                                Values(AutoPad("same_upper")),
+                                Values(Strides {2, 2, 2}),
+                                Values(Dilations {1, 1, 1}),
+                                Values(OutputChannels(64)),
+                                Values(Groups(1))));
+
+// Average-size image, trivial kernel 1x1x1
+INSTANTIATE_TEST_CASE_P(i3d_id12, myriadLayersConvNDTest_nightly,
+                        Combine(
+                                Values(InputShape {1, 64, 40, 56, 56}),
+                                Values(KernelShape {1, 1, 1}),
+                                Values(PadsBegin {}),
+                                Values(PadsEnd {}),
+                                Values(AutoPad("same_upper")),
+                                Values(Strides {1, 1, 1}),
+                                Values(Dilations {1, 1, 1}),
+                                Values(OutputChannels(64)),
+                                Values(Groups(1))));
+
+// Average-size image, non-trivial kernel 3x3x3
+INSTANTIATE_TEST_CASE_P(i3d_id17, myriadLayersConvNDTest_nightly,
+                        Combine(
+                                Values(InputShape {1, 64, 40, 56, 56}),
+                                Values(KernelShape {3, 3, 3}),
+                                Values(PadsBegin {}),
+                                Values(PadsEnd {}),
+                                Values(AutoPad("same_upper")),
+                                Values(Strides {1, 1, 1}),
+                                Values(Dilations {1, 1, 1}),
+                                Values(OutputChannels(192)),
+                                Values(Groups(1))));
+
+// Small image (7x7), trivial kernel
+INSTANTIATE_TEST_CASE_P(i3d_id249, myriadLayersConvNDTest_nightly,
+                        Combine(
+                                Values(InputShape {1, 832, 10, 7, 7}),
+                                Values(KernelShape {1, 1, 1}),
+                                Values(PadsBegin {}),
+                                Values(PadsEnd {}),
+                                Values(AutoPad("same_upper")),
+                                Values(Strides {1, 1, 1}),
+                                Values(Dilations {1, 1, 1}),
+                                Values(OutputChannels(256)),
+                                Values(Groups(1))));
+
+// Small image (7x7), non-trivial kernel
+INSTANTIATE_TEST_CASE_P(i3d_id301, myriadLayersConvNDTest_nightly,
+                        Combine(
+                                Values(InputShape {1, 48, 10, 7, 7}),
+                                Values(KernelShape {3, 3, 3}),
+                                Values(PadsBegin {}),
+                                Values(PadsEnd {}),
+                                Values(AutoPad("same_upper")),
+                                Values(Strides {1, 1, 1}),
+                                Values(Dilations {1, 1, 1}),
+                                Values(OutputChannels(128)),
+                                Values(Groups(1))));
+
+// Trivial image (1x1), trivial kernel
+INSTANTIATE_TEST_CASE_P(i3d_id314, myriadLayersConvNDTest_nightly,
+                        Combine(
+                                Values(InputShape {1, 1024, 9, 1, 1}),
+                                Values(KernelShape {1, 1, 1}),
+                                Values(PadsBegin {}),
+                                Values(PadsEnd {}),
+                                Values(AutoPad("same_upper")),
+                                Values(Strides {1, 1, 1}),
+                                Values(Dilations {1, 1, 1}),
+                                Values(OutputChannels(400)),
+                                Values(Groups(1))));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_conv_nd_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_conv_nd_test.hpp
new file mode 100644 (file)
index 0000000..98bfeaa
--- /dev/null
@@ -0,0 +1,759 @@
+// Copyright (C) 2019-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "myriad_layers_tests.hpp"
+#include "vpu_case_common.hpp"
+#include "precision_utils.h"
+
+#include <cmath>
+#include <cstdlib>
+
+#include <iostream>
+#include <limits>
+#include <memory>
+#include <random>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#define DEBUG 0
+
+using namespace InferenceEngine;
+
+using InputShape  = std::vector<int>;
+using KernelShape = std::vector<int>;
+using PadsBegin   = std::vector<int>;
+using PadsEnd     = std::vector<int>;
+using AutoPad     = std::string;
+using Strides     = std::vector<int>;
+using Dilations   = std::vector<int>;
+using OutputChannels = int;
+using Groups         = int;
+
+using ConvNDTestParams =
+    std::tuple<
+        InputShape,
+        KernelShape,
+        PadsBegin,
+        PadsEnd,
+        AutoPad,
+        Strides,
+        Dilations,
+        OutputChannels,
+        Groups>;
+
+class ConvNDTest: public myriadLayerTestBaseWithParam<ConvNDTestParams>
+{
+protected:
+
+    void testConvND() {
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+        _config[VPU_CONFIG_KEY(HW_INJECT_STAGES)]     = CONFIG_VALUE(NO);
+
+        //
+        // Get test parameters
+        //
+
+        const auto& params = GetParam();
+
+        const std::vector<int>  inputShape = std::get<0>(params);
+        const std::vector<int> kernelShape = std::get<1>(params);
+        const std::vector<int>   padsBegin = std::get<2>(params);
+        const std::vector<int>   padsEnd   = std::get<3>(params);
+        const std::string          autoPad = std::get<4>(params);
+        const std::vector<int>     strides = std::get<5>(params);
+        const std::vector<int>   dilations = std::get<6>(params);
+        const int           outputChannels = std::get<7>(params);
+        const int                   groups = std::get<8>(params);
+
+        // Exclude the `i3d_id6` test case, which requires at least
+        // 1 GB of RAM on device, such as the ma2085 board
+        bool tooLarge = kernelShape[0] == 7 &&
+                        inputShape[inputShape.size() - 1] == 224;
+        DISABLE_IF(tooLarge && !CheckMA2085());
+
+        //
+        // TODO: Add `withBiases` to test parameters
+        //
+        const bool withBiases  = true;
+
+        // Only non-interleaved layouts are supported: CHW, NCHW, NCDHW, ...
+        const bool interleaved = false;
+
+        const int     inputNDims =  inputShape.size();
+        const int    kernelNDims = kernelShape.size();
+        const int   stridesNDims =     strides.size();
+        const int dilationsNDims =   dilations.size();
+        const int padsBeginNDims =   padsBegin.size();
+        const int padsEndNDims   =   padsEnd.size();
+
+        //
+        // Verify test parameters
+        //
+
+        IE_ASSERT(inputNDims >= 3); // CHW, NCHW, NCDHW, ...
+
+        const int channelsNDims = 1;
+        const int batchNDims = inputNDims > 3; // 0 if CHW, 1 if NCHW etc
+        IE_ASSERT(inputNDims == kernelNDims + channelsNDims + batchNDims);
+
+        //
+        // Assume dims order like {N, C, ..., H, W}
+        // where {..., H, W} are spatial dimensions
+        //
+
+        const int channelsDim = batchNDims;
+        const int spacialDimsBegin = channelsDim + 1;
+        const int inputChannels = inputShape[channelsDim];
+
+        IE_ASSERT(groups > 0);
+        IE_ASSERT(inputChannels > 0);
+        IE_ASSERT(outputChannels > 0);
+        IE_ASSERT(inputChannels % groups == 0);
+        IE_ASSERT(outputChannels % groups == 0);
+
+        IE_ASSERT(kernelNDims > 0);
+        IE_ASSERT(kernelNDims == stridesNDims   || stridesNDims == 0);
+        IE_ASSERT(kernelNDims == dilationsNDims || dilationsNDims == 0);
+
+        IE_ASSERT(autoPad == "same_lower" ||
+                  autoPad == "same_upper" ||
+                  autoPad == "valid" ||
+                  autoPad == "");
+
+        if (autoPad == "") {
+            IE_ASSERT(kernelNDims == padsBeginNDims);
+            IE_ASSERT(kernelNDims == padsEndNDims);
+        } else {
+            IE_ASSERT(0 == padsBeginNDims);
+            IE_ASSERT(0 == padsEndNDims);
+        }
+
+        //
+        // Derive other parameters of layer
+        //
+
+        std::vector<int> padsBeginUpdate(kernelNDims);
+        std::vector<int> padsEndUpdate(kernelNDims);
+        std::vector<int> stridesUpdate(kernelNDims);
+        std::vector<int> dilationsUpdate(kernelNDims);
+        std::vector<int> dilatedKernelShape(kernelNDims);
+
+        std::vector<int> outputShape(inputNDims);
+        for (int i = 0; i < kernelNDims; i++) {
+            stridesUpdate[i] = stridesNDims ? strides[i] : 1;
+            dilationsUpdate[i] = dilationsNDims ? dilations[i] : 1;
+            dilatedKernelShape[i] = dilationsUpdate[i] * (kernelShape[i] - 1) + 1;
+
+            int remainder_i = inputShape[i + spacialDimsBegin] % stridesUpdate[i];
+            int pads_i = dilatedKernelShape[i] - (remainder_i? remainder_i: stridesUpdate[i]);
+
+            if (autoPad == "") {
+                padsBeginUpdate[i] = padsBegin[i];
+                padsEndUpdate[i]   = padsEnd[i];
+            } else if (autoPad == "valid") {
+                padsBeginUpdate[i] = 0;
+                padsEndUpdate[i]   = 0;
+            } else if (autoPad == "same_lower") {
+                padsEndUpdate[i]   = pads_i / 2;                 // floor(pads_i / 2.)
+                padsBeginUpdate[i] = pads_i - padsEndUpdate[i];  //  ceil(pads_i / 2.)
+            } else if (autoPad == "same_upper") {
+                padsBeginUpdate[i] = pads_i / 2;
+                padsEndUpdate[i]   = pads_i - padsBeginUpdate[i];
+            } else {
+                IE_ASSERT(false); // this must never happen
+            }
+
+            outputShape[i + spacialDimsBegin] =
+                (inputShape[i + spacialDimsBegin]
+                + padsBeginUpdate[i] + padsEndUpdate[i]
+                - dilatedKernelShape[i]
+                ) / stridesUpdate[i] + 1;
+        }
+        outputShape[channelsDim] = outputChannels;
+        if (batchNDims) {
+            outputShape[0] = inputShape[0]; // copy batch size
+        }
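+
+        // Illustrative example (added for clarity, not from the original source):
+        // for an input extent of 65 with kernel 3, dilation 2, stride 2 and
+        // auto_pad "same_upper":
+        //   dilatedKernel = 2 * (3 - 1) + 1 = 5
+        //   remainder = 65 % 2 = 1, so pads = 5 - 1 = 4, split as begin=2, end=2
+        //   output = (65 + 2 + 2 - 5) / 2 + 1 = 33 = ceil(65 / 2)
+        // i.e. "same" padding keeps output = ceil(input / stride).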
+
+        std::vector<int> weightsShape(kernelNDims + 2);
+        for (int i = 0; i < kernelNDims; i++) {
+            weightsShape[i + 2] = kernelShape[i];
+        }
+        weightsShape[1] =  inputChannels / groups;
+        weightsShape[0] = outputChannels / groups;
+
+        std::vector<int> biasesShape {outputChannels / groups};
+
+        //
+        // Initialize data
+        //
+
+        TBlob<uint8_t>::Ptr inputBlob = createPlainTBlob(inputShape, Precision::FP16);
+        TBlob<uint8_t>::Ptr outputBlob = createPlainTBlob(outputShape, Precision::FP16);
+
+        int weightsNum = 1;
+        for (int i = 0; i < weightsShape.size(); i++) {
+            weightsNum *= weightsShape[i];
+        }
+        int biasesNum = outputChannels / groups;
+        int coeffsNum = weightsNum + biasesNum;
+        std::vector<int> coeffsShape { coeffsNum };
+
+        TBlob<uint8_t>::Ptr coeffsBlob = createPlainTBlob(coeffsShape, Precision::FP16);
+
+        inputBlob->allocate();
+        outputBlob->allocate();
+        coeffsBlob->allocate();
+
+        uint8_t* inputBlobDataPtr = inputBlob->data();
+        uint8_t* coeffsBlobDataPtr = coeffsBlob->data();
+
+        int inputNum = getTotalNum(inputShape);
+
+        // HACK: Fill the random data using a Gaussian distribution (not uniform)!
+        //
+        // WHY: While a uniform distribution is OK for the reference implementation,
+        //      hardware convolution on Myriad X uses tricky quantization that
+        //      is not accurate enough if the input is white noise.
+        //
+        //      Such quantization adjusts to the image's histogram, which Gaussian
+        //      noise simulates more or less adequately.
+        #if 0
+        fulfillUniformly(inputBlobDataPtr, inputNum, Precision::FP16, 0, 255);
+        fulfillUniformly(coeffsBlobDataPtr, coeffsNum, Precision::FP16, -1, 1);
+        #else
+        // Coefficients to simulate average pooling, although with random deviations
+        double coeffsAvg = 1. / getTotalNum(kernelShape) / (inputChannels / groups);
+        double coeffsDev = coeffsAvg * 0.5;  // 50% deviation
+        fulfillGaussian(inputBlobDataPtr, inputNum, Precision::FP16, 128, 32);
+        fulfillGaussian(coeffsBlobDataPtr, coeffsNum, Precision::FP16, coeffsAvg, coeffsDev);
+        #endif
+
+        //
+        // Initialize network
+        //
+
+        std::string model = createModel(inputShape,
+                                        kernelShape,
+                                        padsBegin,
+                                        padsEnd,
+                                        autoPad,
+                                        strides,
+                                        dilations,
+                                        groups,
+                                        outputShape,
+                                        weightsShape,
+                                        biasesShape);
+        #if DEBUG
+        std::cout << "model:\n" << model << "\n";
+        #endif
+
+        ASSERT_NO_THROW(readNetwork(model, coeffsBlob));
+
+        const auto& network = _cnnNetwork;
+
+        _inputsInfo = network.getInputsInfo();
+        _inputsInfo["input"]->setPrecision(Precision::FP16);
+
+        _outputsInfo = network.getOutputsInfo();
+        _outputsInfo["convolution"]->setPrecision(Precision::FP16);
+
+        //
+        // Infer
+        //
+
+        StatusCode st = OK;
+
+        ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, _config, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+        ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr inputValuesBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("input", inputValuesBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        void* inputValuesBlobDataPtr = inputValuesBlob->buffer();
+        std::memcpy(inputValuesBlobDataPtr, inputBlobDataPtr, inputNum * sizeof(ie_fp16));
+
+        ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr outputValuesBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("convolution", outputValuesBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        //
+        // Check result
+        //
+
+        const ie_fp16 *inputData = inputValuesBlob->cbuffer().as<ie_fp16*>();
+        const ie_fp16 *outputData = outputValuesBlob->cbuffer().as<ie_fp16*>();
+
+        const ie_fp16* weightsData = (const ie_fp16*) coeffsBlobDataPtr;
+        const ie_fp16* biasesData = weightsData + weightsNum;
+
+        int  outputNDims = inputNDims;
+        int weightsNDims = kernelNDims + 2;
+
+        std::vector<int> inputIndices(inputNDims);
+        std::vector<int> outputIndices(outputNDims);
+        std::vector<int> weightsIndices(weightsNDims);
+
+        // Check a random sample of ~100000 output points (possibly out of millions):
+        // given a point, check it with probability 100000/totalOutputs,
+        // e.g. for 10^7 outputs each point is checked with P = 0.01.
+        int totalOutputs = getTotalNum(outputShape);
+        double P = std::min(1., 100000. / totalOutputs);
+        std::uniform_real_distribution<> uniform(0, 1);
+        std::mt19937 gen;
+
+        int points = 0;  // count of points actually checked
+        int errors = 0;  // count of points whose error exceeded the tolerance
+
+        float tolerance = 0.01;  // 1%
+        float avgRelDif = 0;     // accumulates relative diffs; averaged below
+
+        //
+        // Cycle over batch dimension (if any)
+        //
+        int N = batchNDims ? inputShape[0] : 1;
+        for (int n = 0; n < N; n++) {
+            if (batchNDims > 0) {
+                inputIndices[0] = n;
+                outputIndices[0] = n;
+            }
+
+            //
+            // Cycle over spatial dims of output
+            //
+            do {
+                //
+                // Cycle over output channels
+                //
+                int C = outputChannels;
+                for (int c = 0; c < C; c++) {
+                    outputIndices[channelsDim] = c;
+
+                    // check with probability P
+                    double p = uniform(gen);
+                    if (p > P) {
+                        continue;
+                    }
+                    points++;
+
+                    float reference = referenceConvND(inputIndices,
+                                                      outputIndices,
+                                                      weightsIndices,
+                                                      inputData,
+                                                      weightsData,
+                                                      biasesData,
+                                                      inputShape,
+                                                      outputShape,
+                                                      weightsShape,
+                                                      padsBeginUpdate,
+                                                      stridesUpdate,
+                                                      dilationsUpdate,
+                                                      groups,
+                                                      interleaved,
+                                                      withBiases);
+
+                    int resOffset = offsetByIndex(&outputIndices[0], &outputShape[0], outputNDims);
+                    float result = PrecisionUtils::f16tof32(outputData[resOffset]);
+
+                    float diff = result - reference;
+                    float relative = std::fabs(diff) / std::fabs(reference);
+                    if (relative > tolerance) {
+                        if (errors++ < 25) {
+                            std::cout << "error:"
+                                << " outputIndices=" << to_string(outputIndices)
+                                << " result=" << result
+                                << " reference=" << reference
+                                << " diff=" << diff
+                                << " relative=" << to_percents(relative)
+                                << std::endl;
+                        }
+                    }
+
+                    avgRelDif += relative;  // accumulating...
+                }
+            } while (nextIndices(&outputIndices[spacialDimsBegin],
+                                 &outputShape[spacialDimsBegin],
+                                 kernelNDims));
+        }
+
+        if (points == 0) {
+            FAIL() << "test bug: number of tested points must be (much!) greater than zero";
+        }
+        avgRelDif = avgRelDif / points;
+
+        if (errors > 0) {
+            std::cout << "errors: " << errors << " (tested points: " << points << ")" << std::endl;
+            std::cout << "avgDif: " << to_percents(avgRelDif) << " (tolerance: " << to_percents(tolerance) << ")"
+                      << std::endl;
+        }
+
+        ASSERT_LE(avgRelDif, tolerance);
+    }
+
+private:
+
+    static
+    std::string to_percents(float x) {
+        std::stringstream s;
+        s << std::setprecision(3);
+        s << x * 100;
+        s << "%";
+        return s.str();
+    }
+
+    static
+    std::string to_string(const std::vector<int>& v) {
+        std::stringstream s;
+        s << "{";
+        for (int i = 0; i < v.size(); i++) {
+            s << (i? ", ": "") << v[i];
+        }
+        s << "}";
+        return s.str();
+    }
+
+    // Returns the reference result of the ND convolution for the given output indices
+    static
+    float referenceConvND(std::vector<int>   & inputIndices,
+                    const std::vector<int>   & outputIndices,
+                          std::vector<int>   & weightsIndices,
+                    const             ie_fp16  inputData[],
+                    const             ie_fp16  weightsData[],
+                    const             ie_fp16  biasesData[],
+                    const std::vector<int>   & inputShape,
+                    const std::vector<int>   & outputShape,
+                    const std::vector<int>   & weightsShape,
+                    const std::vector<int>   & padsBeginUpdate,
+                    const std::vector<int>   & stridesUpdate,
+                    const std::vector<int>   & dilationsUpdate,
+                    const int                  groups,
+                    const bool                 interleaved,
+                    const bool                 withBiases)
+    {
+        const int  inputNDims =  inputShape.size();
+        const int outputNDims = outputShape.size();
+        const int weightsNDims = weightsShape.size();
+
+        const int kernelNDims = weightsNDims - 2;
+        const int biasesNDims = 1;
+
+        IE_ASSERT(inputNDims == outputNDims);
+        IE_ASSERT(inputNDims >= 3); // CHW, NCHW, NCDHW, ...
+
+        const int channelsNDims = 1;
+        const int batchNDims = inputNDims > 3; // 0 if CHW, 1 if NCHW etc
+        IE_ASSERT(inputNDims == kernelNDims + channelsNDims + batchNDims);
+
+        int padsBeginNDims = padsBeginUpdate.size();
+        int   stridesNDims =   stridesUpdate.size();
+        int dilationsNDims = dilationsUpdate.size();
+
+        IE_ASSERT(kernelNDims > 0);
+        IE_ASSERT(kernelNDims == padsBeginNDims);
+        IE_ASSERT(kernelNDims == stridesNDims);
+        IE_ASSERT(kernelNDims == dilationsNDims);
+
+        const int channelsDim      = interleaved ? inputNDims - 1 : batchNDims;
+        const int spacialDimsBegin = interleaved ? batchNDims     : channelsDim + 1;
+
+        const int  inputChannels =  inputShape[channelsDim];
+        const int outputChannels = outputShape[channelsDim];
+
+        IE_ASSERT(weightsShape[0] == outputChannels / groups);
+        IE_ASSERT(weightsShape[1] ==  inputChannels / groups);
+
+        IE_ASSERT(groups > 0);
+        IE_ASSERT(inputChannels > 0);
+        IE_ASSERT(outputChannels > 0);
+        IE_ASSERT(inputChannels % groups == 0);
+        IE_ASSERT(outputChannels % groups == 0);
+
+        int IC =  inputChannels / groups;
+        int OC = outputChannels / groups;
+
+        int c = outputIndices[channelsDim];
+
+        int g  = c / OC;  // group of channels
+        int oc = c % OC;  // channel of group
+
+        // accumulate result with FP32 precision
+        float result = withBiases ? PrecisionUtils::f16tof32(biasesData[oc]) : 0;
+
+        for (int i = 0; i < kernelNDims; i++) {
+            weightsIndices[i + 2] = 0;
+        }
+        weightsIndices[0] = oc;
+    //  weightsIndices[1] = ic; -- deferred to the inner loop over ic (below)
+
+        //
+        // Cycle over the spatial indices of the weights, i.e. 2nd, 3rd, ...
+        //
+        do {
+            //
+            // Setup spatial dims of inputIndices
+            //
+            bool offside = false;
+            for (int i = 0; i < kernelNDims; i++) {
+                int index = outputIndices[i + spacialDimsBegin] * stridesUpdate[i]
+                            + weightsIndices[i + 2] * dilationsUpdate[i]
+                            - padsBeginUpdate[i];
+
+                if (index < 0 || index >= inputShape[i + spacialDimsBegin]) {
+                    offside = true;  // out of input tensor bounds,
+                    break;           // so skip this weightsIndices
+                }
+
+                inputIndices[i + spacialDimsBegin] = index;
+            }
+            if (offside) {
+                continue;  // goto next weightsIndices
+            }
+
+            //
+            // Cycle over input channels in the group
+            //
+            for (int ic = 0; ic < IC; ic++) {
+                inputIndices[channelsDim] = ic + g*IC;
+                weightsIndices[1] = ic;
+                ie_fp16 in = inputData[offsetByIndex(&inputIndices[0], &inputShape[0], inputNDims)];
+                ie_fp16 w = weightsData[offsetByIndex(&weightsIndices[0], &weightsShape[0], weightsNDims)];
+                result += PrecisionUtils::f16tof32(in) * PrecisionUtils::f16tof32(w);
+            }
+        } while (nextIndices(&weightsIndices[2],
+                             &weightsShape[2],
+                             kernelNDims));
+
+        return result;
+    }
+
+    static
+    bool nextIndices(int indices[],
+               const int shape[],
+                     int nDims) {
+        // let W's index change quicker than H's:
+        // note that dims order is like ..., H, W
+        for (int i = nDims - 1; i >= 0; i--) {
+            if (++indices[i] < shape[i])
+                return true;
+            indices[i] = 0;
+        }
+        return false; // cannot get next indices
+    }
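+
+    // Illustrative example (added for clarity): for shape {2, 3}, successive
+    // calls starting from {0, 0} yield {0,1}, {0,2}, {1,0}, {1,1}, {1,2},
+    // then reset the indices to {0, 0} and return false, like an odometer.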
+
+    // Get element offset by ND index
+    static
+    int offsetByIndex(const int index[],
+                      const int shape[],
+                      const int ndims) {
+        int offset = 0;
+        int stride = 1;
+        for (int i = ndims - 1; i >= 0; i--) {
+            offset += index[i] * stride;
+            stride *= shape[i];
+        }
+        return offset;
+    }
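+
+    // Illustrative example (added for clarity): the row-major offset of
+    // index {1, 2, 3} in shape {4, 5, 6} is 3*1 + 2*6 + 1*(5*6) = 45.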
+
+    // Count total number of elements in ND tensor
+    static
+    int getTotalNum(const std::vector<int>& shape) {
+        int totalNum = 1;
+        for (int i = 0; i < shape.size(); i++) {
+            totalNum *= shape[i];
+        }
+        return totalNum;
+    }
+
+    // Fill the data[] array with random numbers
+    // distributed uniformly over the interval [a, b]
+    static
+    void fulfillUniformly(uint8_t* data, int num, Precision precision,
+                          double a, double b) {
+        IE_ASSERT(Precision::FP16 == precision);
+        std::mt19937 gen;
+        std::uniform_real_distribution<float> uniform(a, b);
+        for (int i = 0; i < num; i++) {
+            float v = uniform(gen);
+            reinterpret_cast<ie_fp16*>(data)[i] = PrecisionUtils::f32tof16(v);
+        }
+    }
+
+    // Fill the data[] array with random numbers drawn from a
+    // Gaussian distribution with the given mean and standard deviation
+    static
+    void fulfillGaussian(uint8_t* data, int num, Precision precision,
+                         double mean, double stdDev) {
+        IE_ASSERT(Precision::FP16 == precision);
+        std::mt19937 gen;
+        std::normal_distribution<float> gauss(mean, stdDev);
+        for (int i = 0; i < num; i++) {
+            float value = gauss(gen);
+            reinterpret_cast<ie_fp16*>(data)[i] = PrecisionUtils::f32tof16(value);
+        }
+    }
+
+    static
+    TBlob<uint8_t>::Ptr createPlainTBlob(const std::vector<int>& shape,
+                                         const Precision& precision)
+    {
+        int length = getTotalNum(shape);
+        SizeVector dims { length * precision.size() };
+        Layout layout = Layout::ANY; // as plain memory
+        TensorDesc tensorDesc(Precision::U8, dims, layout);
+        TBlob<uint8_t>::Ptr blob = std::make_shared<TBlob<uint8_t>>(tensorDesc);
+        return blob;
+    }
+
+    static
+    std::string createModel(const std::vector<int>& inputShape,
+                            const std::vector<int>& kernelShape,
+                            const std::vector<int>& padsBegin,
+                            const std::vector<int>& padsEnd,
+                            const std::string       autoPad,
+                            const std::vector<int>& strides,
+                            const std::vector<int>& dilations,
+                            const int               groups,
+                            const std::vector<int>& outputShape,
+                            const std::vector<int>& weightsShape,
+                            const std::vector<int>& biasesShape)
+    {
+        std::string model = R"V0G0N(
+            <?xml version="1.0" ?>
+            <net name="testConvND" version="6">
+                <layers>
+                    <layer id="0" name="input" type="Input" precision="__PRECISION__">
+                        <output>
+                            <port id="0">
+                                __INPUT_DIMS__
+                            </port>
+                        </output>
+                    </layer>
+                    <layer id="1" name="convolution" type="Convolution" precision="__PRECISION__">
+                        <data auto_pad="__AUTO_PAD__"
+                              dilations="__DILATIONS__"
+                              group="__GROUP__"
+                              kernel="__KERNEL__"
+                              output="__OUTPUT_CHANNELS__"
+                              pads_begin="__PADS_BEGIN__"
+                              pads_end="__PADS_END__"
+                              strides="__STRIDES__"
+                        />
+                        <input>
+                            <port id="0">
+                                __INPUT_DIMS__
+                            </port>
+                        </input>
+                        <output>
+                            <port id="3">
+                                __OUTPUT_DIMS__
+                            </port>
+                        </output>
+                        <blobs>
+                            <weights offset="0" size="__WEIGHTS_BYTES__"/>
+                            <biases offset="__WEIGHTS_BYTES__" size="__BIASES_BYTES__"/>
+                        </blobs>
+                    </layer>
+                </layers>
+                <edges>
+                    <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+                </edges>
+            </net>
+        )V0G0N";
+
+        REPLACE_WITH_STR(model, "__PRECISION__", "FP16");
+
+        const std::string inputDimsStr = shapeToDimsString(inputShape);
+        const std::string outputDimsStr = shapeToDimsString(outputShape);
+        REPLACE_WITH_STR(model, "__INPUT_DIMS__", inputDimsStr);
+        REPLACE_WITH_STR(model, "__OUTPUT_DIMS__", outputDimsStr);
+
+        const std::string groupStr = std::to_string(groups);
+        const std::string kernelShapeStr = shapeToString(kernelShape);
+        REPLACE_WITH_STR(model, "__GROUP__", groupStr);
+        REPLACE_WITH_STR(model, "__KERNEL__", kernelShapeStr);
+
+        const int batchNDims = inputShape.size() > 3; // NCHW, NCDHW, ...
+        const int channelsDim = batchNDims;
+        const int outputChannels = outputShape[channelsDim];
+        const std::string outputChannelsStr = std::to_string(outputChannels);
+        REPLACE_WITH_STR(model, "__OUTPUT_CHANNELS__", outputChannelsStr);
+
+        if (autoPad == "") {
+            const std::string padsBeginStr = shapeToString(padsBegin);
+            const std::string padsEndStr = shapeToString(padsEnd);
+            REPLACE_WITH_STR(model, "__PADS_BEGIN__", padsBeginStr);
+            REPLACE_WITH_STR(model, "__PADS_END__", padsEndStr);
+            REPLACE_WITH_STR(model, "auto_pad=\"__AUTO_PAD__\"", "");
+        } else {
+            REPLACE_WITH_STR(model, "pads_begin=\"__PADS_BEGIN__\"", "");
+            REPLACE_WITH_STR(model, "pads_end=\"__PADS_END__\"", "");
+            REPLACE_WITH_STR(model, "__AUTO_PAD__", autoPad);
+        }
+
+        if (dilations.empty()) {
+            REPLACE_WITH_STR(model, "dilations=\"__DILATIONS__\"", "");
+        } else {
+            const std::string dilationsStr = shapeToString(dilations);
+            REPLACE_WITH_STR(model, "__DILATIONS__", dilationsStr);
+        }
+
+        if (strides.empty()) {
+            REPLACE_WITH_STR(model, "strides=\"__STRIDES__\"", "");
+        } else {
+            const std::string stridesStr = shapeToString(strides);
+            REPLACE_WITH_STR(model, "__STRIDES__", stridesStr);
+        }
+
+        int weightsElements = 1;
+        for (int i = 0; i < weightsShape.size(); i++) {
+            weightsElements *= weightsShape[i];
+        }
+        const int weightsBytes = weightsElements * sizeof(ie_fp16);
+        const std::string weightsBytesStr = std::to_string(weightsBytes);
+        REPLACE_WITH_STR(model, "__WEIGHTS_BYTES__", weightsBytesStr);
+
+        const int biasesBytes = (outputChannels / groups) * sizeof(ie_fp16);
+        const std::string biasesBytesStr = std::to_string(biasesBytes);
+        REPLACE_WITH_STR(model, "__BIASES_BYTES__", biasesBytesStr);
+
+        return model;
+    }
+
+    static
+    std::string shapeToString(const std::vector<int>& shape) {
+        std::string str;
+        for (int i = 0; i < shape.size(); i++) {
+            str += (i? ", ": "");
+            str += std::to_string(shape[i]);
+        }
+        return str;
+    }
+
+    static
+    std::string shapeToDimsString(const std::vector<int>& shape)
+    {
+        std::string str;
+        for (int i = 0; i < shape.size(); i++) {
+            str += (i? " ": "");
+            str += "<dim>" + std::to_string(shape[i]) + "</dim>";
+        }
+        return str;
+    }
+};
+
+class myriadLayersConvNDTest_nightly: public ConvNDTest {};
+
+TEST_P(myriadLayersConvNDTest_nightly, ConvND) {
+    testConvND();
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convert_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convert_test.cpp
new file mode 100644 (file)
index 0000000..e64e72b
--- /dev/null
@@ -0,0 +1,21 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_convert_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+    accuracy, myriadLayersTestsIOConvert_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(inputsDims),
+        ::testing::ValuesIn(precisionsIO)
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(
+    accuracy, myriadLayersTestsConvertWithFP16_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(inputsDims),
+        ::testing::ValuesIn(withFP16Precisions)
+    )
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convert_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convert_test.hpp
new file mode 100644 (file)
index 0000000..03b6291
--- /dev/null
@@ -0,0 +1,116 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <cmath>
+
+#include <blob_factory.hpp>
+#include "myriad_layers_tests.hpp"
+#include "myriad_layers_reference_functions.hpp"
+
+#define ERROR_BOUND (5.e-4f)
+
+using namespace InferenceEngine;
+
+typedef std::pair<Precision, Precision> PrecisionPair;
+typedef std::tuple<InferenceEngine::SizeVector, PrecisionPair> ConvertIOTestParam;
+typedef std::tuple<InferenceEngine::SizeVector, Precision> ConvertWithFP16TestParam;
+
+class myriadLayersTestsIOConvert_nightly: public myriadLayersTests_nightly,
+                                          public testing::WithParamInterface<ConvertIOTestParam> {
+};
+
+TEST_P(myriadLayersTestsIOConvert_nightly, TestsIOConvert)
+{
+    const auto& param = ::testing::WithParamInterface<ConvertIOTestParam>::GetParam();
+    const auto& inputDims = std::get<0>(param);
+    const auto& precisions = std::get<1>(param);
+    const auto& inputPrecision = precisions.first;
+    const auto& outputPrecision = precisions.second;
+
+    SetInputTensors({inputDims});
+    SetOutputTensors({inputDims});
+
+    makeSingleLayerNetwork(LayerInitParams("Copy"),
+                NetworkInitParams()
+                .inputPrecision(inputPrecision)
+                .outputPrecision(outputPrecision));
+    ASSERT_TRUE(Infer());
+
+    auto tensorDesc = InferenceEngine::TensorDesc(
+        outputPrecision, _outputMap.begin()->second->getTensorDesc().getDims(),
+        _outputMap.begin()->second->getTensorDesc().getLayout());
+    auto refBlob = make_blob_with_precision(outputPrecision, tensorDesc);
+    refBlob->allocate();
+
+    ref_convert(_inputMap.begin()->second, refBlob);
+
+    CompareCommonAbsolute(_outputMap.begin()->second, refBlob, ERROR_BOUND);
+}
+
+class myriadLayersTestsConvertWithFP16_nightly: public myriadLayersTests_nightly,
+                                        public testing::WithParamInterface<ConvertWithFP16TestParam> {
+};
+
+TEST_P(myriadLayersTestsConvertWithFP16_nightly, TestsConvertWithFP16)
+{
+    const auto& param = ::testing::WithParamInterface<ConvertWithFP16TestParam>::GetParam();
+    const auto& inputDims = std::get<0>(param);
+    const auto& internalPrecision = std::get<1>(param);
+    const auto defaultPrecision = Precision::FP16;
+
+    std::map<std::string, std::string> convertToInternalPrecisionParams = {
+        {"precision", std::to_string(internalPrecision)}
+    };
+    std::map<std::string, std::string> convertFromInternalPrecisionParams = {
+        {"precision", std::to_string(defaultPrecision)}
+    };
+
+    auto convertLayerToTestPrecisionParams = LayerInitParams("Convert")
+            .params(convertToInternalPrecisionParams)
+            .name("convert_to")
+            .in({inputDims})
+            .out({inputDims})
+            .outPrecision(internalPrecision);
+
+    auto convertLayerFromTestPrecisionParams = LayerInitParams("Convert")
+            .params(convertFromInternalPrecisionParams)
+            .name("convert_from")
+            .in({inputDims})
+            .out({inputDims})
+            .outPrecision(defaultPrecision);
+
+    _testNet.addLayer(convertLayerToTestPrecisionParams, ref_convert_wrap);
+    _testNet.addLayer(convertLayerFromTestPrecisionParams, ref_convert_wrap);
+
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()
+            .inputPrecision(defaultPrecision)
+            .outputPrecision(defaultPrecision)
+            .runRefGraph(true)));
+
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), ERROR_BOUND);
+}
+
+std::vector<InferenceEngine::SizeVector> inputsDims = {
+    {       224, 224 },
+    {    3, 224, 224 },
+    { 1, 1, 224, 224 },
+    { 1, 1, 416, 416 },
+    { 1, 1,  62,  62 },
+    { 1, 1, 227, 227 },
+    { 1, 3, 224, 224 },
+
+    // 5D case
+    { 2, 2, 3, 224, 224 },
+};
+
+std::vector<PrecisionPair> precisionsIO = {
+    {Precision::U8,   Precision::FP16},
+    {Precision::FP32, Precision::FP16},
+    {Precision::FP16, Precision::FP32}
+};
+
+std::vector<Precision> withFP16Precisions = {
+    Precision::I32,
+    Precision::FP32,
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution1x1.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution1x1.cpp
new file mode 100644 (file)
index 0000000..da29ff1
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright (C) 2018-2019 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_convolution1x1.hpp"
+
+INSTANTIATE_TEST_CASE_P(myriad, myriadConvolution1x1LayerTests_nightly,
+        ::testing::Combine(
+        ::testing::Values(CONFIG_VALUE(NO)),
+        ::testing::ValuesIn(s_isHWC),
+        ::testing::ValuesIn(s_DimsConfig)));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution1x1.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution1x1.hpp
new file mode 100644 (file)
index 0000000..379817c
--- /dev/null
@@ -0,0 +1,257 @@
+// Copyright (C) 2018-2019 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <cmath>
+#include "myriad_layers_tests.hpp"
+
+using namespace InferenceEngine;
+
+#define ERROR_BOUND 0.5f
+
+typedef struct {
+    SizeVector src_dims;
+    SizeVector weights_dims;
+    SizeVector dst_dims;
+    std::string custom_config;
+} dims_config;
+
+PRETTY_PARAM(hwAcceleration, std::string);
+PRETTY_PARAM(dimsConfig, dims_config);
+PRETTY_PARAM(isHWC, int);
+
+typedef myriadLayerTestBaseWithParam<std::tuple<std::string, isHWC, dims_config>> myriadConvolution1x1LayerTests_nightly;
+
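+// Reference 1x1 convolution: each output pixel is a dot product over the input
+// channels, out[oc][oh][ow] = sum_ic in[ic][ih][iw] * w[oc*IC + ic].
+// Note (added for clarity): the HWC branch accumulates in FP32, while the CHW
+// branch accumulates step-by-step in FP16, presumably to mimic device rounding.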
+void refConvolution1x1(const Blob::Ptr src, InferenceEngine::TBlob<uint8_t>::Ptr weights, Blob::Ptr dst, int isHWC) {
+    ie_fp16 *in = static_cast<ie_fp16*>(src->buffer());
+    const ie_fp16 *w = weights->readOnly().as<const ie_fp16 *>();
+    ie_fp16 *out = static_cast<ie_fp16*>(dst->buffer());
+
+    ASSERT_NE(in, nullptr);
+    ASSERT_NE(w, nullptr);
+    ASSERT_NE(out, nullptr);
+
+    const auto& in_dims = src->getTensorDesc().getDims();
+    size_t in_width      = in_dims[in_dims.size() - 1];
+    size_t in_height     = in_dims[in_dims.size() - 2];
+    size_t in_channels   = in_dims[in_dims.size() - 3];
+
+    size_t IW = in_width;
+    size_t IH = in_height;
+    size_t IC = in_channels;
+
+    const auto& out_dims = dst->getTensorDesc().getDims();
+    size_t out_width      = out_dims[out_dims.size() - 1];
+    size_t out_height     = out_dims[out_dims.size() - 2];
+    size_t out_channels   = out_dims[out_dims.size() - 3];
+
+    size_t OW = out_width;
+    size_t OH = out_height;
+    size_t OC = out_channels;
+
+    for (int oc = 0; oc < OC; ++oc)
+    {
+        for (int oh = 0; oh < OH; oh++)
+        {
+            for (int ow = 0; ow < OW; ow++)
+            {
+                float valYXZ = 0.0f;
+                ie_fp16 valZYX = 0.0f;
+                for (int ic = 0; ic < IC; ++ic)
+                {
+                    int iw = ow;
+                    int ih = oh;
+
+                    if (iw < 0 || iw >= (int)IW || ih < 0 || ih >= (int)IH)
+                    {
+                        continue;
+                    }
+                    uint32_t indx;
+                    if (isHWC == 1) {
+                        indx = ic + iw * IC + ih * IC * IW;
+                        valYXZ += PrecisionUtils::f16tof32(in[indx]) * PrecisionUtils::f16tof32(w[oc*IC + ic]);
+                    } else {
+                        indx = iw + ih * IW + ic * IW * IH;
+                        valZYX = PrecisionUtils::f32tof16(PrecisionUtils::f16tof32(valZYX) + PrecisionUtils::f16tof32(PrecisionUtils::f32tof16(PrecisionUtils::f16tof32(in[indx]) * PrecisionUtils::f16tof32(w[oc*IC + ic]))));
+                    }
+                }
+                if (isHWC == 1) {
+                    out[oc*OH*OW + oh*OW + ow] = PrecisionUtils::f32tof16(valYXZ);
+                } else {
+                    out[oc*OH*OW + oh*OW + ow] = valZYX;
+                }
+            }
+        }
+    }
+}
+
+TEST_P(myriadConvolution1x1LayerTests_nightly, Convolution1x1) {
+    std::string model = R"V0G0N(
+       <net name="Convolution1x1" version="2" batch="1">
+           <layers>
+            <layer id="0" name="data" precision="FP16" type="Input">
+                <output>
+                    <port id="0">
+                        <dim>@IB@</dim>
+                        <dim>@IC@</dim>
+                        <dim>@IH@</dim>
+                        <dim>@IW@</dim>
+                    </port>
+                </output>
+            </layer>
+            <layer id="2" name="conv1x1" precision="FP16" type="Convolution">
+                <data isHWC="@isHWC@" stride-x="1" stride-y="1" pad-x="0" pad-y="0" kernel-x="1" kernel-y="1" output="48" group="1"/>
+                <input>
+                    <port id="0">
+                        <dim>@IB@</dim>
+                        <dim>@IC@</dim>
+                        <dim>@IH@</dim>
+                        <dim>@IW@</dim>
+                    </port>
+                </input>
+                <output>
+                    <port id="2">
+                        <dim>@OB@</dim>
+                        <dim>@OC@</dim>
+                        <dim>@OH@</dim>
+                        <dim>@OW@</dim>
+                    </port>
+                </output>
+                <weights offset="0" size="@size_weights@"/>
+            </layer>
+           </layers>
+           <edges>
+               <edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
+           </edges>
+       </net>
+   )V0G0N";
+
+    SetSeed(DEFAULT_SEED_VALUE + 6);
+
+    std::string HWConfigValue = std::get<0>(GetParam());
+    int isHWC                 = std::get<1>(GetParam());
+    dims_config customConfig  = std::get<2>(GetParam());
+
+    if (!customConfig.custom_config.empty() && !CheckMyriadX()) {
+        GTEST_SKIP() << "Custom layers are not supported on MYRIAD2";
+    }
+    _config[VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION)] = HWConfigValue;
+    _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig.custom_config;
+
+    int IB = customConfig.src_dims[0];
+    int IC = customConfig.src_dims[1];
+    int IH = customConfig.src_dims[2];
+    int IW = customConfig.src_dims[3];
+
+    int OB = customConfig.dst_dims[0];
+    int OC = customConfig.dst_dims[1];
+    int OH = customConfig.dst_dims[2];
+    int OW = customConfig.dst_dims[3];
+
+    size_t num_weights = IC * OC;
+
+    model.replace( model.find("@isHWC@"), sizeof("@isHWC@") -1, std::to_string(isHWC));
+
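+    // Note (added for clarity): each input-dim placeholder occurs twice in the
+    // IR snippet above (the Input layer's output port and the Convolution's
+    // input port), hence two replace() calls per tag below.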
+    model.replace( model.find("@IB@"), sizeof("@IB@") -1, std::to_string(IB));
+    model.replace( model.find("@IB@"), sizeof("@IB@") -1, std::to_string(IB));
+    model.replace( model.find("@IC@"), sizeof("@IC@") -1, std::to_string(IC));
+    model.replace( model.find("@IC@"), sizeof("@IC@") -1, std::to_string(IC));
+    model.replace( model.find("@IH@"), sizeof("@IH@") -1, std::to_string(IH));
+    model.replace( model.find("@IH@"), sizeof("@IH@") -1, std::to_string(IH));
+    model.replace( model.find("@IW@"), sizeof("@IW@") -1, std::to_string(IW));
+    model.replace( model.find("@IW@"), sizeof("@IW@") -1, std::to_string(IW));
+
+    model.replace( model.find("@OB@"), sizeof("@OB@") -1, std::to_string(OB));
+    model.replace( model.find("@OC@"), sizeof("@OC@") -1, std::to_string(OC));
+    model.replace( model.find("@OH@"), sizeof("@OH@") -1, std::to_string(OH));
+    model.replace( model.find("@OW@"), sizeof("@OW@") -1, std::to_string(OW));
+
+    model.replace( model.find("@size_weights@"), sizeof("@size_weights@") -1, std::to_string(num_weights * sizeof(ie_fp16)));
+
+    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(GenWeights(num_weights));
+
+    StatusCode st;
+
+    InferenceEngine::Core ie;
+    auto network = ie.ReadNetwork(model, weights_ptr);
+
+    _inputsInfo = network.getInputsInfo();
+    _inputsInfo["data"]->setPrecision(Precision::FP16);
+    _inputsInfo["data"]->setLayout(isHWC ? NHWC : NCHW);
+
+    _outputsInfo = network.getOutputsInfo();
+    _outputsInfo["conv1x1"]->setPrecision(Precision::FP16);
+    _outputsInfo["conv1x1"]->setLayout(NCHW);
+
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network,
+                                                    {{VPU_CONFIG_KEY(CUSTOM_LAYERS), customConfig.custom_config}, {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), HWConfigValue}}, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr data;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("data", data, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    GenRandomData(data);
+
+    ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    {
+        std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perfMap;
+        _inferRequest->GetPerformanceCounts(perfMap, nullptr);
+        std::vector<std::pair<std::string, InferenceEngine::InferenceEngineProfileInfo>> perfVec(perfMap.begin(), perfMap.end());
+        std::sort(perfVec.begin(), perfVec.end(),
+                  [](const std::pair<std::string, InferenceEngine::InferenceEngineProfileInfo>& pair1,
+                     const std::pair<std::string, InferenceEngine::InferenceEngineProfileInfo>& pair2) -> bool {
+                      return pair1.second.execution_index < pair2.second.execution_index;
+                  });
+
+        for (auto it = perfVec.begin(); it != perfVec.end(); ++it) {
+            const std::string& layerName = it->first;
+            const InferenceEngine::InferenceEngineProfileInfo& info = it->second;
+            if (info.status == InferenceEngine::InferenceEngineProfileInfo::EXECUTED) {
+                printf("[----------] Myriad time = '%s' layer with '%s' type is %f ms.\n", layerName.c_str(), info.exec_type, info.realTime_uSec / 1000.f);
+            }
+        }
+        printf("[----------] input dim: [%d %d %d %d]; output dim: [%d %d %d %d].\n", IB, IC, IH, IW, OB, OC, OH, OW);
+        printf("[----------] isHardware: %s; isHWC: %d.\n", HWConfigValue.c_str(), isHWC);
+    }
+    Blob::Ptr outputBlob;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("conv1x1", outputBlob, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    _refBlob = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, outputBlob->getTensorDesc().getDims(), NCHW));
+    _refBlob->allocate();
+
+    refConvolution1x1(data, weights_ptr, _refBlob, isHWC);
+
+    CompareCommonAbsolute(outputBlob, _refBlob, ERROR_BOUND);
+}
+static std::vector<dims_config> s_DimsConfig = {
+#ifdef VPU_HAS_CUSTOM_KERNELS
+    {{1,   64, 56, 56}, {1, 1, 1,   64 *   64}, {1,   64, 56, 56}, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+    {{1,   64, 56, 56}, {1, 1, 1,   64 *  256}, {1,  256, 56, 56}, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+    {{1,  256, 56, 56}, {1, 1, 1,  256 *  256}, {1,  256, 56, 56}, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+    {{1,  256, 56, 56}, {1, 1, 1,  256 *  128}, {1,  128, 56, 56}, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+    {{1,  128, 28, 28}, {1, 1, 1,  128 *  512}, {1,  512, 28, 28}, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+    {{1,  512, 28, 28}, {1, 1, 1,  512 *  128}, {1,  128, 28, 28}, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+    {{1,  512, 28, 28}, {1, 1, 1,  512 *  256}, {1,  256, 28, 28}, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+    {{1,  256, 14, 14}, {1, 1, 1,  256 * 1024}, {1, 1024, 14, 14}, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+    {{1, 1024, 14, 14}, {1, 1, 1, 1024 *  256}, {1,  256, 14, 14}, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+    {{1, 1024, 14, 14}, {1, 1, 1, 1024 *  512}, {1,  512, 14, 14}, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+    {{1,  512,  7,  7}, {1, 1, 1,  512 * 2048}, {1, 2048,  7,  7}, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+    {{1, 2048,  7,  7}, {1, 1, 1, 2048 *  512}, {1,  512,  7,  7}, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+#endif
+};
+
+static std::vector<isHWC> s_isHWC = {
+#ifdef VPU_HAS_CUSTOM_KERNELS
+   {0, 1}
+#endif
+};
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution3x3.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution3x3.cpp
new file mode 100644 (file)
index 0000000..967ed36
--- /dev/null
@@ -0,0 +1,10 @@
+// Copyright (C) 2018-2019 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_convolution3x3.hpp"
+
+INSTANTIATE_TEST_CASE_P(myriad, myriadConvolution3x3LayerTests_nightly,
+        ::testing::Combine(
+        ::testing::Values(CONFIG_VALUE(NO)),
+        ::testing::ValuesIn(s_DimsConfig)));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution3x3.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution3x3.hpp
new file mode 100644 (file)
index 0000000..6f7307e
--- /dev/null
@@ -0,0 +1,284 @@
+// Copyright (C) 2018-2019 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <cmath>
+#include "myriad_layers_tests.hpp"
+
+using namespace InferenceEngine;
+
+#define ERROR_BOUND 0.5f
+
+typedef struct {
+    SizeVector  src_dims;
+    SizeVector  dst_dims;
+    int         stride_xy;
+    std::string custom_config;
+} dims_config_con3x3;
+
+PRETTY_PARAM(hwAcceleration, std::string);
+PRETTY_PARAM(dimsConfig, dims_config_con3x3);
+
+typedef myriadLayerTestBaseWithParam<std::tuple<std::string, dims_config_con3x3>> myriadConvolution3x3LayerTests_nightly;
+
+void refConvolution3x3(const Blob::Ptr src, InferenceEngine::TBlob<uint8_t>::Ptr weights, Blob::Ptr dst, int stride_x, int stride_y, int pad_x, int pad_y, int dilation_x, int dilation_y) {
+
+          ie_fp16 *in = static_cast<ie_fp16*>(src->buffer());
+    const ie_fp16 *w = weights->readOnly().as<const ie_fp16 *>();
+          ie_fp16 *out = static_cast<ie_fp16*>(dst->buffer());
+
+    ASSERT_NE(in, nullptr);
+    ASSERT_NE(w, nullptr);
+    ASSERT_NE(out, nullptr);
+
+    const auto& in_dims = src->getTensorDesc().getDims();
+    size_t in_width      = in_dims[in_dims.size() - 1];
+    size_t in_height     = in_dims[in_dims.size() - 2];
+    size_t in_channels   = in_dims[in_dims.size() - 3];
+
+    size_t IW = in_width;
+    size_t IH = in_height;
+    size_t IC = in_channels;
+
+    const auto& out_dims = dst->getTensorDesc().getDims();
+    size_t out_width      = out_dims[out_dims.size() - 1];
+    size_t out_height     = out_dims[out_dims.size() - 2];
+    size_t out_channels   = out_dims[out_dims.size() - 3];
+
+    size_t OW = out_width;
+    size_t OH = out_height;
+    size_t OC = out_channels;
+
+    size_t group = 1;
+
+    size_t src_channels = IC / group;
+    size_t dst_channels = OC / group;
+
+    size_t KW = 3;
+    size_t KH = 3;
+
+    // Advance to the first non-padded element: the input carries one row and
+    // one column of zero padding on each side, as required by the 3x3 kernel.
+    in = in + 1 + 1 * IW;
+
+    for (size_t g = 0; g < group; ++g)
+    {
+        for (size_t oc = 0; oc < dst_channels; ++oc)
+        {
+            size_t dst_channel = (g * dst_channels + oc);
+            for (size_t oh = 0; oh < OH; oh++)
+            {
+                for (size_t ow = 0; ow < OW; ow++)
+                {
+                    float val = 0.0f;
+
+                    for (size_t ic = 0; ic < src_channels; ++ic)
+                    {
+                        size_t src_channel = (g * src_channels + ic);
+
+                        for (int ky = 0; ky < KH; ++ky)
+                        {
+                            for (int kx = 0; kx < KW; ++kx)
+                            {
+                                int32_t iw = ow * stride_x - pad_x + kx * dilation_x;
+                                int32_t ih = oh * stride_y - pad_y + ky * dilation_y;
+
+                                float v = PrecisionUtils::f16tof32(in[iw + ih * IW + src_channel * IW * IH]) *
+                                          PrecisionUtils::f16tof32(w[oc*IC*KW*KH + ic*KW*KH + ky*KW + kx]);
+                                val += v;
+                            }
+                        }
+                    }
+
+                    out[oc*OH*OW + oh*OW + ow] = PrecisionUtils::f32tof16(val);
+                }
+            }
+        }
+    }
+}
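+
+// Editorial sketch (not in the original source): the NCHW offset arithmetic the
+// reference above spells out inline, factored into one helper for readability.
+// The helper name is hypothetical and nothing in this file calls it.
+inline size_t offsetNCHW(size_t c, size_t h, size_t w, size_t H, size_t W) {
+    return c * H * W + h * W + w;  // batch == 1, channel-major (NCHW) layout
+}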
+
+TEST_P(myriadConvolution3x3LayerTests_nightly, Convolution3x3) {
+    std::string model = R"V0G0N(
+       <net name="Convolution3x3" version="2" batch="1">
+           <layers>
+            <layer id="0" name="data" precision="FP16" type="Input">
+                <output>
+                    <port id="0">
+                        <dim>@IB@</dim>
+                        <dim>@IC@</dim>
+                        <dim>@IH@</dim>
+                        <dim>@IW@</dim>
+                    </port>
+                </output>
+            </layer>
+            <layer id="2" name="conv3x3" precision="FP16" type="Convolution">
+                <data stride-x="@stride-x@" stride-y="@stride-y@" pad-x="1" pad-y="1" dilation-x="1" dilation-y="1" output="1" kernel-x="3" kernel-y="3"/>
+                <input>
+                    <port id="0">
+                        <dim>@IB@</dim>
+                        <dim>@IC@</dim>
+                        <dim>@IH@</dim>
+                        <dim>@IW@</dim>
+                    </port>
+                </input>
+                <output>
+                    <port id="2">
+                        <dim>@OB@</dim>
+                        <dim>@OC@</dim>
+                        <dim>@OH@</dim>
+                        <dim>@OW@</dim>
+                    </port>
+                </output>
+                <weights offset="0" size="@size_weights@"/>
+            </layer>
+           </layers>
+           <edges>
+               <edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
+           </edges>
+       </net>
+   )V0G0N";
+    SetSeed(DEFAULT_SEED_VALUE + 6);
+    std::string HWConfigValue = std::get<0>(GetParam());
+    dims_config_con3x3 customConfig = std::get<1>(GetParam());
+
+    int stride_xy = customConfig.stride_xy;
+
+    if (!customConfig.custom_config.empty() && !CheckMyriadX()) {
+        GTEST_SKIP() << "Custom layers are not supported on MYRIAD2";
+    }
+    _config[VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION)] = HWConfigValue;
+    _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig.custom_config;
+
+    int IB = customConfig.src_dims[0];
+    int IC = customConfig.src_dims[1];
+    int IH = customConfig.src_dims[2];
+    int IW = customConfig.src_dims[3];
+
+    int OB = customConfig.dst_dims[0];
+    int OC = customConfig.dst_dims[1];
+    int OH = customConfig.dst_dims[2];
+    int OW = customConfig.dst_dims[3];
+
+    // 3x3 kernel: one weight per kernel element, input channel, and output channel
+    size_t num_weights = 3 * 3 * IC * OC;
+
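+    // Each input-dim placeholder occurs twice in the IR snippet (the Input
+    // layer's output and the Convolution's input), so each @I*@ token below is
+    // replaced twice.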
+    model.replace( model.find("@IB@"), sizeof("@IB@") -1, std::to_string(IB));
+    model.replace( model.find("@IB@"), sizeof("@IB@") -1, std::to_string(IB));
+    model.replace( model.find("@IC@"), sizeof("@IC@") -1, std::to_string(IC));
+    model.replace( model.find("@IC@"), sizeof("@IC@") -1, std::to_string(IC));
+    model.replace( model.find("@IH@"), sizeof("@IH@") -1, std::to_string(IH));
+    model.replace( model.find("@IH@"), sizeof("@IH@") -1, std::to_string(IH));
+    model.replace( model.find("@IW@"), sizeof("@IW@") -1, std::to_string(IW));
+    model.replace( model.find("@IW@"), sizeof("@IW@") -1, std::to_string(IW));
+
+    model.replace( model.find("@OB@"), sizeof("@OB@") -1, std::to_string(OB));
+    model.replace( model.find("@OC@"), sizeof("@OC@") -1, std::to_string(OC));
+    model.replace( model.find("@OH@"), sizeof("@OH@") -1, std::to_string(OH));
+    model.replace( model.find("@OW@"), sizeof("@OW@") -1, std::to_string(OW));
+
+    model.replace( model.find("@stride-x@"), sizeof("@stride-x@") -1, std::to_string(stride_xy));
+    model.replace( model.find("@stride-y@"), sizeof("@stride-y@") -1, std::to_string(stride_xy));
+    model.replace( model.find("@size_weights@"), sizeof("@size_weights@") -1, std::to_string(num_weights * sizeof(ie_fp16)));
+
+    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(GenWeights(num_weights));
+
+    InferenceEngine::Core ie;
+    auto network = ie.ReadNetwork(model, weights_ptr);
+
+    StatusCode st;
+
+    _inputsInfo = network.getInputsInfo();
+    _inputsInfo["data"]->setPrecision(Precision::FP16);
+    _inputsInfo["data"]->setLayout(NCHW);
+
+    _outputsInfo = network.getOutputsInfo();
+    _outputsInfo["conv3x3"]->setPrecision(Precision::FP16);
+    _outputsInfo["conv3x3"]->setLayout(NCHW);
+
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network,
+                                                    {{VPU_CONFIG_KEY(CUSTOM_LAYERS), customConfig.custom_config}, {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), HWConfigValue}}, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr data;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("data", data, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    GenRandomData(data);
+
+    // Zero out the padding border expected by the reference: first/last row and
+    // first/last column of every channel. Writing the float literal 0.0f through
+    // an ie_fp16 (integer) pointer works here only because zero converts exactly.
+    for(int ic = 0; ic < IC; ++ic){
+        for(int iw = 0; iw < IW; ++iw){
+            int indx_l0 = iw + 0 * IW + ic * IW * IH;
+            int indx_ln = iw + (IH - 1) * IW + ic * IW * IH;
+            *((ie_fp16*)data->buffer() + indx_l0) = 0.0f;
+            *((ie_fp16*)data->buffer() + indx_ln) = 0.0f;
+        }
+    }
+    for(int ic = 0; ic < IC; ++ic){
+        for(int ih = 0; ih < IH; ++ih){
+            int indx_c0 = 0 + ih * IW + ic * IW * IH;
+            int indx_cn = (IW - 1) + ih * IW + ic * IW * IH;
+            *((ie_fp16*)data->buffer() + indx_c0) = 0.0f;
+            *((ie_fp16*)data->buffer() + indx_cn) = 0.0f;
+        }
+    }
+
+    ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+{
+    std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perfMap;
+    _inferRequest->GetPerformanceCounts(perfMap, nullptr);
+    std::vector <std::pair<std::string, InferenceEngine::InferenceEngineProfileInfo>> perfVec(perfMap.begin(), perfMap.end());
+    std::sort(perfVec.begin(), perfVec.end(),
+              [=](const std::pair<std::string, InferenceEngine::InferenceEngineProfileInfo> &pair1,
+                 const std::pair<std::string, InferenceEngine::InferenceEngineProfileInfo> &pair2) -> bool {
+                  return pair1.second.execution_index < pair2.second.execution_index;
+              });
+
+    for (auto it = perfVec.begin(); it != perfVec.end(); ++it) {
+        std::string layerName = it->first;
+        InferenceEngine::InferenceEngineProfileInfo info = it->second;
+        if (info.status == InferenceEngine::InferenceEngineProfileInfo::EXECUTED) {
+            printf("[----------] Myriad time = '%s' layer with '%s' type is %f ms.\n", layerName.c_str(), info.exec_type, info.realTime_uSec / 1000.f);
+        }
+    }
+    printf("[----------] input dim: [%d %d %d %d]; output dim: [%d %d %d %d]; stride: %d.\n", IB, IC, IH, IW, OB, OC, OH, OW, stride_xy);
+    printf("[----------] isHardware: %s.\n", HWConfigValue.c_str());
+}
+
+    Blob::Ptr outputBlob;
+    ASSERT_NO_THROW(_inferRequest->GetBlob("conv3x3", outputBlob, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    _refBlob = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, outputBlob->getTensorDesc().getDims(), NCHW));
+    _refBlob->allocate();
+
+    refConvolution3x3(data, weights_ptr, _refBlob, stride_xy, stride_xy, 1, 1, 1, 1);
+
+    CompareCommonAbsolute(outputBlob, _refBlob, ERROR_BOUND);
+}
+
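+// Editorial worked example (not in the original source): the inputs below are
+// pre-padded by one pixel per side, so with a 3x3 kernel
+// dst = (src_padded - 3) / stride_xy + 1; e.g. src {1, 64, 58, 58} at
+// stride_xy = 1 yields dst {1, 64, 56, 56}, and {1, 128, 58, 58} at
+// stride_xy = 2 yields {1, 128, 28, 28}.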
+static std::vector<dims_config_con3x3> s_DimsConfig = {
+#ifdef VPU_HAS_CUSTOM_KERNELS
+    {{1,   64, 58, 58}, {1,  64, 56, 56}, 1, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+    {{1,  128, 58, 58}, {1, 128, 56, 56}, 1, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+    {{1,  128, 30, 30}, {1, 128, 28, 28}, 1, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+    // {{1,  256, 30, 30}, {1, 256, 28, 28}, 1, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+    {{1,  256, 16, 16}, {1, 256, 14, 14}, 1, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+    // {{1,  512, 16, 16}, {1, 512, 14, 14}, 1, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+    {{1,  512,  9,  9}, {1, 512,  7,  7}, 1, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+
+    {{1,  128, 58, 58}, {1, 128, 28, 28}, 2, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+    {{1,  256, 30, 30}, {1, 256, 14, 14}, 2, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+    {{1,  256, 16, 16}, {1, 384,  7,  7}, 2, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+#endif
+};
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution_test.cpp
new file mode 100644 (file)
index 0000000..dbb8ad0
--- /dev/null
@@ -0,0 +1,903 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_convolution_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy_chw_dilation, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 37, 43, 43)
+                                       , MAKE_STRUCT(tensor_test_params, 1, 37, 19, 19))
+          , ::testing::Values<kernel>(
+                                      MAKE_STRUCT(param_size, 3, 3)
+                                     )
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1)
+                                    , MAKE_STRUCT(param_size, 2, 2)
+                                    , MAKE_STRUCT(param_size, 3, 3)
+                                    )
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 3, 2))
+          , ::testing::Values<out_channels>(24)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 6, 5))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMajor)
+        )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayers_IR3_ConvTests_nightly,
+        ::testing::Combine(
+            ::testing::Values<InferenceEngine::SizeVector>({1, 3, 32, 24})
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<uint32_t>(12)
+          , ::testing::Values<uint32_t>(1)
+          )
+);
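+
+// Editorial note: the two pad entries in the Combine above presumably map to
+// pads_begin and pads_end of the IR v3 Convolution; see the tuple declared for
+// myriadLayers_IR3_ConvTests_nightly in the header for the exact order.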
+
+INSTANTIATE_TEST_CASE_P(accuracy_Batch_0, myriadLayers_BatchTest_ConvTests_nightly,
+        ::testing::Combine(
+            ::testing::Values<InferenceEngine::SizeVector>({10, 5, 1, 1})
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<uint32_t>(5)
+          , ::testing::Values<uint32_t>(1)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_Batch_1, myriadLayers_BatchTest_ConvTests_nightly,
+        ::testing::Combine(
+            ::testing::Values<InferenceEngine::SizeVector>({10, 576, 7, 7})
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<uint32_t>(128)
+          , ::testing::Values<uint32_t>(1)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_Batch_2, myriadLayers_BatchTest_ConvTests_nightly,
+        ::testing::Combine(
+            ::testing::Values<InferenceEngine::SizeVector>({10, 128, 7, 7})
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<uint32_t>(192)
+          , ::testing::Values<uint32_t>(1)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_Batch_3, myriadLayers_BatchTest_ConvTests_nightly,
+        ::testing::Combine(
+            ::testing::Values<InferenceEngine::SizeVector>({10, 4, 7, 7})
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<uint32_t>(4)
+          , ::testing::Values<uint32_t>(1)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_Batch_4, myriadLayers_BatchTest_ConvTests_nightly,
+        ::testing::Combine(
+            ::testing::Values<InferenceEngine::SizeVector>({10, 256, 7, 7})
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<uint32_t>(256)
+          , ::testing::Values<uint32_t>(1)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_Batch_5, myriadLayers_BatchTest_ConvTests_nightly,
+        ::testing::Combine(
+            ::testing::Values<InferenceEngine::SizeVector>({10, 1024, 4, 4})
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<uint32_t>(352)
+          , ::testing::Values<uint32_t>(1)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_Batch_6, myriadLayers_BatchTest_ConvTests_nightly,
+        ::testing::Combine(
+            ::testing::Values<InferenceEngine::SizeVector>({10, 192, 4, 4})
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<uint32_t>(320)
+          , ::testing::Values<uint32_t>(1)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_Batch_7, myriadLayers_BatchTest_ConvTests_nightly,
+        ::testing::Combine(
+            ::testing::Values<InferenceEngine::SizeVector>({10, 160, 4, 4})
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<uint32_t>(224)
+          , ::testing::Values<uint32_t>(1)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_Batch_8, myriadLayers_BatchTest_ConvTests_nightly,
+        ::testing::Combine(
+            ::testing::Values<InferenceEngine::SizeVector>({10, 224, 4, 4})
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<uint32_t>(224)
+          , ::testing::Values<uint32_t>(1)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_Batch_9, myriadLayers_BatchTest_ConvTests_nightly,
+        ::testing::Combine(
+            ::testing::Values<InferenceEngine::SizeVector>({10, 1024, 4, 4})
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<uint32_t>(128)
+          , ::testing::Values<uint32_t>(1)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_Batch_10, myriadLayers_BatchTest_ConvTests_nightly,
+        ::testing::Combine(
+            ::testing::Values<InferenceEngine::SizeVector>({1, 64, 56, 56})
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<uint32_t>(192)
+          , ::testing::Values<uint32_t>(1)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_Batch_11, myriadLayers_BatchTest_ConvTests_nightly,
+        ::testing::Combine(
+            ::testing::Values<InferenceEngine::SizeVector>({10, 192, 7, 7})
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<uint32_t>(256)
+          , ::testing::Values<uint32_t>(1)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_Batch_12, myriadLayers_BatchTest_ConvTests_nightly,
+        ::testing::Combine(
+            ::testing::Values<InferenceEngine::SizeVector>({10, 576, 7, 7})
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<uint32_t>(192)
+          , ::testing::Values<uint32_t>(1)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_Batch_1, myriadLayers_BatchTest2_ConvTests_nightly,
+        ::testing::Combine(
+            ::testing::Values<InferenceEngine::SizeVector>({10, 576, 7, 7})
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<uint32_t>(192)
+          , ::testing::Values<uint32_t>(1)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_3X3, myriadLayers_IR3_ConvTests_nightly,
+        ::testing::Combine(
+            ::testing::Values<InferenceEngine::SizeVector>({1, 3, 32, 24})
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<uint32_t>(12)
+          , ::testing::Values<uint32_t>(1)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_3X1, myriadLayers_IR3_ConvTests_nightly,
+        ::testing::Combine(
+            ::testing::Values<InferenceEngine::SizeVector>({1, 3, 32, 24})
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 1))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 0))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 0))
+          , ::testing::Values<uint32_t>(16)
+          , ::testing::Values<uint32_t>(1)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_1X3, myriadLayers_IR3_ConvTests_nightly,
+        ::testing::Combine(
+            ::testing::Values<InferenceEngine::SizeVector>({1, 4, 16, 16})
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 1))
+          , ::testing::Values<uint32_t>(8)
+          , ::testing::Values<uint32_t>(1)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayers_3X3X3_ConstInput_nightly,
+        ::testing::Combine(
+            ::testing::Values<InferenceEngine::SizeVector>({1, 3, 10, 10})
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<uint32_t>(32)
+          , ::testing::Values<uint32_t>(1)
+          , ::testing::ValuesIn(g_poolingLayout) // the set of layouts to test
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_crossroad_spatialConv, myriadLayerConvolutionTensorFlow_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 3, 1024, 1024))
+          , ::testing::Values<DimsOutput>(MAKE_STRUCT(tensor_test_params, 1, 3, 512, 512))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 7, 7))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 2))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_inception_v2, myriadLayerConvolutionTensorFlow_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 16, 28, 28))
+          , ::testing::Values<DimsOutput>(MAKE_STRUCT(tensor_test_params, 1, 64, 14, 14))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 2))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_inception_v1, myriadLayerConvolutionTensorFlow_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 3, 224, 224),
+                                         MAKE_STRUCT(tensor_test_params, 1, 32, 224, 224)
+            )
+          , ::testing::Values<DimsOutput>(MAKE_STRUCT(tensor_test_params, 1, 64, 112, 112))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 7, 7))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 2))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(test_3x3_SSD_dilation, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 512, 19, 19))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 6, 6))
+          , ::testing::Values<out_channels>(1024)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 6, 5))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(test_TF_Resnet_50, myriadLayers_IR3_ConvTests_nightly,
+        ::testing::Combine(
+            ::testing::Values<InferenceEngine::SizeVector>({1, 512, 38, 38})
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<out_channels>(128)
+          , ::testing::Values<group>(1)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(test_3x3_icvnet_dilation, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 24, 20, 20))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1)
+                                    , MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1)
+                                 , MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<out_channels>(48)
+          , ::testing::Values<group>(6, 8)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 2, 3))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(test_5x5_with_dilation, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 32, 64,  77))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 5, 5))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1)
+                                    , MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1)
+                                 , MAKE_STRUCT(param_size, 0, 0)
+                                 , MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<out_channels>(32)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 2, 3), MAKE_STRUCT(param_size, 3, 4))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(test_7x7_with_dilation, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 32, 64,  77))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 7, 7))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1)
+                                    , MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1)
+                                 , MAKE_STRUCT(param_size, 0, 0)
+                                 , MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<out_channels>(32)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 2, 3), MAKE_STRUCT(param_size, 3, 4))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(test_conv1x1, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 10, 13, 13))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1)
+                                    , MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<out_channels>(20)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 2))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+           )
+);
+
+INSTANTIATE_TEST_CASE_P(test_yolo_tiny_2_512x13x13_use_3x3_convolution, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 512, 13, 13))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<out_channels>(1024)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 2))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+           )
+);
+
+INSTANTIATE_TEST_CASE_P(test_yolo_tiny_2_512x13x13_use_1x1_convolution, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 4608, 13, 13))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<out_channels>(1024)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 2))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+           )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_group, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 32, 64,  77)
+                                       , MAKE_STRUCT(tensor_test_params, 1, 32, 112, 96))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3)
+                                  , MAKE_STRUCT(param_size, 5, 5)
+                                  , MAKE_STRUCT(param_size, 7, 7))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1)
+                                    , MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1)
+                                 , MAKE_STRUCT(param_size, 0, 0)
+                                 , MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<out_channels>(32)
+          , ::testing::Values<group>(32)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 2))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_group_large_input, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 32, 192, 336))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1)
+                                 , MAKE_STRUCT(param_size, 0, 0)
+                                 , MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<out_channels>(32)
+          , ::testing::Values<group>(32)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 2))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_any_group, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 32, 64,  77))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1)
+                                    , MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1)
+                                 , MAKE_STRUCT(param_size, 0, 0)
+                                 , MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<out_channels>(32)
+          , ::testing::Values<group>(2, 4, 16)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 2), MAKE_STRUCT(param_size, 2, 3), MAKE_STRUCT(param_size, 3, 4)),
+            ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor,
+                                                vpu::LayoutPreference::ChannelMajor)
+        )
+);
+
+INSTANTIATE_TEST_CASE_P(set_optimization_for_3x3_with_group, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 24, 80, 80)
+                                       , MAKE_STRUCT(tensor_test_params, 1, 36, 80, 80))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1),
+                                      MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<out_channels>(24)
+          , ::testing::Values<group>(6)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 2), MAKE_STRUCT(param_size, 2, 3))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(set_optimization_for_3x3s1, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 24, 80, 80))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<out_channels>(1,2,3,4,5,6,7,8)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 2))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_1x1, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 16, 64, 64)
+                                       , MAKE_STRUCT(tensor_test_params, 1, 32, 1, 1))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1)
+                                    , MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0),
+                                   MAKE_STRUCT(param_size, 1, 1)
+                                  )
+          , ::testing::Values<out_channels>(16, 24)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 2))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_3x3, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 8, 16, 16)
+                                       , MAKE_STRUCT(tensor_test_params, 1, 8, 59, 73))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1)
+                                    , MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0)
+                                 , MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<out_channels>(8, 15/*, 20 failed for 3x3s2*/, 32)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 2))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_1x3, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 8, 59, 73))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 3), MAKE_STRUCT(param_size, 3, 1))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0)
+                                 , MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<out_channels>(7, 32)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 2))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_5x5, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 16, 32, 32)
+                                     /*, MAKE_STRUCT(tensor_test_params, 1, 8, 511, 399) failed*/)
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 5, 5))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1)
+                                    , MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0)
+                               /*, MAKE_STRUCT(param_size, 1, 1) failed*/
+                                 , MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<out_channels>(16, 32)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 2)),
+            ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor,
+                                                vpu::LayoutPreference::ChannelMajor)
+        )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_7x7, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 8, 32, 32)
+                                     /*, MAKE_STRUCT(tensor_test_params, 1, 8, 511, 399) failed*/)
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 7, 7))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1)
+                                    , MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0)
+                               /*, MAKE_STRUCT(param_size, 1, 1) failed*/
+                               /*, MAKE_STRUCT(param_size, 3, 3) failed*/)
+          , ::testing::Values<out_channels>(16, 32)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 2))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_3x3_large_input_1, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 3, 720, 1280))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<out_channels>(8)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 2))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_3x3_large_input_2, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 24, 357, 637))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<out_channels>(8)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 2))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_3x3_large_input_3, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 16, 359, 639))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<out_channels>(12)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 2))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_1x1_large_input, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 24, 355, 635))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<out_channels>(2, 3)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 2))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_small_input_0, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 128, 38, 38))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<out_channels>(6)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 2))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_small_input_1, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 256, 2, 3))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<out_channels>(8)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_small_input_2, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 256, 2, 2))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<out_channels>(8)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_small_input_3, myriadLayerConvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 256, 1, 1))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<out_channels>(84)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+          )
+);
+
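+// Smoke test: loads the MODEL_RFCNN sub-graph with generated weights, runs a
+// single inference, and fetches the two InceptionV2 Mixed_5a branch outputs.
+// It only verifies that the pipeline completes; the outputs are not checked
+// numerically.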
+TEST_F(myriadLayersTests_nightly, tests125) {
+    std::string outName1 = "SecondStageFeatureExtractor/InceptionV2/Mixed_5a/Branch_1/Conv2d_0b_3x3/Conv2D";
+    std::string outName2 = "SecondStageFeatureExtractor/InceptionV2/Mixed_5a/Branch_0/Conv2d_1a_3x3/Relu";
+    InferenceEngine::TBlob<uint8_t>::Ptr weights(GenWeights(1697280 / sizeof(ie_fp16)));
+    constWeightsRange(weights->data().as<uint16_t *>(), 1697280 / sizeof(ie_fp16));
+
+    StatusCode st;
+
+    InferenceEngine::Core ie;
+    auto network = ie.ReadNetwork(MODEL_RFCNN, weights);
+
+    auto inputsInfo = network.getInputsInfo();
+    inputsInfo["input"]->setPrecision(Precision::FP16);
+
+    auto outputsInfo = network.getOutputsInfo();
+    outputsInfo[outName1]->setPrecision(Precision::FP16);
+    outputsInfo[outName2]->setPrecision(Precision::FP16);
+
+    InferenceEngine::IExecutableNetwork::Ptr exeNetwork;
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(exeNetwork, network, {}, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    InferenceEngine::IInferRequest::Ptr     inferRequest;
+    ASSERT_NO_THROW(st = exeNetwork->CreateInferRequest(inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr input;
+    ASSERT_NO_THROW(st = inferRequest->GetBlob("input", input, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    genTestData(input);
+
+    ASSERT_NO_THROW(st = inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr out1;
+    Blob::Ptr out2;
+    ASSERT_NO_THROW(st = inferRequest->GetBlob(outName1.c_str(), out1, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NO_THROW(st = inferRequest->GetBlob(outName2.c_str(), out2, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+}
+
+// This test checks that the conv3x3s1 case doesn't corrupt its input.
+// To verify this, we run two identical convolutions on the same input
+// and check that they produce the same results. The identity Power layers
+// (power=1, scale=1, shift=0) give the network a shared copied input and
+// separately fetchable outputs for the two branches.
+TEST_F(myriadLayersTests_nightly, SmallConv_CorruptInputBug) {
+    const std::string model = R"V0G0N(
+        <Net name="SmallConv_CorruptInputBug" version="2" batch="1">
+            <layers>
+                <layer name="input" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>256</dim>
+                            <dim>1</dim>
+                            <dim>1</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="input_copy" type="Power" precision="FP16" id="2">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="2">
+                            <dim>1</dim>
+                            <dim>256</dim>
+                            <dim>1</dim>
+                            <dim>1</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="3">
+                            <dim>1</dim>
+                            <dim>256</dim>
+                            <dim>1</dim>
+                            <dim>1</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="conv1" type="Convolution" precision="FP16" id="3">
+                    <convolution_data stride-x="1" stride-y="1" pad-x="1" pad-y="1" kernel-x="3" kernel-y="3" output="84" group="1"/>
+                    <input>
+                        <port id="4">
+                            <dim>1</dim>
+                            <dim>256</dim>
+                            <dim>1</dim>
+                            <dim>1</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="5">
+                            <dim>1</dim>
+                            <dim>84</dim>
+                            <dim>1</dim>
+                            <dim>1</dim>
+                        </port>
+                    </output>
+                    <weights offset="0" size="387072"/>
+                    <biases offset="387072" size="168"/>
+                </layer>
+                <layer name="conv2" type="Convolution" precision="FP16" id="4">
+                    <convolution_data stride-x="1" stride-y="1" pad-x="1" pad-y="1" kernel-x="3" kernel-y="3" output="84" group="1"/>
+                    <input>
+                        <port id="6">
+                            <dim>1</dim>
+                            <dim>256</dim>
+                            <dim>1</dim>
+                            <dim>1</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="7">
+                            <dim>1</dim>
+                            <dim>84</dim>
+                            <dim>1</dim>
+                            <dim>1</dim>
+                        </port>
+                    </output>
+                    <weights offset="0" size="387072"/>
+                    <biases offset="387072" size="168"/>
+                </layer>
+                <layer name="conv1_out" type="Power" precision="FP16" id="5">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="8">
+                            <dim>1</dim>
+                            <dim>84</dim>
+                            <dim>1</dim>
+                            <dim>1</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="9">
+                            <dim>1</dim>
+                            <dim>84</dim>
+                            <dim>1</dim>
+                            <dim>1</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="conv2_out" type="Power" precision="FP16" id="6">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="10">
+                            <dim>1</dim>
+                            <dim>84</dim>
+                            <dim>1</dim>
+                            <dim>1</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="11">
+                            <dim>1</dim>
+                            <dim>84</dim>
+                            <dim>1</dim>
+                            <dim>1</dim>
+                        </port>
+                    </output>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="1" to-layer="2" to-port="2"/>
+                <edge from-layer="2" from-port="3" to-layer="3" to-port="4"/>
+                <edge from-layer="2" from-port="3" to-layer="4" to-port="6"/>
+                <edge from-layer="3" from-port="5" to-layer="5" to-port="8"/>
+                <edge from-layer="4" from-port="7" to-layer="6" to-port="10"/>
+            </edges>
+        </Net>
+    )V0G0N";
+
+    SetSeed(DEFAULT_SEED_VALUE);
+
+    size_t num_weights = 193536;
+    size_t num_bias = 84;
+
+    TBlob<uint8_t>::Ptr weightsBlob(GenWeights(num_weights + num_bias));
+    const ie_fp16 *weights = weightsBlob->readOnly().as<const ie_fp16 *>();
+    const ie_fp16 *bias = weights + num_weights;
+
+    StatusCode st;
+
+    ASSERT_NO_THROW(readNetwork(model, weightsBlob));
+
+    const auto& network = _cnnNetwork;
+
+    _inputsInfo = network.getInputsInfo();
+    _inputsInfo["input"]->setPrecision(Precision::FP16);
+
+    _outputsInfo = network.getOutputsInfo();
+    _outputsInfo["conv1_out"]->setPrecision(Precision::FP16);
+    _outputsInfo["conv2_out"]->setPrecision(Precision::FP16);
+
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, {}, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr input;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("input", input, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    {
+        ie_fp16 *dst = input->buffer().as<ie_fp16 *>();
+        for (size_t i = 0; i < input->size(); ++i) {
+            float val = static_cast<float>(std::rand()) / RAND_MAX;
+            dst[i] = PrecisionUtils::f32tof16(val);
+        }
+    }
+
+    ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr conv1;
+    ASSERT_NO_THROW(_inferRequest->GetBlob("conv1_out", conv1, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr conv2;
+    ASSERT_NO_THROW(_inferRequest->GetBlob("conv2_out", conv2, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
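+    // The graph computes the same convolution along two parallel branches that
+    // end in conv1_out and conv2_out, so the two outputs must match bit-exactly
+    // and each must also agree with the reference convolution below.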
+    {
+        SCOPED_TRACE("CompareCommonAbsolute with itself");
+        CompareCommonAbsolute(conv1, conv2, 0.0);
+    }
+
+    {
+        SCOPED_TRACE("CompareCommonAbsolute with reference");
+
+        _refBlob = make_shared_blob<ie_fp16>({Precision::FP16, conv1->getTensorDesc().getDims(), Layout::NHWC});
+        _refBlob->allocate();
+        ref_convolution(input, _refBlob, weights, bias, {3, 3}, {1, 1}, {1, 1}, 1);
+
+        CompareCommonAbsolute(conv1, _refBlob, 0.1);
+        CompareCommonAbsolute(conv2, _refBlob, 0.1);
+    }
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution_test.hpp
new file mode 100644 (file)
index 0000000..18e16ff
--- /dev/null
@@ -0,0 +1,672 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include "myriad_layers_reference_functions.hpp"
+#include "weights_for_convolution_test.h"
+
+#include "conv_ref.hpp"
+
+using std::tuple;
+using std::get;
+
+using namespace InferenceEngine;
+
+PRETTY_PARAM(kernel, param_size);
+PRETTY_PARAM(stride, param_size);
+PRETTY_PARAM(pad, param_size);
+PRETTY_PARAM(out_channels, int);
+PRETTY_PARAM(group, int);
+PRETTY_PARAM(dilation_factor, param_size);
+PRETTY_PARAM(layoutPreference, vpu::LayoutPreference);
+
+typedef myriadLayerTestBaseWithParam<tuple<DimsInput, kernel, stride, pad
+        , out_channels, group, dilation_factor, layoutPreference >> myriadLayerConvolution_nightly;
+
+typedef myriadLayerTestBaseWithParam<tuple<DimsInput, DimsOutput, kernel, stride, pad
+        , group, dilation_factor, layoutPreference >> myriadLayerConvolutionTensorFlow_nightly;
+
+TEST_P(myriadLayerConvolution_nightly, Convolution) {
+    tensor_test_params input_dims = get<0>(GetParam());
+    param_size kernel = get<1>(GetParam());
+    param_size stride = get<2>(GetParam());
+    param_size pad = get<3>(GetParam());
+    size_t out_channels = get<4>(GetParam());
+    size_t group = get<5>(GetParam());
+    param_size dilation_factor = get<6>(GetParam());
+    vpu::LayoutPreference layoutPreference = get<7>(GetParam());
+
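+    // Integer form of the dilated-convolution output size:
+    // out = floor((in + 2*pad - dilation*(kernel - 1) - 1) / stride) + 1.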
+    size_t out_w = (input_dims.w + 2 * pad.x - dilation_factor.x * (kernel.x - 1) - 1 + stride.x) / stride.x;
+    size_t out_h = (input_dims.h + 2 * pad.y - dilation_factor.y * (kernel.y - 1) - 1 + stride.y) / stride.y;
+
+    tensor_test_params output_dims = {1, out_channels, out_h, out_w};
+
+    SetInputTensor(input_dims);
+    SetOutputTensor(output_dims);
+
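+    // A grouped convolution holds kernel.x * kernel.y * (C_in / group) weights
+    // per output channel, plus one bias per output channel.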
+    size_t num_weights = kernel.x * kernel.y * (input_dims.c / group) * output_dims.c;
+    size_t num_bias = output_dims.c;
+
+    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr(GenWeights(num_weights + num_bias));
+    ie_fp16* weights = weights_ptr->data().as<ie_fp16*>();
+    ie_fp16* bias = weights + num_weights;
+
+    std::map<std::string, std::string> layer_params = {
+              {"kernel-x", std::to_string(kernel.x)}
+            , {"kernel-y", std::to_string(kernel.y)}
+            , {"stride-x", std::to_string(stride.x)}
+            , {"stride-y", std::to_string(stride.y)}
+            , {"pad-x", std::to_string(pad.x)}
+            , {"pad-y", std::to_string(pad.y)}
+            , {"output", std::to_string(out_channels)}
+            , {"group", std::to_string(group)}
+            , {"dilation-x", std::to_string(dilation_factor.x)}
+            , {"dilation-y", std::to_string(dilation_factor.y)}
+    };
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Convolution")
+                                        .params(layer_params)
+                                        .weights(num_weights)
+                                        .biases(num_bias),
+                                        {},
+                                        weights_ptr));
+    SetFirstInputToRange(-0.9f, 0.9f);
+
+    ASSERT_TRUE(Infer());
+    auto inputBlob = _inputMap.begin()->second;
+    auto outputBlob = _outputMap.begin()->second;
+
+    ref_convolution(inputBlob, _refBlob, weights, bias, kernel, stride, pad, group, dilation_factor);
+
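+    // FP16 accumulation error grows with the number of summed products per
+    // output element (input channels per group * kernel area), so the
+    // tolerance scales with it.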
+    float maxerr = 0;
+
+    if (group == 1)
+        maxerr = 0.00055f * input_dims.c * kernel.x * kernel.y;
+    else // TODO: currently depthwise convolution is slightly less accurate
+        maxerr = 0.00066f * (input_dims.c / group) * kernel.x * kernel.y;
+
+    CompareCommonAbsolute(outputBlob, _refBlob, maxerr);
+}
+
+TEST_P(myriadLayerConvolutionTensorFlow_nightly, Convolution) {
+    tensor_test_params input_dims = get<0>(GetParam());
+    tensor_test_params output_dims = get<1>(GetParam());
+    param_size kernel = get<2>(GetParam());
+    param_size stride = get<3>(GetParam());
+    param_size pad = get<4>(GetParam());
+    size_t group = get<5>(GetParam());
+    param_size dilation_factor = get<6>(GetParam());
+
+    SetInputTensor(input_dims);
+    SetOutputTensor(output_dims);
+
+    size_t num_weights = kernel.x * kernel.y * (input_dims.c / group) * output_dims.c;
+    size_t num_bias = output_dims.c;
+
+    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr(GenWeights(num_weights + num_bias));
+    ie_fp16* weights = weights_ptr->data().as<ie_fp16*>();
+    ie_fp16* bias = weights + num_weights;
+
+    std::map<std::string, std::string> layer_params = {
+              {"kernel-x", std::to_string(kernel.x)}
+            , {"kernel-y", std::to_string(kernel.y)}
+            , {"stride-x", std::to_string(stride.x)}
+            , {"stride-y", std::to_string(stride.y)}
+            , {"pad-x", std::to_string(pad.x)}
+            , {"pad-y", std::to_string(pad.y)}
+            , {"output", std::to_string(output_dims.c)}
+            , {"group", std::to_string(group)}
+            , {"dilation-x", std::to_string(dilation_factor.x)}
+            , {"dilation-y", std::to_string(dilation_factor.y)}
+    };
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Convolution")
+                                        .params(layer_params)
+                                        .weights(num_weights)
+                                        .biases(num_bias),
+                                        {},
+                                        weights_ptr));
+    SetFirstInputToRange(-0.9f, 0.9f);
+    ASSERT_TRUE(Infer());
+    auto inputBlob = _inputMap.begin()->second;
+    auto outputBlob = _outputMap.begin()->second;
+
+    ref_convolution(inputBlob, _refBlob, weights, bias, kernel, stride, pad, group, dilation_factor);
+
+    float maxerr = 0.00055f * (input_dims.c / group) * kernel.x * kernel.y;
+
+    CompareCommonAbsolute(outputBlob, _refBlob, maxerr);
+}
+
+void FillWeights(uint16_t* ptr, size_t weightsSize) {
+    ASSERT_NE(ptr, nullptr);
+    const auto szW = sizeof(s_3X3X3YOLO_Weights)/sizeof(s_3X3X3YOLO_Weights[0]);
+    ASSERT_EQ(szW, weightsSize);
+    for (size_t indx = 0; indx < szW; ++indx) {
+        ptr[indx] = PrecisionUtils::f32tof16(s_3X3X3YOLO_Weights[indx]);
+    }
+}
+void FillBiases(uint16_t* ptr, size_t biasSize) {
+    ASSERT_NE(ptr, nullptr);
+    const auto szB = sizeof(s_3X3X3YOLO_Biases)/sizeof(s_3X3X3YOLO_Biases[0]);
+    ASSERT_EQ(szB, biasSize);
+    for (size_t indx = 0; indx < szB; ++indx) {
+        ptr[indx] = PrecisionUtils::f32tof16(s_3X3X3YOLO_Biases[indx]);
+    }
+}
+
+void loadConstData(InferenceEngine::Blob::Ptr blob) {
+    /* input blob has a predefined size and CHW layout */
+    ASSERT_NE(blob, nullptr);
+    uint16_t *inputBlobRawDataFp16 = static_cast<uint16_t *>(blob->buffer());
+    ASSERT_NE(inputBlobRawDataFp16, nullptr);
+
+    for (size_t indx = 0; indx < blob->size(); indx++) {
+        inputBlobRawDataFp16[indx] = PrecisionUtils::f32tof16(128.0f);
+    }
+}
+
+class myriadLayers_3X3X3_ConstInput_nightly : public ConvolutionTest<vpu::LayoutPreference> {
+};
+
+TEST_P(myriadLayers_3X3X3_ConstInput_nightly, Convolution) {
+    auto p = ::testing::WithParamInterface<std::tuple<InferenceEngine::SizeVector, param_size, param_size, param_size, uint32_t, uint32_t, vpu::LayoutPreference>>::GetParam();
+    const auto layoutPreference = std::get<6>(p);
+
+    _testNet.setWeightsCallbackForLayer(0, FillWeights);
+    _testNet.setBiasesCallbackForLayer(0, FillBiases);
+    _genDataCallback = loadConstData;
+    ASSERT_TRUE(generateNetAndInfer( NetworkInitParams().layoutPreference(layoutPreference) ));
+    auto outputBlob = _outputMap.begin()->second;
+    const uint16_t *res_ptr = outputBlob->buffer().as<const uint16_t*>();
+    size_t res_size = outputBlob->size();
+
+    size_t N = outputBlob->getTensorDesc().getDims()[0];
+    size_t C = outputBlob->getTensorDesc().getDims()[1];
+    size_t H = outputBlob->getTensorDesc().getDims()[2];
+    size_t W = outputBlob->getTensorDesc().getDims()[3];
+
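+    // With a constant input and fixed weights, every interior output pixel
+    // (away from the padded border) must be identical, so each one is compared
+    // against the single reference element at (h=1, w=1).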
+    for (size_t n = 0; n < N; n++) {
+        for (size_t c = 0; c < C; c++) {
+            auto ref_offs = outputBlob->getTensorDesc().getLayout() == NCHW ?
+                            1 + 1*W + c*W*H + n*W*H*C : c + 1*C + 1*C*W + n*W*H*C;
+            float ref_val = PrecisionUtils::f16tof32(res_ptr[ref_offs]);
+            for (size_t h = 1; h < H - 1; h++) {
+                for (size_t w = 1; w < W - 1; w++) {
+                    size_t actualIdx = outputBlob->getTensorDesc().getLayout() == NCHW ?
+                                       w + h*W + c*W*H + n*W*H*C : c + w*C + h*C*W + n*W*H*C;
+                    float cur_val = PrecisionUtils::f16tof32(res_ptr[actualIdx]);
+                    ASSERT_FLOAT_EQ(cur_val, ref_val);
+                }
+            }
+        }
+    }
+    /* to check max error */
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), 0.02);
+}
+
+/* IR version 3 tests; the main difference is the changed padding parameter definitions */
+typedef std::tuple<InferenceEngine::SizeVector, param_size, param_size, param_size, param_size, uint32_t, uint32_t> IR3_params;
+
+class myriadLayers_IR3_ConvTests_nightly: public myriadLayersTests_nightly, /*input tensor, kernel, stride, pads_begin, pads_end, out_channel, group */
+                                          public testing::WithParamInterface<IR3_params> {
+};
+
+TEST_P(myriadLayers_IR3_ConvTests_nightly, Conv) {
+    std::map<std::string, std::string> params;
+    InferenceEngine::SizeVector output_tensor;
+    int32_t IW = 0;
+    int32_t IH = 0;
+    int32_t IC = 0;
+    int32_t I_N = 0;
+    size_t  group = 0;
+
+
+    auto p = ::testing::WithParamInterface<IR3_params>::GetParam();
+    auto input_tensor = std::get<0>(p);
+    param_size kernel = std::get<1>(p);
+    param_size stride = std::get<2>(p);
+    param_size pads_begin = std::get<3>(p);
+    param_size pads_end = std::get<4>(p);
+    size_t out_channels = std::get<5>(p);
+    group = std::get<6>(p);
+    get_dims(input_tensor, IW, IH, IC, I_N);
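+    // Batch detection is disabled for batched inputs, presumably so the plugin
+    // keeps the explicit batch dimension rather than trying to infer it.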
+    if (I_N > 1)
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+    else
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(YES);
+    size_t out_w = (IW + pads_begin.x + pads_end.x - kernel.x + stride.x) / stride.x;
+    size_t out_h = (IH + pads_begin.y + pads_end.y - kernel.y + stride.y) / stride.y;
+    gen_dims(output_tensor, input_tensor.size(), out_w, out_h, out_channels, I_N);
+
+    size_t num_weights = kernel.x * kernel.y * (IC / group) * out_channels;
+    size_t num_bias    = out_channels;
+
+    std::string padsB   = gen_param(pads_begin);
+    std::string padsE   = gen_param(pads_end);
+    std::string strides = gen_param(stride);
+    std::string kern    = gen_param(kernel);
+
+    std::map<std::string, std::string> layer_params = {
+              {"kernel",     kern}
+            , {"strides",    strides}
+            , {"pads_begin", padsB}
+            , {"pads_end",   padsE}
+            , {"output", std::to_string(out_channels)}
+            , {"group", std::to_string(group)}
+            , {"auto_pad", "same_upper"}
+            , {"dilations", "1,1"}
+    };
+    _testNet.addLayer(LayerInitParams("Convolution")
+              .params(layer_params)
+              .in({input_tensor})
+              .out({output_tensor})
+              .weights(num_weights).fillWeights(defaultWeightsRange)
+              .biases(num_bias).fillBiases(defaultWeightsRange),
+             ref_convolution_wrap);
+    ASSERT_TRUE(generateNetAndInfer( NetworkInitParams().useHWOpt( CheckMyriadX() ) ));
+    float maxerr = 0.0009f * (IC / group) * kernel.x * kernel.y;
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), maxerr);
+}
+
+class myriadLayers_BatchTest_ConvTests_nightly: public myriadLayersTests_nightly, /*input tensor, kernel, stride, pads_begin, pads_end, out_channel, group */
+                                                public testing::WithParamInterface<IR3_params> {
+};
+
+class myriadLayers_BatchTest2_ConvTests_nightly: public myriadLayersTests_nightly, /*input tensor, kernel, stride, pads_begin, pads_end, out_channel, group */
+                                                 public testing::WithParamInterface<IR3_params> {
+};
+
+void constWeightsRange(uint16_t* ptr, size_t weightsSize) {
+    ASSERT_NE(ptr, nullptr);
+    float shft = 0.0011f;
+    float val = 0.125f;
+    for (size_t count = 0; count < weightsSize; ++count) {
+        ptr[count] = PrecisionUtils::f32tof16(val);
+        val += shft;
+        if (val > 1.0f)
+            val = -1.0f;
+    }
+}
+
+static void genTestData(InferenceEngine::Blob::Ptr blob) {
+    ASSERT_NE(blob, nullptr);
+    Layout layout = blob->getTensorDesc().getLayout();
+    SizeVector dims = blob->getTensorDesc().getDims();
+
+    ie_fp16* ptr = blob->buffer().as<ie_fp16*>();
+    if (layout == NCHW || layout == NHWC) {
+        size_t N = dims[0];
+        size_t C = dims[1];
+        size_t H = dims[2];
+        size_t W = dims[3];
+
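+        // Fill with a sawtooth ramp over [-0.9, 0.9]; the flat offset below
+        // addresses the same (n, c, h, w) element for either NCHW or NHWC.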
+        float counter = 0.125f;
+        for (size_t n = 0; n < N; n++) {
+            for (size_t c = 0; c < C; c++) {
+                for (size_t h = 0; h < H; h++) {
+                    for (size_t w = 0; w < W; w++) {
+                        size_t actualIdx = layout == NCHW
+                                           ? w + h * W + c * W * H + n * W * H * C
+                                           : c + w * C + h * C * W + n * W * H * C;
+                        ptr[actualIdx] = PrecisionUtils::f32tof16(counter);
+                        counter += 0.025f;
+                        if (counter > .90f) {
+                            counter = -.90f;
+                        }
+                    }
+                }
+            }
+        }
+    } else {
+        FAIL() << "genTestData supports only NCHW and NHWC layouts";
+    }
+}
+
+TEST_P(myriadLayers_BatchTest_ConvTests_nightly, Conv) {
+    std::map<std::string, std::string> params;
+    InferenceEngine::SizeVector output_tensor;
+    int32_t IW = 0;
+    int32_t IH = 0;
+    int32_t IC = 0;
+    int32_t I_N = 0;
+    size_t  group = 0;
+
+    auto p = ::testing::WithParamInterface<IR3_params>::GetParam();
+    auto input_tensor = std::get<0>(p);
+    param_size kernel = std::get<1>(p);
+    param_size stride = std::get<2>(p);
+    param_size pads_begin = std::get<3>(p);
+    param_size pads_end = std::get<4>(p);
+    size_t out_channels = std::get<5>(p);
+    group = std::get<6>(p);
+    get_dims(input_tensor, IW, IH, IC, I_N);
+    if (I_N > 1)
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+    else
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(YES);
+    size_t out_w = (IW + pads_begin.x + pads_end.x - kernel.x + stride.x) / stride.x;
+    size_t out_h = (IH + pads_begin.y + pads_end.y - kernel.y + stride.y) / stride.y;
+    gen_dims(output_tensor, input_tensor.size(), out_w, out_h, out_channels, I_N);
+
+    size_t num_weights = kernel.x * kernel.y * (IC / group) * out_channels;
+    size_t num_bias    = out_channels;
+
+    std::string padsB   = gen_param(pads_begin);
+    std::string padsE   = gen_param(pads_end);
+    std::string strides = gen_param(stride);
+    std::string kern    = gen_param(kernel);
+
+    std::map<std::string, std::string> layer_params = {
+              {"kernel",     kern}
+            , {"strides",    strides}
+            , {"pads_begin", padsB}
+            , {"pads_end",   padsE}
+            , {"output", std::to_string(out_channels)}
+            , {"group", std::to_string(group)}
+            , {"auto_pad", "same_upper"}
+            , {"dilations", "1,1"}
+    };
+    _genDataCallback = genTestData;
+    _testNet.addLayer( LayerInitParams("Convolution")
+           .params(layer_params)
+           .in({input_tensor})
+           .out({output_tensor})
+           .weights(num_weights).fillWeights(defaultWeightsRange)
+           .biases(num_bias).fillBiases(defaultWeightsRange),
+             ref_convolution_wrap);
+
+    ASSERT_TRUE(generateNetAndInfer( NetworkInitParams().useHWOpt( CheckMyriadX() ).layoutPreference(vpu::LayoutPreference::ChannelMinor) ));
+
+    float maxerr = 0.0009f * (IC / group) * kernel.x * kernel.y;
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), maxerr);
+}
+
+static const std::string MODEL_RFCNN = R"V0G0N(
+<net name="MODEL_TEST" version="3" batch="10">
+    <layers>
+        <layer id="0" name="input" precision="FP16" type="Input">
+                       <output>
+                               <port id="0">
+                                       <dim>10</dim>
+                                       <dim>576</dim>
+                                       <dim>14</dim>
+                                       <dim>14</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="142" name="MaxPool2D/MaxPool" precision="FP16" type="Pooling">
+                       <data auto_pad="valid" exclude-pad="true" kernel="2,2" pads_begin="0,0" pads_end="0,0" pool-method="max" strides="2,2"/>
+                       <input>
+                               <port id="0">
+                                       <dim>10</dim>
+                                       <dim>576</dim>
+                                       <dim>14</dim>
+                                       <dim>14</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>10</dim>
+                                       <dim>576</dim>
+                                       <dim>7</dim>
+                                       <dim>7</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="143" name="SecondStageFeatureExtractor/InceptionV2/Mixed_5a/Branch_0/Conv2d_0a_1x1/Conv2D" precision="FP16" type="Convolution">
+                       <data auto_pad="same_upper" dilations="1,1" group="1" kernel="1,1" output="128" pads_begin="0,0" pads_end="0,0" strides="1,1"/>
+                       <input>
+                               <port id="0">
+                                       <dim>10</dim>
+                                       <dim>576</dim>
+                                       <dim>7</dim>
+                                       <dim>7</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="3">
+                                       <dim>10</dim>
+                                       <dim>128</dim>
+                                       <dim>7</dim>
+                                       <dim>7</dim>
+                               </port>
+                       </output>
+                       <blobs>
+                               <weights offset="0" size="147456"/>
+                               <biases offset="147456" size="256"/>
+                       </blobs>
+               </layer>
+               <layer id="144" name="SecondStageFeatureExtractor/InceptionV2/Mixed_5a/Branch_0/Conv2d_0a_1x1/Relu" precision="FP16" type="ReLU">
+                       <input>
+                               <port id="0">
+                                       <dim>10</dim>
+                                       <dim>128</dim>
+                                       <dim>7</dim>
+                                       <dim>7</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>10</dim>
+                                       <dim>128</dim>
+                                       <dim>7</dim>
+                                       <dim>7</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="145" name="SecondStageFeatureExtractor/InceptionV2/Mixed_5a/Branch_0/Conv2d_1a_3x3/Conv2D" precision="FP16" type="Convolution">
+                       <data auto_pad="same_upper" dilations="1,1" group="1" kernel="3,3" output="192" pads_begin="1,1" pads_end="1,1" strides="2,2"/>
+                       <input>
+                               <port id="0">
+                                       <dim>10</dim>
+                                       <dim>128</dim>
+                                       <dim>7</dim>
+                                       <dim>7</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="3">
+                                       <dim>10</dim>
+                                       <dim>192</dim>
+                                       <dim>4</dim>
+                                       <dim>4</dim>
+                               </port>
+                       </output>
+                       <blobs>
+                               <weights offset="147712" size="442368"/>
+                               <biases offset="590080" size="384"/>
+                       </blobs>
+               </layer>
+               <layer id="146" name="SecondStageFeatureExtractor/InceptionV2/Mixed_5a/Branch_0/Conv2d_1a_3x3/Relu" precision="FP16" type="ReLU">
+                       <input>
+                               <port id="0">
+                                       <dim>10</dim>
+                                       <dim>192</dim>
+                                       <dim>4</dim>
+                                       <dim>4</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>10</dim>
+                                       <dim>192</dim>
+                                       <dim>4</dim>
+                                       <dim>4</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="147" name="SecondStageFeatureExtractor/InceptionV2/Mixed_5a/Branch_1/Conv2d_0a_1x1/Conv2D" precision="FP16" type="Convolution">
+                       <data auto_pad="same_upper" dilations="1,1" group="1" kernel="1,1" output="192" pads_begin="0,0" pads_end="0,0" strides="1,1"/>
+                       <input>
+                               <port id="0">
+                                       <dim>10</dim>
+                                       <dim>576</dim>
+                                       <dim>7</dim>
+                                       <dim>7</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="3">
+                                       <dim>10</dim>
+                                       <dim>192</dim>
+                                       <dim>7</dim>
+                                       <dim>7</dim>
+                               </port>
+                       </output>
+                       <blobs>
+                               <weights offset="590464" size="221184"/>
+                               <biases offset="811648" size="384"/>
+                       </blobs>
+               </layer>
+               <layer id="148" name="SecondStageFeatureExtractor/InceptionV2/Mixed_5a/Branch_1/Conv2d_0a_1x1/Relu" precision="FP16" type="ReLU">
+                       <input>
+                               <port id="0">
+                                       <dim>10</dim>
+                                       <dim>192</dim>
+                                       <dim>7</dim>
+                                       <dim>7</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>10</dim>
+                                       <dim>192</dim>
+                                       <dim>7</dim>
+                                       <dim>7</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="149" name="SecondStageFeatureExtractor/InceptionV2/Mixed_5a/Branch_1/Conv2d_0b_3x3/Conv2D" precision="FP16" type="Convolution">
+                       <data auto_pad="same_upper" dilations="1,1" group="1" kernel="3,3" output="256" pads_begin="1,1" pads_end="1,1" strides="1,1"/>
+                       <input>
+                               <port id="0">
+                                       <dim>10</dim>
+                                       <dim>192</dim>
+                                       <dim>7</dim>
+                                       <dim>7</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="3">
+                                       <dim>10</dim>
+                                       <dim>256</dim>
+                                       <dim>7</dim>
+                                       <dim>7</dim>
+                               </port>
+                       </output>
+                       <blobs>
+                               <weights offset="812032" size="884736"/>
+                               <biases offset="1696768" size="512"/>
+                       </blobs>
+               </layer>
+        </layers>
+        <edges>
+            <edge from-layer="0" from-port="0" to-layer="142" to-port="0"/>
+            <edge from-layer="142" from-port="1" to-layer="143" to-port="0"/>
+            <edge from-layer="143" from-port="3" to-layer="144" to-port="0"/>
+            <edge from-layer="144" from-port="1" to-layer="145" to-port="0"/>
+            <edge from-layer="145" from-port="3" to-layer="146" to-port="0"/>
+            <edge from-layer="142" from-port="1" to-layer="147" to-port="0"/>
+            <edge from-layer="147" from-port="3" to-layer="148" to-port="0"/>
+            <edge from-layer="148" from-port="1" to-layer="149" to-port="0"/>
+        </edges>
+    </net>
+)V0G0N";
+
+TEST_P(myriadLayers_BatchTest2_ConvTests_nightly, Conv) {
+    std::map<std::string, std::string> params;
+    InferenceEngine::SizeVector output_tensor;
+    int32_t IW = 0;
+    int32_t IH = 0;
+    int32_t IC = 0;
+    int32_t I_N = 0;
+    size_t  group = 0;
+
+    auto p = ::testing::WithParamInterface<IR3_params>::GetParam();
+    auto input_tensor = std::get<0>(p);
+    param_size kernel = std::get<1>(p);
+    param_size stride = std::get<2>(p);
+    param_size pads_begin = std::get<3>(p);
+    param_size pads_end = std::get<4>(p);
+    size_t out_channels = std::get<5>(p);
+    group = std::get<6>(p);
+    get_dims(input_tensor, IW, IH, IC, I_N);
+    if (I_N > 1)
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+    else
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(YES);
+    size_t out_w = (IW + pads_begin.x + pads_end.x - kernel.x + stride.x) / stride.x;
+    size_t out_h = (IH + pads_begin.y + pads_end.y - kernel.y + stride.y) / stride.y;
+    gen_dims(output_tensor, input_tensor.size(), out_w, out_h, out_channels, I_N);
+
+    size_t num_weights = kernel.x * kernel.y * (IC / group) * out_channels;
+    size_t num_bias    = out_channels;
+
+    std::string padsB   = gen_param(pads_begin);
+    std::string padsE   = gen_param(pads_end);
+    std::string strides = gen_param(stride);
+    std::string kern    = gen_param(kernel);
+
+    std::map<std::string, std::string> layer_params = {
+              {"kernel",     kern}
+            , {"strides",    strides}
+            , {"pads_begin", padsB}
+            , {"pads_end",   padsE}
+            , {"output", std::to_string(out_channels)}
+            , {"group", std::to_string(group)}
+            , {"auto_pad", "same_upper"}
+            , {"dilations", "1,1"}
+    };
+    _genDataCallback = genTestData;
+    _testNet.addLayer(LayerInitParams("Convolution")
+               .params(layer_params)
+               .in({input_tensor})
+               .out({output_tensor})
+               .weights(num_weights).fillWeights(constWeightsRange)
+               .biases(num_bias).fillBiases(constWeightsRange),
+             ref_convolution_wrap);
+    _testNet.addLayer(LayerInitParams("ReLU")
+             .in({output_tensor})
+             .out({output_tensor}),
+             ref_ReLU_wrap);
+
+    std::map<std::string, std::string> conv2_params = {
+              {"kernel",     "3,3"}
+            , {"strides",    "1,1"}
+            , {"pads_begin", "1,1"}
+            , {"pads_end",   "1,1"}
+            , {"output", "256"}
+            , {"group", "1"}
+            , {"auto_pad", "same_upper"}
+            , {"dilations", "1,1"}
+    };
+    _testNet.addLayer(LayerInitParams("Convolution")
+             .params(conv2_params)
+             .in({output_tensor})
+             .out({{10, 256, 7, 7}})
+             .weights(442368).fillWeights(constWeightsRange)
+             .biases(256).fillBiases(constWeightsRange),
+             ref_convolution_wrap);
+    _testNet.addLayer(LayerInitParams("ReLU")
+             .in({{10, 256, 7, 7}})
+             .out({{10, 256, 7, 7}}),
+             ref_ReLU_wrap);
+
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams().useHWOpt( CheckMyriadX()) ));
+    // Error is calculated for sum of 2 convolutions
+    float maxerr = 0.001f * (IC + 256) * kernel.x * kernel.y * 9;
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), maxerr);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_copy_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_copy_test.cpp
new file mode 100644 (file)
index 0000000..f5c70a5
--- /dev/null
@@ -0,0 +1,13 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_copy_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayerCopy_nightly,
+        ::testing::Combine(
+            ::testing::Values<NDims>(MAKE_STRUCT(nd_tensor_test_params, {36, 19, 20, 21})
+                                   , MAKE_STRUCT(nd_tensor_test_params, {7, 8, 5, 12})
+                                   , MAKE_STRUCT(nd_tensor_test_params, {196, 12, 20, 5}))
+          , ::testing::Values<int>(2, 3, 4)
+                        ));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_copy_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_copy_test.hpp
new file mode 100644 (file)
index 0000000..bf3f4cc
--- /dev/null
@@ -0,0 +1,50 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include "myriad_layers_reference_functions.hpp"
+#include <algorithm>
+#include "ie_memcpy.h"
+
+using std::tuple;
+using std::get;
+
+using namespace InferenceEngine;
+
+PRETTY_PARAM(NDims, nd_tensor_test_params);
+
+typedef myriadLayerTestBaseWithParam<tuple<NDims, int>> myriadLayerCopy_nightly;
+
+TEST_P(myriadLayerCopy_nightly, Copy) {
+    nd_tensor_test_params input_dims = get<0>(GetParam());
+    int ndims = get<1>(GetParam());
+
+    IN_OUT_desc inputTensors;
+    IN_OUT_desc outputTensors;
+    outputTensors.resize(1);
+    inputTensors.resize(1);
+    inputTensors[0].resize(ndims);
+    outputTensors[0].resize(ndims);
+
+    for (int i = 0; i < ndims; i++) {
+        inputTensors[0][i] = input_dims.dims[i];
+        outputTensors[0][i] = input_dims.dims[i];
+    }
+
+    SetInputTensors(inputTensors);
+    SetOutputTensors(outputTensors);
+
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Copy")));
+    SetFirstInputToRange(1.0f, 100.0f);
+
+    ASSERT_TRUE(Infer());
+    auto inputBlob = _inputMap.begin()->second;
+    auto outputBlob = _outputMap.begin()->second;
+
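+    // Copy must be a bit-exact passthrough, hence the zero tolerance.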
+    CompareCommonAbsolute(outputBlob, inputBlob, 0);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_crop_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_crop_test.cpp
new file mode 100644 (file)
index 0000000..fd28beb
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_crop_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+    accuracy_Crop, myriadLayerCropOneInputAndDim_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_tileTensors1),
+        ::testing::ValuesIn(s_tileTensors2),
+        ::testing::ValuesIn(s_tileCropAxis),
+        ::testing::ValuesIn(s_tileOffset),
+        ::testing::ValuesIn(s_tileDim))
+);
+
+INSTANTIATE_TEST_CASE_P(
+    accuracy_Crop1, myriadLayerCropOneInput_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_tileTensors1),
+        ::testing::ValuesIn(s_tileTensors2),
+        ::testing::ValuesIn(s_tileCropAxis),
+        ::testing::ValuesIn(s_tileCropBegin),
+        ::testing::ValuesIn(s_tileCropEnd))
+);
+
+INSTANTIATE_TEST_CASE_P(
+    accuracy_Crop2, myriadLayerCropTwoInputs_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_tileTensors1),
+        ::testing::ValuesIn(s_tileTensors2),
+        ::testing::ValuesIn(s_tileTensors2),
+        ::testing::ValuesIn(s_tileCropAxis),
+        ::testing::ValuesIn(s_tileOffset))
+);
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_crop_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_crop_test.hpp
new file mode 100644 (file)
index 0000000..c9d1360
--- /dev/null
@@ -0,0 +1,197 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+
+using namespace InferenceEngine;
+
+#define ERROR_BOUND (0.0f)
+
+PRETTY_PARAM(crop_axis, InferenceEngine::SizeVector)
+PRETTY_PARAM(offset, InferenceEngine::SizeVector)
+PRETTY_PARAM(dim, InferenceEngine::SizeVector)
+PRETTY_PARAM(crop_begin, InferenceEngine::SizeVector)
+PRETTY_PARAM(crop_end, InferenceEngine::SizeVector)
+
+typedef myriadLayerTestBaseWithParam<std::tuple<Dims, Dims, crop_axis, offset, dim >> myriadLayerCropOneInputAndDim_nightly;
+typedef myriadLayerTestBaseWithParam<std::tuple<Dims, Dims, crop_axis, crop_begin, crop_end >> myriadLayerCropOneInput_nightly;
+typedef myriadLayerTestBaseWithParam<std::tuple<Dims, Dims, Dims, crop_axis, offset >> myriadLayerCropTwoInputs_nightly;
+
+static void ref_crop(const Blob::Ptr src,
+                     Blob::Ptr dst,
+                     InferenceEngine::SizeVector& axis,
+                     InferenceEngine::SizeVector& offset) {
+
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+    int32_t IW;
+    int32_t IH;
+    int32_t IC;
+    int32_t OW = 1;
+    int32_t OH = 1;
+    int32_t OC = 1;
+
+    get_dims(src, IW, IH, IC);
+    get_dims(dst, OW, OH, OC);
+    int32_t IW_off = 0;
+    int32_t IH_off = 0;
+    int32_t IC_off = 0;
+
+    for (size_t i = 0; i < axis.size(); ++i) {
+        switch(axis[i]){
+            case 1:
+                IC_off = offset[i];
+                break;
+            case 2:
+                IH_off = offset[i];
+                break;
+            case 3:
+                IW_off = offset[i];
+                break;
+        }
+    }
+    auto real_W = std::min(OW, IW - IW_off);
+    auto real_H = std::min(OH, IH - IH_off);
+    auto real_C = std::min(OC, IC - IC_off);
+    const uint16_t *src_data = src->buffer();
+    uint16_t *dst_data = dst->buffer();
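+    // Copy the cropped window. Axes 1/2/3 map to the C/H/W offsets above; the
+    // flat index math assumes channel-minor (HWC) storage, which matches the
+    // ChannelMinor layout preference used by these tests.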
+    for (int32_t w = 0; w < real_W; ++w) {
+        for (int32_t h = 0; h < real_H; ++h) {
+            for (int32_t c = 0; c < real_C; ++c) {
+                int32_t inp_ind = (c + IC_off) + IC * ((w + IW_off) + (h + IH_off)* IW);
+                int32_t out_ind = c + OC * (w  + h * OW);
+                dst_data[out_ind] = src_data[inp_ind];
+            }
+        }
+    }
+}
+
+TEST_P(myriadLayerCropOneInputAndDim_nightly, CropWithOneInputAndDim) {
+    auto param = GetParam();
+    tensor_test_params tensor1 = std::get<0>(param);
+    tensor_test_params tensor2 = std::get<1>(param);
+    InferenceEngine::SizeVector axis_val = std::get<2>(param);
+    InferenceEngine::SizeVector offsets = std::get<3>(param);
+    InferenceEngine::SizeVector dims = std::get<4>(param);
+    InferenceEngine::SizeVector input_dim1 = {tensor1.n, tensor1.c, tensor1.h, tensor1.w};
+    InferenceEngine::SizeVector input_dim2 = {tensor2.n, tensor2.c, tensor2.h, tensor2.w};
+    ASSERT_EQ(axis_val.size(), offsets.size());
+    ASSERT_EQ(axis_val.size(), dims.size());
+    std::string axis;
+    std::string offset;
+    std::string dim;
+    std::map<std::string, std::string> params;
+
+    for (size_t i = 0; i < axis_val.size(); ++i) {
+        axis += std::to_string(axis_val[i]) +",";
+        offset += std::to_string(offsets[i]) +",";
+        dim += std::to_string(dims[i]) +",";
+    }
+    params["dim"] = dim;
+    params["axis"] = axis;
+    params["offset"] = offset;
+    SetInputTensors({input_dim1});
+    SetOutputTensors({input_dim2});
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Crop").params(params), NetworkInitParams().layoutPreference(vpu::LayoutPreference::ChannelMinor)));
+    ASSERT_TRUE(Infer());
+    ref_crop(_inputMap.begin()->second, _refBlob, axis_val, offsets);
+    CompareCommonAbsolute(_outputMap.begin()->second, _refBlob, ERROR_BOUND);
+}
+
+TEST_P(myriadLayerCropOneInput_nightly, CropWithOneInput) {
+    auto param = GetParam();
+    tensor_test_params tensor1 = std::get<0>(param);
+    tensor_test_params tensor2 = std::get<1>(param);
+    InferenceEngine::SizeVector axis_val = std::get<2>(param);
+    InferenceEngine::SizeVector crop_begin_val = std::get<3>(param);
+    InferenceEngine::SizeVector crop_end_val = std::get<4>(param);
+    InferenceEngine::SizeVector input_dim1 = {tensor1.n, tensor1.c, tensor1.h, tensor1.w};
+    InferenceEngine::SizeVector input_dim2 = {tensor2.n, tensor2.c, tensor2.h, tensor2.w};
+
+    ASSERT_EQ(axis_val.size(), crop_begin_val.size());
+    ASSERT_EQ(axis_val.size(), crop_end_val.size());
+    std::string axis;
+    std::string crop_begin;
+    std::map<std::string, std::string> params;
+
+    for (size_t i = 0; i < axis_val.size(); ++i) {
+        axis += std::to_string(axis_val[i]) +",";
+        crop_begin += std::to_string(crop_begin_val[i]) +",";
+    }
+
+    params["axis"] = axis;
+    params["offset"] = crop_begin;
+    SetInputTensors({input_dim1});
+    SetOutputTensors({input_dim2});
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Crop").params(params), NetworkInitParams().layoutPreference(vpu::LayoutPreference::ChannelMinor)));
+    ASSERT_TRUE(Infer());
+
+    ref_crop(_inputMap.begin()->second, _refBlob, axis_val, crop_begin_val);
+    CompareCommonAbsolute(_outputMap.begin()->second, _refBlob, ERROR_BOUND);
+}
+
+TEST_P(myriadLayerCropTwoInputs_nightly, CropWithTwoInputs) {
+    auto param = GetParam();
+    tensor_test_params tensor1 = std::get<0>(param);
+    tensor_test_params tensor2 = std::get<1>(param);
+    tensor_test_params tensor3 = std::get<2>(param);
+    InferenceEngine::SizeVector axis_val = std::get<3>(param);
+    InferenceEngine::SizeVector offsets = std::get<4>(param);
+    InferenceEngine::SizeVector input_dim1 = {tensor1.n, tensor1.c, tensor1.h, tensor1.w};
+    InferenceEngine::SizeVector input_dim2 = {tensor2.n, tensor2.c, tensor2.h, tensor2.w};
+    InferenceEngine::SizeVector output_dim3 = {tensor3.n, tensor3.c, tensor3.h, tensor3.w};
+
+    ASSERT_EQ(axis_val.size(), offsets.size());
+    std::string axis;
+    std::string offset;
+    std::map<std::string, std::string> params;
+
+    for (size_t i = 0; i < axis_val.size(); ++i) {
+        axis += std::to_string(axis_val[i]) +",";
+        offset += std::to_string(offsets[i]) +",";
+    }
+
+    params["axis"] = axis;
+    params["offset"] = offset;
+    SetInputTensors({input_dim1, input_dim2});
+    SetOutputTensors({output_dim3});
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Crop").params(params), NetworkInitParams().layoutPreference(vpu::LayoutPreference::ChannelMinor)));
+    ASSERT_TRUE(Infer());
+    ref_crop(_inputMap.begin()->second, _refBlob, axis_val, offsets);
+    CompareCommonAbsolute(_outputMap.begin()->second, _refBlob, ERROR_BOUND);
+}
+
+static std::vector<Dims> s_tileTensors1 = {
+    {{1, 4, 16, 32}},
+    {{1, 8, 20, 36}},
+};
+
+static std::vector<Dims> s_tileTensors2 = {
+    {{1, 2, 12, 26}},
+};
+
+static std::vector<crop_axis> s_tileCropAxis = {
+    {{1, 2, 3}},
+};
+
+static std::vector<offset> s_tileOffset = {
+    {{2, 4, 6}},
+    {{2, 2, 2}},
+};
+
+static std::vector<dim> s_tileDim = {
+    {{2, 12, 26}},
+};
+
+static std::vector<crop_begin> s_tileCropBegin= {
+    {{2, 2, 3}},
+};
+
+static std::vector<crop_end> s_tileCropEnd = {
+    {{2, 2, 6}},
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_custom_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_custom_test.cpp
new file mode 100644 (file)
index 0000000..d1413ef
--- /dev/null
@@ -0,0 +1,38 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_custom_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsShuffleChannel_nightly,
+        ::testing::Combine(
+        ::testing::ValuesIn(s_ShuffleChannelTensors),
+        ::testing::ValuesIn(s_ShuffleChannelGroup),
+        ::testing::ValuesIn(s_CustomConfig)));
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsQuantize_nightly,
+        ::testing::Combine(
+        ::testing::ValuesIn(s_QuantizeTensors),
+        ::testing::ValuesIn(s_QuantizeLevels),
+        ::testing::ValuesIn(s_CustomConfig)));
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsQuantizeBinarize_nightly,
+        ::testing::Combine(
+        ::testing::ValuesIn(s_QuantizeTensors),
+        ::testing::ValuesIn(s_QuantizeLevels),
+        ::testing::ValuesIn(s_QuantizeSwitchOut),
+        ::testing::ValuesIn(s_CustomConfig)));
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsBinaryConvolution_nightly,
+        ::testing::Combine(
+        ::testing::ValuesIn(s_BinaryConvolutionTensors),
+        ::testing::ValuesIn(s_BinaryConvolutionDilations),
+        ::testing::ValuesIn(s_BinaryConvolutionGroup),
+        ::testing::ValuesIn(s_BinaryConvolutionKernel),
+        ::testing::ValuesIn(s_BinaryConvolutionStrides),
+        ::testing::ValuesIn(s_CustomConfig)));
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsExperimentalDetectronPriorGridGenerator_nightly,
+        ::testing::Combine(
+        ::testing::ValuesIn(s_ExperimentalDetectronPriorGridGeneratorImageDims),
+        ::testing::ValuesIn(s_CustomConfig)));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_custom_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_custom_test.hpp
new file mode 100644 (file)
index 0000000..d50e170
--- /dev/null
@@ -0,0 +1,854 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+
+using namespace InferenceEngine;
+
+static void refShuffleChannel(const Blob::Ptr src,
+                              Blob::Ptr dst,
+                              int group, int isCHW) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+    const uint16_t *src_data = src->buffer();
+          uint16_t *dst_data = dst->buffer();
+    ASSERT_NE(src_data, nullptr);
+    ASSERT_NE(dst_data, nullptr);
+    int32_t IW = 1;
+    int32_t IH = 1;
+    int32_t IC = 1;
+    get_dims(src, IW, IH, IC);
+
+    int G = group;
+    int CX = IC / G;
+    int CY = G;
+
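+    // ShuffleChannel views the C channels as a G x (C/G) matrix and transposes
+    // it: output channel (cx * G + cy) reads input channel (cy * (C/G) + cx).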
+    for (int cy = 0; cy < CY; cy++) {
+        for (int cx = 0; cx < CX; cx++) {
+            for (int h = 0; h < IH; h++) {
+                for (int w = 0; w < IW; w++) {
+                    if (isCHW) {
+                        dst_data[(cx*CY + cy)*IW*IH + h*IW + w] = src_data[(cy*CX + cx)*IW*IH + h*IW + w];
+                    } else {
+                        dst_data[(cx*CY + cy) + h*IW*IC + w*IC] = src_data[(cy*CX + cx) + h*IW*IC + w*IC];
+                    }
+                }
+            }
+        }
+    }
+}
+
+static void refQuantize(const Blob::Ptr src,
+                        const Blob::Ptr input_low,
+                        const Blob::Ptr input_high,
+                        const Blob::Ptr output_low,
+                        const Blob::Ptr output_high,
+                        Blob::Ptr dst,
+                        int levels, int isCHW) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(input_low, nullptr);
+    ASSERT_NE(input_high, nullptr);
+    ASSERT_NE(output_low, nullptr);
+    ASSERT_NE(output_high, nullptr);
+    ASSERT_NE(dst, nullptr);
+    const uint16_t *src_data = src->buffer();
+    const uint16_t *input_low_data = input_low->buffer();
+    const uint16_t *input_high_data = input_high->buffer();
+    const uint16_t *output_low_data = output_low->buffer();
+    const uint16_t *output_high_data = output_high->buffer();
+    uint16_t *dst_data = dst->buffer();
+    ASSERT_NE(src_data, nullptr);
+    ASSERT_NE(input_low_data, nullptr);
+    ASSERT_NE(input_high_data, nullptr);
+    ASSERT_NE(output_low_data, nullptr);
+    ASSERT_NE(output_high_data, nullptr);
+    ASSERT_NE(dst_data, nullptr);
+    int32_t W = 1;
+    int32_t H = 1;
+    int32_t C = 1;
+    get_dims(src, W, H, C);
+
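+    // Per-channel FakeQuantize: values at or below input_low clamp to
+    // output_low, values above input_high clamp to output_high, and the rest
+    // are snapped to one of `levels` evenly spaced output values.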
+    for (int c = 0; c < C; c++) {
+        float ilow  = PrecisionUtils::f16tof32(input_low->size()   == 1 ? input_low_data[0]   : input_low_data[c]);
+        float ihigh = PrecisionUtils::f16tof32(input_high->size()  == 1 ? input_high_data[0]  : input_high_data[c]);
+        float olow  = PrecisionUtils::f16tof32(output_low->size()  == 1 ? output_low_data[0]  : output_low_data[c]);
+        float ohigh = PrecisionUtils::f16tof32(output_high->size() == 1 ? output_high_data[0] : output_high_data[c]);
+
+        for (int h = 0; h < H; h++) {
+            for (int w = 0; w < W; w++) {
+                int idx = (isCHW) ? c*W*H + h*W + w : c + h*W*C + w*C;
+                float src_val = PrecisionUtils::f16tof32(src_data[idx]);
+                float dst_val;
+
+                if (src_val <= ilow) {
+                    dst_val = olow;
+                } else if (src_val > ihigh) {
+                    dst_val = ohigh;
+                } else {
+                    dst_val = round((src_val - ilow) * ((float)(levels - 1) / (ihigh - ilow)))
+                              * ((ohigh - olow) / (float)(levels - 1)) + olow;
+                    // equivalently: round((src_val - ilow) / (ihigh - ilow) * (levels - 1)) / (levels - 1) * (ohigh - olow) + olow
+                }
+
+                dst_data[idx] = PrecisionUtils::f32tof16(dst_val);
+            }
+        }
+    }
+}
+
+static void ref_QuantizeBinarization(const Blob::Ptr src,
+                        const Blob::Ptr input_low,
+                        const Blob::Ptr input_high,
+                        const Blob::Ptr output_low,
+                        const Blob::Ptr output_high,
+                        Blob::Ptr dst,
+                        int levels) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(input_low, nullptr);
+    ASSERT_NE(input_high, nullptr);
+    ASSERT_NE(output_low, nullptr);
+    ASSERT_NE(output_high, nullptr);
+    ASSERT_NE(dst, nullptr);
+    const uint16_t *src_data = src->buffer();
+    const uint16_t *input_low_data = input_low->buffer();
+    const uint16_t *input_high_data = input_high->buffer();
+    const uint16_t *output_low_data = output_low->buffer();
+    const uint16_t *output_high_data = output_high->buffer();
+    uint16_t *dst_data = dst->buffer();
+    ASSERT_NE(src_data, nullptr);
+    ASSERT_NE(input_low_data, nullptr);
+    ASSERT_NE(input_high_data, nullptr);
+    ASSERT_NE(output_low_data, nullptr);
+    ASSERT_NE(output_high_data, nullptr);
+    ASSERT_NE(dst_data, nullptr);
+    int32_t W = 1;
+    int32_t H = 1;
+    int32_t C = 1;
+    get_dims(src, W, H, C);
+
+    for (int c = 0; c < C; c++) {
+        float ilow  = PrecisionUtils::f16tof32(input_low->size()   == 1 ? input_low_data[0]   : input_low_data[c]);
+        float ihigh = PrecisionUtils::f16tof32(input_high->size()  == 1 ? input_high_data[0]  : input_high_data[c]);
+        float olow  = PrecisionUtils::f16tof32(output_low->size()  == 1 ? output_low_data[0]  : output_low_data[c]);
+        float ohigh = PrecisionUtils::f16tof32(output_high->size() == 1 ? output_high_data[0] : output_high_data[c]);
+
+        // emulate half math to be close to half float SHAVE implementation
+        float hTof_ilow = PrecisionUtils::f16tof32(PrecisionUtils::f32tof16(ilow));
+        float hTof_ihigh = PrecisionUtils::f16tof32(PrecisionUtils::f32tof16(ihigh));
+        float a = (0.01 > (hTof_ihigh - hTof_ilow)) ? 0.0f : PrecisionUtils::f16tof32(PrecisionUtils::f32tof16((float)(levels - 1) / (hTof_ihigh - hTof_ilow)));
+        float b = !(levels - 1) ? 0.0f : PrecisionUtils::f16tof32(PrecisionUtils::f32tof16((ohigh - olow) / (float)(levels - 1)));
+
+        for (int h = 0; h < H; h++) {
+            for (int w = 0; w < W; w++) {
+                int idx = c*W*H + h*W + w;
+                float src_val = PrecisionUtils::f16tof32(src_data[idx]);
+                float dst_val;
+
+                if (src_val <= ilow) {
+                    dst_val = olow;
+                } else if (src_val > ihigh) {
+                    dst_val = ohigh;
+                } else {
+                    if(!(ihigh - ilow) || !(levels - 1))
+                        dst_val = olow;
+                    else
+                    {
+                        // quantization pass
+                        float quantized = PrecisionUtils::f16tof32(PrecisionUtils::f32tof16((src_val - ilow) * a));
+                        // de-quantization pass
+                        dst_val = PrecisionUtils::f16tof32(PrecisionUtils::f32tof16(roundf( quantized ) * b)) + olow;
+                    }
+                }
+
+                dst_data[idx] = PrecisionUtils::f32tof16(dst_val);
+            }
+        }
+    }
+}
+
+static void refBinaryConvolution(const Blob::Ptr src, const Blob::Ptr weights, Blob::Ptr dst,
+                                 int dilations, int group, param_size kernel, int strides,
+                                 int isCHW) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+    const uint16_t* src_data = src->buffer();
+    const uint8_t*  weights_data = weights->buffer();
+          uint16_t* dst_data = dst->buffer();
+    ASSERT_NE(src_data, nullptr);
+    ASSERT_NE(weights_data, nullptr);
+    ASSERT_NE(dst_data, nullptr);
+    int32_t IW = 1;
+    int32_t IH = 1;
+    int32_t IC = 1;
+    get_dims(src, IW, IH, IC);
+    int32_t OW = 1;
+    int32_t OH = 1;
+    int32_t OC = 1;
+    get_dims(dst, OW, OH, OC);
+
+    int KW = kernel.x;
+    int KH = kernel.y;
+    int KD = 1;
+
+    int SW = strides;
+    int SH = strides;
+    int SD = 0;
+
+    int DW = dilations;
+    int DH = dilations;
+    int DD = 0;
+
+    int PW = kernel.x/2;
+    int PH = kernel.y/2;
+    int PD = 0;
+
+    int GC = group;
+
+    int ID = 1;
+    int OD = 1;
+
+    int pad_value = 0;
+
+    int nbits = 8;
+
+    auto extract_weights = [](uint8_t val, uint8_t bit) -> int {
+        return (uint8_t)((val >> bit) & 1);
+    };
+
+    for (uint32_t g = 0; g < GC; g++) {
+        for (uint32_t oc = 0; oc < OC / GC; oc++) {
+            for (uint32_t od = 0; od < OD; od++) {
+                for (uint32_t oh = 0; oh < OH; oh++) {
+                    for (uint32_t ow = 0; ow < OW; ow++) {
+                        int oidx = (isCHW) ? g  * OC / GC * OD * OH * OW +
+                                             oc *           OD * OH * OW +
+                                             od *           OH * OW +
+                                             oh *           OW +
+                                             ow
+                                           : g  * OC / GC * OD +
+                                             oc * OD +
+                                             od +
+                                             oh * OW * OC +
+                                             ow * OC;
+
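+                        // XNOR-popcount form of binary convolution: inputs are
+                        // binarized by sign, s ^ w counts mismatches, and the
+                        // final value total - 2 * mismatches equals
+                        // matches - mismatches.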
+                        int dst_val = 0;
+
+                        for (int ic = 0; ic < IC / GC; ic++) {
+                            for (int kd = 0; kd < KD; kd++) {
+                                for (int kh = 0; kh < KH; kh++) {
+                                    for (int kw = 0; kw < KW; kw++) {
+                                        int widx = g  * OC / GC * IC / GC * KD * KH * KW +
+                                                   oc * IC / GC * KD * KH * KW +
+                                                   ic * KD * KH * KW +
+                                                   kd * KH * KW +
+                                                   kh * KW +
+                                                   kw;
+                                        int w = extract_weights(weights_data[widx/nbits], (uint8_t)(widx % nbits));
+
+                                        int s;
+
+                                        int iw = ow * SW - PW + kw * DW;
+                                        int ih = oh * SH - PH + kh * DH;
+                                        int id = od * SD - PD + kd * DD;
+                                        if (iw < 0 || iw >= (int) IW ||
+                                            ih < 0 || ih >= (int) IH ||
+                                            id < 0 || id >= (int) ID) {
+                                            s = pad_value;
+                                        } else {
+                                            int iidx = (isCHW) ? g  * IC / GC * ID * IH * IW +
+                                                                 ic * ID * IH * IW +
+                                                                 id * IH * IW +
+                                                                 ih * IW +
+                                                                 iw
+                                                               : g  * IC / GC * ID +
+                                                                 ic * ID +
+                                                                 id +
+                                                                 ih * IW * IC +
+                                                                 iw * IC;
+                                            s = ((PrecisionUtils::f16tof32(src_data[iidx]) > 0.f) ? 1 : 0);
+                                        }
+
+                                        dst_val += s ^ w;
+                                    }
+                                }
+                            }
+                        }
+
+                        dst_data[oidx] = PrecisionUtils::f32tof16((float)(IC/GC*KD*KH*KW - 2*dst_val));
+                    }
+                }
+            }
+        }
+    }
+}
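+
+// A minimal standalone sketch of the XNOR-popcount identity used above
+// (illustration only): with bits interpreted as {0 -> -1, 1 -> +1}, a dot
+// product over numBits pairs equals numBits - 2 * popcount(a XOR b), which is
+// exactly the final line dst = IC/GC*KD*KH*KW - 2*dst_val in the reference.
+static inline int xnorPopcountDot(uint32_t a, uint32_t b, int numBits) {
+    int mismatches = 0;
+    for (int bit = 0; bit < numBits; ++bit) {
+        mismatches += ((a >> bit) & 1) ^ ((b >> bit) & 1);  // 1 where signs differ
+    }
+    return numBits - 2 * mismatches;  // +1 per matching bit, -1 per mismatch
+}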
+
+static void refExperimentalDetectronPriorGridGenerator(
+        std::vector<Blob::Ptr> &inputs, std::vector<Blob::Ptr> &outputs,
+        int grid_h, int grid_w, int stride_h, int stride_w) {
+    int num_priors = inputs[0]->getTensorDesc().getDims()[0];
+
+    uint16_t *src_data = inputs[0]->buffer();
+    uint16_t *dst_data = outputs[0]->buffer();
+
+    using namespace PrecisionUtils;
+
+    for (int h = 0; h < grid_h; ++h) {
+        for (int w = 0; w < grid_w; ++w) {
+            for (int s = 0; s < 3; ++s) {
+                dst_data[0] = f32tof16(
+                        f16tof32(src_data[4 * s + 0]) + stride_w * (w + 0.5f));
+                dst_data[1] = f32tof16(
+                        f16tof32(src_data[4 * s + 1]) + stride_h * (h + 0.5f));
+                dst_data[2] = f32tof16(
+                        f16tof32(src_data[4 * s + 2]) + stride_w * (w + 0.5f));
+                dst_data[3] = f32tof16(
+                        f16tof32(src_data[4 * s + 3]) + stride_h * (h + 0.5f));
+                dst_data += 4;
+            }
+        }
+    }
+}
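+
+// For each of the grid_h * grid_w cells the reference above shifts the three
+// input priors to the cell center: x-coordinates by stride_w * (w + 0.5) and
+// y-coordinates by stride_h * (h + 0.5), producing grid_h * grid_w * 3 output
+// rows of four values (note the loop hardcodes 3 priors even though
+// num_priors is read from the input).
+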
+static std::vector<std::string> s_CustomConfig = {
+#ifdef VPU_HAS_CUSTOM_KERNELS
+    getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"
+#endif
+};
+
+PRETTY_PARAM(Group, int)
+PRETTY_PARAM(Levels, int)
+PRETTY_PARAM(SwitchOut, int)
+PRETTY_PARAM(Dilations, int)
+PRETTY_PARAM(Kernel, param_size)
+PRETTY_PARAM(Strides, int)
+
+typedef myriadLayerTestBaseWithParam<std::tuple<Dims, Group, std::string>> myriadLayersTestsShuffleChannel_nightly;
+typedef myriadLayerTestBaseWithParam<std::tuple<Dims, Levels, std::string>> myriadLayersTestsQuantize_nightly;
+typedef myriadLayerTestBaseWithParam<std::tuple<Dims, Levels, SwitchOut, std::string>> myriadLayersTestsQuantizeBinarize_nightly;
+typedef myriadLayerTestBaseWithParam<std::tuple<Dims, Dilations, Group, Kernel, Strides, std::string>> myriadLayersTestsBinaryConvolution_nightly;
+typedef myriadLayerTestBaseWithParam<std::tuple<std::vector<size_t>, std::string>>
+myriadLayersTestsExperimentalDetectronPriorGridGenerator_nightly;
+
+TEST_P(myriadLayersTestsShuffleChannel_nightly, ShuffleChannel) {
+    tensor_test_params dims  = std::get<0>(GetParam());
+    int group                = std::get<1>(GetParam());
+    std::string customConfig = std::get<2>(GetParam());
+
+    if (!customConfig.empty() && !CheckMyriadX()) {
+        GTEST_SKIP() << "Custom layers are not supported on MYRIAD2";
+    }
+    _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig;
+
+    SetInputTensor(dims);
+    SetOutputTensor(dims);
+
+    std::map<std::string, std::string> params;
+    params["group"] = std::to_string(group);
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("ShuffleChannel").params(params)));
+
+    ASSERT_TRUE(Infer());
+
+    ASSERT_NO_FATAL_FAILURE(refShuffleChannel(_inputMap.begin()->second, _refBlob, group, false));
+
+    CompareCommonAbsolute(_outputMap.begin()->second, _refBlob, 0);
+}
+
+static std::vector<Dims> s_ShuffleChannelTensors = {
+    {{1,  48, 28, 28}},
+    {{1,  96, 14, 14}},
+    {{1, 192,  7,  7}},
+};
+
+static std::vector<Group> s_ShuffleChannelGroup = {
+    2
+};
+
+TEST_P(myriadLayersTestsQuantize_nightly, Quantize) {
+    tensor_test_params dims  = std::get<0>(GetParam());
+    int levels               = std::get<1>(GetParam());
+    std::string customConfig = std::get<2>(GetParam());
+
+    if (!customConfig.empty() && !CheckMyriadX()) {
+        GTEST_SKIP() << "Custom layers are not supported on MYRIAD2";
+    }
+    _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig;
+
+    IN_OUT_desc inpt(5);
+    for (size_t i = 0; i < inpt.size(); ++i) {
+        inpt[i].resize(4);
+        inpt[i][0] = dims.n;
+        inpt[i][1] = 1;
+        inpt[i][2] = 1;
+        inpt[i][3] = 1;
+    }
+    inpt[0][1] = dims.c;
+    inpt[0][2] = dims.h;
+    inpt[0][3] = dims.w;
+    for (size_t i = 1; i < inpt.size(); ++i) {
+        if (rand() % 2 > 0) {
+            inpt[i][1] = dims.c;
+        }
+    }
+
+    SetInputTensors(inpt);
+    SetOutputTensor(dims);
+
+    std::map<std::string, std::string> params;
+    params["levels"] = std::to_string(levels);
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("FakeQuantize").params(params)));
+
+    ASSERT_TRUE(Infer());
+
+    std::vector<Blob::Ptr> inputBlobs(inpt.size());
+    auto inptIter = _inputMap.begin();
+    for (size_t i = 0; i < inpt.size(); i++) {
+        inputBlobs[i] = inptIter->second;
+        inptIter++;
+    }
+
+    ASSERT_NO_FATAL_FAILURE(refQuantize(inputBlobs[0],
+                                        inputBlobs[1],
+                                        inputBlobs[2],
+                                        inputBlobs[3],
+                                        inputBlobs[4],
+                                        _refBlob,
+                                        levels, false));
+
+    CompareCommonAbsolute(_outputMap.begin()->second, _refBlob, 0.01f);
+}
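+
+// A minimal scalar sketch of the FakeQuantize semantics the refQuantize helper
+// above is assumed to implement (per the layer spec: clamp to the input range,
+// then requantize to `levels` steps of the output range); illustration only,
+// assuming <cmath> and <algorithm> are available.
+static inline float fakeQuantizeScalar(float x, float il, float ih,
+                                       float ol, float oh, int levels) {
+    if (x <= std::min(il, ih)) return ol;   // below the input range
+    if (x >  std::max(il, ih)) return oh;   // above the input range
+    const float q = std::round((x - il) / (ih - il) * (levels - 1));
+    return q / (levels - 1) * (oh - ol) + ol;  // map quantized step to output range
+}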
+
+TEST_P(myriadLayersTestsQuantizeBinarize_nightly, Quantize_Binarization) {
+    std::string model = R"V0G0N(
+       <net name="Quantize_Binarization" version="2" batch="1">
+           <layers>
+            <layer id="0" name="data" precision="FP16" type="Input">
+                <output>
+                    <port id="0">
+                        <dim>@IB@</dim>
+                        <dim>@IC@</dim>
+                        <dim>@IH@</dim>
+                        <dim>@IW@</dim>
+                    </port>
+                </output>
+            </layer>
+            <layer id="1" name="input_low" precision="FP16" type="Input">
+                <output>
+                    <port id="0">
+                        <dim>1</dim>
+                        <dim>@input_low_size@</dim>
+                        <dim>1</dim>
+                        <dim>1</dim>
+                    </port>
+                </output>
+            </layer>
+            <layer id="2" name="input_high" precision="FP16" type="Input">
+                <output>
+                    <port id="0">
+                        <dim>1</dim>
+                        <dim>@input_high_size@</dim>
+                        <dim>1</dim>
+                        <dim>1</dim>
+                    </port>
+                </output>
+            </layer>
+            <layer id="3" name="output_low" precision="FP16" type="Input">
+                <output>
+                    <port id="0">
+                        <dim>1</dim>
+                        <dim>@output_low_size@</dim>
+                        <dim>1</dim>
+                        <dim>1</dim>
+                    </port>
+                </output>
+            </layer>
+            <layer id="4" name="output_high" precision="FP16" type="Input">
+                <output>
+                    <port id="0">
+                        <dim>1</dim>
+                        <dim>@output_high_size@</dim>
+                        <dim>1</dim>
+                        <dim>1</dim>
+                    </port>
+                </output>
+            </layer>
+            <layer id="5" name="Quantize" precision="FP16" type="QuantizeTemporaryType">
+                <data levels="@levels@" input_low_size="@input_low_size@" input_high_size="@input_high_size@" output_low_size="@output_low_size@" output_high_size="@output_high_size@" switch_out="@switch_out@"/>
+                <input>
+                    <port id="0">
+                        <dim>@IB@</dim>
+                        <dim>@IC@</dim>
+                        <dim>@IH@</dim>
+                        <dim>@IW@</dim>
+                    </port>
+                    <port id="1">
+                        <dim>1</dim>
+                        <dim>@input_low_size@</dim>
+                        <dim>1</dim>
+                        <dim>1</dim>
+                    </port>
+                    <port id="2">
+                        <dim>1</dim>
+                        <dim>@input_high_size@</dim>
+                        <dim>1</dim>
+                        <dim>1</dim>
+                    </port>
+                    <port id="3">
+                        <dim>1</dim>
+                        <dim>@output_low_size@</dim>
+                        <dim>1</dim>
+                        <dim>1</dim>
+                    </port>
+                    <port id="4">
+                        <dim>1</dim>
+                        <dim>@output_high_size@</dim>
+                        <dim>1</dim>
+                        <dim>1</dim>
+                    </port>
+                </input>
+                <output>
+                    <port id="0">
+                        <dim>@OB@</dim>
+                        <dim>@OC@</dim>
+                        <dim>@OH@</dim>
+                        <dim>@OW@</dim>
+                    </port>
+                </output>
+            </layer>
+           </layers>
+           <edges>
+               <edge from-layer="0" from-port="0" to-layer="5" to-port="0"/>
+               <edge from-layer="1" from-port="0" to-layer="5" to-port="1"/>
+               <edge from-layer="2" from-port="0" to-layer="5" to-port="2"/>
+               <edge from-layer="3" from-port="0" to-layer="5" to-port="3"/>
+               <edge from-layer="4" from-port="0" to-layer="5" to-port="4"/>
+           </edges>
+       </net>
+   )V0G0N";
+
+    SetSeed(DEFAULT_SEED_VALUE + 6);
+
+    tensor_test_params dims  = std::get<0>(GetParam());
+    int levels               = std::get<1>(GetParam());
+    int switch_out           = std::get<2>(GetParam());
+    std::string customConfig = std::get<3>(GetParam());
+
+    if (!customConfig.empty() && !CheckMyriadX()) {
+        GTEST_SKIP() << "Custom layers are not supported on MYRIAD2";
+    }
+    _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig;
+
+    int IB = dims.n;
+    int IC = dims.c;
+    int IH = dims.h;
+    int IW = dims.w;
+
+    int OB = dims.n;
+    int OC = dims.c;
+    int OH = dims.h;
+    int OW = dims.w;
+
+    int input_low_size = (rand() % 2 > 0) ? dims.c : 1;
+    int input_high_size = (levels == 2) ? input_low_size : ((rand() % 2 > 0) ? dims.c : 1);
+    int output_low_size = (rand() % 2 > 0) ? dims.c : 1;
+    int output_high_size = (levels == 2) ? output_low_size : ((rand() % 2 > 0) ? dims.c : 1);
+
+    model.replace( model.find("@IB@"), sizeof("@IB@") -1, std::to_string(IB));
+    model.replace( model.find("@IB@"), sizeof("@IB@") -1, std::to_string(IB));
+    model.replace( model.find("@IC@"), sizeof("@IC@") -1, std::to_string(IC));
+    model.replace( model.find("@IC@"), sizeof("@IC@") -1, std::to_string(IC));
+    model.replace( model.find("@IH@"), sizeof("@IH@") -1, std::to_string(IH));
+    model.replace( model.find("@IH@"), sizeof("@IH@") -1, std::to_string(IH));
+    model.replace( model.find("@IW@"), sizeof("@IW@") -1, std::to_string(IW));
+    model.replace( model.find("@IW@"), sizeof("@IW@") -1, std::to_string(IW));
+
+    model.replace( model.find("@OB@"), sizeof("@OB@") -1, std::to_string(OB));
+    model.replace( model.find("@OC@"), sizeof("@OC@") -1, std::to_string(OC));
+    model.replace( model.find("@OH@"), sizeof("@OH@") -1, std::to_string(OH));
+    model.replace( model.find("@OW@"), sizeof("@OW@") -1, std::to_string(OW));
+
+    model.replace( model.find("@levels@"), sizeof("@levels@") -1, std::to_string(levels));
+    model.replace( model.find("@switch_out@"), sizeof("@switch_out@") -1, std::to_string(switch_out));
+    model.replace( model.find("@input_low_size@"), sizeof("@input_low_size@") -1, std::to_string(input_low_size));
+    model.replace( model.find("@input_high_size@"), sizeof("@input_high_size@") -1, std::to_string(input_high_size));
+    model.replace( model.find("@output_low_size@"), sizeof("@output_low_size@") -1, std::to_string(output_low_size));
+    model.replace( model.find("@output_high_size@"), sizeof("@output_high_size@") -1, std::to_string(output_high_size));
+    model.replace( model.find("@input_low_size@"), sizeof("@input_low_size@") -1, std::to_string(input_low_size));
+    model.replace( model.find("@input_high_size@"), sizeof("@input_high_size@") -1, std::to_string(input_high_size));
+    model.replace( model.find("@output_low_size@"), sizeof("@output_low_size@") -1, std::to_string(output_low_size));
+    model.replace( model.find("@output_high_size@"), sizeof("@output_high_size@") -1, std::to_string(output_high_size));
+    model.replace( model.find("@input_low_size@"), sizeof("@input_low_size@") -1, std::to_string(input_low_size));
+    model.replace( model.find("@input_high_size@"), sizeof("@input_high_size@") -1, std::to_string(input_high_size));
+    model.replace( model.find("@output_low_size@"), sizeof("@output_low_size@") -1, std::to_string(output_low_size));
+    model.replace( model.find("@output_high_size@"), sizeof("@output_high_size@") -1, std::to_string(output_high_size));
+
+    StatusCode st;
+
+    InferenceEngine::Core ie;
+    auto network = ie.ReadNetwork(model, InferenceEngine::Blob::CPtr());
+
+    _inputsInfo  = network.getInputsInfo();
+    _outputsInfo = network.getOutputsInfo();
+
+    _inputsInfo["data"]->setPrecision(Precision::FP16);
+    _inputsInfo["input_low"]->setPrecision(Precision::FP16);
+    _inputsInfo["input_high"]->setPrecision(Precision::FP16);
+    _inputsInfo["output_low"]->setPrecision(Precision::FP16);
+    _inputsInfo["output_high"]->setPrecision(Precision::FP16);
+    _outputsInfo["Quantize"]->setPrecision(Precision::FP16);
+
+    _inputsInfo["data"]->setLayout(NCHW);
+    _inputsInfo["input_low"]->setLayout(NCHW);
+    _inputsInfo["input_high"]->setLayout(NCHW);
+    _inputsInfo["output_low"]->setLayout(NCHW);
+    _inputsInfo["output_high"]->setLayout(NCHW);
+    _outputsInfo["Quantize"]->setLayout(NCHW);
+
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network,
+                                                    {{VPU_CONFIG_KEY(CUSTOM_LAYERS), customConfig }}, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr data;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("data", data, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    GenRandomData(data);
+
+    Blob::Ptr input_low;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("input_low", input_low, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    GenRandomData(input_low);
+
+    Blob::Ptr input_high;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("input_high", input_high, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    Blob::Ptr output_low;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("output_low", output_low, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    Blob::Ptr output_high;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("output_high", output_high, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
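+    // With levels == 2 FakeQuantize degenerates to binarization: one threshold
+    // (input_high is copied from input_low) and two output levels fixed to
+    // +/-1, where switch_out swaps which side maps to +1.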
+    if (levels == 2) {
+        memcpy((uint8_t*)input_high->buffer(), (uint8_t*)input_low->buffer(), input_high->byteSize());
+        for (size_t i = 0; i < (output_low->byteSize() / output_low->element_size()); ++i) {
+            *((ie_fp16*)output_low->buffer() + i) = switch_out ? PrecisionUtils::f32tof16(1.0f) : PrecisionUtils::f32tof16(-1.0f);
+            *((ie_fp16*)output_high->buffer() + i) = switch_out ? PrecisionUtils::f32tof16(-1.0f) : PrecisionUtils::f32tof16(1.0f);
+        }
+    } else {
+        GenRandomData(input_high);
+        GenRandomData(output_low);
+        GenRandomData(output_high);
+    }
+
+    ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    {
+        std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perfMap;
+        _inferRequest->GetPerformanceCounts(perfMap, nullptr);
+        std::vector<std::pair<std::string, InferenceEngine::InferenceEngineProfileInfo>> perfVec(perfMap.begin(), perfMap.end());
+        std::sort(perfVec.begin(), perfVec.end(),
+                  [](const std::pair<std::string, InferenceEngine::InferenceEngineProfileInfo> &pair1,
+                     const std::pair<std::string, InferenceEngine::InferenceEngineProfileInfo> &pair2) -> bool {
+                      return pair1.second.execution_index < pair2.second.execution_index;
+                  });
+
+        for (const auto &entry : perfVec) {
+            const std::string &layerName = entry.first;
+            const InferenceEngine::InferenceEngineProfileInfo &info = entry.second;
+            if (info.status == InferenceEngine::InferenceEngineProfileInfo::EXECUTED) {
+                printf("\x1B[32m[----------]\x1B[0m Myriad time: layer '%s' of type '%s' took %f ms.\n", layerName.c_str(), info.exec_type, info.realTime_uSec / 1000.f);
+            }
+        }
+    }
+
+    Blob::Ptr outputBlob;
+    ASSERT_NO_THROW(_inferRequest->GetBlob("Quantize", outputBlob, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    _refBlob = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, outputBlob->getTensorDesc().getDims(), NCHW));
+    _refBlob->allocate();
+
+    ASSERT_NO_FATAL_FAILURE(ref_QuantizeBinarization(data,
+                                                    input_low,
+                                                    input_high,
+                                                    output_low,
+                                                    output_high,
+                                                    _refBlob,
+                                                    levels));
+
+    CompareCommonAbsolute(outputBlob, _refBlob, 0.1);
+}
+
+static std::vector<Dims> s_QuantizeTensors = {
+    {{1,  64, 56, 56}},
+    {{1, 256, 28, 28}},
+    {{1, 512,  7,  7}},
+    {{1,  64, 56, 57}},
+    {{1, 256, 28, 31}},
+    {{1, 512,  8,  9}},
+    {{1,  64, 56, 56}},
+    {{1, 256, 56, 56}},
+    {{1, 128, 56, 56}},
+    {{1, 128, 28, 28}},
+    {{1, 512, 28, 28}},
+    {{1, 256, 28, 28}},
+    {{1, 256, 14, 14}},
+    {{1, 1024, 14, 14}},
+    {{1, 512, 14, 14}},
+    {{1, 512,  7,  7}},
+    {{1, 2048, 7,  7}},
+    {{1, 512,  7,  7}}
+};
+
+static std::vector<Levels> s_QuantizeLevels = {
+    2,
+    256
+};
+
+static std::vector<SwitchOut> s_QuantizeSwitchOut = {
+    0,
+    1
+};
+
+TEST_P(myriadLayersTestsBinaryConvolution_nightly, BinaryConvolution) {
+    tensor_test_params dims  = std::get<0>(GetParam());
+    int dilations            = std::get<1>(GetParam());
+    int group                = std::get<2>(GetParam());
+    param_size kernel        = std::get<3>(GetParam());
+    int strides              = std::get<4>(GetParam());
+    std::string customConfig = std::get<5>(GetParam());
+
+    if (!customConfig.empty() && !CheckMyriadX()) {
+        GTEST_SKIP() << "Custom layers are not supported on MYRIAD2";
+    }
+    _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig;
+
+    SetInputTensor(dims);
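+    // The test declares out = in / stride (pads of kernel/2 per side are set
+    // below); the reference iterates over exactly this declared extent and
+    // reads pad_value wherever the receptive field falls outside the input.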
+    auto dimsOutput = dims;
+    dimsOutput.h = (dims.h) / strides;
+    dimsOutput.w = (dims.w) / strides;
+    SetOutputTensor(dimsOutput);
+    size_t numWeights = kernel.x * kernel.y * dims.c * dims.c;
+    size_t numBiases = 0;
+    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(GenWeights(numWeights));
+
+    std::map<std::string, std::string> params;
+    params["mode"] = "xnor-popcount";
+    params["pad_value"] = "-1.0";
+    params["pads_begin"] = std::to_string(kernel.x/2) + "," + std::to_string(kernel.y/2);
+    params["pads_end"] = std::to_string(kernel.x/2) + "," + std::to_string(kernel.y/2);
+    params["input"] = std::to_string(dims.c);
+    params["output"] = std::to_string(dims.c);
+    params["dilations"] = std::to_string(dilations) + "," + std::to_string(dilations);
+    params["group"] = std::to_string(group);
+    params["kernel"] = std::to_string(kernel.x) + "," + std::to_string(kernel.y);
+    params["strides"] = std::to_string(strides) + "," + std::to_string(strides);
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("BinaryConvolution")
+                                        .params(params)
+                                        .weights(numWeights)
+                                        .biases(numBiases),
+                                        {},
+                                        weights_ptr));
+
+    ASSERT_TRUE(Infer());
+
+    ASSERT_NO_FATAL_FAILURE(refBinaryConvolution(_inputMap.begin()->second, weights_ptr, _refBlob,
+                                                 dilations, group, kernel, strides,
+                                                 false));
+
+    CompareCommonAbsolute(_outputMap.begin()->second, _refBlob, 0);
+}
+
+static std::vector<Dims> s_BinaryConvolutionTensors = {
+    {{1, 64, 112, 112}},
+    {{1, 128, 56, 56}},
+    {{1, 256, 28, 28}},
+    {{1, 256, 14, 14}},
+    {{1, 16, 16, 16}},
+    {{1,  2,  2,  2}},
+};
+
+static std::vector<Dilations> s_BinaryConvolutionDilations = {
+    1, 2
+};
+static std::vector<Group> s_BinaryConvolutionGroup = {
+    1, 2
+};
+static std::vector<Kernel> s_BinaryConvolutionKernel = {
+    {{1, 1}},
+    {{1, 3}},
+    {{3, 3}},
+};
+static std::vector<Strides> s_BinaryConvolutionStrides = {
+    1, 2
+};
+
+TEST_P(myriadLayersTestsExperimentalDetectronPriorGridGenerator_nightly,
+       ExperimentalDetectronPriorGridGenerator) {
+
+    // Setup parameters and configuration.
+    std::vector<size_t> image_dims = std::get<0>(GetParam());
+    std::string customConfig = std::get<1>(GetParam());
+    if (!customConfig.empty() && !CheckMyriadX()) {
+        GTEST_SKIP() << "Custom layers are not supported on MYRIAD2";
+    }
+    _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig;
+
+    IN_OUT_desc inputTensors = {{1, 1, 3, 4}, image_dims, {1, 3, 480, 480}};
+    IN_OUT_desc outputTensors = {{1, 1,
+         inputTensors[0][2] *
+         inputTensors[1][2] *
+         inputTensors[1][3],
+         inputTensors[0][3]}};
+    SetInputTensors(inputTensors);
+    SetOutputTensors(outputTensors);
+
+    // Calculate strides. The stride dimensions are calculated by the equation
+    // (image feature map dimension) / (input feature map dimension).
+    float stride_h = static_cast<float>(inputTensors[2][2]) /
+                     inputTensors[1][2];
+    float stride_w = static_cast<float>(inputTensors[2][3]) /
+                     inputTensors[1][3];
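+    // e.g. a 480x480 input image over a 240x240 feature map gives stride 2.0.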
+
+    std::map<std::string, std::string> params = {
+        {"stride_h", std::to_string(stride_h)},
+        {"stride_w", std::to_string(stride_w)}
+    };
+    // Run inference on OpenCL kernel.
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(
+                LayerInitParams("ExperimentalDetectronPriorGridGenerator").params(params)));
+    ASSERT_TRUE(Infer());
+
+    // Setup of reference input and reference output blobs.
+    std::vector<Blob::Ptr> reference_input_blobs(inputTensors.size());
+    std::vector<Blob::Ptr> reference_output_blobs(outputTensors.size());
+    int k = 0;
+    for (auto& p : _inputMap) {
+        reference_input_blobs[k++] = p.second;
+    }
+    reference_output_blobs[0] = _refBlob;
+
+    // Run inference on reference implementation.
+    refExperimentalDetectronPriorGridGenerator(
+            reference_input_blobs, reference_output_blobs,
+            inputTensors[1][2], inputTensors[1][3], stride_h, stride_w);
+
+    CompareCommonAbsolute(_outputMap.begin()->second, reference_output_blobs[0], 0.01f);
+}
+
+static std::vector<std::vector<size_t>>
+s_ExperimentalDetectronPriorGridGeneratorImageDims = {
+    {1, 128, 240, 240},
+    {1, 128, 120, 120},
+    {1, 128, 60, 60},
+    {1, 128, 30, 30}
+};
+
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_deconvolution_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_deconvolution_test.cpp
new file mode 100644 (file)
index 0000000..9a4703a
--- /dev/null
@@ -0,0 +1,123 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_deconvolution_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy_deconv_to_conv, myriadLayerDeconvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 6, 5, 6))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 1), MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 0), MAKE_STRUCT(param_size, 0, 1), MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<out_channels>(4)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMajor)
+          , ::testing::Values<hw_optimization>(true)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_deconv_to_conv_2, myriadLayerDeconvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 2, 256, 14, 14))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 2, 2), MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 0), MAKE_STRUCT(param_size, 0, 1), MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<out_channels>(256)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMajor)
+          , ::testing::Values<hw_optimization>(true)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_group, myriadLayerDeconvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 384, 4, 2))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 2, 2)
+                                    , MAKE_STRUCT(param_size, 3, 3)
+                                    , MAKE_STRUCT(param_size, 3, 4)
+                                    , MAKE_STRUCT(param_size, 4, 4)
+                                     )
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<out_channels>(384)
+          , ::testing::Values<group>(2, 4)
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor, vpu::LayoutPreference::ChannelMajor)
+          , ::testing::Values<hw_optimization>(false)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_depthDeconv, myriadLayerDeconvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 384, 4, 2))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 2, 2)
+                                    , MAKE_STRUCT(param_size, 3, 3)
+                                    , MAKE_STRUCT(param_size, 3, 4)
+                                    , MAKE_STRUCT(param_size, 4, 4)
+                                     )
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<out_channels>(384)
+          , ::testing::Values<group>(384)
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMajor,
+                                                vpu::LayoutPreference::ChannelMinor)
+          , ::testing::Values<hw_optimization>(false)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayerDeconvolution_asymm_pad,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 128, 60, 80),
+                                         MAKE_STRUCT(tensor_test_params, 1,   2, 37, 59))
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3)
+                                     )
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2)
+                                     )
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 0), MAKE_STRUCT(param_size, 2, 2))
+          , ::testing::Values<pad_end>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<out_channels>(21)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<layoutPreference>(
+                                                vpu::LayoutPreference::ChannelMajor,
+                                                vpu::LayoutPreference::ChannelMinor)
+          , ::testing::Values<hw_optimization>(false)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayerDeconvolution_nightly,
+        ::testing::Combine(
+            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 2, 37, 59)
+                                       , MAKE_STRUCT(tensor_test_params, 1, 21, 16, 16)
+                                       , MAKE_STRUCT(tensor_test_params, 1, 512, 11, 13)
+                                         )
+          , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 2, 2)
+                                    , MAKE_STRUCT(param_size, 3, 3)
+                                    , MAKE_STRUCT(param_size, 4, 4)
+                                    , MAKE_STRUCT(param_size, 5, 5)
+                                      )
+          , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1)
+                                    , MAKE_STRUCT(param_size, 2, 2)
+                                      )
+          , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0)
+                                 , MAKE_STRUCT(param_size, 1, 1)
+                                 , MAKE_STRUCT(param_size, 2, 2)
+                                  )
+          , ::testing::Values<out_channels>(1, 21)
+          , ::testing::Values<group>(1)
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor, vpu::LayoutPreference::ChannelMajor)
+          , ::testing::Values<hw_optimization>(false)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(extra3x3s1, myriadLayerDeconvolution_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 256, 1, 1))
+                              , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                              , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                              , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                              , ::testing::Values<out_channels>(256)
+                              , ::testing::Values<group>(1)
+                              , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMinor)
+                              , ::testing::Values<hw_optimization>(false)
+                              )
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_deconvolution_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_deconvolution_test.hpp
new file mode 100644 (file)
index 0000000..87ebb68
--- /dev/null
@@ -0,0 +1,188 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <deconv_ref.hpp>
+#include "myriad_layers_tests.hpp"
+#include "common_test_utils/common_layers_params.hpp"
+
+using std::tuple;
+using std::get;
+
+using namespace InferenceEngine;
+
+
+static void refDeconvolution(const Blob::Ptr src, Blob::Ptr dst
+        , const ie_fp16* weights_data, const ie_fp16* bias_data
+        , param_size &kernel, param_size &stride, param_size &pad, size_t group) {
+    CommonTestUtils::conv_common_params params;
+    params.kernel.insert(X_AXIS, kernel.x);
+    params.kernel.insert(Y_AXIS, kernel.y);
+    params.stride.insert(X_AXIS, stride.x);
+    params.stride.insert(Y_AXIS, stride.y);
+    params.pads_begin.insert(X_AXIS, pad.x);
+    params.pads_begin.insert(Y_AXIS, pad.y);
+    params.group = group;
+    ref_deconv_common<ie_fp16>({ src }, *dst.get(), weights_data, 0, bias_data, 0, params);
+}
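+
+// Output-size bookkeeping used by the tests below (standard transposed
+// convolution): out = stride * (in - 1) + kernel - 2 * pad for symmetric
+// padding, e.g. in = 14, kernel = 2, stride = 2, pad = 0 -> out = 2*13 + 2 = 28;
+// the asymmetric variant subtracts (pad_begin + pad_end) instead of 2 * pad.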
+
+PRETTY_PARAM(kernel, param_size)
+PRETTY_PARAM(stride, param_size)
+
+PRETTY_PARAM(pad, param_size)
+PRETTY_PARAM(pad_end, param_size)
+
+PRETTY_PARAM(out_channels, int)
+PRETTY_PARAM(group, int)
+PRETTY_PARAM(layoutPreference, vpu::LayoutPreference)
+PRETTY_PARAM(hw_optimization, bool)
+
+typedef myriadLayerTestBaseWithParam<tuple<DimsInput, kernel, stride, pad
+        , out_channels, group, layoutPreference, hw_optimization >> myriadLayerDeconvolution_nightly;
+
+typedef myriadLayerTestBaseWithParam<tuple<DimsInput, kernel, stride, pad, pad_end
+        , out_channels, group, layoutPreference, hw_optimization >> myriadLayerDeconvolution_asymm_pad;
+
+TEST_P(myriadLayerDeconvolution_nightly, Deconvolution) {
+    tensor_test_params input_dims = get<0>(GetParam());
+    param_size kernel = get<1>(GetParam());
+    param_size stride = get<2>(GetParam());
+    param_size pad = get<3>(GetParam());
+    size_t out_channels = get<4>(GetParam());
+    size_t group = get<5>(GetParam());
+    auto layoutPreference = get<6>(GetParam());
+    bool hw_optimization = get<7>(GetParam());
+
+    if (hw_optimization && !CheckMyriadX()) {
+        GTEST_SKIP_("Skip test with hw_optimization=On for Myriad2\n");
+    }
+
+    if (input_dims.n > 1)
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+    else
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(YES);
+
+    size_t out_w = stride.x * (input_dims.w - 1) + kernel.x - 2 * pad.x;
+    size_t out_h = stride.y * (input_dims.h - 1) + kernel.y - 2 * pad.y;
+
+    tensor_test_params output_dims = {input_dims.n, out_channels, out_h, out_w};
+
+    SetInputTensor(input_dims);
+    SetOutputTensor(output_dims);
+
+    size_t num_weights = kernel.x * kernel.y * (input_dims.c / group) * output_dims.c;
+    size_t num_bias = output_dims.c;
+
+    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr =
+            InferenceEngine::TBlob<uint8_t>::Ptr(GenWeights(num_weights + num_bias));
+    ie_fp16* weights = weights_ptr->data().as<ie_fp16*>();
+    ie_fp16* bias = weights + num_weights;
+
+    std::map<std::string, std::string> layer_params = {
+              {"kernel-x", std::to_string(kernel.x)}
+            , {"kernel-y", std::to_string(kernel.y)}
+            , {"stride-x", std::to_string(stride.x)}
+            , {"stride-y", std::to_string(stride.y)}
+
+            , {"pad-x", std::to_string(pad.x)}
+            , {"pad-y", std::to_string(pad.y)}
+
+            , {"output", std::to_string(out_channels)}
+            , {"group", std::to_string(group)}
+    };
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Deconvolution")
+                                        .params(layer_params)
+                                        .weights(num_weights)
+                                        .biases(num_bias),
+                                        NetworkInitParams().layoutPreference(layoutPreference)
+                                        .useHWOpt(hw_optimization),
+                                        weights_ptr));
+
+    auto inputBlob = _inputMap.begin()->second;
+    SetFirstInputToRange(-0.9f, 0.9f);
+
+    ASSERT_TRUE(Infer());
+
+    auto outputBlob = _outputMap.begin()->second;
+
+    refDeconvolution(inputBlob, _refBlob, weights, bias, kernel, stride, pad, group);
+
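+    // The FP16 tolerance scales with the accumulation length per output
+    // element: (IC / group) * Kx * Ky multiply-adds.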
+    float maxerr = 0.00075 * (input_dims.c / group) * kernel.x * kernel.y;
+    CompareCommonAbsolute(outputBlob, _refBlob, maxerr);
+}
+
+TEST_P(myriadLayerDeconvolution_asymm_pad, Deconvolution) {
+    tensor_test_params input_dims = get<0>(GetParam());
+    param_size kernel = get<1>(GetParam());
+    param_size stride = get<2>(GetParam());
+    param_size pad = get<3>(GetParam());
+    param_size pad_end = get<4>(GetParam());
+    size_t out_channels = get<5>(GetParam());
+    size_t group = get<6>(GetParam());
+    auto layoutPreference = get<7>(GetParam());
+    bool hw_optimization = get<8>(GetParam());
+
+    if (hw_optimization && !CheckMyriadX()) {
+        GTEST_SKIP_("Skip test with hw_optimization=On for Myriad2\n");
+    }
+
+    if (input_dims.n > 1)
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+    else
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(YES);
+
+    size_t out_w = stride.x * (input_dims.w - 1) + kernel.x - (pad.x + pad_end.x);
+    size_t out_h = stride.y * (input_dims.h - 1) + kernel.y - (pad.y + pad_end.y);
+
+    tensor_test_params output_dims = {input_dims.n, out_channels, out_h, out_w};
+
+    SetInputTensor(input_dims);
+    SetOutputTensor(output_dims);
+
+    size_t num_weights = kernel.x * kernel.y * (input_dims.c / group) * output_dims.c;
+    size_t num_bias = output_dims.c;
+
+    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr =
+            InferenceEngine::TBlob<uint8_t>::Ptr(GenWeights(num_weights + num_bias));
+    ie_fp16* weights = weights_ptr->data().as<ie_fp16*>();
+    ie_fp16* bias = weights + num_weights;
+
+    std::map<std::string, std::string> layer_params = {
+              {"kernel-x", std::to_string(kernel.x)}
+            , {"kernel-y", std::to_string(kernel.y)}
+            , {"stride-x", std::to_string(stride.x)}
+            , {"stride-y", std::to_string(stride.y)}
+
+            , {"pad-x", std::to_string(pad.x)}
+            , {"pad-y", std::to_string(pad.y)}
+
+            , {"pad-r", std::to_string(pad_end.x)}
+            , {"pad-b", std::to_string(pad_end.y)}
+
+            , {"output", std::to_string(out_channels)}
+            , {"group", std::to_string(group)}
+    };
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Deconvolution")
+                                        .params(layer_params)
+                                        .weights(num_weights)
+                                        .biases(num_bias),
+                                        NetworkInitParams().layoutPreference(layoutPreference)
+                                        .useHWOpt(hw_optimization),
+                                        weights_ptr));
+
+    auto inputBlob = _inputMap.begin()->second;
+    SetFirstInputToRange(-0.9f, 0.9f);
+
+    ASSERT_TRUE(Infer());
+
+    auto outputBlob = _outputMap.begin()->second;
+
+    refDeconvolution(inputBlob, _refBlob, weights, bias, kernel, stride, pad, group);
+
+    float maxerr = 0.00075 * (input_dims.c / group) * kernel.x * kernel.y;
+    CompareCommonAbsolute(outputBlob, _refBlob, maxerr);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_detection_output_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_detection_output_test.cpp
new file mode 100644 (file)
index 0000000..4da2d75
--- /dev/null
@@ -0,0 +1,1166 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <cstdlib>
+#include <cmath>
+#include <cstring>
+#include <thread>
+#include "../bbox_util.h"
+#include "myriad_layers_tests.hpp"
+#include "ie_memcpy.h"
+
+//#define USE_OPENCV_IMSHOW
+
+#ifdef USE_OPENCV_IMSHOW
+#include <opencv2/core.hpp>
+#include <opencv2/imgproc.hpp>
+#include <opencv2/highgui.hpp>
+#endif
+
+using namespace InferenceEngine;
+
+namespace {
+
+const int WIDTH = 300;
+const int HEIGHT = 300;
+
+const size_t NUM_LOC = 12996;
+const size_t NUM_CONF = 6498;
+const size_t NUM_OUT = 200;
+
+const int MAX_NONMATCH_OBJS = 0;
+
+const int NUM_CLASSES = 2;
+const int BACKGROUND_LABEL_ID = 0;
+const int TOP_K = 400;
+const int VARIANCE_ENCODED_IN_TARGET = 0;
+const int KEEP_TOP_K = 200;
+const CodeType CODE_TYPE = CENTER_SIZE;
+const int SHARE_LOCATION = 1;
+const int INTERPOLATE_ORIENTATION = 0;
+const float NMS_THRESHOLD = 0.45f;
+const float CONFIDENCE_THRESHOLD = 0.01f;
+const int NUM_ORIENT_CLASSES = 0;
+
+void refDetectionOutput(const TBlob<float>::Ptr &locations,
+                        const TBlob<float>::Ptr &confidence,
+                        const TBlob<float>::Ptr &prior,
+                        TBlob<float>::Ptr &output,
+                        const bool decrease_label_id) {
+    int _num_loc_classes = SHARE_LOCATION ? 1 : NUM_CLASSES;
+    int _num_priors = prior->getTensorDesc().getDims()[2] / 4;
+
+    ASSERT_EQ(_num_priors * _num_loc_classes * 4, locations->getTensorDesc().getDims()[1])
+        << "Number of priors must match number of location predictions";
+    ASSERT_EQ(_num_priors * NUM_CLASSES, confidence->getTensorDesc().getDims()[1])
+        << "Number of priors must match number of confidence predictions";
+
+    float* dst_data = output->data();
+
+    const float* loc_data = locations->readOnly();
+    const float* orient_data = nullptr; // TODO: support orientation once Myriad implements it
+    const float* conf_data = confidence->readOnly();
+    const float* prior_data = prior->readOnly();
+    const int num = locations->getTensorDesc().getDims()[0];
+
+    // Retrieve all location predictions.
+    std::vector<LabelBBox> all_loc_preds;
+    GetLocPredictions(loc_data, num, _num_priors, _num_loc_classes, SHARE_LOCATION, &all_loc_preds);
+
+    // Retrieve all confidences.
+    std::vector<std::map<int, std::vector<float>>> all_conf_scores;
+    GetConfidenceScores(conf_data, num, _num_priors, NUM_CLASSES, &all_conf_scores);
+
+    // Retrieve all orientations
+    std::vector<std::vector<std::vector<float>>> all_orient_scores;
+    if (orient_data) {
+        GetOrientationScores(orient_data, num, _num_priors, NUM_ORIENT_CLASSES, &all_orient_scores);
+    }
+
+    // Retrieve all prior bboxes. It is same within a batch since we assume all
+    // images in a batch are of same dimension.
+    std::vector<NormalizedBBox> prior_bboxes(_num_priors);
+    std::vector<float> prior_variances(_num_priors * 4);
+    GetPriorBBoxes(prior_data, _num_priors, prior_bboxes, prior_variances);
+
+    // Decode all loc predictions to bboxes.
+    std::vector<LabelBBox> all_decode_bboxes;
+    DecodeBBoxesAll(all_loc_preds, prior_bboxes, prior_variances, num,
+                    SHARE_LOCATION, _num_loc_classes, BACKGROUND_LABEL_ID,
+                    CODE_TYPE, VARIANCE_ENCODED_IN_TARGET, &all_decode_bboxes);
+
+    int num_kept = 0;
+
+    std::vector<std::map<int, std::vector<int>>> all_indices;
+
+    for (int image_index_in_batch = 0; image_index_in_batch < num; ++image_index_in_batch) {
+        if (orient_data) {
+            ASSERT_EQ(_num_priors, all_orient_scores[image_index_in_batch].size())
+                << "Orientation scores not equal to num priors.";
+        }
+
+        const LabelBBox& decode_bboxes = all_decode_bboxes[image_index_in_batch];
+        const std::map<int, std::vector<float>>& conf_scores = all_conf_scores[image_index_in_batch];
+        std::map<int, std::vector<int> > indices;
+        int num_det = 0;
+
+        for (int label_index = 0; label_index < NUM_CLASSES; ++label_index) {
+            if (label_index == BACKGROUND_LABEL_ID) {
+                // Ignore background class.
+                continue;
+            }
+
+            ASSERT_NE(conf_scores.end(), conf_scores.find(label_index))
+                << "Could not find confidence predictions for label " << label_index;
+
+            const std::vector<float>& scores = conf_scores.find(label_index)->second;
+
+            int label = SHARE_LOCATION ? -1 : label_index;
+
+            if (decode_bboxes.find(label) == decode_bboxes.end()) {
+                // Something bad happened if there are no predictions for current label.
+                continue;
+            }
+
+            const std::vector<NormalizedBBox>& bboxes = decode_bboxes.find(label)->second;
+
+            ApplyNMSFast(bboxes, scores, CONFIDENCE_THRESHOLD, NMS_THRESHOLD, TOP_K, &(indices[label_index]));
+            num_det += indices[label_index].size();
+        }
+
+        if (KEEP_TOP_K > -1 && num_det > KEEP_TOP_K) {
+            std::vector<std::pair<float, std::pair<int, int>>> score_index_pairs;
+
+            for (std::map<int, std::vector<int> >::iterator it = indices.begin(); it != indices.end(); ++it) {
+                int label = it->first;
+
+                const std::vector<int>& label_indices = it->second;
+
+                if (conf_scores.find(label) == conf_scores.end()) {
+                    // Something bad happened for current label.
+                    continue;
+                }
+
+                const std::vector<float>& scores = conf_scores.find(label)->second;
+
+                for (size_t j = 0; j < label_indices.size(); ++j) {
+                    int idx = label_indices[j];
+
+                    ASSERT_LT(idx, scores.size())
+                        << "Label index is out of array size";
+
+                    score_index_pairs.push_back(std::make_pair(scores[idx], std::make_pair(label, idx)));
+                }
+            }
+
+            // Keep top k results per image.
+            std::sort(score_index_pairs.begin(), score_index_pairs.end(), SortScorePairDescend<std::pair<int, int>>);
+            score_index_pairs.resize(KEEP_TOP_K);
+
+            // Store the new indices.
+            std::map<int, std::vector<int> > new_indices;
+
+            for (size_t j = 0; j < score_index_pairs.size(); ++j) {
+                int label = score_index_pairs[j].second.first;
+                int idx = score_index_pairs[j].second.second;
+                new_indices[label].push_back(idx);
+            }
+
+            all_indices.push_back(new_indices);
+            num_kept += KEEP_TOP_K;
+
+        } else {
+            all_indices.push_back(indices);
+            num_kept += num_det;
+        }
+    }
+
+    const int DETECTION_OUTPUT_SIZE = output->getTensorDesc().getDims()[0];
+    for (int i = 0; i < num * KEEP_TOP_K * DETECTION_OUTPUT_SIZE; i++) {
+        dst_data[i] = 0;
+    }
+
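+    // A row whose first value (the image index) is -1 marks the end of valid
+    // detections; mark_end emits one such seven-float sentinel row.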
+    auto mark_end = [] (float* data) -> void {
+        *data++ = -1;
+        *data++ = 0;
+        *data++ = 0;
+        *data++ = 0;
+        *data++ = 0;
+        *data++ = 0;
+        *data++ = 0;
+    };
+
+    if (num_kept == 0) {
+        // Nothing to detect
+        mark_end(dst_data);
+        return;
+    }
+
+    int count = 0;
+
+    for (int image_index_in_batch = 0; image_index_in_batch < num; ++image_index_in_batch) {
+        const std::map<int, std::vector<float>>& conf_scores = all_conf_scores[image_index_in_batch];
+        const std::vector<std::vector<float>>* p_orient_scores = orient_data ? &(all_orient_scores[image_index_in_batch]) : nullptr;
+
+        if (orient_data) {
+            ASSERT_EQ(_num_priors, p_orient_scores->size())
+                << "Orientation scores not equal to num priors";
+        }
+
+        const LabelBBox& decode_bboxes = all_decode_bboxes[image_index_in_batch];
+
+        for (auto it = all_indices[image_index_in_batch].begin(); it != all_indices[image_index_in_batch].end(); ++it) {
+            int label = it->first;
+
+            if (conf_scores.find(label) == conf_scores.end()) {
+                // Something bad happened if there are no predictions for current label.
+                continue;
+            }
+
+            const std::vector<float>& scores = conf_scores.find(label)->second;
+            int loc_label = SHARE_LOCATION ? -1 : label;
+
+            if (decode_bboxes.find(loc_label) == decode_bboxes.end()) {
+                // Something bad happened if there are no predictions for current label.
+                continue;
+            }
+
+            const std::vector<NormalizedBBox>& bboxes = decode_bboxes.find(loc_label)->second;
+
+            ASSERT_EQ(_num_priors, bboxes.size())
+                << "Bounding boxes num is not equal to num priors";
+
+            std::vector<int>& indices = it->second;
+
+            for (size_t j = 0; j < indices.size(); ++j) {
+                int idx = indices[j];
+
+                dst_data[count * DETECTION_OUTPUT_SIZE + 0] = image_index_in_batch;
+                dst_data[count * DETECTION_OUTPUT_SIZE + 1] = decrease_label_id ? label - 1 : label;
+                dst_data[count * DETECTION_OUTPUT_SIZE + 2] = scores[idx];
+
+                const NormalizedBBox &clip_bbox = bboxes[idx];
+                dst_data[count * DETECTION_OUTPUT_SIZE + 3] = clip_bbox.xmin();
+                dst_data[count * DETECTION_OUTPUT_SIZE + 4] = clip_bbox.ymin();
+                dst_data[count * DETECTION_OUTPUT_SIZE + 5] = clip_bbox.xmax();
+                dst_data[count * DETECTION_OUTPUT_SIZE + 6] = clip_bbox.ymax();
+
+                // NormalizedBBox::orientation
+                if (DETECTION_OUTPUT_SIZE == 8) {
+                    float orientation = -10;
+
+                    if (p_orient_scores) {
+                        orientation = get_orientation((*p_orient_scores)[idx], INTERPOLATE_ORIENTATION);
+                    }
+
+                    dst_data[count * DETECTION_OUTPUT_SIZE + 7] = orientation;
+                }
+
+                ++count;
+            }
+        }
+    }
+
+    // TODO: Logic is correct only for mb=1
+    if (count < KEEP_TOP_K) {
+        // marker at end of boxes list
+        mark_end(dst_data + count * DETECTION_OUTPUT_SIZE);
+    }
+}
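+
+// A minimal greedy NMS sketch of what ApplyNMSFast (bbox_util.h) is assumed to
+// do above: walk candidates above the confidence threshold in descending score
+// order and keep a box only if its IoU with every already kept box stays below
+// nms_threshold, stopping after top_k keeps. Hypothetical illustration only,
+// assuming <algorithm> is available; the tests rely on ApplyNMSFast itself.
+static void greedyNMSSketch(const std::vector<NormalizedBBox>& boxes,
+                            const std::vector<float>& scores,
+                            float conf_threshold, float nms_threshold,
+                            int top_k, std::vector<int>* kept) {
+    auto iou = [](const NormalizedBBox& a, const NormalizedBBox& b) -> float {
+        const float iw = std::max(0.f, std::min(a.xmax(), b.xmax()) - std::max(a.xmin(), b.xmin()));
+        const float ih = std::max(0.f, std::min(a.ymax(), b.ymax()) - std::max(a.ymin(), b.ymin()));
+        const float inter = iw * ih;
+        const float uni = (a.xmax() - a.xmin()) * (a.ymax() - a.ymin())
+                        + (b.xmax() - b.xmin()) * (b.ymax() - b.ymin()) - inter;
+        return uni > 0.f ? inter / uni : 0.f;
+    };
+    std::vector<int> order;
+    for (int i = 0; i < static_cast<int>(scores.size()); ++i) {
+        if (scores[i] > conf_threshold) order.push_back(i);
+    }
+    std::sort(order.begin(), order.end(),
+              [&scores](int a, int b) { return scores[a] > scores[b]; });
+    for (int idx : order) {
+        if (static_cast<int>(kept->size()) >= top_k) break;
+        bool keep = true;
+        for (int k : *kept) {
+            if (iou(boxes[idx], boxes[k]) > nms_threshold) { keep = false; break; }
+        }
+        if (keep) kept->push_back(idx);
+    }
+}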
+
+const std::string PRIOR_BOX_CLUSTERED_MODEL = R"V0G0N(
+<net name="PRIOR_BOX_CLUSTERED_MODEL" version="2" batch="1">
+    <layers>
+        <layer name="data1" type="Input" precision="FP16" id="1">
+            <output>
+                <port id="1">
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>300</dim>
+                    <dim>300</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="data1_copy" type="Power" precision="FP16" id="2">
+            <power_data power="1" scale="1" shift="0"/>
+            <input>
+                <port id="2">
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>300</dim>
+                    <dim>300</dim>
+                </port>
+            </input>
+            <output>
+                <port id="3">
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>300</dim>
+                    <dim>300</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="data2" type="Input" precision="FP16" id="3">
+            <output>
+                <port id="4">
+                    <dim>1</dim>
+                    <dim>384</dim>
+                    <dim>19</dim>
+                    <dim>19</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="data2_copy" type="Power" precision="FP16" id="4">
+            <power_data power="1" scale="1" shift="0"/>
+            <input>
+                <port id="5">
+                    <dim>1</dim>
+                    <dim>384</dim>
+                    <dim>19</dim>
+                    <dim>19</dim>
+                </port>
+            </input>
+            <output>
+                <port id="6">
+                    <dim>1</dim>
+                    <dim>384</dim>
+                    <dim>19</dim>
+                    <dim>19</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="priorboxclustered" type="PriorBoxClustered" precision="FP16" id="5">
+            <data
+                min_size="#"
+                max_size="#"
+                aspect_ratio="#"
+                flip="1"
+                clip="0"
+                variance="0.100000,0.100000,0.200000,0.200000"
+                img_size="0"
+                img_h="0"
+                img_w="0"
+                step="16.000000"
+                step_h="0.000000"
+                step_w="0.000000"
+                offset="0.500000"
+                width="9.400000,25.100000,14.700000,34.700001,143.000000,77.400002,128.800003,51.099998,75.599998"
+                height="15.000000,39.599998,25.500000,63.200001,227.500000,162.899994,124.500000,105.099998,72.599998"
+            />
+            <input>
+                <port id="7">
+                    <dim>1</dim>
+                    <dim>384</dim>
+                    <dim>19</dim>
+                    <dim>19</dim>
+                </port>
+                <port id="8">
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>300</dim>
+                    <dim>300</dim>
+                </port>
+            </input>
+            <output>
+                <port id="9">
+                    <dim>1</dim>
+                    <dim>2</dim>
+                    <dim>12996</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="priorboxclustered_copy" type="Power" precision="FP16" id="6">
+            <power_data power="1" scale="1" shift="0"/>
+            <input>
+                <port id="10">
+                    <dim>1</dim>
+                    <dim>2</dim>
+                    <dim>12996</dim>
+                </port>
+            </input>
+            <output>
+                <port id="11">
+                    <dim>1</dim>
+                    <dim>2</dim>
+                    <dim>12996</dim>
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="1" from-port="1" to-layer="2" to-port="2"/>
+        <edge from-layer="1" from-port="1" to-layer="5" to-port="8"/>
+        <edge from-layer="3" from-port="4" to-layer="4" to-port="5"/>
+        <edge from-layer="3" from-port="4" to-layer="5" to-port="7"/>
+        <edge from-layer="5" from-port="9" to-layer="6" to-port="10"/>
+    </edges>
+</net>
+)V0G0N";
+
+const std::string DETECTION_OUTPUT_MODEL = R"V0G0N(
+<Net Name="DETECTION_OUTPUT_MYRIAD_MODEL" version="2" precision="FP16" batch="1">
+<layers>
+    <layer name="data1" type="Input" precision="FP16" id="1">
+        <output>
+            <port id="1">
+                <dim>1</dim>
+                <dim>3</dim>
+                <dim>300</dim>
+                <dim>300</dim>
+            </port>
+        </output>
+    </layer>
+    <layer name="data1_copy" type="Power" precision="FP16" id="2">
+        <power_data power="1" scale="1" shift="0"/>
+        <input>
+            <port id="2">
+                <dim>1</dim>
+                <dim>3</dim>
+                <dim>300</dim>
+                <dim>300</dim>
+            </port>
+        </input>
+        <output>
+            <port id="3">
+                <dim>1</dim>
+                <dim>3</dim>
+                <dim>300</dim>
+                <dim>300</dim>
+            </port>
+        </output>
+    </layer>
+    <layer name="data2" type="Input" precision="FP16" id="3">
+        <output>
+            <port id="4">
+                <dim>1</dim>
+                <dim>384</dim>
+                <dim>19</dim>
+                <dim>19</dim>
+            </port>
+        </output>
+    </layer>
+    <layer name="data2_copy" type="Power" precision="FP16" id="4">
+        <power_data power="1" scale="1" shift="0"/>
+        <input>
+            <port id="5">
+                <dim>1</dim>
+                <dim>384</dim>
+                <dim>19</dim>
+                <dim>19</dim>
+            </port>
+        </input>
+        <output>
+            <port id="6">
+                <dim>1</dim>
+                <dim>384</dim>
+                <dim>19</dim>
+                <dim>19</dim>
+            </port>
+        </output>
+    </layer>
+    <layer name="priorboxclustered" type="PriorBoxClustered" precision="FP16" id="5">
+        <data
+            min_size="#"
+            max_size="#"
+            aspect_ratio="#"
+            flip="1"
+            clip="0"
+            variance="0.100000,0.100000,0.200000,0.200000"
+            img_size="0"
+            img_h="0"
+            img_w="0"
+            step="16.000000"
+            step_h="0.000000"
+            step_w="0.000000"
+            offset="0.500000"
+            width="9.400000,25.100000,14.700000,34.700001,143.000000,77.400002,128.800003,51.099998,75.599998"
+            height="15.000000,39.599998,25.500000,63.200001,227.500000,162.899994,124.500000,105.099998,72.599998"/>
+        <input>
+            <port id="7">
+                <dim>1</dim>
+                <dim>384</dim>
+                <dim>19</dim>
+                <dim>19</dim>
+            </port>
+            <port id="8">
+                <dim>1</dim>
+                <dim>3</dim>
+                <dim>300</dim>
+                <dim>300</dim>
+            </port>
+        </input>
+        <output>
+            <port id="9">
+                <dim>1</dim>
+                <dim>2</dim>
+                <dim>12996</dim>
+            </port>
+        </output>
+    </layer>
+    <layer name="locations" type="Input" precision="FP16" id="6">
+        <output>
+            <port id="10">
+                <dim>1</dim>
+                <dim>12996</dim>
+            </port>
+        </output>
+    </layer>
+    <layer name="confidence" type="Input" precision="FP16" id="7">
+        <output>
+            <port id="11">
+                <dim>1</dim>
+                <dim>6498</dim>
+            </port>
+        </output>
+    </layer>
+    <layer name="detection_out" type="DetectionOutput" precision="FP16" id="8">
+        <data num_classes="2"
+              share_location="1"
+              background_label_id="0"
+              nms_threshold="0.45"
+              top_k="400"
+              code_type="caffe.PriorBoxParameter.CENTER_SIZE"
+              variance_encoded_in_target="0"
+              keep_top_k="200"
+              confidence_threshold="0.01"
+              visualize="0"
+              normalized="1"
+        />
+        <input>
+            <port id="12">
+                <dim>1</dim>
+                <dim>12996</dim>
+            </port>
+            <port id="13">
+                <dim>1</dim>
+                <dim>6498</dim>
+            </port>
+            <port id="14">
+                <dim>1</dim>
+                <dim>2</dim>
+                <dim>12996</dim>
+            </port>
+        </input>
+        <output>
+            <port id="15">
+                <dim>1</dim>
+                <dim>1</dim>
+                <dim>200</dim>
+                <dim>7</dim>
+            </port>
+        </output>
+    </layer>
+</layers>
+<edges>
+    <edge from-layer="1" from-port="1" to-layer="2" to-port="2"/>
+    <edge from-layer="1" from-port="1" to-layer="5" to-port="8"/>
+    <edge from-layer="3" from-port="4" to-layer="4" to-port="5"/>
+    <edge from-layer="3" from-port="4" to-layer="5" to-port="7"/>
+    <edge from-layer="6" from-port="10" to-layer="8" to-port="12"/>
+    <edge from-layer="7" from-port="11" to-layer="8" to-port="13"/>
+    <edge from-layer="5" from-port="9" to-layer="8" to-port="14"/>
+</edges>
+</Net>
+)V0G0N";
+
+const std::string DETECTION_OUTPUT_MODEL_WITH_CONST = R"V0G0N(
+<Net Name="DETECTION_OUTPUT_MODEL_WITH_CONST" version="2" precision="FP16" batch="1">
+<layers>
+    <layer name="locations" type="Input" precision="FP16" id="1">
+        <output>
+            <port id="1">
+                <dim>1</dim>
+                <dim>12996</dim>
+            </port>
+        </output>
+    </layer>
+    <layer name="confidence" type="Input" precision="FP16" id="2">
+        <output>
+            <port id="2">
+                <dim>1</dim>
+                <dim>6498</dim>
+            </port>
+        </output>
+    </layer>
+    <layer name="priorboxclustered" type="Const" precision="FP16" id="3">
+        <output>
+            <port id="3">
+                <dim>1</dim>
+                <dim>2</dim>
+                <dim>12996</dim>
+            </port>
+        </output>
+        <blobs>
+            <custom offset="0" size="51984"/>
+        </blobs>
+    </layer>
+    <layer name="detection_out" type="DetectionOutput" precision="FP16" id="4">
+        <data num_classes="2"
+              share_location="1"
+              background_label_id="0"
+              nms_threshold="0.45"
+              top_k="400"
+              code_type="caffe.PriorBoxParameter.CENTER_SIZE"
+              variance_encoded_in_target="0"
+              keep_top_k="200"
+              confidence_threshold="0.01"
+              visualize="0"
+              normalized="1"
+        />
+        <input>
+            <port id="4">
+                <dim>1</dim>
+                <dim>12996</dim>
+            </port>
+            <port id="5">
+                <dim>1</dim>
+                <dim>6498</dim>
+            </port>
+            <port id="6">
+                <dim>1</dim>
+                <dim>2</dim>
+                <dim>12996</dim>
+            </port>
+        </input>
+        <output>
+            <port id="7">
+                <dim>1</dim>
+                <dim>1</dim>
+                <dim>200</dim>
+                <dim>7</dim>
+            </port>
+        </output>
+    </layer>
+</layers>
+<edges>
+    <edge from-layer="1" from-port="1" to-layer="4" to-port="4"/>
+    <edge from-layer="2" from-port="2" to-layer="4" to-port="5"/>
+    <edge from-layer="3" from-port="3" to-layer="4" to-port="6"/>
+</edges>
+</Net>
+)V0G0N";
+
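+// Same IR as DETECTION_OUTPUT_MODEL_WITH_CONST, except that DetectionOutput
+// sets decrease_label_id="1", the MXNet-style label numbering.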
+const std::string DETECTION_OUTPUT_MODEL_MXNET = R"V0G0N(
+<Net Name="DETECTION_OUTPUT_MODEL_MXNET" version="2" precision="FP16" batch="1">
+<layers>
+    <layer name="locations" type="Input" precision="FP16" id="1">
+        <output>
+            <port id="1">
+                <dim>1</dim>
+                <dim>12996</dim>
+            </port>
+        </output>
+    </layer>
+    <layer name="confidence" type="Input" precision="FP16" id="2">
+        <output>
+            <port id="2">
+                <dim>1</dim>
+                <dim>6498</dim>
+            </port>
+        </output>
+    </layer>
+    <layer name="priorboxclustered" type="Const" precision="FP16" id="3">
+        <output>
+            <port id="3">
+                <dim>1</dim>
+                <dim>2</dim>
+                <dim>12996</dim>
+            </port>
+        </output>
+        <blobs>
+            <custom offset="0" size="51984"/>
+        </blobs>
+    </layer>
+    <layer name="detection_out" type="DetectionOutput" precision="FP16" id="4">
+        <data num_classes="2"
+              decrease_label_id="1"
+              share_location="1"
+              background_label_id="0"
+              nms_threshold="0.45"
+              top_k="400"
+              code_type="caffe.PriorBoxParameter.CENTER_SIZE"
+              variance_encoded_in_target="0"
+              keep_top_k="200"
+              confidence_threshold="0.01"
+              visualize="0"
+              normalized="1"
+        />
+        <input>
+            <port id="4">
+                <dim>1</dim>
+                <dim>12996</dim>
+            </port>
+            <port id="5">
+                <dim>1</dim>
+                <dim>6498</dim>
+            </port>
+            <port id="6">
+                <dim>1</dim>
+                <dim>2</dim>
+                <dim>12996</dim>
+            </port>
+        </input>
+        <output>
+            <port id="7">
+                <dim>1</dim>
+                <dim>1</dim>
+                <dim>200</dim>
+                <dim>7</dim>
+            </port>
+        </output>
+    </layer>
+</layers>
+<edges>
+    <edge from-layer="1" from-port="1" to-layer="4" to-port="4"/>
+    <edge from-layer="2" from-port="2" to-layer="4" to-port="5"/>
+    <edge from-layer="3" from-port="3" to-layer="4" to-port="6"/>
+</edges>
+</Net>
+)V0G0N";
+
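+// Scalar precision converters (FP32 <-> FP16 via PrecisionUtils) so the
+// helpers below can read reference (float) and device (ie_fp16) buffers
+// through one interface.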
+void cvt(float src, float& dst) {
+    dst = src;
+}
+void cvt(float src, ie_fp16& dst) {
+    dst = PrecisionUtils::f32tof16(src);
+}
+void cvt(ie_fp16 src, float& dst) {
+    dst = PrecisionUtils::f16tof32(src);
+}
+
+struct BBox {
+    int x, y;
+    int width, height;
+};
+
+std::ostream& operator<<(std::ostream& os, const BBox& bbox)
+{
+    return os << "[" << bbox.x << ", " << bbox.y << ", "
+              << bbox.width << ", " << bbox.height << "]";
+}
+
+struct DetectionObject {
+    int batch_id;
+    int class_id;
+    float confidence;
+    BBox bbox;
+};
+
+std::ostream& operator<<(std::ostream& os, const DetectionObject& obj)
+{
+    return os << "[" << obj.batch_id << ", " << obj.class_id << ", "
+              << obj.confidence << ", " << obj.bbox << "]";
+}
+
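+// Unpacks raw DetectionOutput records (7 values per detection: batch_id,
+// class_id, confidence, xmin, ymin, xmax, ymax in normalized coordinates)
+// into pixel-space DetectionObject entries; batch_id == -1 ends the list.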
+template <typename T>
+void cvtDetectionObjects(T* data, int num, int width, int height, std::vector<DetectionObject>& out) {
+    out.clear();
+    out.reserve(num);
+
+    for (int i = 0; i < num; ++i) {
+        float batch_id, class_id, conf;
+        cvt(data[i * 7 + 0], batch_id);
+        cvt(data[i * 7 + 1], class_id);
+        cvt(data[i * 7 + 2], conf);
+
+        if (batch_id == -1) {
+            break;
+        }
+
+        float xmin, ymin, xmax, ymax;
+        cvt(data[i * 7 + 3], xmin);
+        cvt(data[i * 7 + 4], ymin);
+        cvt(data[i * 7 + 5], xmax);
+        cvt(data[i * 7 + 6], ymax);
+
+        BBox bbox;
+        bbox.x = width * xmin;
+        bbox.y = height * ymin;
+        bbox.width  = width * (xmax - xmin);
+        bbox.height = height * (ymax - ymin);
+
+        out.push_back({int(batch_id), int(class_id), conf, bbox});
+    }
+}
+
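+// Fuzzy comparison: ids must match exactly, box corners may differ by one
+// pixel (sizes by two, since both corners can shift), confidence by 0.001.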
+bool cmpDetectionObject(const DetectionObject& first, const DetectionObject& second) {
+    const float max_confidence_delta = 0.001f;
+    const int max_pixel_delta = 1;
+
+    return ((first.batch_id == second.batch_id) &&
+            (first.class_id == second.class_id) &&
+            (std::abs(first.bbox.x - second.bbox.x) <= max_pixel_delta) &&
+            (std::abs(first.bbox.y - second.bbox.y) <= max_pixel_delta) &&
+            (std::abs(first.bbox.width - second.bbox.width) <= 2*max_pixel_delta) &&
+            (std::abs(first.bbox.height - second.bbox.height) <= 2*max_pixel_delta) &&
+            (std::fabs(first.confidence - second.confidence) <=  max_confidence_delta));
+}
+
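+// Greedily matches reference detections against device detections: for each
+// gold object, the comparable candidate with the closest confidence is removed
+// from both lists; leftovers are printed and must not exceed max_nonmatch_objs.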
+void checkDetectionObjectArrays(std::vector<DetectionObject> gold, std::vector<DetectionObject> actual, int max_nonmatch_objs = 0) {
+    for (auto it_gold = gold.begin(); it_gold != gold.end();) {
+        std::vector<std::vector<DetectionObject>::iterator> candidates;
+        for (auto it_actual = actual.begin(); it_actual != actual.end(); it_actual++) {
+            if (cmpDetectionObject(*it_gold, *it_actual))
+                candidates.push_back(it_actual);
+        }
+
+        if (candidates.empty()) {
+            ++it_gold;
+        } else {
+            int best_index = 0;
+            float best_abs_delta = std::fabs(candidates[0]->confidence - it_gold->confidence);
+
+            for (size_t i = 1; i < candidates.size(); i++) {
+                float abs_delta = std::fabs(candidates[i]->confidence - it_gold->confidence);
+                if (abs_delta < best_abs_delta) {
+                    best_index = i;
+                    best_abs_delta = abs_delta;
+                }
+            }
+
+            actual.erase(candidates[best_index]);
+            it_gold = gold.erase(it_gold);
+        }
+    }
+
+    for (auto miss_gold : gold) {
+        std::cout << "Mistmatch in gold array: " << miss_gold << std::endl;
+    }
+
+    for (auto miss_actual : actual) {
+        std::cout << "Mistmatch in actual array: " << miss_actual << std::endl;
+    }
+
+    EXPECT_LE(gold.size(), max_nonmatch_objs);
+    EXPECT_LE(actual.size(), max_nonmatch_objs);
+}
+
+}
+
+class myriadDetectionOutputTests_nightly : public myriadLayersTests_nightly {
+public:
+    std::vector<float> gen_locations;
+    std::vector<float> gen_confidence;
+    Blob::Ptr priorOutput;
+
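+    // Generates random normalized boxes and confidences, then runs the
+    // PRIOR_BOX_CLUSTERED_MODEL network on the plugin to capture the prior
+    // data that the DetectionOutput tests below consume.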
+    void PrepareInput() {
+        gen_locations.resize(NUM_LOC);
+        for (size_t i = 0; i < NUM_LOC; i += 4) {
+            int x = std::rand() % WIDTH;
+            int y = std::rand() % HEIGHT;
+            int w = std::rand() % (WIDTH - x);
+            int h = std::rand() % (HEIGHT - y);
+
+            gen_locations[i + 0] = static_cast<float>(x) / WIDTH; // xmin
+            gen_locations[i + 1] = static_cast<float>(y) / HEIGHT; // ymin
+            gen_locations[i + 2] = static_cast<float>(x + w) / WIDTH; // xmax
+            gen_locations[i + 3] = static_cast<float>(y + h) / HEIGHT; // ymax
+        }
+
+        gen_confidence.resize(NUM_CONF);
+        for (size_t i = 0; i < NUM_CONF; ++i) {
+            gen_confidence[i] = static_cast<float>(std::rand()) / RAND_MAX;
+        }
+
+        StatusCode st;
+
+        InferenceEngine::Core ie;
+        auto network = ie.ReadNetwork(PRIOR_BOX_CLUSTERED_MODEL, InferenceEngine::Blob::CPtr());
+
+        auto inputsInfo = network.getInputsInfo();
+        inputsInfo["data1"]->setPrecision(Precision::FP16);
+        inputsInfo["data2"]->setPrecision(Precision::FP16);
+
+        auto outputsInfo = network.getOutputsInfo();
+        outputsInfo["data1_copy"]->setPrecision(Precision::FP16);
+        outputsInfo["data2_copy"]->setPrecision(Precision::FP16);
+        outputsInfo["priorboxclustered_copy"]->setPrecision(Precision::FP16);
+
+        IExecutableNetwork::Ptr exeNetwork;
+        ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(exeNetwork, network, {}, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        ASSERT_NE(exeNetwork, nullptr) << _resp.msg;
+
+        IInferRequest::Ptr inferRequest;
+        ASSERT_NO_THROW(st = exeNetwork->CreateInferRequest(inferRequest, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        ASSERT_NO_THROW(st = inferRequest->Infer(&_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        ASSERT_NO_THROW(st = inferRequest->GetBlob("priorboxclustered_copy", priorOutput, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    }
+
+    TBlob<float>::Ptr refOutput;
+
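+    // Converts the generated inputs and the FP16 prior output to FP32 blobs
+    // and computes the expected detections with the reference implementation.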
+    void CalcRefOutput(const bool decrease_label_id) {
+        auto locations = make_shared_blob<float>({Precision::FP32, {1, NUM_LOC}, Layout::ANY});
+        locations->allocate();
+        {
+            float *dst = locations->data();
+            ie_memcpy(dst, locations->byteSize(), gen_locations.data(), NUM_LOC * sizeof(float));
+        }
+
+        auto confidence = make_shared_blob<float>({Precision::FP32, {1, NUM_CONF}, Layout::ANY});
+        confidence->allocate();
+        {
+            float *dst = confidence->data();
+            ie_memcpy(dst, confidence->byteSize(), gen_confidence.data(), NUM_CONF * sizeof(float));
+        }
+
+        auto prior = make_shared_blob<float>({Precision::FP32, {1, 2, NUM_LOC}, Layout::ANY});
+        prior->allocate();
+        {
+            float *dst = prior->buffer().as<float *>();
+            ie_fp16 *src = priorOutput->buffer().as<ie_fp16 *>();
+            for (int i = 0; i < 2 * NUM_LOC; ++i) {
+                dst[i] = PrecisionUtils::f16tof32(src[i]);
+            }
+        }
+
+        refOutput = make_shared_blob<float>({Precision::FP32, {7, NUM_OUT, 1, 1}, Layout::ANY});
+        refOutput->allocate();
+
+        ASSERT_NO_FATAL_FAILURE(refDetectionOutput(locations, confidence, prior, refOutput, decrease_label_id));
+    }
+
+    Blob::Ptr myriadOutput;
+
+    void CheckResults() {
+        ASSERT_NE(myriadOutput, nullptr);
+
+        ASSERT_EQ(7 * NUM_OUT, refOutput->size());
+        ASSERT_EQ(7 * NUM_OUT, myriadOutput->size());
+
+    #ifdef USE_OPENCV_IMSHOW
+        {
+            const float *ref_data = refOutput->readOnly();
+            const ie_fp16 *actual_data = myriadOutput->cbuffer().as<const ie_fp16 *>();
+
+            cv::Mat ref_img(HEIGHT, WIDTH, CV_8UC1, cv::Scalar::all(0));
+            cv::Mat actual_img(HEIGHT, WIDTH, CV_8UC1, cv::Scalar::all(0));
+
+            for (int i = 0; i < NUM_OUT; ++i)
+            {
+                float xmin, ymin, xmax, ymax;
+                cv::Rect rect;
+
+                xmin = ref_data[i * 7 + 3];
+                ymin = ref_data[i * 7 + 4];
+                xmax = ref_data[i * 7 + 5];
+                ymax = ref_data[i * 7 + 6];
+
+                rect.x = WIDTH * xmin;
+                rect.y = HEIGHT * ymin;
+                rect.width  = WIDTH * (xmax - xmin);
+                rect.height = HEIGHT * (ymax - ymin);
+
+                cv::rectangle(ref_img, rect, cv::Scalar::all(255));
+
+                xmin = PrecisionUtils::f16tof32(actual_data[i * 7 + 3]);
+                ymin = PrecisionUtils::f16tof32(actual_data[i * 7 + 4]);
+                xmax = PrecisionUtils::f16tof32(actual_data[i * 7 + 5]);
+                ymax = PrecisionUtils::f16tof32(actual_data[i * 7 + 6]);
+
+                rect.x = WIDTH * xmin;
+                rect.y = HEIGHT * ymin;
+                rect.width  = WIDTH * (xmax - xmin);
+                rect.height = HEIGHT * (ymax - ymin);
+
+                cv::rectangle(actual_img, rect, cv::Scalar::all(255));
+            }
+
+            cv::Mat diff;
+            cv::absdiff(ref_img, actual_img, diff);
+
+            cv::imshow("ref_img", ref_img);
+            cv::imshow("actual_img", actual_img);
+            cv::imshow("diff", diff);
+            cv::waitKey();
+        }
+    #endif
+
+        {
+            const float *ref_data = refOutput->readOnly();
+            const ie_fp16 *actual_data = myriadOutput->cbuffer().as<const ie_fp16 *>();
+
+            std::vector<DetectionObject> ref_objs, actual_objs;
+            cvtDetectionObjects(ref_data, NUM_OUT, WIDTH, HEIGHT, ref_objs);
+            cvtDetectionObjects(actual_data, NUM_OUT, WIDTH, HEIGHT, actual_objs);
+
+            checkDetectionObjectArrays(ref_objs, actual_objs, MAX_NONMATCH_OBJS);
+        }
+    }
+};
+
+TEST_F(myriadDetectionOutputTests_nightly, NoConst) {
+    ASSERT_NO_FATAL_FAILURE(PrepareInput());
+    ASSERT_NO_FATAL_FAILURE(CalcRefOutput(false));
+
+    StatusCode st;
+
+    InferenceEngine::Core ie;
+    auto network = ie.ReadNetwork(DETECTION_OUTPUT_MODEL, InferenceEngine::Blob::CPtr());
+
+    auto inputsInfo = network.getInputsInfo();
+    inputsInfo["data1"]->setPrecision(Precision::FP16);
+    inputsInfo["data2"]->setPrecision(Precision::FP16);
+    inputsInfo["locations"]->setPrecision(Precision::FP16);
+    inputsInfo["confidence"]->setPrecision(Precision::FP16);
+
+    auto outputsInfo = network.getOutputsInfo();
+    outputsInfo["data1_copy"]->setPrecision(Precision::FP16);
+    outputsInfo["data2_copy"]->setPrecision(Precision::FP16);
+    outputsInfo["detection_out"]->setPrecision(Precision::FP16);
+
+    IExecutableNetwork::Ptr exeNetwork;
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(exeNetwork, network, {}, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(exeNetwork, nullptr) << _resp.msg;
+
+    IInferRequest::Ptr inferRequest;
+    ASSERT_NO_THROW(st = exeNetwork->CreateInferRequest(inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr locations;
+    ASSERT_NO_THROW(st = inferRequest->GetBlob("locations", locations, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    {
+        ie_fp16 *dst = locations->buffer().as<ie_fp16 *>();
+        for (int i = 0; i < NUM_LOC; ++i) {
+            dst[i] = PrecisionUtils::f32tof16(gen_locations[i]);
+        }
+    }
+
+    Blob::Ptr confidence;
+    ASSERT_NO_THROW(st = inferRequest->GetBlob("confidence", confidence, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    {
+        ie_fp16 *dst = confidence->buffer().as<ie_fp16 *>();
+        for (int i = 0; i < NUM_CONF; ++i) {
+            dst[i] = PrecisionUtils::f32tof16(gen_confidence[i]);
+        }
+    }
+
+    ASSERT_NO_THROW(st = inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = inferRequest->GetBlob("detection_out", myriadOutput, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    CheckResults();
+}
+
+TEST_F(myriadDetectionOutputTests_nightly, MxNet) {
+    ASSERT_NO_FATAL_FAILURE(PrepareInput());
+    ASSERT_NO_FATAL_FAILURE(CalcRefOutput(true));
+
+    StatusCode st;
+
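+    // Feed the prior data captured in PrepareInput() as the Const layer blob;
+    // the TBlob wraps priorOutput's buffer without copying it.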
+    TBlob<uint8_t>::Ptr weights(new TBlob<uint8_t>({Precision::U8, {priorOutput->byteSize()}, Layout::C}, priorOutput->buffer().as<uint8_t *>()));
+
+    InferenceEngine::Core ie;
+    auto network = ie.ReadNetwork(DETECTION_OUTPUT_MODEL_MXNET, weights);
+
+    auto inputsInfo = network.getInputsInfo();
+    inputsInfo["locations"]->setPrecision(Precision::FP16);
+    inputsInfo["confidence"]->setPrecision(Precision::FP16);
+
+    auto outputsInfo = network.getOutputsInfo();
+    outputsInfo["detection_out"]->setPrecision(Precision::FP16);
+
+    IExecutableNetwork::Ptr exeNetwork;
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(exeNetwork, network, {}, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(exeNetwork, nullptr) << _resp.msg;
+
+    IInferRequest::Ptr inferRequest;
+    ASSERT_NO_THROW(st = exeNetwork->CreateInferRequest(inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr locations;
+    ASSERT_NO_THROW(st = inferRequest->GetBlob("locations", locations, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    {
+        ie_fp16 *dst = locations->buffer().as<ie_fp16 *>();
+        for (int i = 0; i < NUM_LOC; ++i) {
+            dst[i] = PrecisionUtils::f32tof16(gen_locations[i]);
+        }
+    }
+
+    Blob::Ptr confidence;
+    ASSERT_NO_THROW(st = inferRequest->GetBlob("confidence", confidence, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    {
+        ie_fp16 *dst = confidence->buffer().as<ie_fp16 *>();
+        for (int i = 0; i < NUM_CONF; ++i) {
+            dst[i] = PrecisionUtils::f32tof16(gen_confidence[i]);
+        }
+    }
+
+    ASSERT_NO_THROW(st = inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = inferRequest->GetBlob("detection_out", myriadOutput, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    CheckResults();
+}
+
+TEST_F(myriadDetectionOutputTests_nightly, WithConst) {
+    ASSERT_NO_FATAL_FAILURE(PrepareInput());
+    ASSERT_NO_FATAL_FAILURE(CalcRefOutput(false));
+
+    StatusCode st;
+
+    TBlob<uint8_t>::Ptr weights(new TBlob<uint8_t>({Precision::U8, {priorOutput->byteSize()}, Layout::C}, priorOutput->buffer().as<uint8_t *>()));
+
+    InferenceEngine::Core ie;
+    auto network = ie.ReadNetwork(DETECTION_OUTPUT_MODEL_WITH_CONST, weights);
+
+    auto inputsInfo = network.getInputsInfo();
+    inputsInfo["locations"]->setPrecision(Precision::FP16);
+    inputsInfo["confidence"]->setPrecision(Precision::FP16);
+
+    auto outputsInfo = network.getOutputsInfo();
+    outputsInfo["detection_out"]->setPrecision(Precision::FP16);
+
+    IExecutableNetwork::Ptr exeNetwork;
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(exeNetwork, network, {}, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(exeNetwork, nullptr) << _resp.msg;
+
+    IInferRequest::Ptr inferRequest;
+    ASSERT_NO_THROW(st = exeNetwork->CreateInferRequest(inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr locations;
+    ASSERT_NO_THROW(st = inferRequest->GetBlob("locations", locations, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    {
+        ie_fp16 *dst = locations->buffer().as<ie_fp16 *>();
+        for (int i = 0; i < NUM_LOC; ++i) {
+            dst[i] = PrecisionUtils::f32tof16(gen_locations[i]);
+        }
+    }
+
+    Blob::Ptr confidence;
+    ASSERT_NO_THROW(st = inferRequest->GetBlob("confidence", confidence, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    {
+        ie_fp16 *dst = confidence->buffer().as<ie_fp16 *>();
+        for (int i = 0; i < NUM_CONF; ++i) {
+            dst[i] = PrecisionUtils::f32tof16(gen_confidence[i]);
+        }
+    }
+
+    ASSERT_NO_THROW(st = inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = inferRequest->GetBlob("detection_out", myriadOutput, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    CheckResults();
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_eltwise_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_eltwise_test.cpp
new file mode 100644 (file)
index 0000000..467dd42
--- /dev/null
@@ -0,0 +1,976 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_eltwise_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseMax_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseInputs),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseSum_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseInputs),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseSub_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseOnlyTwoInputs),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseMul_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseInputs),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseSumWithCoeff_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseInputs),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseSumWithBroadcast_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseInputs),
+        ::testing::Values<int>(4))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseSubWithCoeff_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseOnlyTwoInputs),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseSubWithBroadcast_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseOnlyTwoInputs),
+        ::testing::Values<int>(4))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseDiv_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseOnlyTwoInputs),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseMin_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseInputs),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseSqDiff_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseOnlyTwoInputs),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwisePow_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseOnlyTwoInputs),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseFloorMod_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseOnlyTwoInputs),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseEqual_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseOnlyTwoInputs),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseNotEqual_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseOnlyTwoInputs),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseGreater_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseOnlyTwoInputs),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseGreaterEqual_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseOnlyTwoInputs),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseLess_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseOnlyTwoInputs),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseLessEqual_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseOnlyTwoInputs),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseLogicalNot_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseOnlyOneInput),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseLogicalAnd_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseInputs),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseLogicalOr_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseInputs),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseLogicalXor_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseInputs),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsEltwiseMean_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseOnlyTwoInputs),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
+
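+// Both Eltwise inputs are wired to the same pow1 output port, i.e. the two
+// operands of the mul may alias the same buffer.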
+TEST_F(myriadLayersTestsEltwiseBase, EltwiseWithSameInputs) {
+    StatusCode st;
+
+    const std::string model = R"V0G0N(
+<net batch="1" name="VNect: Test" version="2">
+       <layers>
+               <layer id="0" name="data" precision="FP16" type="Input">
+                       <output>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>2</dim>
+                                       <dim>3</dim>
+                                       <dim>3</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="1" name="pow1" precision="FP16" type="Power">
+                       <data power="1.0" scale="1.0" shift="0.0"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>2</dim>
+                                       <dim>3</dim>
+                                       <dim>3</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>1</dim>
+                                       <dim>2</dim>
+                                       <dim>3</dim>
+                                       <dim>3</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="2" name="eltwise" precision="FP16" type="Eltwise">
+                       <data operation="mul"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>2</dim>
+                                       <dim>3</dim>
+                                       <dim>3</dim>
+                               </port>
+                               <port id="1">
+                                       <dim>1</dim>
+                                       <dim>2</dim>
+                                       <dim>3</dim>
+                                       <dim>3</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="2">
+                                       <dim>1</dim>
+                                       <dim>2</dim>
+                                       <dim>3</dim>
+                                       <dim>3</dim>
+                               </port>
+                       </output>
+               </layer>
+       </layers>
+       <edges>
+               <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+               <edge from-layer="1" from-port="1" to-layer="2" to-port="0"/>
+               <edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
+       </edges>
+</net>
+            )V0G0N";
+
+    InferenceEngine::Core ie;
+    auto network = ie.ReadNetwork(model, InferenceEngine::Blob::CPtr());
+
+    InferenceEngine::InputsDataMap networkInputs;
+    ASSERT_NO_THROW(networkInputs = network.getInputsInfo());
+    InferenceEngine::OutputsDataMap networkOutputs;
+    ASSERT_NO_THROW(networkOutputs = network.getOutputsInfo());
+
+    networkInputs.begin()->second->setPrecision(InferenceEngine::Precision::FP16);
+    networkOutputs.begin()->second->setPrecision(InferenceEngine::Precision::FP16);
+
+    InferenceEngine::Blob::Ptr inputBlob;
+
+    InferenceEngine::IExecutableNetwork::Ptr exeNetwork;
+    std::map<std::string, std::string> networkConfig = {{VPU_CONFIG_KEY(PERF_REPORT_MODE), VPU_CONFIG_VALUE(PER_STAGE)}};
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(exeNetwork, network, networkConfig, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(exeNetwork, nullptr) << _resp.msg;
+
+    InferenceEngine::IInferRequest::Ptr inferRequest;
+    ASSERT_NO_THROW(st = exeNetwork->CreateInferRequest(inferRequest, &_resp));
+
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = inferRequest->GetBlob(networkInputs.begin()->first.c_str(), inputBlob, &_resp));
+
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    GenRandomData(inputBlob);
+
+    InferenceEngine::Blob::Ptr output;
+    ASSERT_NO_THROW(st = inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NO_THROW(st = inferRequest->GetBlob(networkOutputs.begin()->first.c_str(), output, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    _refBlob = make_shared_blob<ie_fp16>({Precision::FP16, output->getTensorDesc().getDims(), output->getTensorDesc().getLayout()});
+    _refBlob->allocate();
+    ref_eltwise(inputBlob, inputBlob, inputBlob, _refBlob, refMul, std::vector<float>({1.0f, 1.0f, 1.0f}));
+
+    CompareCommonAbsolute(_refBlob, output, 0.1f);
+}
+
+TEST_F(myriadLayersTests_nightly, MergeEltwiseWithReLU) {
+    const std::string model = R"V0G0N(
+        <Net name="WithEltwise" version="2" batch="1">
+            <layers>
+                <layer name="input" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="branch1" type="Convolution" precision="FP16" id="2">
+                    <convolution_data
+                        stride-x="1" stride-y="1"
+                        pad-x="0" pad-y="0"
+                        kernel-x="1" kernel-y="1"
+                        output="64"
+                        group="1"/>
+                    <input>
+                        <port id="2">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="3">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                    <weights offset="0" size="8192"/>
+                    <biases offset="8192" size="128"/>
+                </layer>
+                <layer name="branch2a" type="Convolution" precision="FP16" id="3">
+                    <convolution_data
+                        stride-x="1" stride-y="1"
+                        pad-x="1" pad-y="1"
+                        kernel-x="3" kernel-y="3"
+                        output="64"
+                        group="1"/>
+                    <input>
+                        <port id="4">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="5">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                    <weights offset="8320" size="73728"/>
+                    <biases offset="82048" size="128"/>
+                </layer>
+                <layer name="branch2a_relu" type="ReLU" precision="FP16" id="4">
+                    <input>
+                        <port id="6">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="7">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="branch2b" type="Convolution" precision="FP16" id="5">
+                    <convolution_data
+                        stride-x="1" stride-y="1"
+                        pad-x="1" pad-y="1"
+                        kernel-x="3" kernel-y="3"
+                        output="64"
+                        group="1"/>
+                    <input>
+                        <port id="8">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="9">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                    <weights offset="82176" size="73728"/>
+                    <biases offset="155904" size="128"/>
+                </layer>
+                <layer name="sum" type="Eltwise" precision="FP16" id="6">
+                    <elementwise_data operation="sum"/>
+                    <input>
+                        <port id="10">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                        <port id="11">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="12">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="sum_relu" type="ReLU" precision="FP16" id="7">
+                    <input>
+                        <port id="13">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="14">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="last" type="Convolution" precision="FP16" id="8">
+                    <convolution_data
+                        stride-x="1" stride-y="1"
+                        pad-x="0" pad-y="0"
+                        kernel-x="1" kernel-y="1"
+                        output="64"
+                        group="1"/>
+                    <input>
+                        <port id="15">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="16">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                    <weights offset="156032" size="8192"/>
+                    <biases offset="164224" size="128"/>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="1" to-layer="2" to-port="2"/>
+                <edge from-layer="1" from-port="1" to-layer="3" to-port="4"/>
+                <edge from-layer="3" from-port="5" to-layer="4" to-port="6"/>
+                <edge from-layer="4" from-port="7" to-layer="5" to-port="8"/>
+                <edge from-layer="2" from-port="3" to-layer="6" to-port="10"/>
+                <edge from-layer="5" from-port="9" to-layer="6" to-port="11"/>
+                <edge from-layer="6" from-port="12" to-layer="7" to-port="13"/>
+                <edge from-layer="7" from-port="14" to-layer="8" to-port="15"/>
+            </edges>
+        </Net>
+    )V0G0N";
+
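+    // 164352 bytes = last biases offset (164224) + its size (128), covering
+    // every weights/biases range referenced by the IR above.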
+    TBlob<uint8_t>::Ptr weights(GenWeights(164352 / sizeof(ie_fp16)));
+
+    ASSERT_NO_THROW(readNetwork(model, weights));
+
+    const auto& network = _cnnNetwork;
+
+    _inputsInfo = network.getInputsInfo();
+    auto inputInfo = _inputsInfo["input"];
+    inputInfo->setPrecision(Precision::FP16);
+
+    _outputsInfo = network.getOutputsInfo();
+    auto outputInfo = _outputsInfo["last"];
+    outputInfo->setPrecision(Precision::FP16);
+
+    StatusCode st;
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, { {VPU_CONFIG_KEY(PERF_REPORT_MODE), VPU_CONFIG_VALUE(PER_STAGE)},
+                                                                              {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)},
+                                                                              {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(NO)} },
+                                                      &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    std::map<std::string, InferenceEngineProfileInfo> perfMap;
+    ASSERT_NO_THROW(st = _inferRequest->GetPerformanceCounts(perfMap, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
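+    // With HW stage optimization disabled, the plugin is expected to fuse the
+    // Eltwise sum with the following ReLU into a single executed stage.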
+    auto sumAndReLULayerIt = perfMap.find("sum + sum_relu");
+    ASSERT_TRUE(sumAndReLULayerIt != perfMap.end());
+    EXPECT_EQ(InferenceEngineProfileInfo::EXECUTED, sumAndReLULayerIt->second.status);
+}
+
+TEST_F(myriadLayersTests_nightly, MergeEltwiseWithLeakyReLU) {
+    const std::string model = R"V0G0N(
+        <Net name="WithEltwise" version="2" batch="1">
+            <layers>
+                <layer name="input" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="branch1" type="Convolution" precision="FP16" id="2">
+                    <convolution_data
+                        stride-x="1" stride-y="1"
+                        pad-x="0" pad-y="0"
+                        kernel-x="1" kernel-y="1"
+                        output="64"
+                        group="1"/>
+                    <input>
+                        <port id="2">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="3">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                    <weights offset="0" size="8192"/>
+                    <biases offset="8192" size="128"/>
+                </layer>
+                <layer name="branch2a" type="Convolution" precision="FP16" id="3">
+                    <convolution_data
+                        stride-x="1" stride-y="1"
+                        pad-x="1" pad-y="1"
+                        kernel-x="3" kernel-y="3"
+                        output="64"
+                        group="1"/>
+                    <input>
+                        <port id="4">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="5">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                    <weights offset="8320" size="73728"/>
+                    <biases offset="82048" size="128"/>
+                </layer>
+                <layer name="branch2a_relu" type="ReLU" precision="FP16" id="4">
+                    <input>
+                        <port id="6">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="7">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="branch2b" type="Convolution" precision="FP16" id="5">
+                    <convolution_data
+                        stride-x="1" stride-y="1"
+                        pad-x="1" pad-y="1"
+                        kernel-x="3" kernel-y="3"
+                        output="64"
+                        group="1"/>
+                    <input>
+                        <port id="8">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="9">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                    <weights offset="82176" size="73728"/>
+                    <biases offset="155904" size="128"/>
+                </layer>
+                <layer name="sum" type="Eltwise" precision="FP16" id="6">
+                    <elementwise_data operation="sum"/>
+                    <input>
+                        <port id="10">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                        <port id="11">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="12">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="sum_leaky_relu" type="ReLU" precision="FP16" id="7">
+                    <data negative_slope="3.0"/>
+                    <input>
+                        <port id="13">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="14">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="last" type="Convolution" precision="FP16" id="8">
+                    <convolution_data
+                        stride-x="1" stride-y="1"
+                        pad-x="0" pad-y="0"
+                        kernel-x="1" kernel-y="1"
+                        output="64"
+                        group="1"/>
+                    <input>
+                        <port id="15">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="16">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                    <weights offset="156032" size="8192"/>
+                    <biases offset="164224" size="128"/>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="1" to-layer="2" to-port="2"/>
+                <edge from-layer="1" from-port="1" to-layer="3" to-port="4"/>
+                <edge from-layer="3" from-port="5" to-layer="4" to-port="6"/>
+                <edge from-layer="4" from-port="7" to-layer="5" to-port="8"/>
+                <edge from-layer="2" from-port="3" to-layer="6" to-port="10"/>
+                <edge from-layer="5" from-port="9" to-layer="6" to-port="11"/>
+                <edge from-layer="6" from-port="12" to-layer="7" to-port="13"/>
+                <edge from-layer="7" from-port="14" to-layer="8" to-port="15"/>
+            </edges>
+        </Net>
+    )V0G0N";
+
+    TBlob<uint8_t>::Ptr weights(GenWeights(164352 / sizeof(ie_fp16)));
+
+    ASSERT_NO_THROW(readNetwork(model, weights));
+
+    const auto& network = _cnnNetwork;
+
+    _inputsInfo = network.getInputsInfo();
+    auto inputInfo = _inputsInfo["input"];
+    inputInfo->setPrecision(Precision::FP16);
+
+    _outputsInfo = network.getOutputsInfo();
+    auto outputInfo = _outputsInfo["last"];
+    outputInfo->setPrecision(Precision::FP16);
+
+    StatusCode st;
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, { {VPU_CONFIG_KEY(PERF_REPORT_MODE), VPU_CONFIG_VALUE(PER_STAGE)},
+                                                                              {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)},
+                                                                              {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(NO)} },
+                                                      &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    std::map<std::string, InferenceEngineProfileInfo> perfMap;
+    ASSERT_NO_THROW(st = _inferRequest->GetPerformanceCounts(perfMap, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
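+    // The Eltwise sum should likewise be fused with the leaky ReLU that
+    // follows it, yielding a single "sum + sum_leaky_relu" stage.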
+    auto sumAndReLULayerIt = perfMap.find("sum + sum_leaky_relu");
+    ASSERT_TRUE(sumAndReLULayerIt != perfMap.end());
+    EXPECT_EQ(InferenceEngineProfileInfo::EXECUTED, sumAndReLULayerIt->second.status);
+}
+
+TEST_F(myriadLayersTests_nightly, MergeEltwiseWithClamp) {
+    const std::string model = R"V0G0N(
+        <Net name="WithEltwise" version="2" batch="1">
+            <layers>
+                <layer name="input" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="branch1" type="Convolution" precision="FP16" id="2">
+                    <convolution_data
+                        stride-x="1" stride-y="1"
+                        pad-x="0" pad-y="0"
+                        kernel-x="1" kernel-y="1"
+                        output="64"
+                        group="1"/>
+                    <input>
+                        <port id="2">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="3">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                    <weights offset="0" size="8192"/>
+                    <biases offset="8192" size="128"/>
+                </layer>
+                <layer name="branch2a" type="Convolution" precision="FP16" id="3">
+                    <convolution_data
+                        stride-x="1" stride-y="1"
+                        pad-x="1" pad-y="1"
+                        kernel-x="3" kernel-y="3"
+                        output="64"
+                        group="1"/>
+                    <input>
+                        <port id="4">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="5">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                    <weights offset="8320" size="73728"/>
+                    <biases offset="82048" size="128"/>
+                </layer>
+                <layer name="branch2a_relu" type="ReLU" precision="FP16" id="4">
+                    <input>
+                        <port id="6">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="7">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="branch2b" type="Convolution" precision="FP16" id="5">
+                    <convolution_data
+                        stride-x="1" stride-y="1"
+                        pad-x="1" pad-y="1"
+                        kernel-x="3" kernel-y="3"
+                        output="64"
+                        group="1"/>
+                    <input>
+                        <port id="8">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="9">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                    <weights offset="82176" size="73728"/>
+                    <biases offset="155904" size="128"/>
+                </layer>
+                <layer name="sum" type="Eltwise" precision="FP16" id="6">
+                    <elementwise_data operation="sum"/>
+                    <input>
+                        <port id="10">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                        <port id="11">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="12">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="sum_clamp" type="Clamp" precision="FP16" id="7">
+                    <data max="10" min="-10" />
+                    <input>
+                        <port id="13">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="14">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="last" type="Convolution" precision="FP16" id="8">
+                    <convolution_data
+                        stride-x="1" stride-y="1"
+                        pad-x="0" pad-y="0"
+                        kernel-x="1" kernel-y="1"
+                        output="64"
+                        group="1"/>
+                    <input>
+                        <port id="15">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="16">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                    <weights offset="156032" size="8192"/>
+                    <biases offset="164224" size="128"/>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="1" to-layer="2" to-port="2"/>
+                <edge from-layer="1" from-port="1" to-layer="3" to-port="4"/>
+                <edge from-layer="3" from-port="5" to-layer="4" to-port="6"/>
+                <edge from-layer="4" from-port="7" to-layer="5" to-port="8"/>
+                <edge from-layer="2" from-port="3" to-layer="6" to-port="10"/>
+                <edge from-layer="5" from-port="9" to-layer="6" to-port="11"/>
+                <edge from-layer="6" from-port="12" to-layer="7" to-port="13"/>
+                <edge from-layer="7" from-port="14" to-layer="8" to-port="15"/>
+            </edges>
+        </Net>
+    )V0G0N";
+
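+    // 164352 bytes covers the whole weights/biases arena of the IR above:
+    // the last <biases> entry ends at offset 164224 + size 128.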
+    TBlob<uint8_t>::Ptr weights(GenWeights(164352 / sizeof(ie_fp16)));
+
+    ASSERT_NO_THROW(readNetwork(model, weights));
+
+    const auto& network = _cnnNetwork;
+
+    _inputsInfo = network.getInputsInfo();
+    auto inputInfo = _inputsInfo["input"];
+    inputInfo->setPrecision(Precision::FP16);
+
+    _outputsInfo = network.getOutputsInfo();
+    auto outputInfo = _outputsInfo["last"];
+    outputInfo->setPrecision(Precision::FP16);
+
+    StatusCode st;
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, { {VPU_CONFIG_KEY(PERF_REPORT_MODE), VPU_CONFIG_VALUE(PER_STAGE)},
+                                                                             {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)},
+                                                                             {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(NO)} },
+                                                      &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    std::map<std::string, InferenceEngineProfileInfo> perfMap;
+    ASSERT_NO_THROW(st = _inferRequest->GetPerformanceCounts(perfMap, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    auto sumAndClampLayerIt = perfMap.find("sum + sum_clamp");
+    ASSERT_TRUE(sumAndClampLayerIt != perfMap.end());
+    EXPECT_EQ(InferenceEngineProfileInfo::EXECUTED, sumAndClampLayerIt->second.status);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_eltwise_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_eltwise_test.hpp
new file mode 100644 (file)
index 0000000..ae216bb
--- /dev/null
@@ -0,0 +1,634 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include <functional>
+#include <algorithm>
+#include <string>
+#include "myriad_layers_reference_functions.hpp"
+
+// TODO: no tests for multiple inputs to eltwise at all
+extern const char ELTWISE_MAX[] = "max";
+extern const char ELTWISE_MUL[] = "mul";
+extern const char ELTWISE_SUM[] = "sum";
+extern const char ELTWISE_SUB[] = "sub";
+extern const char ELTWISE_DIV[] = "div";
+extern const char ELTWISE_MIN[] = "min";
+extern const char ELTWISE_SQDIFF[] = "squared_diff";
+extern const char ELTWISE_POW[] = "pow";
+extern const char ELTWISE_FLOOR_MOD[] = "floor_mod";
+extern const char ELTWISE_EQUAL[] = "equal";
+extern const char ELTWISE_NOT_EQUAL[] = "not_equal";
+extern const char ELTWISE_GREATER[] = "greater";
+extern const char ELTWISE_GREATER_EQUAL[] = "greater_equal";
+extern const char ELTWISE_LESS[] = "less";
+extern const char ELTWISE_LESS_EQUAL[] = "less_equal";
+extern const char ELTWISE_LOGICAL_NOT[] = "logical_not";
+extern const char ELTWISE_LOGICAL_AND[] = "logical_and";
+extern const char ELTWISE_LOGICAL_OR[] = "logical_or";
+extern const char ELTWISE_LOGICAL_XOR[] = "logical_xor";
+extern const char ELTWISE_MEAN[] = "mean";
+
+using namespace InferenceEngine;
+
+PRETTY_PARAM(NDims, nd_tensor_test_params);
+
+auto refMax = [](const float a, const float b, const float /*c*/) noexcept {
+    return std::max(a, b);
+};
+
+auto refMul = [](const float a, const float b, const float /*c*/) noexcept {
+    return a * b;
+};
+
+auto refSum = [](const float a, const float b, const float /*c*/) noexcept {
+    return a + b;
+};
+
+auto refSub = [](const float a, const float b, const float /*c*/) noexcept {
+    return a - b;
+};
+
+auto refDiv = [](const float a, const float b, const float /*c*/) noexcept {
+    return a / b;
+};
+
+auto refMin = [](const float a, const float b, const float /*c*/) noexcept {
+    return std::min(a, b);
+};
+
+auto refSqDiff = [](const float a, const float b, const float /*c*/) noexcept {
+    return (a - b) * (a - b);
+};
+
+auto refPow = [](const float a, const float b, const float /*c*/) noexcept {
+    return powf(a, b);
+};
+
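+// Floored modulo (the result takes the sign of the divisor), unlike fmodf,
+// which truncates toward zero: floor_mod(-1, 3) == 2, while fmodf(-1, 3) == -1.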
+auto refFloorMod = [](const float a, const float b, const float /*c*/) noexcept {
+    return a - b * floorf(a / b);
+};
+
+auto refEqual = [](const float a, const float b, const float /*c*/) noexcept {
+    return a == b ? 1.f : 0.f;
+};
+
+auto refNotEqual = [](const float a, const float b, const float /*c*/) noexcept {
+    return a != b ? 1.f : 0.f;
+};
+
+auto refGreater = [](const float a, const float b, const float /*c*/) noexcept {
+    return a > b ? 1.f : 0.f;
+};
+
+auto refGreaterEqual = [](const float a, const float b, const float /*c*/) noexcept {
+    return a >= b ? 1.f : 0.f;
+};
+
+auto refLess = [](const float a, const float b, const float /*c*/) noexcept {
+    return a < b ? 1.f : 0.f;
+};
+
+auto refLessEqual = [](const float a, const float b, const float /*c*/) noexcept {
+    return a <= b ? 1.f : 0.f;
+};
+
+auto refLogicalNot = [](const float a, const float b, const float /*c*/) noexcept {
+    return (a == 0) ? 1.f : 0.f;
+};
+
+auto refLogicalAnd = [](const float a, const float b, const float /*c*/) noexcept {
+    return (a != 0) && (b != 0) ? 1.f : 0.f;
+};
+
+auto refLogicalOr = [](const float a, const float b, const float /*c*/) noexcept {
+    return (a != 0) || (b != 0) ? 1.f : 0.f;
+};
+
+auto refLogicalXor = [](const float a, const float b, const float /*c*/) noexcept {
+    return ((a != 0) != (b != 0)) ? 1.f : 0.f;
+};
+
+auto refMean = [](const float a, const float b, const float /*c*/) noexcept {
+    return (a + b)/2.f;
+};
+
+typedef float (*kernel)(const float a, const float b, const float c);
+
+static const std::map<const char*, kernel> s_kernels = {
+        {ELTWISE_MAX, refMax},
+        {ELTWISE_MUL, refMul},
+        {ELTWISE_SUM, refSum},
+        {ELTWISE_SUB, refSub},
+        {ELTWISE_DIV, refDiv},
+        {ELTWISE_MIN, refMin},
+        {ELTWISE_SQDIFF, refSqDiff},
+        {ELTWISE_POW, refPow},
+        {ELTWISE_FLOOR_MOD, refFloorMod},
+        {ELTWISE_EQUAL, refEqual},
+        {ELTWISE_NOT_EQUAL, refNotEqual},
+        {ELTWISE_GREATER, refGreater},
+        {ELTWISE_GREATER_EQUAL, refGreaterEqual},
+        {ELTWISE_LESS, refLess},
+        {ELTWISE_LESS_EQUAL, refLessEqual},
+        {ELTWISE_LOGICAL_NOT, refLogicalNot},
+        {ELTWISE_LOGICAL_AND, refLogicalAnd},
+        {ELTWISE_LOGICAL_OR, refLogicalOr},
+        {ELTWISE_LOGICAL_XOR, refLogicalXor},
+        {ELTWISE_MEAN, refMean}
+};
+
+void genRandomDataPow(Blob::Ptr blob) {
+    float scale = 2.0f / RAND_MAX;
+    /* fill with random data in the range (-1, 1) */
+    auto * blobRawDataFp16 = blob->buffer().as<ie_fp16 *>();
+    size_t count = blob->size();
+    for (size_t indx = 0; indx < count; ++indx) {
+        float val = rand();
+        val = val * scale - 1.0f;
+        while (fabs(val) < .01f) {  // keep |val| away from zero (stabilizes pow results)
+            val *= 10.f;
+        }
+        blobRawDataFp16[indx] = PrecisionUtils::f32tof16(val);
+    }
+}
+
+void genRandomDataLogic(Blob::Ptr blob) {
+    /* fill inputs with fp16 0.0 or 1.0 */
+    auto * blobRawDataFp16 = blob->buffer().as<ie_fp16 *>();
+    size_t count = blob->size();
+    const auto TrueVal = PrecisionUtils::f32tof16(1.f);
+    const auto FalseVal = PrecisionUtils::f32tof16(0.f);
+    float scale = 1.0f / RAND_MAX;
+    for (size_t indx = 0; indx < count; ++indx) {
+        float val = rand() * scale;
+        blobRawDataFp16[indx] = val < .5f ? FalseVal : TrueVal;
+    }
+}
+
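+// getCoord() decomposes a flat element index into per-dimension coordinates
+// (mixed-radix, innermost dimension first); getNum() is its inverse.
+// E.g. for dims {4, 3}: index 7 -> coords {3, 1} -> index 7 again.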
+void getCoord(uint32_t nSubspace, SizeVector dims, uint32_t subspaceCoord[])
+{
+    for(int i = 0; i < dims.size(); ++i) {
+        int nUpSubspace = nSubspace / dims[i];
+        subspaceCoord[i] = nSubspace - nUpSubspace * dims[i];
+        nSubspace = nUpSubspace;
+    }
+}
+
+int getNum(uint32_t subspaceDims[], SizeVector dims)
+{
+    int totalSubspaces = 1;
+    int num = 0;
+    for(int i = 0; i < dims.size(); i++) {
+        num += totalSubspaces * subspaceDims[i];
+        totalSubspaces *= dims[i];
+    }
+    return num;
+}
+
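+// Maps 1D-4D getDims() output into a fixed {C, W, H, N} order, padding absent
+// dimensions with 1, so RefEltwise can walk reference and input coordinates
+// uniformly when shapes differ.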
+SizeVector convertDims(SizeVector dims)
+{
+    SizeVector ret(4);
+    if (dims.size() == 1) {
+        ret[0] = dims[0];
+        ret[1] = 1;
+        ret[2] = 1;
+        ret[3] = 1;
+        return ret;
+    }
+
+    if (dims.size() == 2) {
+        ret[0] = dims[1];
+        ret[1] = 1;
+        ret[2] = 1;
+        ret[3] = dims[0];
+        return ret;
+    }
+
+    if (dims.size() == 3) {
+        ret[0] = dims[0];
+        ret[1] = dims[2];
+        ret[2] = dims[1];
+        ret[3] = 1;
+        return ret;
+    }
+    else { // dims.size() == 4
+        ret[0] = dims[1];
+        ret[1] = dims[3];
+        ret[2] = dims[2];
+        ret[3] = dims[0];
+        return ret;
+    }
+}
+
+class myriadLayersTestsEltwiseBase: public myriadLayersTests_nightly {
+protected:
+    template <typename Func> void RefEltwise(Func fun, std::vector<float> coeff)
+    {
+        auto itr = _inputMap.begin();
+        int coeff_num = 0;
+        const uint16_t *srcData = itr->second->buffer().as<const uint16_t*>();
+        uint16_t *dstData = _refBlob->buffer().as<uint16_t*>();
+        uint32_t src_coords[4];
+        SizeVector refDims = convertDims(_refBlob->getTensorDesc().getDims());
+        SizeVector itrDims = convertDims(itr->second->getTensorDesc().getDims());
+
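+        // The first input seeds the reference output: LOGICAL_NOT (the only
+        // unary op) is applied right away, otherwise the input is only scaled
+        // by its coefficient and the binary kernel is applied with each
+        // remaining input in the while loop below. Coordinates that exceed an
+        // input's extent are reset to 0, giving numpy-style broadcasting of
+        // size-1 dimensions.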
+        if (fun == s_kernels.at(ELTWISE_LOGICAL_NOT)) {
+            for (int i = 0; i < _refBlob->size(); i++) {
+                getCoord(i, refDims, src_coords);
+
+                for (int c = 0; c < refDims.size(); c++)
+                    if (src_coords[c] >= itrDims[c])
+                        src_coords[c] = 0;
+
+                int src_i = getNum(src_coords, itrDims);
+
+                dstData[i] = PrecisionUtils::f32tof16(fun(PrecisionUtils::f16tof32(srcData[src_i]), 0.f, 0.f));
+            }
+        } else {
+            for (int i = 0; i < _refBlob->size(); i++) {
+                getCoord(i, refDims, src_coords);
+
+                for (int c = 0; c < refDims.size(); c++)
+                    if (src_coords[c] >= itrDims[c])
+                        src_coords[c] = 0;
+
+                int src_i = getNum(src_coords, itrDims);
+
+                dstData[i] = PrecisionUtils::f32tof16(PrecisionUtils::f16tof32(srcData[src_i]) * coeff[coeff_num]);
+            }
+        }
+
+        itr++;
+        coeff_num++;
+
+        while(itr != _inputMap.end()) {
+            ASSERT_NE(itr->second, nullptr);
+            const uint16_t *srcData = itr->second->buffer().as<const uint16_t*>();
+            ASSERT_NE(srcData, nullptr);
+            uint16_t *dstData = _refBlob->buffer().as<uint16_t*>();
+            itrDims = convertDims(itr->second->getTensorDesc().getDims());
+
+            for (int i = 0; i < _refBlob->size(); i++) {
+                getCoord(i, refDims, src_coords);
+
+                for (int c = 0; c < refDims.size(); c++)
+                    if (src_coords[c] >= itrDims[c])
+                        src_coords[c] = 0;
+
+                int src_i = getNum(src_coords, itrDims);
+                float val = fun(PrecisionUtils::f16tof32(dstData[i]), PrecisionUtils::f16tof32(srcData[src_i])*coeff[coeff_num], 0.f);
+
+                dstData[i] = PrecisionUtils::f32tof16(val);
+            }
+            ++itr;
+            ++coeff_num;
+        }
+    }
+
+    nd_tensor_test_params _p;
+    std::map<std::string, std::string> _params;
+
+};
+
+template <const char* EltwiseType> class EltwiseTest : public myriadLayersTestsEltwiseBase,
+                                                       public testing::WithParamInterface<std::tuple<NDims, int, int>> {
+protected:
+    virtual void InitBody(bool withCoefs = false, bool withBroadcast = false, bool isOutputLogic = false)
+    {
+        // pow accumulates noticeably more fp16 error than the other kernels,
+        // so it gets a looser comparison bound.
+        const float ERROR_BOUND = (strcmp(EltwiseType, ELTWISE_POW) == 0) ? .125f : 8.4e-3f;
+
+        _params.clear();
+        auto params = GetParam();
+        _p = std::get<0>(params);
+        int count = std::get<1>(params);
+        int ndims = std::get<2>(params);
+
+        _params["operation"] = EltwiseType;
+
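+        // Optional per-input scale factors: the IE "coeff" attribute is a
+        // comma-separated list with one value per eltwise input.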
+        std::vector<float> coeff;
+        for (int i = 0; i < count; i++)
+            coeff.push_back(withCoefs ? ((float)rand() / RAND_MAX) * 2.0f : 1.0f);
+        if (withCoefs) {
+            _params["coeff"] = std::to_string(coeff[0]);
+            for (int i = 1; i < count; i++)
+                _params["coeff"] += "," + std::to_string(coeff[i]);
+        }
+
+        InferenceEngine::SizeVector dims;
+        dims.resize(ndims);
+        for (int i = 0; i < ndims; i++)
+            dims[i] = _p.dims[i];
+
+        IN_OUT_desc inpt(count);
+        for (int i = 0; i < count; ++i) {
+            inpt[i] = dims;
+        }
+
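+        // For broadcast testing, one randomly chosen input is resized to a
+        // random rank and, across all inputs, random dimensions are collapsed
+        // to 1.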
+        if (withBroadcast) {
+            if(ndims == 3) {
+                GTEST_SKIP_("Please look at #-19681");
+                // inpt[2].resize(2);
+            } else {
+                inpt[rand()%count].resize(rand()%ndims + 1);
+            }
+            for (int i = 0; i < count; ++i) {
+                for (int j = 0; j < inpt[i].size(); j++) {
+                    if (rand()%2 > 0) {
+                        inpt[i][j] = 1;
+                    }
+                }
+            }
+        }
+
+        SetInputTensors(inpt);
+        SetOutputTensors({dims});
+
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+        ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Eltwise").params(_params)));
+        ASSERT_TRUE(Infer());
+
+        ASSERT_NO_FATAL_FAILURE(RefEltwise(s_kernels.at(EltwiseType), coeff));
+        ASSERT_EQ(_outputMap.size(), 1);
+
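+        // Logic ops are compared as booleans: any non-zero device output is
+        // canonicalized to 1.0, matching the exact 0/1 values the reference
+        // kernels produce.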
+        if (isOutputLogic) {
+            Blob::Ptr& output = _outputMap.begin()->second;
+            size_t out_size = output->size();
+            InferenceEngine::ie_fp16 *output_fp16_ptr = output->buffer().as<ie_fp16*>();
+
+            for (size_t i = 0; i < out_size; i++) {
+                if (PrecisionUtils::f16tof32(output_fp16_ptr[i]) != 0.f) {
+                    output_fp16_ptr[i] = PrecisionUtils::f32tof16(1.f);
+                }
+            }
+        }
+
+        CompareCommonAbsolute(_outputMap.begin()->second, _refBlob, ERROR_BOUND);
+    }
+};
+
+class myriadTestsEltwiseMax_nightly: public EltwiseTest<ELTWISE_MAX>
+{
+};
+
+class myriadTestsEltwiseSum_nightly: public EltwiseTest<ELTWISE_SUM>
+{
+};
+
+class myriadTestsEltwiseSub_nightly: public EltwiseTest<ELTWISE_SUB>
+{
+};
+
+class myriadTestsEltwiseMul_nightly: public EltwiseTest<ELTWISE_MUL>
+{
+};
+
+class myriadTestsEltwiseSumWithCoeff_nightly: public EltwiseTest<ELTWISE_SUM>
+{
+};
+
+class myriadTestsEltwiseSubWithCoeff_nightly: public EltwiseTest<ELTWISE_SUB>
+{
+};
+
+class myriadTestsEltwiseSumWithBroadcast_nightly: public EltwiseTest<ELTWISE_SUM>
+{
+};
+
+class myriadTestsEltwiseSubWithBroadcast_nightly: public EltwiseTest<ELTWISE_SUB>
+{
+};
+
+class myriadTestsEltwiseDiv_nightly: public EltwiseTest<ELTWISE_DIV>
+{
+};
+
+class myriadTestsEltwiseMin_nightly: public EltwiseTest<ELTWISE_MIN>
+{
+};
+
+class myriadTestsEltwiseSqDiff_nightly: public EltwiseTest<ELTWISE_SQDIFF>
+{
+};
+
+class myriadTestsEltwisePow_nightly: public EltwiseTest<ELTWISE_POW>
+{
+    void SetUp() override {
+        EltwiseTest::SetUp();
+        _genDataCallback = genRandomDataPow;
+    }
+};
+
+class myriadTestsEltwiseFloorMod_nightly: public EltwiseTest<ELTWISE_FLOOR_MOD>
+{
+};
+
+class myriadTestsEltwiseEqual_nightly: public EltwiseTest<ELTWISE_EQUAL>
+{
+};
+
+class myriadTestsEltwiseNotEqual_nightly: public EltwiseTest<ELTWISE_NOT_EQUAL>
+{
+};
+
+class myriadTestsEltwiseGreater_nightly: public EltwiseTest<ELTWISE_GREATER>
+{
+};
+
+class myriadTestsEltwiseGreaterEqual_nightly: public EltwiseTest<ELTWISE_GREATER_EQUAL>
+{
+};
+
+class myriadTestsEltwiseLess_nightly: public EltwiseTest<ELTWISE_LESS>
+{
+};
+
+class myriadTestsEltwiseLessEqual_nightly: public EltwiseTest<ELTWISE_LESS_EQUAL>
+{
+};
+
+class myriadTestsEltwiseLogicalNot_nightly: public EltwiseTest<ELTWISE_LOGICAL_NOT>
+{
+    void SetUp() override {
+        EltwiseTest::SetUp();
+        _genDataCallback = genRandomDataLogic;
+    }
+};
+
+class myriadTestsEltwiseLogicalAnd_nightly: public EltwiseTest<ELTWISE_LOGICAL_AND>
+{
+    void SetUp() override {
+        EltwiseTest::SetUp();
+        _genDataCallback = genRandomDataLogic;
+    }
+};
+
+class myriadTestsEltwiseLogicalOr_nightly: public EltwiseTest<ELTWISE_LOGICAL_OR>
+{
+    void SetUp() override {
+        EltwiseTest::SetUp();
+        _genDataCallback = genRandomDataLogic;
+    }
+};
+
+class myriadTestsEltwiseLogicalXor_nightly: public EltwiseTest<ELTWISE_LOGICAL_XOR>
+{
+    void SetUp() override {
+        EltwiseTest::SetUp();
+        _genDataCallback = genRandomDataLogic;
+    }
+};
+
+class myriadTestsEltwiseMean_nightly: public EltwiseTest<ELTWISE_MEAN>
+{
+};
+
+TEST_P(myriadTestsEltwiseMax_nightly, Max)
+{
+    InitBody();
+}
+
+TEST_P(myriadTestsEltwiseSum_nightly, Sum)
+{
+    InitBody();
+}
+
+TEST_P(myriadTestsEltwiseSub_nightly, Sub)
+{
+    InitBody();
+}
+
+TEST_P(myriadTestsEltwiseMul_nightly, Mul)
+{
+    InitBody();
+}
+
+TEST_P(myriadTestsEltwiseSumWithCoeff_nightly, Sum)
+{
+    InitBody(true);
+}
+
+TEST_P(myriadTestsEltwiseSubWithCoeff_nightly, Sub)
+{
+    InitBody(true);
+}
+
+TEST_P(myriadTestsEltwiseSumWithBroadcast_nightly, Sum)
+{
+    InitBody(false, true);
+}
+
+TEST_P(myriadTestsEltwiseSubWithBroadcast_nightly, Sub)
+{
+    InitBody(false, true);
+}
+
+TEST_P(myriadTestsEltwiseDiv_nightly, Div)
+{
+    InitBody();
+}
+
+TEST_P(myriadTestsEltwiseMin_nightly, Min)
+{
+    InitBody();
+}
+
+TEST_P(myriadTestsEltwiseSqDiff_nightly, SqDiff)
+{
+    InitBody();
+}
+
+TEST_P(myriadTestsEltwisePow_nightly, Pow)
+{
+    InitBody();
+}
+
+TEST_P(myriadTestsEltwiseFloorMod_nightly, FloorMod)
+{
+    InitBody();
+}
+
+TEST_P(myriadTestsEltwiseEqual_nightly, Equal)
+{
+    InitBody(false, false, true);
+}
+
+TEST_P(myriadTestsEltwiseNotEqual_nightly, NotEqual)
+{
+    InitBody(false, false, true);
+}
+
+TEST_P(myriadTestsEltwiseGreater_nightly, Greater)
+{
+    InitBody(false, false, true);
+}
+
+TEST_P(myriadTestsEltwiseGreaterEqual_nightly, GreaterEqual)
+{
+    InitBody(false, false, true);
+}
+
+TEST_P(myriadTestsEltwiseLess_nightly, Less)
+{
+    InitBody(false, false, true);
+}
+
+TEST_P(myriadTestsEltwiseLessEqual_nightly, LessEqual)
+{
+    InitBody(false, false, true);
+}
+
+TEST_P(myriadTestsEltwiseLogicalNot_nightly, LogicalNot)
+{
+    InitBody(false, false, true);
+}
+
+TEST_P(myriadTestsEltwiseLogicalAnd_nightly, LogicalAnd)
+{
+    InitBody(false, false, true);
+}
+
+TEST_P(myriadTestsEltwiseLogicalOr_nightly, LogicalOr)
+{
+    InitBody(false, false, true);
+}
+
+TEST_P(myriadTestsEltwiseLogicalXor_nightly, LogicalXor)
+{
+    InitBody(false, false, true);
+}
+
+TEST_P(myriadTestsEltwiseMean_nightly, Mean)
+{
+    InitBody();
+}
+
+static std::vector<NDims> s_eltwiseTensors = {
+        {{3, 2, 14, 32}},
+        {{5, 4, 8, 16}},
+        {{2, 16, 16, 8}},
+};
+
+static std::vector<int> s_eltwiseInputs = {
+        2, 3, 4, 5, 6
+};
+
+static std::vector<int> s_eltwiseOnlyTwoInputs = {
+        2
+};
+
+static std::vector<int> s_eltwiseOnlyOneInput = {
+        1
+};
+
+static std::vector<int> s_eltwiseDims = {
+        2, 3, 4
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_elu_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_elu_test.cpp
new file mode 100644 (file)
index 0000000..c40101b
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_elu_test.hpp"
+
+INSTANTIATE_TEST_CASE_P( accuracy, myriadLayersTestsELUParams,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eluTensors),
+        ::testing::ValuesIn(s_eluParams))
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_elu_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_elu_test.hpp
new file mode 100644 (file)
index 0000000..9a80695
--- /dev/null
@@ -0,0 +1,70 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include <cmath>
+
+#define BOUND (5.0f)
+#define ERROR_BOUND (8e-3)
+
+using namespace InferenceEngine;
+
+PRETTY_PARAM(alpha, float);
+
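+// Reference ELU: f(x) = x for x > 0, alpha * (exp(x) - 1) otherwise.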
+void gen_ref_elu(const InferenceEngine::Blob::Ptr src,
+                        InferenceEngine::Blob::Ptr dst,
+                        alpha p) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+    ASSERT_EQ(src->getTensorDesc().getDims().size(), dst->getTensorDesc().getDims().size());
+    const ie_fp16 *srcData = src->buffer();
+    ie_fp16 *dstData = dst->buffer();
+    ASSERT_NE(srcData, nullptr);
+    ASSERT_NE(dstData, nullptr);
+
+    for (size_t indx = 0; indx < src->size(); indx++) {
+        float src_val = PrecisionUtils::f16tof32(srcData[indx]);
+        dstData[indx] = PrecisionUtils::f32tof16(src_val > 0 ? src_val : p * (expf(src_val) - 1.f));
+    }
+}
+
+typedef myriadLayerTestBaseWithParam<std::tuple<SizeVector, alpha>> myriadLayersTestsELUParams;
+
+TEST_P(myriadLayersTestsELUParams, TestsELU) {
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+    auto param = GetParam();
+    SizeVector tensor = std::get<0>(param);
+    alpha p = std::get<1>(param);
+
+    std::map<std::string, std::string> params;
+    params["alpha"] = std::to_string(p);
+
+    SetInputTensors({tensor});
+    SetOutputTensors({tensor});
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("ELU").params(params)));
+    /* input data preparation */
+    SetFirstInputToRange(-BOUND, BOUND);
+    ASSERT_TRUE(Infer());
+
+    /* output check */
+    auto outputBlob =_outputMap[_outputsInfo.begin()->first];
+    auto inputBlob = _inputMap[_inputsInfo.begin()->first];
+
+    gen_ref_elu(inputBlob, _refBlob, p);
+    CompareCommonAbsolute(outputBlob, _refBlob, ERROR_BOUND);
+}
+
+static std::vector<SizeVector> s_eluTensors = {
+    {{6, 5, 4, 3, 40, 43}},
+    {{6, 5, 4, 3}},
+    {{6, 5, 4}},
+};
+
+static std::vector<alpha> s_eluParams = {
+    0.1f,
+    0.0f,
+    1.0f,
+    5.0f,
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_erf_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_erf_test.cpp
new file mode 100644 (file)
index 0000000..9034960
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_erf_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        accuracy, myriadLayersTestsErf_nightly,
+        ::testing::ValuesIn(s_ErfDims));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_erf_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_erf_test.hpp
new file mode 100644 (file)
index 0000000..d998674
--- /dev/null
@@ -0,0 +1,66 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include <cmath>
+
+// A BOUND of 5.0 may be wider than needed; 3.0-4.0 would suffice, since outside +-3.0 erf(x) is +-1 to three-digit precision.
+#define BOUND (5.0f)
+#define ERROR_BOUND (1.2e-3f)
+
+using namespace InferenceEngine;
+
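+// Reference erf: elementwise erff() computed in fp32, rounded back to fp16.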
+void ref_erf(const InferenceEngine::Blob::Ptr src,
+             InferenceEngine::Blob::Ptr dst) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+    ASSERT_EQ(src->getTensorDesc().getDims().size(), dst->getTensorDesc().getDims().size());
+    ie_fp16 *srcData = src->buffer();
+    ie_fp16 *dstData = dst->buffer();
+    ASSERT_NE(srcData, nullptr);
+    ASSERT_NE(dstData, nullptr);
+    for (size_t indx = 0; indx < src->size(); indx++) {
+        dstData[indx] =
+                PrecisionUtils::f32tof16(erff(PrecisionUtils::f16tof32(srcData[indx])));
+    }
+}
+
+class myriadLayersTestsErf_nightly: public myriadLayersTests_nightly,
+                                    public testing::WithParamInterface<SizeVector> {
+public:
+};
+
+TEST_P(myriadLayersTestsErf_nightly, TestsErf)
+{
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+    auto p = ::testing::WithParamInterface<SizeVector>::GetParam();
+    SetInputTensors({p});
+    SetOutputTensors({p});
+    makeSingleLayerNetwork(LayerInitParams("Erf"));
+    SetFirstInputToRange(-BOUND, BOUND);
+    ASSERT_TRUE(Infer());
+
+    /* output check */
+    ref_erf(_inputMap.begin()->second, _refBlob);
+    CompareCommonAbsolute(_outputMap.begin()->second, _refBlob, ERROR_BOUND);
+}
+
+static const std::vector<SizeVector> s_ErfDims = {
+    {4, 1, 16, 16},
+    {4, 2, 16, 16},
+    {4, 3, 16, 16},
+    {4, 4, 1, 53, 16},
+    {4, 4, 2, 53, 16},
+    {4, 4, 3, 53, 16},
+    {4, 4, 1, 224, 224},
+    {4, 4, 4, 2, 224, 224},
+    {4, 4, 4, 3, 224, 224},
+    {4, 4, 4, 1, 224, 235},
+    {4, 4, 4, 2, 224, 235},
+    {4, 4, 4, 3, 224, 235},
+    {1, 1, 277, 230},
+    {1, 2, 277, 230},
+    {1, 3, 277, 230},
+    {32, 8, 16}
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_detectionoutput_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_detectionoutput_test.cpp
new file mode 100644 (file)
index 0000000..d0b9287
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_exp_detectionoutput_test.hpp"
+
+const int _NumRois = 1000;
+const int _NumClasses = 81;
+const int _MaxDetections = 100;
+
+static const std::vector<SizeParams> s_sizeParams_list =
+{
+    { _NumRois, _NumClasses, _MaxDetections },
+};
+
+static const std::vector<ExpDetectionOutputParams> s_layerParams_list =
+{
+    {{ 10.0, 10.0, 5.0, 5.0 }, 4.135166645050049, 0.5, 0.05, _MaxDetections, _NumClasses, 2000, 0 },
+};
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsExpDetectionOutput_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_sizeParams_list),
+        ::testing::ValuesIn(s_layerParams_list))
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_detectionoutput_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_detectionoutput_test.hpp
new file mode 100644 (file)
index 0000000..12eb048
--- /dev/null
@@ -0,0 +1,391 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_reference_functions.hpp"
+#include "myriad_layers_tests.hpp"
+#include "tests_vpu_common.hpp"
+
+#include <algorithm>
+#include <functional>
+#include <string>
+
+using namespace InferenceEngine;
+
+struct SizeParams {
+    int numRois;
+    int numClasses;
+    int maxDetections;
+};
+
+static void generateData(Blob::Ptr inputBoxesBlob,
+                         Blob::Ptr inputDeltasBlob,
+                         Blob::Ptr inputScoresBlob,
+                         Blob::Ptr inputIMinfoBlob,
+                         const SizeParams& sizeParams,
+                         const ExpDetectionOutputParams& layerParams)
+{
+    auto inputBoxes  = inputBoxesBlob->buffer().as<ie_fp16*>();
+    auto inputDeltas = inputDeltasBlob->buffer().as<ie_fp16*>();
+    auto inputScores = inputScoresBlob->buffer().as<ie_fp16*>();
+    auto inputIMinfo = inputIMinfoBlob->buffer().as<ie_fp16*>();
+
+    const size_t numRois    = sizeParams.numRois;
+    const size_t numClasses = sizeParams.numClasses;
+
+    const int W = 320;
+    const int H = 240;
+
+    // boxes generator: returns a random interval [a, b] of length >= minSize, shifted down if it overruns max
+    auto genXY = [](int min, int max, int minSize, int maxSize)
+        {
+            int a = min + maxSize * (float(std::rand()) / RAND_MAX);
+            int b = min + maxSize * (float(std::rand()) / RAND_MAX);
+            if (b < a)
+                std::swap(a, b);
+            if (b - a < minSize)
+                b = a + minSize;
+            if (b > max)
+            {
+                const int d = b - max;
+                a -= d;
+                b -= d;
+            }
+            return std::make_pair(a, b);
+        };
+
+    // input boxes
+    {
+        const int DX = 5 * layerParams.deltas_weights[0];
+        const int DY = 5 * layerParams.deltas_weights[1];
+
+        const int X0 = 0 + DX, X1 = W - DX, SX = X1 - X0 + 1;
+        const int Y0 = 0 + DY, Y1 = H - DY, SY = Y1 - Y0 + 1;  // Y extents follow the image height
+
+        for (int roi_idx = 0; roi_idx < numRois; ++roi_idx)
+        {
+            auto xx = genXY(X0, X1, DX, SX);
+            auto yy = genXY(Y0, Y1, DY, SY);
+
+            ie_fp16* iboxes = &inputBoxes[roi_idx * 4];
+
+            iboxes[0] = PrecisionUtils::f32tof16( (float) xx.first );
+            iboxes[1] = PrecisionUtils::f32tof16( (float) yy.first );
+            iboxes[2] = PrecisionUtils::f32tof16( (float) xx.second );
+            iboxes[3] = PrecisionUtils::f32tof16( (float) yy.second );
+        }
+    }
+
+    // input deltas
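+    // Deltas are drawn relative to deltas_weights so that, once the layer
+    // normalizes them back, the decoded boxes shift and rescale only
+    // moderately (presumably keeping d_log_* below max_delta_log_wh).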
+    for (int roi_idx = 0; roi_idx < numRois; ++roi_idx)
+    {
+        for (int class_idx = 0; class_idx < numClasses; ++class_idx)
+        {
+            float dx = 0.5*layerParams.deltas_weights[0] + layerParams.deltas_weights[0] * (float(std::rand()) / RAND_MAX);
+            float dy = 0.5*layerParams.deltas_weights[1] + layerParams.deltas_weights[1] * (float(std::rand()) / RAND_MAX);
+
+            const float minD = 0.95;
+            const float maxD = 1.10;
+
+            float d_log_w = std::log(layerParams.deltas_weights[2] * (minD + (maxD - minD) * (float(std::rand()) / RAND_MAX)));
+            float d_log_h = std::log(layerParams.deltas_weights[3] * (minD + (maxD - minD) * (float(std::rand()) / RAND_MAX)));
+
+            ie_fp16* ideltas = &inputDeltas[(roi_idx * numClasses + class_idx) * 4];
+
+            ideltas[0] = PrecisionUtils::f32tof16( dx );
+            ideltas[1] = PrecisionUtils::f32tof16( dy );
+            ideltas[2] = PrecisionUtils::f32tof16( d_log_w );
+            ideltas[3] = PrecisionUtils::f32tof16( d_log_h );
+        }
+    }
+
+    // input scores
+    // For test stability we try to produce pairwise-different scores:
+    // fp16 has 2^16 distinct codes (including NaNs etc.), but at least 81000 (81*1000) values are needed,
+    // so we walk down through successive representable fp16 numbers, from 1.0 - 1ulp towards 0, until a
+    // small value (below score_threshold) is reached; scores below that may repeat.
+    // The score tensor is filled in a random-like manner using an index step coprime with the overall size.
+    {
+        static const int primes[] = {97, 89, 83, 79, 73, 71, 67, 61, 59, 53, 47, 43,
+                                     41, 37, 31, 29, 23, 19, 17, 13, 11, 7, 5, 3, 2};
+
+        int count = numRois * numClasses;
+
+        int step = 0;
+        for (auto p : primes)
+        {
+            if ((count % p) != 0)
+            {
+                step = p;
+                break;
+            }
+        }
+        IE_ASSERT(step != 0); // unable to generate consistent scores list
+
+        ie_fp16 score = PrecisionUtils::f32tof16( 1.0f );
+        ie_fp16 minScore = PrecisionUtils::f32tof16( 0.001f );
+        int n = std::min(step/2, 1);
+        for (int i = 0; i < count; ++i)
+        {
+            if ((uint32_t)score > (uint32_t)minScore)
+                --score; // previous representable fp16 value: ie_fp16 is a raw bit pattern
+            inputScores[n] = score;
+            n = (n + step) % count; // visits every element, since count and step are coprime
+        }
+    }
+
+    // image info
+    inputIMinfo[0] = PrecisionUtils::f32tof16( (float) H );
+    inputIMinfo[1] = PrecisionUtils::f32tof16( (float) W );
+}
+
+using ExpDetectionOutputTestParams = std::tuple<SizeParams, ExpDetectionOutputParams>;
+
+static const Precision dataPrecision = Precision::FP16;
+static const Precision classPrecision = Precision::I32;
+
+enum BlobIndices { InputBoxes=0, InputDeltas, InputScores, InputIMinfo,
+                   OutputBoxes, OutputClasses, OutputScores, NumBlobs };
+
+typedef std::vector<SizeVector> BlobDimsList;
+
+class ExpDetectionOutputTest: public myriadLayerTestBaseWithParam<ExpDetectionOutputTestParams>
+{
+protected:
+    void testExpDetectionOutput()
+        {
+            _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+            const auto testParams = GetParam();
+            const auto sizeParams = std::get<0>(testParams);
+            const auto layerParams = std::get<1>(testParams);
+
+            const auto blobDims = calcBlobDims(sizeParams);
+
+            const auto model = getModel(blobDims, layerParams);
+
+            ASSERT_NO_THROW(readNetwork(model));
+
+            const auto& network = _cnnNetwork;
+
+            _inputsInfo = network.getInputsInfo();
+            _inputsInfo["detectionOutput_inputBoxes"]->setPrecision(dataPrecision);
+            _inputsInfo["detectionOutput_inputBoxes"]->setLayout(defaultLayout(blobDims[InputBoxes].size()));
+            _inputsInfo["detectionOutput_inputDeltas"]->setPrecision(dataPrecision);
+            _inputsInfo["detectionOutput_inputDeltas"]->setLayout(defaultLayout(blobDims[InputDeltas].size()));
+            _inputsInfo["detectionOutput_inputScores"]->setPrecision(dataPrecision);
+            _inputsInfo["detectionOutput_inputScores"]->setLayout(defaultLayout(blobDims[InputScores].size()));
+            _inputsInfo["detectionOutput_inputIMinfo"]->setPrecision(dataPrecision);
+            _inputsInfo["detectionOutput_inputIMinfo"]->setLayout(defaultLayout(blobDims[InputIMinfo].size()));
+
+            _outputsInfo = network.getOutputsInfo();
+            _outputsInfo["expDetectionOutput.0"]->setPrecision(dataPrecision);
+            _outputsInfo["expDetectionOutput.0"]->setLayout(defaultLayout(blobDims[OutputBoxes].size()));
+            _outputsInfo["expDetectionOutput.1"]->setPrecision(classPrecision);
+            _outputsInfo["expDetectionOutput.1"]->setLayout(defaultLayout(blobDims[OutputClasses].size()));
+            _outputsInfo["expDetectionOutput.2"]->setPrecision(dataPrecision);
+            _outputsInfo["expDetectionOutput.2"]->setLayout(defaultLayout(blobDims[OutputScores].size()));
+
+            StatusCode st = OK;
+
+            ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, _config, &_resp));
+            ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+            ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+            ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+            ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+            Blob::Ptr inputBoxesBlob;
+            ASSERT_NO_THROW(st = _inferRequest->GetBlob("detectionOutput_inputBoxes", inputBoxesBlob, &_resp));
+            ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+            Blob::Ptr inputDeltasBlob;
+            ASSERT_NO_THROW(st = _inferRequest->GetBlob("detectionOutput_inputDeltas", inputDeltasBlob, &_resp));
+            ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+            Blob::Ptr inputScoresBlob;
+            ASSERT_NO_THROW(st = _inferRequest->GetBlob("detectionOutput_inputScores", inputScoresBlob, &_resp));
+            ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+            Blob::Ptr inputIMinfoBlob;
+            ASSERT_NO_THROW(st = _inferRequest->GetBlob("detectionOutput_inputIMinfo", inputIMinfoBlob, &_resp));
+            ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+            generateData(inputBoxesBlob, inputDeltasBlob, inputScoresBlob, inputIMinfoBlob, sizeParams, layerParams);
+
+            ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+            ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+            Blob::Ptr outputBoxesBlob;
+            ASSERT_NO_THROW(st = _inferRequest->GetBlob("expDetectionOutput.0", outputBoxesBlob, &_resp));
+            ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+            Blob::Ptr refBoxesBlob = make_shared_blob<ie_fp16>(outputBoxesBlob->getTensorDesc());
+            refBoxesBlob->allocate();
+
+            Blob::Ptr outputClassesBlob;
+            ASSERT_NO_THROW(st = _inferRequest->GetBlob("expDetectionOutput.1", outputClassesBlob, &_resp));
+            ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+            Blob::Ptr refClassesBlob = make_shared_blob<int32_t>(outputClassesBlob->getTensorDesc());
+            refClassesBlob->allocate();
+
+            Blob::Ptr outputScoresBlob;
+            ASSERT_NO_THROW(st = _inferRequest->GetBlob("expDetectionOutput.2", outputScoresBlob, &_resp));
+            ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+            Blob::Ptr refScoresBlob = make_shared_blob<ie_fp16>(outputScoresBlob->getTensorDesc());
+            refScoresBlob->allocate();
+
+            ref_expDetectionOutput(inputBoxesBlob, inputDeltasBlob, inputScoresBlob, inputIMinfoBlob,
+                                   refBoxesBlob, refClassesBlob, refScoresBlob,
+                                   sizeParams.numRois, sizeParams.numClasses, sizeParams.maxDetections, layerParams);
+
+            CompareCommonAbsolute(refBoxesBlob, outputBoxesBlob, 0.0f);
+            CompareCommonExact(refClassesBlob, outputClassesBlob);
+            CompareCommonAbsolute(refScoresBlob, outputScoresBlob, 0.0f);
+        }
+    static std::string getModel(const BlobDimsList& blobDims, const ExpDetectionOutputParams& layerParams)
+        {
+            std::string model = R"V0G0N(
+                <net name="testExpDetectionOutput" version="5">
+                    <layers>
+                        <layer id="0" name="detectionOutput_inputBoxes" type="Input">
+                            <output>
+                                <port id="0" precision="__DATA_PRECISION__">__INPUT_BOXES_DIMS__</port>
+                            </output>
+                        </layer>
+                        <layer id="1" name="detectionOutput_inputDeltas" type="Input">
+                            <output>
+                                <port id="0" precision="__DATA_PRECISION__">__INPUT_DELTAS_DIMS__</port>
+                            </output>
+                        </layer>
+                        <layer id="2" name="detectionOutput_inputScores" type="Input">
+                            <output>
+                                <port id="0" precision="__DATA_PRECISION__">__INPUT_SCORES_DIMS__</port>
+                            </output>
+                        </layer>
+                        <layer id="3" name="detectionOutput_inputIMinfo" type="Input">
+                            <output>
+                                <port id="0" precision="__DATA_PRECISION__">__INPUT_IM_INFO_DIMS__</port>
+                            </output>
+                        </layer>
+                        <layer id="4" name="expDetectionOutput" type="ExperimentalDetectronDetectionOutput">
+                             <data __LAYER_PARAMS__/>
+                             <input>
+                                 <port id="0">__INPUT_BOXES_DIMS__</port>
+                                 <port id="1">__INPUT_DELTAS_DIMS__</port>
+                                 <port id="2">__INPUT_SCORES_DIMS__</port>
+                                 <port id="3">__INPUT_IM_INFO_DIMS__</port>
+                             </input>
+                             <output>
+                                 <port id="4" precision="__DATA_PRECISION__">__OUTPUT_BOXES_DIMS__</port>
+                                 <port id="5" precision="__CLASS_PRECISION__">__OUTPUT_CLASSES_DIMS__</port>
+                                 <port id="6" precision="__DATA_PRECISION__">__OUTPUT_SCORES_DIMS__</port>
+                             </output>
+                         </layer>
+                    </layers>
+                    <edges>
+                         <edge from-layer="0" from-port="0" to-layer="4" to-port="0"/>
+                         <edge from-layer="1" from-port="0" to-layer="4" to-port="1"/>
+                         <edge from-layer="2" from-port="0" to-layer="4" to-port="2"/>
+                         <edge from-layer="3" from-port="0" to-layer="4" to-port="3"/>
+                    </edges>
+                </net>
+            )V0G0N";
+
+            const auto inputBoxesDimsStr = dimsToString(blobDims[InputBoxes]);
+            const auto inputDeltasDimsStr = dimsToString(blobDims[InputDeltas]);
+            const auto inputScoresDimsStr = dimsToString(blobDims[InputScores]);
+            const auto inputIMinfoDimsStr = dimsToString(blobDims[InputIMinfo]);
+
+            const auto outputBoxesDimsStr = dimsToString(blobDims[OutputBoxes]);
+            const auto outputClassesDimsStr = dimsToString(blobDims[OutputClasses]);
+            const auto outputScoresDimsStr = dimsToString(blobDims[OutputScores]);
+
+            const auto layerParamsStr = layerParamsToString(layerParams);
+
+            REPLACE_WITH_STR(model, "__DATA_PRECISION__", dataPrecision.name());
+            REPLACE_WITH_STR(model, "__CLASS_PRECISION__", classPrecision.name());
+
+            REPLACE_WITH_STR(model, "__INPUT_BOXES_DIMS__", inputBoxesDimsStr);
+            REPLACE_WITH_STR(model, "__INPUT_DELTAS_DIMS__", inputDeltasDimsStr);
+            REPLACE_WITH_STR(model, "__INPUT_SCORES_DIMS__", inputScoresDimsStr);
+            REPLACE_WITH_STR(model, "__INPUT_IM_INFO_DIMS__", inputIMinfoDimsStr);
+
+            REPLACE_WITH_STR(model, "__OUTPUT_BOXES_DIMS__", outputBoxesDimsStr);
+            REPLACE_WITH_STR(model, "__OUTPUT_CLASSES_DIMS__", outputClassesDimsStr);
+            REPLACE_WITH_STR(model, "__OUTPUT_SCORES_DIMS__", outputScoresDimsStr);
+
+            REPLACE_WITH_STR(model, "__LAYER_PARAMS__", layerParamsStr);
+
+            return model;
+        }
+    static std::string layerParamsToString(const ExpDetectionOutputParams& layerParams)
+        {
+            std::string str;
+
+            str += "deltas_weights=\"";
+            const char* sep = "";
+            for (auto& w : layerParams.deltas_weights)
+            {
+                str += sep + std::to_string(w);
+                sep = ",";
+            }
+            str += "\"";
+
+            str += " max_delta_log_wh=\"" + std::to_string(layerParams.max_delta_log_wh) + "\"";
+            str += " nms_threshold=\"" + std::to_string(layerParams.nms_threshold) + "\"";
+            str += " score_threshold=\"" + std::to_string(layerParams.score_threshold) + "\"";
+            str += " max_detections_per_image=\"" + std::to_string(layerParams.max_detections_per_image) + "\"";
+            str += " num_classes=\"" + std::to_string(layerParams.num_classes) + "\"";
+            str += " post_nms_count=\"" + std::to_string(layerParams.post_nms_count) + "\"";
+            str += " class_agnostic_box_regression=\"" + std::to_string(layerParams.class_agnostic_box_regression) + "\"";
+
+            return str;
+        }
+    static std::string dimsToString(const SizeVector& dims)
+        {
+            std::string str;
+            for (auto& d : dims)
+                str += "<dim>" + std::to_string(d) + "</dim>";
+            return str;
+        }
+    static BlobDimsList calcBlobDims(const SizeParams& sizeParams)
+        {
+            const size_t numRois       = sizeParams.numRois;
+            const size_t numClasses    = sizeParams.numClasses;
+            const size_t maxDetections = sizeParams.maxDetections;
+
+            BlobDimsList list(NumBlobs);
+
+            list[InputBoxes]    = SizeVector({numRois, 4});
+            list[InputDeltas]   = SizeVector({numRois, numClasses * 4});
+            list[InputScores]   = SizeVector({numRois, numClasses});
+            list[InputIMinfo]   = SizeVector({1, 3});
+
+            list[OutputBoxes]   = SizeVector({maxDetections, 4});
+            list[OutputClasses] = SizeVector({maxDetections});
+            list[OutputScores]  = SizeVector({maxDetections});
+
+            return list;
+        }
+    static Layout defaultLayout(int ndims)
+        {
+            switch (ndims)
+            {
+            case 5: return NCDHW;
+            case 4: return NCHW;
+            case 3: return CHW;
+            case 2: return NC;
+            case 1: return C;
+            }
+            return ANY;
+        }
+};
+
+class myriadTestsExpDetectionOutput_nightly: public ExpDetectionOutputTest
+{
+};
+
+TEST_P(myriadTestsExpDetectionOutput_nightly, ExpDetectionOutput)
+{
+    testExpDetectionOutput();
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_generateproposals.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_generateproposals.cpp
new file mode 100644 (file)
index 0000000..546e74e
--- /dev/null
@@ -0,0 +1,12 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_exp_generateproposals_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsExpGenerateProposals_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_ExpGenerateProposalsLayerScores),
+        ::testing::ValuesIn(s_ExpGenerateProposalsLayerImInfo),
+        ::testing::ValuesIn(s_ExpGenerateProposalsLayerParam))
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_generateproposals_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_generateproposals_test.hpp
new file mode 100644 (file)
index 0000000..976b445
--- /dev/null
@@ -0,0 +1,228 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include "myriad_layers_reference_functions.hpp"
+#include "myriad_layers_tests.hpp"
+#include "vpu/utils/error.hpp"
+
+using namespace InferenceEngine;
+
+#define NUM_COORDS (4)
+#define ERROR_BOUND (2.5e-3f)
+
+struct GenerateProposalsParam {
+    float          min_size;
+    float          nms_threshold;
+    int            pre_nms_topn;
+    int            post_nms_topn;
+
+
+    friend std::ostream& operator<<(std::ostream& os, GenerateProposalsParam const& tst)
+    {
+        return os << "min size = " << tst.min_size
+                  << ", nms threshold = " << tst.nms_threshold
+                  << ", pre nms topn = " << tst.pre_nms_topn
+                  << ", post nms topn = " << tst.post_nms_topn;
+    }
+};
+
+using ExpGenerateProposalsTestParams = std::tuple<Dims, std::vector<int>, GenerateProposalsParam>;
+
+typedef myriadLayerTestBaseWithParam<ExpGenerateProposalsTestParams> myriadLayersTestsExpGenerateProposals_nightly;
+
+static void genInputs(InferenceEngine::BlobMap inputMap,
+                      const int numProposals,
+                      const int imgH, const int imgW) {
+    const std::string INPUT_IM_INFO = "input0";
+    const std::string INPUT_ANCHORS = "input1";
+    const std::string INPUT_DELTAS  = "input2";
+    const std::string INPUT_SCORES  = "input3";
+
+    auto inputProposals = inputMap[INPUT_ANCHORS]->buffer().as<ie_fp16*>();
+    auto inputDeltas    = inputMap[INPUT_DELTAS]->buffer().as<ie_fp16*>();
+    auto inputScores    = inputMap[INPUT_SCORES]->buffer().as<ie_fp16*>();
+    auto inputIMinfo    = inputMap[INPUT_IM_INFO]->buffer().as<ie_fp16*>();
+
+    auto iScoresDims = inputMap[INPUT_SCORES]->getTensorDesc().getDims();
+
+    // boxes generator: returns a random interval [a, b] with b > a, shifted down if it overruns max
+    auto genXY = [](int min, int max, int maxSize) {
+            int a = min + maxSize * (static_cast<float>(rand()) / RAND_MAX);
+            int b = a + maxSize * (static_cast<float>(rand()) / RAND_MAX) + 1;
+
+            if (b > max) {
+                const int d = b - max;
+                a -= d;
+                b -= d;
+            }
+            return std::make_pair(a, b);
+        };
+
+    // input boxes
+    {
+        const int X0 = 0, X1 = imgW, SX = (X1 - X0 + 1) * 4 / 5;
+        const int Y0 = 0, Y1 = imgH, SY = (Y1 - Y0 + 1) * 4 / 5;
+
+        for (int idx = 0; idx < numProposals; ++idx) {
+            auto xx = genXY(X0, X1, SX);
+            auto yy = genXY(Y0, Y1, SY);
+
+            ie_fp16* iproposals = &inputProposals[idx * 4];
+
+            iproposals[0] = PrecisionUtils::f32tof16( static_cast<float>(xx.first) );
+            iproposals[1] = PrecisionUtils::f32tof16( static_cast<float>(yy.first) );
+            iproposals[2] = PrecisionUtils::f32tof16( static_cast<float>(xx.second) );
+            iproposals[3] = PrecisionUtils::f32tof16( static_cast<float>(yy.second) );
+        }
+    }
+
+    const auto step_hw = iScoresDims[1] * iScoresDims[0];
+    // input deltas
+    for (int idx = 0; idx < iScoresDims[2]; ++idx) {
+        for (int h = 0; h < iScoresDims[1]; ++h) {
+            for (int w = 0; w < iScoresDims[0]; ++w) {
+                const float maxDelta = 16.0f;
+                float dx = maxDelta * (static_cast<float>(std::rand()) / RAND_MAX);
+                float dy = maxDelta * (static_cast<float>(std::rand()) / RAND_MAX);
+
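+                // Log-space size deltas, kept in a modest range so the decoded
+                // widths/heights stay finite and positive after exp().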
+                const float maxlogDelta = 1000.f / 128;
+                const float minlogDelta = 0.65;
+                float d_log_w = std::log(minlogDelta + (maxlogDelta - minlogDelta) * (static_cast<float>(std::rand()) / RAND_MAX));
+                float d_log_h = std::log(minlogDelta + (maxlogDelta - minlogDelta) * (static_cast<float>(std::rand()) / RAND_MAX));
+
+                // one delta quadruple per (idx, h, w) cell, components strided by step_hw
+                ie_fp16* ideltas = &inputDeltas[idx * step_hw * 4 + h * iScoresDims[0] + w];
+
+                ideltas[0 * step_hw] = PrecisionUtils::f32tof16( dx );
+                ideltas[1 * step_hw] = PrecisionUtils::f32tof16( dy );
+                ideltas[2 * step_hw] = PrecisionUtils::f32tof16( d_log_w );
+                ideltas[3 * step_hw] = PrecisionUtils::f32tof16( d_log_h );
+            }
+        }
+    }
+
+    // input scores
+    // For test stability we try to produce pairwise-different scores:
+    // fp16 has 2^16 distinct codes (including NaNs etc.), but at least 81000 (81*1000) values are needed,
+    // so we walk down through successive representable fp16 numbers, from 1.0 - 1ulp towards 0, until a
+    // small value (below score_threshold) is reached; scores below that may repeat.
+    // The score tensor is filled in a random-like manner using an index step coprime with the overall size.
+    {
+        static const int primes[] = {97, 89, 83, 79, 73, 71, 67, 61, 59, 53, 47, 43,
+                                     41, 37, 31, 29, 23, 19, 17, 13, 11, 7, 5, 3, 2};
+
+        int count = inputMap[INPUT_SCORES]->size();
+
+        int step = 0;
+        for (auto p : primes) {
+            if ((count % p) != 0) {
+                step = p;
+                break;
+            }
+        }
+        IE_ASSERT(step != 0);
+
+        ie_fp16 score = PrecisionUtils::f32tof16( 1.0f );
+        ie_fp16 minScore = PrecisionUtils::f32tof16( 0.001f );
+        int n = std::max(step/2, 1);
+        for (int i = 0; i < count; ++i) {
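+            // NB: for positive fp16 values, decrementing the raw bit pattern
+            // yields the next smaller representable number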
+            if ((uint32_t)score > (uint32_t)minScore)
+                --score;
+
+            inputScores[n] = score;
+            n = (n + step) % count; // covers the whole array, since count and step are coprime
+        }
+    }
+
+    // image info
+    inputIMinfo[0] = PrecisionUtils::f32tof16( (float) imgH );
+    inputIMinfo[1] = PrecisionUtils::f32tof16( (float) imgW );
+}
+
+TEST_P(myriadLayersTestsExpGenerateProposals_nightly, ExpGenerateProposals) {
+    tensor_test_params scoresDims = std::get<0>(GetParam());
+    std::vector<int> im_info = std::get<1>(GetParam());
+    GenerateProposalsParam opParams = std::get<2>(GetParam());
+
+    const auto numProposals = scoresDims.c * scoresDims.h * scoresDims.w;
+
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+    IN_OUT_desc inputTensors, outputTensors;
+    inputTensors.push_back({3}); // im info
+    inputTensors.push_back({numProposals, NUM_COORDS}); // input anchors
+    inputTensors.push_back({scoresDims.c * NUM_COORDS,
+                            scoresDims.h, scoresDims.w}); // input deltas
+    inputTensors.push_back({scoresDims.c, scoresDims.h, scoresDims.w}); // input scores
+
+    outputTensors.push_back({static_cast<size_t>(opParams.post_nms_topn), NUM_COORDS}); // output rois
+    outputTensors.push_back({static_cast<size_t>(opParams.post_nms_topn)}); //output scores
+
+    SetInputTensors(inputTensors);
+    SetOutputTensors(outputTensors);
+
+    std::map<std::string, std::string> layerParams = {
+        {"min_size",          std::to_string(opParams.min_size)},
+        {"nms_threshold",     std::to_string(opParams.nms_threshold)},
+        {"post_nms_count",    std::to_string(opParams.post_nms_topn)},
+        {"pre_nms_count",     std::to_string(opParams.pre_nms_topn)}
+    };
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("ExperimentalDetectronGenerateProposalsSingleImage").params(layerParams)));
+
+    genInputs(_inputMap, numProposals, im_info.at(0), im_info.at(1));
+
+    std::vector<InferenceEngine::Blob::Ptr> refInputBlobs;
+    std::vector<InferenceEngine::Blob::Ptr> refOutputBlobs;
+
+    for (const auto& blob : _inputMap) {
+        auto _refInputBlob = make_shared_blob<ie_fp16>({Precision::FP16,
+                                                        blob.second->getTensorDesc().getDims(),
+                                                        blob.second->getTensorDesc().getLayout()},
+                                                        blob.second->buffer());
+        refInputBlobs.push_back(_refInputBlob);
+    }
+
+    for (const auto& blob : _outputMap) {
+        auto refOutputBlob = make_shared_blob<ie_fp16>({Precision::FP16,
+                                                      blob.second->getTensorDesc().getDims(),
+                                                      blob.second->getTensorDesc().getLayout()});
+        refOutputBlob->allocate();
+        refOutputBlobs.push_back(refOutputBlob);
+    }
+
+    ref_ExpGenerateProposals(refInputBlobs,
+                             refOutputBlobs,
+                             opParams.min_size,
+                             opParams.nms_threshold,
+                             opParams.post_nms_topn,
+                             opParams.pre_nms_topn);
+
+    ASSERT_TRUE(Infer());
+
+    int refIdx = 0;
+    for (auto blob : _outputMap) {
+        CompareCommonAbsolute(blob.second, refOutputBlobs[refIdx++], ERROR_BOUND);
+    }
+}
+
+// Dimensions of scores input tensor
+static std::vector<Dims> s_ExpGenerateProposalsLayerScores = {
+    Dims({1, 3, 8, 8}),
+    Dims({1, 3, 15, 15}),
+    Dims({1, 3, 30, 30}),
+    Dims({1, 3, 60, 60}),
+    Dims({1, 3, 120, 125}),
+    Dims({1, 10, 240, 240}),
+};
+
+static std::vector<std::vector<int>> s_ExpGenerateProposalsLayerImInfo = {
+    {480, 480}, {240, 320}, {480, 320},
+};
+
+static std::vector<GenerateProposalsParam> s_ExpGenerateProposalsLayerParam = {
+    {0.f, 0.7f, 100, 100}, {0.0f, 0.4f, 100, 100}, {0.0f, 0.9f, 100, 100}, {0.0f, 0.7f, 100, 50}, {4.0f, 0.7f, 100, 100},
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_priorgridgenerator_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_priorgridgenerator_test.cpp
new file mode 100644 (file)
index 0000000..12283f0
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright (C) 2019 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_exp_priorgridgenerator_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsExpPriorGridGenerator_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_ExpPriorGridGeneratorLayerInputs),
+        ::testing::ValuesIn(s_ExpPriorGridGeneratorLayerParam))
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_priorgridgenerator_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_priorgridgenerator_test.hpp
new file mode 100644 (file)
index 0000000..780b614
--- /dev/null
@@ -0,0 +1,177 @@
+// Copyright (C) 2019 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include "myriad_layers_reference_functions.hpp"
+#include "myriad_layers_tests.hpp"
+
+using namespace InferenceEngine;
+
+#define NUM_ELEMS_PRIORS (4)
+#define ERROR_BOUND (2.5e-3f)
+
+struct PriorGridGeneratorParam {
+    int               flatten;
+    int               grid_w;
+    int               grid_h;
+    float             stride_w;
+    float             stride_h;
+
+    friend std::ostream& operator<<(std::ostream& os, PriorGridGeneratorParam const& tst)
+    {
+        return os << "grid width = " << tst.grid_w
+                  << ", grid height = " << tst.grid_h
+                  << ", step width = " << tst.stride_w
+                  << ", step height = " << tst.stride_h;
+    };
+};
+
+struct InputDims {
+    tensor_test_params priors;
+    tensor_test_params featureMap;
+    tensor_test_params imData;
+
+    InputDims(Dims priorDims, Dims featureMapDims, Dims imDataDims) :
+                priors(priorDims),
+                featureMap(featureMapDims),
+                imData(imDataDims) {}
+
+    InputDims() = default;
+};
+
+using ExpPriorGridGeneratorTestParams = std::tuple<InputDims, PriorGridGeneratorParam>;
+
+typedef myriadLayerTestBaseWithParam<ExpPriorGridGeneratorTestParams> myriadLayersTestsExpPriorGridGenerator_nightly;
+
+static void genPriors(InferenceEngine::Blob::Ptr rois,
+                    const tensor_test_params& params,
+                    const uint32_t numPriors) {
+    ie_fp16 *roisBlobData = rois->buffer().as<ie_fp16*>();
+    const int maxRangeWidth  = params.w * 4 / 5;
+    const int maxRangeHeight = params.h * 4 / 5;
+
+    for (int i = 0; i < numPriors; i++) {
+        int x0 = std::rand() % maxRangeWidth;
+        int x1 = x0 + (std::rand() % (params.w - x0 - 1)) + 1;
+        int y0 = std::rand() % maxRangeHeight;
+        int y1 = y0 + (std::rand() % (params.h - y0 - 1)) + 1;
+
+        roisBlobData[i * NUM_ELEMS_PRIORS + 0] = PrecisionUtils::f32tof16((float)x0);
+        roisBlobData[i * NUM_ELEMS_PRIORS + 1] = PrecisionUtils::f32tof16((float)y0);
+        roisBlobData[i * NUM_ELEMS_PRIORS + 2] = PrecisionUtils::f32tof16((float)x1);
+        roisBlobData[i * NUM_ELEMS_PRIORS + 3] = PrecisionUtils::f32tof16((float)y1);
+    }
+}
+
+TEST_P(myriadLayersTestsExpPriorGridGenerator_nightly, ExpPriorGridGenerator) {
+    InputDims inputTensorsDims = std::get<0>(GetParam());
+    PriorGridGeneratorParam opParams = std::get<1>(GetParam());
+
+    const auto numPriors = inputTensorsDims.priors.n;
+
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+    IN_OUT_desc inputTensors, outputTensors;
+    inputTensors.push_back({inputTensorsDims.priors.n, inputTensorsDims.priors.c});
+    inputTensors.push_back({inputTensorsDims.featureMap.n,
+                             inputTensorsDims.featureMap.c,
+                             inputTensorsDims.featureMap.h,
+                             inputTensorsDims.featureMap.w});
+    inputTensors.push_back({inputTensorsDims.imData.n,
+                             inputTensorsDims.imData.c,
+                             inputTensorsDims.imData.h,
+                             inputTensorsDims.imData.w});
+
+    const int gridWidth  = opParams.grid_w ? opParams.grid_w : inputTensorsDims.featureMap.w;
+    const int gridHeight = opParams.grid_h ? opParams.grid_h : inputTensorsDims.featureMap.h;
+
+    outputTensors.push_back({numPriors * gridHeight * gridWidth, inputTensorsDims.priors.c});
+
+    SetInputTensors(inputTensors);
+    SetOutputTensors(outputTensors);
+
+    std::map<std::string, std::string> layerParams = {
+        {"flatten",  std::to_string(opParams.flatten)},
+        {"h",        std::to_string(opParams.grid_h)},
+        {"w",        std::to_string(opParams.grid_w)},
+        {"stride_y", std::to_string(opParams.stride_h)},
+        {"stride_x", std::to_string(opParams.stride_w)}
+    };
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("ExperimentalDetectronPriorGridGenerator").params(layerParams)));
+
+    /* Input data generation */
+    for (auto blob : _inputMap) {
+        if (blob.second == _inputMap.begin()->second) {
+            genPriors(blob.second, inputTensorsDims.featureMap, numPriors);
+        } else {
+            GenRandomData(blob.second);
+        }
+    }
+
+    std::vector<InferenceEngine::Blob::Ptr> refInputBlobs;
+    std::vector<InferenceEngine::Blob::Ptr> refOutputBlobs;
+
+    for (auto blob : _inputMap) {
+        auto _refInputBlob = make_shared_blob<ie_fp16>({Precision::FP16,
+                                                        blob.second->getTensorDesc().getDims(),
+                                                        blob.second->getTensorDesc().getLayout()},
+                                                        blob.second->buffer());
+        refInputBlobs.push_back(_refInputBlob);
+    }
+
+    for (auto blob : _outputMap) {
+        auto refOutputBlob = make_shared_blob<ie_fp16>({Precision::FP16,
+                                                      blob.second->getTensorDesc().getDims(),
+                                                      blob.second->getTensorDesc().getLayout()});
+        refOutputBlob->allocate();
+        refOutputBlobs.push_back(refOutputBlob);
+    }
+
+    ref_ExpPriorGridGenerator(refInputBlobs,
+                              refOutputBlobs,
+                              opParams.grid_w,
+                              opParams.grid_h,
+                              opParams.stride_w,
+                              opParams.stride_h);
+
+    ASSERT_TRUE(Infer());
+    CompareCommonAbsolute(_outputMap.begin()->second, refOutputBlobs[0], ERROR_BOUND);
+}
+
+static std::vector<InputDims> s_ExpPriorGridGeneratorLayerInputs = {
+    {
+        InputDims(
+            Dims({3, 4}),          // priors
+            Dims({1, 128, 8, 8}),  // feature map
+            Dims({1, 3, 480, 480}) // im_data
+        )
+    },
+    {
+        InputDims(
+            Dims({3, 4}),           // priors
+            Dims({1, 128, 60, 60}), // feature map
+            Dims({1, 3, 480, 480})  // im_data
+        )
+    },
+    {
+        InputDims(
+            Dims({3, 4}),             // priors
+            Dims({1, 128, 120, 120}), // feature map
+            Dims({1, 3, 480, 480})    // im_data
+        )
+    },
+    {
+        InputDims(
+            Dims({64, 4}),          // priors
+            Dims({1, 128, 16, 16}), // feature map
+            Dims({1, 3, 480, 480})  // im_data
+        )
+    },
+};
+
+static std::vector<PriorGridGeneratorParam> s_ExpPriorGridGeneratorLayerParam = {
+    {1, 0, 0, 16.0f, 16.0f}, {1, 0, 0, 8.0f, 8.0f}, {1, 0, 0, 4.0f, 4.0f},
+    {1, 8, 8, 64.0f, 64.0f}, {1, 10, 16, 0.0f, 0.0f}
+};
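For reference, the output ROI count in the test above is simply the prior count replicated over every grid cell. A small sketch of that shape computation, using values that mirror the first test case (the names here are illustrative):

    #include <cstddef>
    #include <iostream>

    int main() {
        const std::size_t numPriors = 3;        // priors tensor is {3, 4}
        const std::size_t gridW = 8, gridH = 8; // grid falls back to the 8x8 feature map when grid_w/grid_h are 0
        const std::size_t numCoords = 4;

        // one copy of each prior per grid cell
        std::cout << "output dims: {" << numPriors * gridH * gridW
                  << ", " << numCoords << "}\n";  // prints: output dims: {192, 4}
        return 0;
    }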
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_test.cpp
new file mode 100644 (file)
index 0000000..71a3814
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_exp_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        accuracy, myriadLayersTestsExp_nightly,
+        ::testing::ValuesIn(s_expParams));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_test.hpp
new file mode 100644 (file)
index 0000000..1a95aaf
--- /dev/null
@@ -0,0 +1,46 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include "myriad_layers_reference_functions.hpp"
+#include <cmath>
+
+#define BOUND (5.0f)
+#define REL_ERROR_BOUND (0.003f)
+
+using namespace InferenceEngine;
+
+class myriadLayersTestsExp_nightly: public myriadLayersTests_nightly,
+                                    public testing::WithParamInterface<Dims> {};
+
+TEST_P(myriadLayersTestsExp_nightly, TestsExp)
+{
+    auto p = ::testing::WithParamInterface<Dims>::GetParam();
+    SetInputTensor(p);
+    SetOutputTensor(p);
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Exp")));
+    SetFirstInputToRange(-BOUND, BOUND);
+    ASSERT_TRUE(Infer());
+
+    /* output check */
+    ref_exp(_inputMap.begin()->second, _refBlob);
+    CompareCommonRelative(_outputMap.begin()->second, _refBlob, REL_ERROR_BOUND);
+}
+
+static std::vector<Dims> s_expParams = {
+    {{1, 1, 16, 16}},
+    {{1, 2, 16, 16}},
+    {{1, 3, 16, 16}},
+    {{1, 1, 53, 16}},
+    {{1, 2, 53, 16}},
+    {{1, 3, 53, 16}},
+    {{1, 1, 224, 224}},
+    {{1, 2, 224, 224}},
+    {{1, 3, 224, 224}},
+    {{1, 1, 224, 235}},
+    {{1, 2, 224, 235}},
+    {{1, 3, 224, 235}},
+    {{10, 17191, 1, 1}},
+    {{1, 1, 10, 17191}}
+};
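ref_exp comes from myriad_layers_reference_functions.hpp; a plausible element-wise fp16 reference of the same shape would look like the sketch below (illustrative only, not the actual helper; it assumes the ie_fp16 and PrecisionUtils helpers that these tests already use):

    #include <cmath>
    #include <cstddef>

    // compute exp element-wise in fp32, rounding back to fp16
    void exp_ref_sketch(const ie_fp16* src, ie_fp16* dst, std::size_t count) {
        for (std::size_t i = 0; i < count; ++i) {
            const float x = PrecisionUtils::f16tof32(src[i]);
            dst[i] = PrecisionUtils::f32tof16(std::exp(x));
        }
    }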
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_topkrois_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_topkrois_test.cpp
new file mode 100644 (file)
index 0000000..80793c6
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_exp_topkrois_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsExpTopKROIs_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_ExpTopKROIsInputRoisNum),
+        ::testing::ValuesIn(s_ExpTopKROIsMaxRoisNum))
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_topkrois_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_topkrois_test.hpp
new file mode 100644 (file)
index 0000000..f00be24
--- /dev/null
@@ -0,0 +1,161 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include "myriad_layers_reference_functions.hpp"
+#include "myriad_layers_tests.hpp"
+
+using namespace InferenceEngine;
+
+#define NUM_COORDS (4)
+#define ERROR_BOUND (2.5e-3f)
+
+struct TopKROIsParam {
+    int            max_rois;
+
+    friend std::ostream& operator<<(std::ostream& os, TopKROIsParam const& tst)
+    {
+        return os << "max rois = " << tst.max_rois;
+    };
+};
+
+using ExpTopKROIsTestParams = std::tuple<int, TopKROIsParam>;
+
+typedef myriadLayerTestBaseWithParam<ExpTopKROIsTestParams> myriadLayersTestsExpTopKROIs_nightly;
+
+static void genInputs(InferenceEngine::BlobMap inputMap) {
+    const std::string INPUT_ROIS    = "input0";
+    const std::string INPUT_SCORES  = "input1";
+
+    const auto numRois = inputMap[INPUT_ROIS]->getTensorDesc().getDims()[0];
+
+    auto inputRois   = inputMap[INPUT_ROIS]->buffer().as<ie_fp16*>();
+    auto inputScores = inputMap[INPUT_SCORES]->buffer().as<ie_fp16*>();
+
+    // boxes generator
+    auto genXY = [](int min, int max, int maxSize) {
+            int a = min + maxSize * (float(rand()) / RAND_MAX);
+            int b = a + maxSize * (float(rand()) / RAND_MAX) + 1;
+
+            if (b > max) {
+                const int d = b - max;
+                a -= d;
+                b -= d;
+            }
+            return std::make_pair(a, b);
+        };
+
+    // input boxes
+    {
+        const int minS = 200;
+        const int maxS = 880;
+        const int W = minS + maxS * (float(rand()) / RAND_MAX);
+        const int H = minS + maxS * (float(rand()) / RAND_MAX);
+
+        const int X0 = 0, X1 = W, SX = (X1 - X0 + 1) * 3 / 5;
+        const int Y0 = 0, Y1 = H, SY = (Y1 - Y0 + 1) * 3 / 5;
+
+        for (int idx = 0; idx < numRois; ++idx) {
+            auto xx = genXY(X0, X1, SX);
+            auto yy = genXY(Y0, Y1, SY);
+
+            ie_fp16* irois = &inputRois[idx * 4];
+
+            irois[0] = PrecisionUtils::f32tof16( (float) xx.first );
+            irois[1] = PrecisionUtils::f32tof16( (float) yy.first );
+            irois[2] = PrecisionUtils::f32tof16( (float) xx.second );
+            irois[3] = PrecisionUtils::f32tof16( (float) yy.second );
+        }
+    }
+
+    // input scores
+    // For testing stability, we try to produce distinct scores:
+    // fp16 has only 2^16 distinct codes (including NaNs, etc.), but we may need to generate at least 81000 (81*1000),
+    // so we use successive fp16 values, starting from 1.0-1ulp and stepping towards 0 until a small value
+    // (less than score_threshold) is reached; all scores below that threshold may repeat.
+    // The score tensor is filled in a random-like manner, using an index step that is coprime with the overall size.
+    {
+        static const int primes[] = {97, 89, 83, 79, 73, 71, 67, 61, 59, 53, 47, 43,
+                                     41, 37, 31, 29, 23, 19, 17, 13, 11, 7, 5, 3, 2};
+
+        int step = 0;
+        for (auto p : primes) {
+            if ((numRois % p) != 0) {
+                step = p;
+                break;
+            }
+        }
+        IE_ASSERT(step != 0); // unable to generate consistent scores list
+
+        ie_fp16 score = PrecisionUtils::f32tof16( 1.0f );
+        ie_fp16 minScore = PrecisionUtils::f32tof16( 0.001f );
+        int n = std::max(step/2, 1);
+        for (int i = 0; i < numRois; ++i) {
+            if ((uint32_t)score > (uint32_t)minScore)
+                --score;
+
+            inputScores[n] = score;
+            n = (n + step) % numRois; // covers the whole array, since numRois and step are coprime
+        }
+    }
+}
+
+TEST_P(myriadLayersTestsExpTopKROIs_nightly, ExpTopKROIs) {
+    int inputRoisNum = std::get<0>(GetParam());
+    TopKROIsParam opParams = std::get<1>(GetParam());
+
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+    IN_OUT_desc inputTensors, outputTensors;
+    inputTensors.push_back({static_cast<size_t>(inputRoisNum), NUM_COORDS}); // input rois
+    inputTensors.push_back({static_cast<size_t>(inputRoisNum)}); // input probs
+
+    outputTensors.push_back({static_cast<size_t>(opParams.max_rois), NUM_COORDS}); // output rois
+
+    SetInputTensors(inputTensors);
+    SetOutputTensors(outputTensors);
+
+    std::map<std::string, std::string> layerParams = {
+        {"max_rois", std::to_string(opParams.max_rois)},
+    };
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("ExperimentalDetectronTopKROIs").params(layerParams)));
+
+    genInputs(_inputMap);
+
+    std::vector<InferenceEngine::Blob::Ptr> refInputBlobs;
+    std::vector<InferenceEngine::Blob::Ptr> refOutputBlobs;
+
+    for (const auto& blob : _inputMap) {
+        auto _refInputBlob = make_shared_blob<ie_fp16>({Precision::FP16,
+                                                        blob.second->getTensorDesc().getDims(),
+                                                        blob.second->getTensorDesc().getLayout()},
+                                                        blob.second->buffer());
+        refInputBlobs.push_back(_refInputBlob);
+    }
+
+    for (const auto& blob : _outputMap) {
+        auto refOutputBlob = make_shared_blob<ie_fp16>({Precision::FP16,
+                                                      blob.second->getTensorDesc().getDims(),
+                                                      blob.second->getTensorDesc().getLayout()});
+        refOutputBlob->allocate();
+        refOutputBlobs.push_back(refOutputBlob);
+    }
+
+    ref_ExpTopKROIs(refInputBlobs,
+                    refOutputBlobs,
+                    opParams.max_rois);
+
+    ASSERT_TRUE(Infer());
+
+    CompareCommonAbsolute(_outputMap.begin()->second, refOutputBlobs[0], ERROR_BOUND);
+}
+
+static std::vector<int> s_ExpTopKROIsInputRoisNum = {
+    100, 150, 50, 200, 101
+};
+
+static std::vector<TopKROIsParam> s_ExpTopKROIsMaxRoisNum = {
+    { 100 }, { 150 }, { 50 }, { 101 }
+};
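The genXY helper in this file draws a random segment [a, b] of length up to maxSize and shifts it back inside [min, max] when it overshoots. The same logic in isolation, with the two random fractions passed in so the worked case is deterministic (genXYFixed is a name introduced here for illustration):

    #include <cassert>
    #include <utility>

    std::pair<int, int> genXYFixed(int min, int max, int maxSize, float r1, float r2) {
        int a = min + static_cast<int>(maxSize * r1);
        int b = a + static_cast<int>(maxSize * r2) + 1;
        if (b > max) {              // shift the segment back so it ends at max
            const int d = b - max;
            a -= d;
            b -= d;
        }
        return std::make_pair(a, b);
    }

    int main() {
        // maxSize=64, r1=r2=0.75: a=48, b=97 > max=80, so [48, 97] shifts to [31, 80]
        auto xy = genXYFixed(0, 80, 64, 0.75f, 0.75f);
        assert(xy.first == 31 && xy.second == 80);
        return 0;
    }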
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_flatten_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_flatten_test.cpp
new file mode 100644 (file)
index 0000000..261a682
--- /dev/null
@@ -0,0 +1,12 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_flatten_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsFlatten_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_flattenTensors),
+        ::testing::ValuesIn(s_flattenAxis)
+    )
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_flatten_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_flatten_test.hpp
new file mode 100644 (file)
index 0000000..833bf87
--- /dev/null
@@ -0,0 +1,137 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+
+typedef myriadLayerTestBaseWithParam<std::tuple<InferenceEngine::SizeVector, int32_t>> myriadLayersTestsFlatten_nightly;
+
+static void ref_flatten(const InferenceEngine::Blob::Ptr src,
+                        InferenceEngine::Blob::Ptr dst) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+    int32_t OW = 1;
+    int32_t OH = 1;
+    int32_t OC = 1;
+    int32_t IW = 1;
+    int32_t IH = 1;
+    int32_t IC = 1;
+    get_dims(src, IW, IH, IC);
+    get_dims(dst, OW, OH, OC);
+
+    ASSERT_EQ(IW * IH * IC, OW * OH * OC);
+
+    const uint16_t *src_data = src->buffer();
+    uint16_t *dst_data = dst->buffer();
+
+    ASSERT_NE(src_data, nullptr);
+    ASSERT_NE(dst_data, nullptr);
+
+    size_t sz = IW * IH * IC;
+    std::vector<uint16_t> temp(sz);
+    uint16_t* pTmp = temp.data();
+    ASSERT_NE(pTmp, nullptr);
+    // HWC -> CHW
+    for (int32_t ic = 0; ic < IC; ++ic) {
+        for (int32_t ih = 0; ih < IH; ++ih) {
+            for (int32_t iw = 0; iw < IW; ++iw) {
+                int32_t iidx = iw + IW * ( ih  + ic * IH );
+                int32_t oodx = ic + IC * ( iw  + ih * IW );
+                temp[iidx] = src_data[oodx];
+            }
+        }
+    }
+    // CHW -> HWC
+    for (int32_t ow = 0; ow < OW; ++ow) {
+        for (int32_t oh = 0; oh < OH; ++oh) {
+            for (int32_t oc = 0; oc < OC; ++oc) {
+                int32_t iidx = ow + OW * ( oh  + oc * OH );
+                int32_t oodx = oc + OC * ( ow  + oh * OW );
+                dst_data[oodx] = temp[iidx];
+            }
+        }
+    }
+}
+
+TEST_P(myriadLayersTestsFlatten_nightly, Flatten) {
+    auto input = std::get<0>(GetParam());
+    int32_t axis_val = std::get<1>(GetParam());
+    IN_OUT_desc input_tensor;
+    IN_OUT_desc output_tensor;
+    input_tensor.push_back(input);
+    SetInputTensors(input_tensor);
+    SetInputReshape();
+    InferenceEngine::SizeVector out_dims;
+    if (input.size() < 4) {
+        axis_val -= 1;
+        axis_val %= input.size();
+    }
+    if (input.size() == 4) {
+        ASSERT_EQ(input[0], 1);
+    }
+    out_dims.push_back(1);
+    switch (axis_val) {
+        case 0:
+            ASSERT_NE(input.size(), 4);
+            {
+                int32_t count = 1;
+                for ( auto val : input)
+                    count *= val;
+                out_dims.push_back(count);
+            }
+            break;
+        case 1:
+            {
+                if (input.size() == 4) {
+                    int32_t count = 1;
+                    for ( auto val : input)
+                        count *= val;
+                    out_dims.push_back(count);
+                } else if (input.size() == 3) {
+                    out_dims.push_back(input[0]);
+                    out_dims.push_back(input[1] * input[2]);
+                } else if (input.size() == 2) {
+                    out_dims = input;
+                }
+            }
+            break;
+        case 2:
+            {
+                ASSERT_NE(input.size(), 2);
+                if (input.size() == 3) {
+                    out_dims = input;
+                } else if (input.size() == 4) {
+                    out_dims.push_back(input[1]);
+                    out_dims.push_back(input[2] * input[3]);
+                }
+            }
+            break;
+        case 3:
+            ASSERT_EQ(input.size(), 4);
+            out_dims = input;
+            break;
+        default:
+            FAIL() << "Unsupported axis value";
+    }
+    output_tensor.push_back(out_dims);
+    SetOutputTensors(output_tensor);
+    std::map<std::string, std::string> params;
+    params["axis"] = std::to_string(axis_val);
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Flatten").params(params)));
+    ASSERT_TRUE(Infer());
+    ref_flatten(_inputMap.begin()->second, _refBlob);
+    CompareCommonAbsolute(_outputMap.begin()->second, _refBlob, 0);
+}
+
+static std::vector<InferenceEngine::SizeVector> s_flattenTensors = {
+    {{1, 4, 8, 16}},
+
+    // FIXME: the test is written for [N]HWC layout, but InferenceEngine doesn't have 3D HWC layout.
+//    {{4, 16, 32}},
+
+    {{64, 32}},
+};
+
+static std::vector<int32_t> s_flattenAxis = {
+    1, 2, 3
+};
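The switch in the test above implements a simple rule for 4D inputs with batch 1: dimensions before the (adjusted) axis are kept, and everything from the axis onwards is multiplied together. A compact equivalent (flattenShape is a hypothetical helper introduced for illustration):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    std::vector<std::size_t> flattenShape(const std::vector<std::size_t>& in, std::size_t axis) {
        std::vector<std::size_t> out = {1};         // batch dim stays 1
        for (std::size_t i = 1; i < axis; ++i) {
            out.push_back(in[i]);                   // dims before the axis are kept
        }
        std::size_t tail = 1;
        for (std::size_t i = axis; i < in.size(); ++i) {
            tail *= in[i];                          // dims from the axis onwards collapse
        }
        out.push_back(tail);
        return out;
    }

    int main() {
        assert((flattenShape({1, 4, 8, 16}, 1) == std::vector<std::size_t>{1, 512}));
        assert((flattenShape({1, 4, 8, 16}, 2) == std::vector<std::size_t>{1, 4, 128}));
        assert((flattenShape({1, 4, 8, 16}, 3) == std::vector<std::size_t>{1, 4, 8, 16}));
        return 0;
    }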
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_floor_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_floor_test.cpp
new file mode 100644 (file)
index 0000000..9342ee0
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_floor_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        accuracy, myriadLayersTestsFloor_nightly,
+        ::testing::ValuesIn(s_FloorParams));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_floor_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_floor_test.hpp
new file mode 100644 (file)
index 0000000..a086ac9
--- /dev/null
@@ -0,0 +1,62 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include <cmath>
+
+#define BOUND (1.e+7f)
+#define ERROR_BOUND (0.f)
+
+using namespace InferenceEngine;
+
+void ref_floor(const InferenceEngine::Blob::Ptr src,
+             InferenceEngine::Blob::Ptr dst) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+    ASSERT_EQ(src->getTensorDesc().getDims().size(), dst->getTensorDesc().getDims().size());
+    ie_fp16 *srcData = src->buffer();
+    ie_fp16 *dstData = dst->buffer();
+    ASSERT_NE(srcData, nullptr);
+    ASSERT_NE(dstData, nullptr);
+    for (size_t indx = 0; indx < src->size(); indx++) {
+        dstData[indx] =
+                PrecisionUtils::f32tof16(floorf(PrecisionUtils::f16tof32(srcData[indx])));
+    }
+}
+
+class myriadLayersTestsFloor_nightly: public myriadLayersTests_nightly,
+                                    public testing::WithParamInterface<Dims> {
+public:
+};
+
+TEST_P(myriadLayersTestsFloor_nightly, TestsFloor)
+{
+    auto p = ::testing::WithParamInterface<Dims>::GetParam();
+    SetInputTensor(p);
+    SetOutputTensor(p);
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Floor")));
+    SetFirstInputToRange(-BOUND, BOUND);
+    ASSERT_TRUE(Infer());
+
+    /* output check */
+    ref_floor(_inputMap.begin()->second, _refBlob);
+    CompareCommonAbsolute(_outputMap.begin()->second, _refBlob, ERROR_BOUND);
+}
+
+static std::vector<Dims> s_FloorParams = {
+        {{1, 1, 16, 16}},
+        {{1, 2, 16, 16}},
+        {{1, 3, 16, 16}},
+        {{1, 1, 53, 16}},
+        {{1, 2, 53, 16}},
+        {{1, 3, 53, 16}},
+        {{1, 1, 224, 224}},
+        {{1, 2, 224, 224}},
+        {{1, 3, 224, 224}},
+        {{1, 1, 224, 235}},
+        {{1, 2, 224, 235}},
+        {{1, 3, 224, 235}},
+        {{10, 17191, 1, 1}},
+        {{1, 1, 10, 17191}}
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_fully_connected_tests.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_fully_connected_tests.cpp
new file mode 100644 (file)
index 0000000..fa2f928
--- /dev/null
@@ -0,0 +1,24 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_fully_connected_tests.hpp"
+INSTANTIATE_TEST_CASE_P(
+        accuracy, myriadLayersTestsFullyConnected_nightly,
+        ::testing::ValuesIn(s_fcTestParams)
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsFullyConnectedBatch_nightly,
+        ::testing::Combine(
+            ::testing::ValuesIn(s_fcTestBatchParams)
+          , ::testing::ValuesIn(s_fcTestBatchOutSizes)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsFullyConnectedPVA_nightly,
+        ::testing::Combine(
+            ::testing::ValuesIn(s_fcTestPVAParams)
+          , ::testing::ValuesIn(s_fcTestPVAOutSizes)
+          )
+);
+
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_fully_connected_tests.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_fully_connected_tests.hpp
new file mode 100644 (file)
index 0000000..791e1a8
--- /dev/null
@@ -0,0 +1,213 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include "myriad_layers_reference_functions.hpp"
+
+using namespace InferenceEngine;
+
+class myriadLayersTestsFullyConnected_nightly: public myriadLayersTests_nightly,
+                           public testing::WithParamInterface<fcon_test_params> {
+};
+
+typedef std::tuple<InferenceEngine::SizeVector, uint32_t> IR3_FC_params;
+class myriadLayersTestsFullyConnectedBatch_nightly: public myriadLayersTests_nightly,
+                           public testing::WithParamInterface<IR3_FC_params> {
+};
+
+TEST_P(myriadLayersTestsFullyConnected_nightly, TestsFullyConnected)
+{
+    fcon_test_params p = ::testing::WithParamInterface<fcon_test_params>::GetParam();
+
+    size_t sz_weights = p.in.c * p.in.h * p.in.w * p.out_c;
+    size_t sz_bias = 0;
+    size_t sz = sz_weights + sz_bias;
+    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(GenWeights(sz));
+    uint16_t* weights = weights_ptr->data().as<uint16_t*>();
+    SetInputTensors({{p.in.n, p.in.c, p.in.h, p.in.w}});
+    SetOutputTensors({{1, p.out_c}});
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("FullyConnected")
+                                        .params({{"out-size", std::to_string(p.out_c)}})
+                                        .weights(sz_weights)
+                                        .biases(sz_bias),
+                                        NetworkInitParams().layoutPreference(vpu::LayoutPreference::ChannelMinor),
+                                        weights_ptr));
+    ASSERT_TRUE(Infer());
+
+    ref_innerproduct(_inputMap.begin()->second, _refBlob, weights,  sz_weights, nullptr, sz_bias, p.out_c);
+    CompareCommonAbsolute(_outputMap.begin()->second, _refBlob, p.error_bound);
+}
+
+static void constWeightsRange1(uint16_t* ptr, size_t weightsSize) {
+    ASSERT_NE(ptr, nullptr);
+    float shft = 0.125f;
+    float val = 0.125f;
+    for (size_t count = 0 ; count < weightsSize; ++count) {
+        ptr[count] = PrecisionUtils::f32tof16(val);
+        val += shft;
+        if (val >0.9f)
+            val = -0.9f;
+    }
+}
+
+static void genTestData1(InferenceEngine::Blob::Ptr blob) {
+    ASSERT_NE(blob, nullptr);
+    Layout layout = blob->getTensorDesc().getLayout();
+    SizeVector dims = blob->getTensorDesc().getDims();
+    ie_fp16* ptr = blob->buffer().as<ie_fp16*>();
+    if (layout == NCHW || layout == NHWC) {
+        size_t N = dims[0];
+        size_t C = dims[1];
+        size_t H = dims[2];
+        size_t W = dims[3];
+        float counter = 0.025f;
+        for (size_t n = 0; n < N; n++) {
+            for (size_t c = 0; c < C; c++) {
+                for (size_t h = 0; h < H; h++) {
+                    for (size_t w = 0; w < W; w++) {
+                        size_t actualIdx = layout == NCHW ?
+                                           w + h * W + c * W * H + n * W * H * C : c + w * C + h * C * W +
+                                                                                   n * W * H * C;
+                        ptr[actualIdx] = PrecisionUtils::f32tof16(counter);
+                        counter += 0.025f;
+                        if (counter > 0.99990f)
+                            counter = -1.0f;
+                    }
+                }
+            }
+        }
+    } else {
+        ASSERT_TRUE(false);
+    }
+}
+
+
+TEST_P(myriadLayersTestsFullyConnectedBatch_nightly, TestsFullyConnected)
+{
+    auto p = ::testing::WithParamInterface<IR3_FC_params>::GetParam();
+    auto input_tensor = std::get<0>(p);
+    uint32_t out_size = std::get<1>(p);
+    int32_t IW = 0;
+    int32_t IH = 0;
+    int32_t IC = 0;
+    int32_t I_N = 0;
+
+    std::map<std::string, std::string> params;
+    params["out-size"] = std::to_string(out_size);
+    get_dims(input_tensor, IW, IH, IC, I_N);
+    InferenceEngine::SizeVector output_tensor = {(size_t)I_N, (size_t)out_size};
+    if (I_N > 1)
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+    else
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(YES);
+
+    size_t sz_weights = IC * IH * IW * out_size;
+    size_t sz_bias = 0;
+    size_t sz = sz_weights + sz_bias;
+    _genDataCallback = genTestData1;
+    _testNet.addLayer(LayerInitParams("FullyConnected")
+             .params(params)
+             .weights(sz_weights).fillWeights(constWeightsRange1)
+             .biases(sz_bias).fillBiases(constWeightsRange1)
+             .in({input_tensor})
+             .out({output_tensor}),
+             ref_innerproduct_wrap);
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams().useHWOpt( CheckMyriadX() ) ));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), 0.02);
+}
+
+class myriadLayersTestsFullyConnectedPVA_nightly: public myriadLayersTests_nightly,
+                           public testing::WithParamInterface<IR3_FC_params> {
+};
+
+TEST_P(myriadLayersTestsFullyConnectedPVA_nightly, TestsFullyConnected)
+{
+    auto p = ::testing::WithParamInterface<IR3_FC_params>::GetParam();
+    auto input_tensor = std::get<0>(p);
+    uint32_t out_size = std::get<1>(p);
+    int32_t IW = 0;
+    int32_t IH = 0;
+    int32_t IC = 0;
+    int32_t I_N = 0;
+
+    std::map<std::string, std::string> params;
+    params["out-size"] = std::to_string(out_size);
+    get_dims(input_tensor, IW, IH, IC, I_N);
+    InferenceEngine::SizeVector output_tensor = {(size_t)I_N, (size_t)out_size};
+    if (I_N > 1)
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+    else
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(YES);
+
+    size_t sz_weights = IC * IH * IW * out_size;
+    size_t sz_bias = 0;
+    size_t sz = sz_weights + sz_bias;
+    _genDataCallback = genTestData1;
+    _testNet.addLayer(LayerInitParams("FullyConnected")
+             .params(params)
+             .weights(sz_weights).fillWeights(constWeightsRange1)
+             .biases(sz_bias).fillBiases(constWeightsRange1)
+             .in({input_tensor})
+             .out({output_tensor}),
+             ref_innerproduct_wrap
+             );
+    std::map<std::string, std::string> reshape_params = {
+                {"axis", "0"}
+              , {"dim", "0,0"}
+              , {"num_axes", "-1"}
+    };
+    size_t in_n = I_N;
+    _testNet.addLayer(LayerInitParams("Reshape")
+             .params(reshape_params)
+             .in({output_tensor})
+             .out({{in_n, out_size}}),
+             ref_reshape_wrap);
+    size_t last_sz = 8;
+    std::map<std::string, std::string> fc_params;
+    fc_params["out-size"] = std::to_string(last_sz);
+    _testNet.addLayer(LayerInitParams("FullyConnected")
+             .params(fc_params)
+             .weights(out_size * last_sz).fillWeights(constWeightsRange1)
+             .biases(0)
+             .in({{in_n, out_size}})
+             .out({{in_n, last_sz}}),
+             ref_innerproduct_wrap);
+
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams().useHWOpt( CheckMyriadX())));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), 0.07);
+}
+
+static std::vector<fcon_test_params> s_fcTestParams = {
+    {{1,    1, 16,  8},    8, 0.02f},
+    {{1,    1,  8, 16},    8, 0.02f},
+    {{1,    1,  8, 16},    4, 0.02f},
+    {{1,    4,  8, 16},    4, 0.065f},
+    {{1,   16, 16, 16},   16, 0.36f},
+    {{1,   16,  8,  8},    8, 0.065f},
+    {{1,  512,  7,  7}, 4096, 0.4f},
+    {{1, 4096,  1,  1}, 4096, 0.1f},   // AlexNet layer
+    {{1, 4096,  1,  1}, 1000, 0.1f},   // AlexNet layer
+    {{1, 1024,  1,  1}, 1000, 0.1f},   // GoogleNet layer
+    {{1,   71,  1, 78}, 6248, 0.065f}, // LPR-0001 layer
+    {{1, 1024,  7,  7}, 2048, 0.5f},
+    {{1,  576,  1,  1},  128, 0.02f},
+    {{1, 1152,  1,  1},  128, 0.032f},
+};
+
+
+static const std::vector<InferenceEngine::SizeVector> s_fcTestBatchParams = {
+    {10, 8, 3,  3}
+};
+
+static const std::vector<uint32_t> s_fcTestBatchOutSizes = {
+    12
+};
+
+static const std::vector<InferenceEngine::SizeVector> s_fcTestPVAParams = {
+    {2, 2, 7,  7}
+};
+
+static const std::vector<uint32_t> s_fcTestPVAOutSizes = {
+    16
+};
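ref_innerproduct, used as the reference in these tests, boils down to a matrix-vector product accumulated in fp32 and rounded back to fp16. A minimal sketch of that math (illustrative only; it assumes the ie_fp16/PrecisionUtils helpers from these tests, and a weight layout of out-size rows by in-size columns):

    #include <cstddef>

    // y[o] = sum_i x[i] * W[o * inSize + i], accumulated in fp32
    void innerproduct_sketch(const ie_fp16* x, const ie_fp16* W,
                             ie_fp16* y, std::size_t inSize, std::size_t outSize) {
        for (std::size_t o = 0; o < outSize; ++o) {
            float acc = 0.0f;
            for (std::size_t i = 0; i < inSize; ++i) {
                acc += PrecisionUtils::f16tof32(x[i]) *
                       PrecisionUtils::f16tof32(W[o * inSize + i]);
            }
            y[o] = PrecisionUtils::f32tof16(acc);
        }
    }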
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_gather_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_gather_test.cpp
new file mode 100644 (file)
index 0000000..251c65d
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_gather_test.hpp"
+
+using namespace testing;
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayerGather_nightly,
+                        Values(GatherTestParams { {36549, 1024},   {16},           0, "FP16" },
+                               GatherTestParams { {10},            {10},           0, "FP16" },
+                               GatherTestParams { {36549, 1024},   {10},           0, "FP16" },
+                               GatherTestParams { {365490},        {10},           0, "FP16" },
+                               GatherTestParams { {10, 1024},      {10},           0, "FP16" },
+                               GatherTestParams { {30522, 768},    {1, 128, 1},    0, "FP16" },
+                               GatherTestParams { {30522, 768},    {1, 128, 1},    1, "FP16" },
+                               GatherTestParams { {6, 12, 10, 24}, {15, 4, 20, 5}, 0, "FP16" },
+                               GatherTestParams { {6, 12, 10, 24}, {15, 4, 20, 5}, 1, "FP16" },
+                               GatherTestParams { {6, 12, 10, 24}, {15, 4, 20, 5}, 2, "FP16" },
+                               GatherTestParams { {6, 12, 10, 24}, {15, 4, 20, 5}, 3, "FP16" },
+                               GatherTestParams { {10},            {10},           0, "I32" },
+                               GatherTestParams { {365490},        {10},           0, "I32" },
+                               GatherTestParams { {36549, 768},    {10},           0, "I32" },
+                               GatherTestParams { {30522, 768},    {1, 128, 1},    0, "I32" },
+                               GatherTestParams { {30522, 768},    {1, 128, 1},    1, "I32" },
+                               GatherTestParams { {6, 12, 10, 24}, {15, 4, 20, 5}, 0, "I32" },
+                               GatherTestParams { {6, 12, 10, 24}, {15, 4, 20, 5}, 3, "I32" }));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_gather_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_gather_test.hpp
new file mode 100644 (file)
index 0000000..868858f
--- /dev/null
@@ -0,0 +1,310 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include "myriad_layers_reference_functions.hpp"
+#include "vpu_tests_config.hpp"
+#include "vpu_case_common.hpp"
+
+#include <algorithm>
+#include <random>
+#include <vector>
+#include <string>
+
+using namespace InferenceEngine;
+
+using   InputShape = std::vector<int>;
+using IndicesShape = std::vector<int>;
+using         Axis = int;
+using         Type = std::string;  // "FP16", "I32"
+
+using GatherTestParams = std::tuple<InputShape,
+                                    IndicesShape,
+                                    Axis,
+                                    Type>;
+
+class myriadLayerGather_nightly :
+    public myriadLayerTestBaseWithParam<GatherTestParams> {
+protected:
+
+    void testGather() {
+        SKIP_IF_CURRENT_TEST_IS_DISABLED();
+
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+        //
+        // Parse and check test parameters
+        //
+
+        const GatherTestParams& gatherTestParams = GetParam();
+        const std::vector<int>&   inputShape = std::get<0>(gatherTestParams);
+        const std::vector<int>& indicesShape = std::get<1>(gatherTestParams);
+        const             int      axisParam = std::get<2>(gatherTestParams);
+        const std::string     &         type = std::get<3>(gatherTestParams);
+
+        IE_ASSERT(type == "I32" ||
+                  type == "FP16");
+
+        const int indicesNDims = indicesShape.size();
+        const int   inputNDims =   inputShape.size();
+        const int  outputNDims = indicesNDims + inputNDims - 1;
+        IE_ASSERT(outputNDims > 0);
+
+        // NB: axis param must be in [-len(in.shape), len(in.shape)-1]
+        const int axis = axisParam + (axisParam < 0 ? inputNDims : 0);
+        IE_ASSERT(0 <= axis && axis < inputNDims);
+
+        // Deduce shape of `output` tensor
+        //
+        // E.g.:
+        //    {N, C, H, W} could be shape of `input`
+        // {I, J}          could be shape of `indices`
+        // {I, J, C, H, W} could be shape of `output`
+        std::vector<int> outputShape;
+        for (int i = 0; i < axis; i++) {
+            outputShape.push_back(inputShape[i]);
+        }
+        for (int i = 0; i < indicesNDims; i++) {
+            outputShape.push_back(indicesShape[i]);
+        }
+        for (int i = axis + 1; i < inputNDims; i++) {
+            outputShape.push_back(inputShape[i]);
+        }
+        IE_ASSERT(outputShape.size() == outputNDims);
+
+        //
+        // Skip test if data is too large for device
+        //
+
+        const int inputTotal = getTotal(inputShape);
+        const int outputTotal = getTotal(outputShape);
+        const int indicesTotal = getTotal(indicesShape);
+
+        const Precision precision = type == "I32" ?
+                                        Precision::I32 :
+                                        Precision::FP16;
+
+        const int bpp = precision == Precision::I32 ?
+                                         sizeof(int32_t) :
+                                         sizeof(ie_fp16);
+
+        const int threshold = 50 * (1 << 20);  // empirical
+
+        const bool tooLarge = inputTotal * bpp > threshold ||
+                             outputTotal * bpp > threshold;
+
+        DISABLE_IF(tooLarge && !CheckMA2085());
+
+        //
+        // Initialize 1-layer network
+        //
+
+        std::string model = createModel(inputShape,
+                                        outputShape,
+                                        indicesShape,
+                                        axis,
+                                        type);
+
+        ASSERT_NO_THROW(readNetwork(model));
+
+        const auto& network = _cnnNetwork;
+
+        _inputsInfo = network.getInputsInfo();
+        _inputsInfo["input"]->setPrecision(precision);
+        _inputsInfo["indices"]->setPrecision(Precision::I32);
+
+        _outputsInfo = network.getOutputsInfo();
+        _outputsInfo["gather"]->setPrecision(precision);
+
+        //
+        // Create infer request and get its blobs pointers
+        //
+
+        StatusCode st = OK;
+
+        ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, _config, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+        ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr inputBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("input", inputBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr indicesBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("indices", indicesBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr outputBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("gather", outputBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr referenceBlob;
+        if (type == "I32") {
+            referenceBlob = make_shared_blob<int32_t>(outputBlob->getTensorDesc());
+        } else {
+            referenceBlob = make_shared_blob<ie_fp16>(outputBlob->getTensorDesc());
+        }
+        referenceBlob->allocate();
+
+        //
+        // Initialize `input` and `indices` blobs
+        //
+
+        void* inputBlobData = inputBlob->buffer();
+        ASSERT_NE(inputBlobData, nullptr);
+
+        void* indicesBlobData = indicesBlob->buffer();
+        ASSERT_NE(indicesBlobData, nullptr);
+
+        const int indicesLimit = inputShape[axis] - 1;
+
+        std::mt19937 gen;
+        fillUniformly(inputBlobData, inputTotal, precision, 0, 255, gen);
+        fillUniformly(indicesBlobData, indicesTotal, Precision::I32, 0, indicesLimit, gen);
+
+        //
+        // Infer
+        //
+
+        const auto inputLayout = inputBlob->getTensorDesc().getLayout();
+        const auto outputLayout = outputBlob->getTensorDesc().getLayout();
+        const auto indicesLayout = indicesBlob->getTensorDesc().getLayout();
+        const auto layoutPreference = vpu::LayoutPreference::ChannelMajor;
+
+        inputBlob->getTensorDesc().setLayout(vpu::deviceLayout(inputLayout, layoutPreference));
+        indicesBlob->getTensorDesc().setLayout(vpu::deviceLayout(indicesLayout, layoutPreference));
+        outputBlob->getTensorDesc().setLayout(vpu::deviceLayout(outputLayout, layoutPreference));
+        referenceBlob->getTensorDesc().setLayout(vpu::deviceLayout(outputLayout, layoutPreference));
+
+        ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        //
+        // Check result
+        //
+
+        ref_gather(indicesBlob, inputBlob, referenceBlob, axis);
+
+        CompareCommonExact(outputBlob, referenceBlob);
+    }
+
+private:
+
+    // Count total number of elements in ND tensor
+    static
+    int getTotal(const std::vector<int>& shape) {
+        return std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>());
+    }
+
+    // Fill data[] array with random numbers
+    // distributed uniformly in the interval [a,b]
+    static
+    void fillUniformly(void* data,
+                       const int num,
+                       const Precision& precision,
+                       const double a,
+                       const double b,
+                       std::mt19937& gen) {
+        if (Precision::FP16 == precision) {
+            std::uniform_real_distribution<float> uniform(a, b);
+            for (int i = 0; i < num; i++) {
+                const float v = uniform(gen);
+                reinterpret_cast<ie_fp16*>(data)[i] = PrecisionUtils::f32tof16(v);
+            }
+        } else if (Precision::I32 == precision) {
+            const int ia = static_cast<int>(std::round(a));
+            const int ib = static_cast<int>(std::round(b));
+            std::uniform_int_distribution<int> uniform(ia, ib);
+            for (int i = 0; i < num; i++) {
+                const int v = uniform(gen);
+                reinterpret_cast<int32_t*>(data)[i] = v;
+            }
+        } else {
+            IE_ASSERT(precision == Precision::I32 ||
+                      precision == Precision::FP16);
+        }
+    }
+
+    // Note that:
+    // - IR version is v7 (should be v10), because the readNetwork() method
+    //   cannot parse (it rejects) IR v10 if there is no weights tensor
+    static
+    std::string createModel(const std::vector<int>& inputShape,
+                            const std::vector<int>& outputShape,
+                            const std::vector<int>& indicesShape,
+                            const             int   axis,
+                            const std::string     & type) {
+        std::string model = R"V0G0N(
+            <?xml version="1.0" ?>
+            <net name="testGather" version="7">
+                <layers>
+                    <layer id="0" name="input" type="Input">
+                        <output>
+                            <port id="0" precision="__TYPE__">
+                                __INPUT_DIMS__
+                            </port>
+                        </output>
+                    </layer>
+                    <layer id="1" name="indices" type="Input">
+                        <output>
+                            <port id="0" precision="I32">
+                                __INDICES_DIMS__
+                            </port>
+                        </output>
+                    </layer>
+                    <layer id="2" name="gather" type="Gather">
+                        <data axis="__AXIS__"/>
+                        <input>
+                            <port id="0" precision="__TYPE__">
+                                __INPUT_DIMS__
+                            </port>
+                            <port id="1" precision="I32">
+                                __INDICES_DIMS__
+                            </port>
+                        </input>
+                        <output>
+                            <port id="4" precision="__TYPE__">
+                                __OUTPUT_DIMS__
+                            </port>
+                        </output>
+                    </layer>
+                </layers>
+                <edges>
+                    <edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
+                    <edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
+                </edges>
+            </net>
+        )V0G0N";
+
+        const std::string inputDimsStr = shapeToDimsString(inputShape);
+        const std::string outputDimsStr = shapeToDimsString(outputShape);
+        const std::string indicesDimsStr = shapeToDimsString(indicesShape);
+        const std::string axisStr = std::to_string(axis);
+        REPLACE_WITH_STR(model, "__INPUT_DIMS__", inputDimsStr);
+        REPLACE_WITH_STR(model, "__OUTPUT_DIMS__", outputDimsStr);
+        REPLACE_WITH_STR(model, "__INDICES_DIMS__", indicesDimsStr);
+        REPLACE_WITH_STR(model, "__AXIS__", axisStr);
+        REPLACE_WITH_STR(model, "__TYPE__", type);
+
+        return model;
+    }
+
+    static
+    std::string shapeToDimsString(const std::vector<int>& shape)
+    {
+        std::string str;
+        for (int i = 0; i < shape.size(); i++) {
+            str += (i? " ": "");
+            str += "<dim>" + std::to_string(shape[i]) + "</dim>";
+        }
+        return str;
+    }
+};
+
+TEST_P(myriadLayerGather_nightly, Gather) {
+    testGather();
+}
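The shape rule described in the comment inside testGather() translates into a simple copy along the chosen axis: every output element takes input[..., indices[j], ...]. A self-contained 2D sketch for axis = 0 (gatherRows is a name introduced here; the fp16 payload is treated as opaque uint16_t):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // gather over axis 0 of a [rows, cols] matrix: out[j] = in[indices[j]]
    std::vector<uint16_t> gatherRows(const std::vector<uint16_t>& in,
                                     int rows, int cols,
                                     const std::vector<int32_t>& indices) {
        std::vector<uint16_t> out(indices.size() * cols);
        for (std::size_t j = 0; j < indices.size(); ++j) {
            assert(indices[j] >= 0 && indices[j] < rows);
            for (int c = 0; c < cols; ++c) {
                out[j * cols + c] = in[indices[j] * cols + c];
            }
        }
        return out;
    }

    int main() {
        // 3x2 input, gathering rows {2, 0} yields a 2x2 output
        std::vector<uint16_t> in = {1, 2, 3, 4, 5, 6};
        auto out = gatherRows(in, 3, 2, {2, 0});
        assert((out == std::vector<uint16_t>{5, 6, 1, 2}));
        return 0;
    }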
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_gemm_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_gemm_test.cpp
new file mode 100644 (file)
index 0000000..8baa645
--- /dev/null
@@ -0,0 +1,28 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_gemm_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayerGEMM,
+        ::testing::Combine(
+        ::testing::Values<gemm_parameters>(
+                MAKE_STRUCT(gemm_parameters, 4.7f, 2.3f, 5,  7,   11,  1, 2,   3, 4,  5, 6,  7, 8),
+                MAKE_STRUCT(gemm_parameters, 1.0f, 1.0f, 1, 16, 1024, 10, 1,  10, 1, 10, 1, 10, 1),
+                MAKE_STRUCT(gemm_parameters, 1.0f, 1.0f, 3,  5,    6,  1, 1,   1, 1,  1, 1,  1, 1),
+
+                MAKE_STRUCT(gemm_parameters, 1.0f, 1.0f,   8, 17,   32,  1, 12, 1, 12,  1, 12, 1, 12),
+                MAKE_STRUCT(gemm_parameters, 1.0f, 1.0f, 128, 128, 128,  1, 12, 1, 12,  1, 12, 1, 12),
+                MAKE_STRUCT(gemm_parameters, 1.0f, 1.0f, 128, 768, 768,  1, 1,  1,  1,  1,  1, 1, 1),
+                MAKE_STRUCT(gemm_parameters, 1.0f, 1.0f, 128, 768, 3072, 1, 1,  1,  1,  1,  1, 1, 1),
+                MAKE_STRUCT(gemm_parameters, 1.0f, 1.0f, 128, 768, 3072, 1, 2,  1,  2,  1,  2, 1, 2),
+
+                MAKE_STRUCT(gemm_parameters, 1.0f, 1.0f, 8 * 1, 5, 8 * 7,  1, 1,  1, 1,  1, 1, 1, 1)
+        ),
+
+        ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMajor),
+        ::testing::Values<hasThreeInputs>(true, false),
+        ::testing::Values<transposeA>(true, false),
+        ::testing::Values<transposeB>(true, false)
+        )
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_gemm_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_gemm_test.hpp
new file mode 100644 (file)
index 0000000..a9ed573
--- /dev/null
@@ -0,0 +1,214 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include <algorithm>
+
+using std::tuple;
+using std::get;
+
+using namespace InferenceEngine;
+
+PRETTY_PARAM(layoutPreference, vpu::LayoutPreference);
+PRETTY_PARAM(hasThreeInputs, bool);
+PRETTY_PARAM(transposeA, bool);
+PRETTY_PARAM(transposeB, bool);
+
+struct gemm_parameters {
+    float alpha;
+    float beta;
+
+    long unsigned int M;
+    long unsigned int N;
+    long unsigned int K;
+
+    long unsigned int MB1_A; long unsigned int MB2_A;
+    long unsigned int MB1_B; long unsigned int MB2_B;
+    long unsigned int MB1_C; long unsigned int MB2_C;
+    long unsigned int MB1_D; long unsigned int MB2_D;
+
+    friend std::ostream& operator<<(std::ostream& os, gemm_parameters const& tst)
+    {
+        return os << "alpha=" << tst.alpha << ", " << "beta=" << tst.beta << ", "
+                  << "M=" << tst.M << ", " << "N=" << tst.N << ", " << "K=" << tst.K << ", "
+                  << "MB1_A=" << tst.MB1_A << ", " << "MB2_A=" << tst.MB2_A << ", "
+                  << "MB1_B=" << tst.MB1_B << ", " << "MB2_B=" << tst.MB2_B << ", "
+                  << "MB1_C=" << tst.MB1_C << ", " << "MB2_C=" << tst.MB2_C << ", "
+                  << "MB1_D=" << tst.MB1_D << ", " << "MB2_D=" << tst.MB2_D;
+    };
+};
+
+static void gemm_ref(int M, int N, int K,
+                     int MB1_A, int MB2_A,
+                     int MB1_B, int MB2_B,
+                     int MB1_C, int MB2_C,
+                     int MB1, int MB2,
+                     Blob::Ptr srcBlob1,
+                     Blob::Ptr srcBlob2,
+                     Blob::Ptr srcBlob3,
+                     Blob::Ptr dstBlob,
+                     float alpha,
+                     float beta,
+                     bool transposeA,
+                     bool transposeB
+                    )
+{
+
+    ie_fp16 *a = static_cast<ie_fp16*>(srcBlob1->buffer());
+    ie_fp16 *b = static_cast<ie_fp16*>(srcBlob2->buffer());
+    ie_fp16 *c = nullptr;
+    ie_fp16 *d = static_cast<ie_fp16*>(dstBlob->buffer());
+
+    const int stride_a = (transposeA ? M : K);
+    const int stride_b = (transposeB ? K : N);
+    const int stride_d = N;
+
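+    // A stride of 0 re-reads the same matrix for every output batch:
+    // inputs whose MB1/MB2 dims differ from the output's are broadcast.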
+    const int strideMB2_src1 = (MB2 != MB2_A) ? 0 : 1;
+    const int strideMB2_src2 = (MB2 != MB2_B) ? 0 : 1;
+    const int strideMB2_dst  = 1;
+
+    const int strideMB1_src1 = (MB1 != MB1_A) ? 0 : MB2_A * M * K;
+    const int strideMB1_src2 = (MB1 != MB1_B) ? 0 : MB2_B * K * N;
+    const int strideMB1_dst  = MB2 * M * N;
+
+    int strideMB2_src3 = 0;
+    int strideMB1_src3 = 0;
+
+    if (srcBlob3 != nullptr) {
+        c = static_cast<ie_fp16 *>(srcBlob3->buffer());
+        strideMB2_src3 = (MB2 != MB2_C) ? 0 : 1;
+        strideMB1_src3 = (MB1 != MB1_C) ? 0 : MB2_C * M * N;
+    }
+
+    for (int mb1 = 0; mb1 < MB1; mb1++) {
+        for (int mb2 = 0; mb2 < MB2; mb2++) {
+            for (int i = 0; i < M; i++) {
+                for (int j = 0; j < N; j++) {
+                    float dst = 0.0;
+                    if (srcBlob3 != nullptr) {
+                        dst = beta * PrecisionUtils::f16tof32(*(c + MB2_C * (j + i * N) + mb2 * strideMB2_src3 + mb1 * strideMB1_src3));
+                    }
+                    for (int k = 0; k < K; k++) {
+                        float src1 = PrecisionUtils::f16tof32(transposeA ? *(a + MB2_A * (i + k * stride_a) + mb2 * strideMB2_src1 + mb1 * strideMB1_src1) : *(a + (k + i * stride_a) * MB2_A + mb2 * strideMB2_src1 + mb1 * strideMB1_src1));
+                        float src2 = PrecisionUtils::f16tof32(transposeB ? *(b + MB2_B * (k + j * stride_b) + mb2 * strideMB2_src2 + mb1 * strideMB1_src2) : *(b + (j + k * stride_b) * MB2_B + mb2 * strideMB2_src2 + mb1 * strideMB1_src2));
+
+                        dst += alpha * src1 * src2;
+                    }
+
+                    *(d + (j + i * N) * MB2 + mb2 * strideMB2_dst + mb1 * strideMB1_dst) = PrecisionUtils::f32tof16(dst);
+                }
+            }
+        }
+    }
+}
+
+typedef myriadLayerTestBaseWithParam<tuple<gemm_parameters, layoutPreference, hasThreeInputs, transposeA, transposeB>> myriadLayerGEMM;
+
+TEST_P(myriadLayerGEMM, GEMM) {
+    gemm_parameters gemm_parameter = get<0>(GetParam());
+    auto layoutPreference = get<1>(GetParam());
+    auto hasThreeInputs = get<2>(GetParam());
+    auto transposeA = get<3>(GetParam());
+    auto transposeB = get<4>(GetParam());
+
+    const float alpha = gemm_parameter.alpha;
+    const float beta = gemm_parameter.beta;
+
+    const long unsigned int MB1_A = gemm_parameter.MB1_A; const long unsigned int MB2_A = gemm_parameter.MB2_A;
+    const long unsigned int MB1_B = gemm_parameter.MB1_B; const long unsigned int MB2_B = gemm_parameter.MB2_B;
+    const long unsigned int MB1_C = gemm_parameter.MB1_C; const long unsigned int MB2_C = gemm_parameter.MB2_C;
+    const long unsigned int MB1_D = gemm_parameter.MB1_D; const long unsigned int MB2_D = gemm_parameter.MB2_D;
+
+    IN_OUT_desc dims_input;
+    IN_OUT_desc dims_output;
+
+    dims_input.resize(2);
+    if (hasThreeInputs) {
+        dims_input.resize(3);
+    }
+
+    /* inputs */
+    dims_input[0].resize(4);
+    dims_input[0][0] = MB1_A;
+    dims_input[0][1] = MB2_A;
+    dims_input[0][2] = transposeA ? gemm_parameter.K : gemm_parameter.M;
+    dims_input[0][3] = transposeA ? gemm_parameter.M : gemm_parameter.K;
+    dims_input[1].resize(4);
+    dims_input[1][0] = MB1_B;
+    dims_input[1][1] = MB2_B;
+    dims_input[1][2] = transposeB ? gemm_parameter.N : gemm_parameter.K;
+    dims_input[1][3] = transposeB ? gemm_parameter.K : gemm_parameter.N;
+
+    if (hasThreeInputs) {
+        dims_input[2].resize(4);
+        dims_input[2][0] = MB1_C;
+        dims_input[2][1] = MB2_C;
+        dims_input[2][2] = gemm_parameter.M;
+        dims_input[2][3] = gemm_parameter.N;
+    }
+
+
+    dims_output.resize(1);
+    dims_output[0].resize(4);
+    dims_output[0][0] = MB1_D;
+    dims_output[0][1] = MB2_D;
+    dims_output[0][2] = gemm_parameter.M;
+    dims_output[0][3] = gemm_parameter.N;
+
+    SetInputTensors(dims_input);
+    SetOutputTensors(dims_output);
+
+    std::map<std::string, std::string> params {{"alpha", std::to_string(alpha)},
+                                               {"beta", std::to_string(beta)},
+                                               {"transpose_a", std::to_string(transposeA)},
+                                               {"transpose_b", std::to_string(transposeB)},
+                                              };
+
+    if (MB1_D > 1)
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+    else
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(YES);
+
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("GEMM").params(params), NetworkInitParams().layoutPreference(layoutPreference)));
+
+    /* input tensor generating */
+    auto pInputBlob = _inputMap.begin();
+    Blob::Ptr inputBlob0 = pInputBlob->second;
+    pInputBlob++;
+    Blob::Ptr inputBlob1 = pInputBlob->second;
+    Blob::Ptr inputBlob2 = nullptr;
+
+    if (hasThreeInputs) {
+        pInputBlob++;
+        inputBlob2 = pInputBlob->second;
+    }
+
+    /* reference version */
+    auto refOutBlob = make_shared_blob<ie_fp16>({Precision::FP16, {MB1_D, MB2_D, gemm_parameter.M, gemm_parameter.N}, Layout::NHWC});
+    refOutBlob->allocate();
+    gemm_ref(gemm_parameter.M, gemm_parameter.N, gemm_parameter.K,
+             MB1_A, MB2_A,
+             MB1_B, MB2_B,
+             MB1_C, MB2_C,
+             MB1_D, MB2_D,
+
+             inputBlob0,
+             inputBlob1,
+             inputBlob2,
+             refOutBlob,
+
+             gemm_parameter.alpha,
+             gemm_parameter.beta,
+             transposeA,
+             transposeB
+            );
+
+    ASSERT_TRUE(Infer());
+
+    auto pOutputBlob = _outputMap.begin();
+    auto outputBlob = pOutputBlob->second;
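+    // Each output element accumulates K fp16 products, so the tolerance
+    // grows linearly with K.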
+    float maxerr = 0.0016f * gemm_parameter.K;
+    CompareCommonAbsolute(outputBlob, refOutBlob, maxerr);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_grn_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_grn_test.cpp
new file mode 100644 (file)
index 0000000..7a004ed
--- /dev/null
@@ -0,0 +1,99 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_grn_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsGRN_nightly,
+        ::testing::Combine(
+        ::testing::ValuesIn(s_GRNTensors),
+        ::testing::ValuesIn(s_GRN_bias),
+        ::testing::ValuesIn(s_MVNCustomConfig)));
+
+
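+// Checks the channel-major input path: an NCHW blob is fed directly and
+// compared against the reference computed with CHW indexing
+// (refGRN(..., isCHW = true)).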
+TEST_F(myriadLayersTests_nightly, GRN_CHW_Input)
+{
+    std::string model = R"V0G0N(
+        <net name="GRN" version="2" batch="1">
+            <layers>
+                <layer name="data" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>24</dim>
+                            <dim>128</dim>
+                            <dim>224</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="grn" type="GRN" precision="FP16" id="2">
+                    <data bias="0.5"/>
+                    <input>
+                        <port id="2">
+                            <dim>1</dim>
+                            <dim>24</dim>
+                            <dim>128</dim>
+                            <dim>224</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="3">
+                            <dim>1</dim>
+                            <dim>24</dim>
+                            <dim>128</dim>
+                            <dim>224</dim>
+                        </port>
+                    </output>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="1" to-layer="2" to-port="2"/>
+            </edges>
+        </net>
+    )V0G0N";
+
+    StatusCode st;
+
+    ASSERT_NO_THROW(readNetwork(model));
+
+    const auto& network = _cnnNetwork;
+
+    _inputsInfo = network.getInputsInfo();
+    _inputsInfo["data"]->setPrecision(Precision::FP16);
+
+    _outputsInfo = network.getOutputsInfo();
+    _outputsInfo["grn"]->setPrecision(Precision::FP16);
+
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, {}, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    auto tensorDesc = TensorDesc(Precision::FP16, _inputsInfo["data"]->getTensorDesc().getDims(), Layout::NCHW);
+
+    auto inputNCHW = make_shared_blob<ie_fp16>(tensorDesc);
+    ASSERT_NO_THROW(inputNCHW->allocate());
+
+    auto outputNCHW = make_shared_blob<ie_fp16>(tensorDesc);
+    ASSERT_NO_THROW(outputNCHW->allocate());
+
+    auto output_ref = make_shared_blob<ie_fp16>(tensorDesc);
+    ASSERT_NO_THROW(output_ref->allocate());
+
+    ASSERT_NO_THROW(GenRandomData(inputNCHW));
+
+    ASSERT_NO_THROW(st = _inferRequest->SetBlob("data", inputNCHW, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _inferRequest->SetBlob("grn", outputNCHW, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_FATAL_FAILURE(refGRN(inputNCHW, output_ref, 0.5f, true));
+
+    CompareCommonAbsolute(outputNCHW, output_ref, 0.003);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_grn_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_grn_test.hpp
new file mode 100644 (file)
index 0000000..768cf52
--- /dev/null
@@ -0,0 +1,88 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+
+using namespace InferenceEngine;
+
+#define ERROR_BOUND 1e-3f
+
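+// Reference GRN: every value is divided by the L2 norm over the channel
+// axis at its spatial position,
+//     dst[c] = src[c] / sqrt(bias + sum_c'(src[c']^2)),
+// with isCHW choosing CHW vs. HWC indexing of the flat FP16 buffer.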
+static void refGRN(const Blob::Ptr src,
+                         Blob::Ptr dst,
+                   float bias, int isCHW) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+    const uint16_t *src_data = src->buffer();
+          uint16_t *dst_data = dst->buffer();
+    ASSERT_NE(src_data, nullptr);
+    ASSERT_NE(dst_data, nullptr);
+    int32_t IW = 1;
+    int32_t IH = 1;
+    int32_t IC = 1;
+    get_dims(src, IW, IH, IC);
+    for (uint32_t h = 0; h < IH; h++) {
+        for (uint32_t w = 0; w < IW; w++) {
+            float variance = 1e-9f;
+            for (uint32_t c = 0; c < IC; c++) {
+                int ind = isCHW ? c*IH*IW + h*IW + w : h*IW*IC + w*IC + c;
+                float s = PrecisionUtils::f16tof32(src_data[ind]);
+                variance += powf(s, 2);
+            }
+            variance = sqrtf(variance + bias);
+            for (uint32_t c = 0; c < IC; c++) {
+                int ind = isCHW ? c*IH*IW + h*IW + w : h*IW*IC + w*IC + c;
+
+                float s = PrecisionUtils::f16tof32(src_data[ind]);
+                float result = s / variance;
+
+                dst_data[ind] = PrecisionUtils::f32tof16(result);
+            }
+        }
+    }
+}
+
+PRETTY_PARAM(Bias, float)
+
+typedef myriadLayerTestBaseWithParam<std::tuple<Dims, Bias, std::string>> myriadLayersTestsGRN_nightly;
+
+TEST_P(myriadLayersTestsGRN_nightly, GRN) {
+    tensor_test_params dims  = std::get<0>(GetParam());
+    float bias               = std::get<1>(GetParam());
+    std::string customConfig = std::get<2>(GetParam());
+
+    if (!customConfig.empty() && !CheckMyriadX()) {
+        GTEST_SKIP() << "Custom layers are not supported on MYRIAD2";
+    }
+    _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig;
+
+    SetInputTensor(dims);
+    SetOutputTensor(dims);
+
+    std::map<std::string, std::string> params;
+    params["bias"] = std::to_string(bias);
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("GRN").params(params)));
+
+    ASSERT_TRUE(Infer());
+
+    ASSERT_NO_FATAL_FAILURE(refGRN(_inputMap.begin()->second, _refBlob, bias, false));
+
+    CompareCommonAbsolute(_outputMap.begin()->second, _refBlob, ERROR_BOUND);
+}
+
+static std::vector<Dims> s_GRNTensors = {
+        {{1, 3, 16, 224}},
+        {{1, 24, 128, 224}},
+};
+
+static std::vector<Bias> s_GRN_bias = {
+        0.5f, 10.f
+};
+
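+// Custom-layer configs to test: an empty string runs the built-in kernel;
+// when built with VPU_HAS_CUSTOM_KERNELS the custom kernel bindings XML is
+// exercised as well (MyriadX only, see the GTEST_SKIP above).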
+static std::vector<std::string> s_MVNCustomConfig = {
+    "" ,
+#ifdef VPU_HAS_CUSTOM_KERNELS
+    getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"
+#endif
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_interp_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_interp_test.cpp
new file mode 100644 (file)
index 0000000..44eb70f
--- /dev/null
@@ -0,0 +1,26 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_interp_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        accuracy, myriadLayersTestsInterp_nightly,
+        ::testing::Combine(
+
+            ::testing::Values<SizeInputOutput>(
+                                                MAKE_STRUCT(interp_test_params, 128, 64, 256, 128, 128),
+                                                MAKE_STRUCT(interp_test_params, 128, 64, 512, 256, 19),
+                                                MAKE_STRUCT(interp_test_params, 6,    6, 64, 32, 1024),
+                                                MAKE_STRUCT(interp_test_params, 1,    1, 64, 32, 1024)
+                                              )
+
+          , ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMajor,
+                                                vpu::LayoutPreference::ChannelMinor)
+          , ::testing::Values<align_corners>(
+                                        true,
+                                        false
+                                            )
+          )
+        );
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_interp_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_interp_test.hpp
new file mode 100644 (file)
index 0000000..533940b
--- /dev/null
@@ -0,0 +1,134 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include "myriad_layers_reference_functions.hpp"
+
+using std::tuple;
+using std::get;
+
+using namespace InferenceEngine;
+
+PRETTY_PARAM(layoutPreference, vpu::LayoutPreference)
+PRETTY_PARAM(SizeInputOutput, interp_test_params)
+PRETTY_PARAM(align_corners, bool)
+
+typedef myriadLayerTestBaseWithParam<tuple<interp_test_params, layoutPreference, align_corners>> myriadLayersTestsInterp_nightly;
+
+
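+// Reference bilinear resize for the Interp layer. Each output pixel blends
+// its four nearest input neighbors:
+//     dst(h, w) = hl1 * (wl1 * src00 + wl0 * src01)
+//               + hl0 * (wl1 * src10 + wl0 * src11),
+// where hl0/wl0 are the fractional source offsets, hl1 = 1 - hl0 and
+// wl1 = 1 - wl0. With align_corners the scale is (I - 1) / (O - 1),
+// otherwise I / O; equal input and output sizes degenerate to a plain copy.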
+void ref_interp(const Blob::Ptr src,
+                Blob::Ptr dst, bool align_corners) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+
+    ie_fp16 *src_data = static_cast<ie_fp16*>(src->buffer());
+    ie_fp16 *dst_data = static_cast<ie_fp16*>(dst->buffer());
+    ASSERT_NE(src_data, nullptr);
+    ASSERT_NE(dst_data, nullptr);
+
+    int32_t IW = 0;
+    int32_t IH = 0;
+    int32_t IC = 0;
+    int32_t OW = 0;
+    int32_t OH = 0;
+    int32_t OC = 0;
+    int32_t N = 1;
+
+    get_dims(src, IW, IH, IC);
+    get_dims(dst, OW, OH, OC);
+    int32_t C = IC;
+
+    if (IH == OH && IW == OW)
+    {
+        for (size_t b = 0; b < N; b++) {
+            for (size_t c = 0; c < C; c++) {
+                for (size_t h = 0; h < IH; h++) {
+                    for (size_t w = 0; w < IW; w++) {
+                        size_t oidx = c + w * C + h * C * OW;
+                        size_t iidx = c + w * C + h * C * IW;
+                        ASSERT_LT(iidx, src->size());
+                        ASSERT_LT(oidx, dst->size());
+                        dst_data[oidx] = src_data[iidx];
+                    }
+                }
+            }
+        }
+        return;
+    }
+
+    const float rh = (OH > 1 && align_corners) ? static_cast<float>(IH - 1) / (OH - 1) : static_cast<float>(IH) / OH;
+    const float rw = (OW > 1 && align_corners) ? static_cast<float>(IW - 1) / (OW - 1) : static_cast<float>(IW) / OW;
+
+    for (size_t b = 0; b < N; ++b) {
+        for (size_t h = 0; h < OH; h++) {
+            float fh = rh * h;
+            size_t ih0 = static_cast<size_t>(fh);
+            size_t ih1 = (ih0 < IH - 1) ? ih0 + 1 : ih0;
+
+            float h_lambda0 = fh - ih0;
+            float h_lambda1 = 1.0f - h_lambda0;
+
+            for (size_t w = 0; w < OW; w++) {
+                float fw = rw * w;
+                size_t iw0 = static_cast<size_t>(fw);
+                size_t iw1 = (iw0 < IW - 1) ? iw0 + 1 : iw0;
+
+                float w_lambda0 = fw - iw0;
+                float w_lambda1 = 1.0f - w_lambda0;
+
+                for (size_t c = 0; c < C; c++) {
+                    size_t iidx00 = c + iw0 * C + ih0 * C * IW;
+                    size_t iidx01 = c + iw1 * C + ih0 * C * IW;
+                    size_t iidx10 = c + iw0 * C + ih1 * C * IW;
+                    size_t iidx11 = c + iw1 * C + ih1 * C * IW;
+                    ASSERT_LT(iidx00, src->size());
+                    ASSERT_LT(iidx01, src->size());
+                    ASSERT_LT(iidx10, src->size());
+                    ASSERT_LT(iidx11, src->size());
+
+                    float src00 = PrecisionUtils::f16tof32(src_data[iidx00]);
+                    float src01 = PrecisionUtils::f16tof32(src_data[iidx01]);
+                    float src10 = PrecisionUtils::f16tof32(src_data[iidx10]);
+                    float src11 = PrecisionUtils::f16tof32(src_data[iidx11]);
+
+                    size_t oidx = c + w * C + h * C * OW;
+                    ASSERT_LT(oidx, dst->size());
+
+                    dst_data[oidx] = PrecisionUtils::f32tof16(h_lambda1 * (w_lambda1 * src00 + w_lambda0 * src01) +
+                                                              h_lambda0 * (w_lambda1 * src10 + w_lambda0 * src11));
+                }
+            }
+        }
+    }
+}
+
+TEST_P(myriadLayersTestsInterp_nightly, Interp)
+{
+    interp_test_params test_params = get<0>(GetParam());
+    auto layoutPreference = get<1>(GetParam());
+    bool align_corner = get<2>(GetParam());
+
+    std::map<std::string, std::string> params;
+    params["align_corners"] = std::to_string(int(align_corner));
+    params["factor"] = std::to_string(1);
+    tensor_test_params input_dims  = {1, test_params.c, test_params.ih, test_params.iw};
+    tensor_test_params output_dims = {1, test_params.c, test_params.oh, test_params.ow};
+
+    SetInputTensor(input_dims);
+    SetOutputTensor(output_dims);
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Interp").params(params), NetworkInitParams().layoutPreference(layoutPreference)));
+    ASSERT_NO_FATAL_FAILURE(SetFirstInputToRange(-0.9f, 0.9f));
+
+    auto inputBlob = _inputMap.begin()->second;
+
+    ASSERT_TRUE(Infer());
+    auto outputBlob = _outputMap.begin()->second;
+
+    ref_interp(inputBlob, _refBlob, align_corner);
+
+    float maxerr = 0.07f;
+
+    CompareCommonAbsolute(outputBlob, _refBlob, maxerr);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_log_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_log_test.cpp
new file mode 100644 (file)
index 0000000..3b23651
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_log_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        accuracy, myriadLayersTestsLog_nightly,
+        ::testing::ValuesIn(s_logParams));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_log_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_log_test.hpp
new file mode 100644 (file)
index 0000000..14c4db5
--- /dev/null
@@ -0,0 +1,49 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include "myriad_layers_reference_functions.hpp"
+#include <cmath>
+
+#define BOUND (10.0f)
+#define ERROR_BOUND (1.e-2f)
+#define ERROR_BOUND_WITH_LOG (1.e-2f)
+
+using namespace InferenceEngine;
+
+class myriadLayersTestsLog_nightly: public myriadLayersTests_nightly,
+                           public testing::WithParamInterface<Dims> {
+public:
+};
+
+TEST_P(myriadLayersTestsLog_nightly, TestsLog)
+{
+    auto p = ::testing::WithParamInterface<Dims>::GetParam();
+    SetInputTensor(p);
+    SetOutputTensor(p);
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Log")));
+    SetFirstInputToRange(-BOUND, BOUND);
+    ASSERT_TRUE(Infer());
+
+    /* output check */
+    ref_log(_inputMap.begin()->second, _refBlob);
+    CompareCommonAbsolute(_outputMap.begin()->second, _refBlob, ERROR_BOUND);
+}
+
+static std::vector<Dims> s_logParams = {
+    {{1, 1, 16, 16}},
+    {{1, 2, 16, 16}},
+    {{1, 3, 16, 16}},
+    {{1, 1, 53, 16}},
+    {{1, 2, 53, 16}},
+    {{1, 3, 53, 16}},
+    {{1, 1, 224, 224}},
+    {{1, 2, 224, 224}},
+    {{1, 3, 224, 224}},
+    {{1, 1, 224, 235}},
+    {{1, 2, 224, 235}},
+    {{1, 3, 224, 235}},
+    {{10, 17191, 1, 1}},
+    {{1, 1, 10, 17191}}
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_lrn_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_lrn_test.cpp
new file mode 100644 (file)
index 0000000..ff6f0cd
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_lrn_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsLRN_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_LRNTensors),
+        ::testing::ValuesIn(s_LRNlocal_size),
+        ::testing::ValuesIn(s_LRN_K),
+        ::testing::ValuesIn(s_LRNalpha),
+        ::testing::ValuesIn(s_LRNbeta))
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_lrn_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_lrn_test.hpp
new file mode 100644 (file)
index 0000000..7137ffc
--- /dev/null
@@ -0,0 +1,178 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+
+
+#define ERROR_BOUND 1e-3f
+
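+// Reference across-channel LRN ("region" = Across):
+//     dst[c] = src[c] * (k + alpha / local_size * sum_c'(src[c']^2))^(-beta),
+// where c' spans a window of up to local_size channels centered on c.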
+static void refLRN(const InferenceEngine::Blob::Ptr src,
+                         InferenceEngine::Blob::Ptr dst,
+                         uint32_t local_size,
+                         float alpha,
+                         float beta,
+                         float k) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+    const uint16_t *src_data = src->buffer();
+    uint16_t *dst_data = dst->buffer();
+    ASSERT_NE(src_data, nullptr);
+    ASSERT_NE(dst_data, nullptr);
+    int32_t IW = 1;
+    int32_t IH = 1;
+    int32_t IC = 1;
+    get_dims(src, IW, IH, IC);
+    for (uint32_t h = 0; h < IH; h++) {
+        for (uint32_t w = 0; w < IW; w++) {
+            for (uint32_t c = 0; c < IC; c++) {
+                uint32_t oidx = c + w * IC + h * IC * IW;
+                uint32_t sz = local_size;
+                int32_t c_start = c - sz / 2;
+                int32_t c_end = c_start + sz;
+                c_start = std::max(c_start, 0);
+                c_end   = std::min(c_end, (int32_t)IC);
+                float sum = 0.0;
+                for (int32_t c1 = c_start; c1 < c_end; c1++) {
+                    uint32_t idx = c1 + w * IC + h * IC * IW;
+                    float s = InferenceEngine::PrecisionUtils::f16tof32(src_data[idx]);
+                    sum += s * s;
+                }
+                float norm_coef = powf(k + alpha * sum / sz, -beta);
+
+                dst_data[oidx] = InferenceEngine::PrecisionUtils::f32tof16(norm_coef *
+                                        InferenceEngine::PrecisionUtils::f16tof32(src_data[oidx]));
+            }
+        }
+    }
+}
+
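+// Reference within-channel LRN ("region" = Same): the same expression as
+// above, but the sum runs over a local_size x local_size spatial window in
+// the current channel and is normalized by local_size^2.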
+static void refInnerLRN(const InferenceEngine::Blob::Ptr src,
+                         InferenceEngine::Blob::Ptr dst,
+                         uint32_t local_size,
+                         float alpha,
+                         float beta,
+                         float k) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+    const uint16_t *src_data = src->buffer();
+    uint16_t *dst_data = dst->buffer();
+    ASSERT_NE(src_data, nullptr);
+    ASSERT_NE(dst_data, nullptr);
+    int32_t IW = 1;
+    int32_t IH = 1;
+    int32_t IC = 1;
+    get_dims(src, IW, IH, IC);
+    for (uint32_t h = 0; h < IH; h++) {
+        for (uint32_t w = 0; w < IW; w++) {
+            for (uint32_t c = 0; c < IC; c++) {
+                uint32_t oidx = c + w * IC + h * IC * IW;
+                uint32_t sz = local_size;
+                int32_t h_start = h - sz / 2;
+                int32_t h_end = h + sz / 2;
+                int32_t w_start = w - sz / 2;
+                int32_t w_end = w + sz / 2;
+                h_start = std::max(h_start, 0);
+                h_end   = std::min(h_end, (int32_t)IH - 1);
+                w_start = std::max(w_start, 0);
+                w_end   = std::min(w_end, (int32_t)IW - 1);
+                float sum = 0;
+                for (int32_t h1 = h_start; h1 <= h_end; h1++) {
+                    for (int32_t w1 = w_start; w1 <= w_end; w1++) {
+                        uint32_t idx = c + w1 * IC + h1 * IC * IW;
+                        float s = InferenceEngine::PrecisionUtils::f16tof32(src_data[idx]);
+                        sum += s * s;
+                    }
+                }
+                float norm_coef = powf(k + alpha * sum / (float)(sz * sz), -beta);
+
+                dst_data[oidx] = InferenceEngine::PrecisionUtils::f32tof16(norm_coef *
+                                        InferenceEngine::PrecisionUtils::f16tof32(src_data[oidx]));
+            }
+        }
+    }
+}
+
+PRETTY_PARAM(local_size, uint32_t)
+PRETTY_PARAM(k_val, float)
+PRETTY_PARAM(alpha, float)
+PRETTY_PARAM(beta,  float)
+
+typedef myriadLayerTestBaseWithParam<std::tuple<Dims, local_size, k_val, alpha, beta>> myriadLayersTestsLRN_nightly;
+
+TEST_P(myriadLayersTestsLRN_nightly, LRN) {
+    tensor_test_params dims = std::get<0>(GetParam());
+    uint32_t local_v = std::get<1>(GetParam());
+    float k          = std::get<2>(GetParam());
+    float alpha_val  = std::get<3>(GetParam());
+    float beta_val   = std::get<4>(GetParam());
+
+    SetInputTensor(dims);
+    SetOutputTensor(dims);
+
+    std::map<std::string, std::string> layer_params = {
+        {"alpha",     std::to_string(alpha_val)},
+        {"beta",      std::to_string(beta_val)},
+        {"local-size", std::to_string(local_v)},
+        {"k", std::to_string(k)},
+        {"region", "Across"},
+    };
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Norm").params(layer_params)));
+
+    ASSERT_TRUE(Infer());
+    auto src = _inputMap.begin()->second;
+    auto dst = _outputMap.begin()->second;
+    refLRN(src, _refBlob, local_v, alpha_val, beta_val, k);
+
+    CompareCommonAbsolute(dst, _refBlob, ERROR_BOUND);
+}
+
+TEST_P(myriadLayersTestsLRN_nightly, InnerLRN) {
+    tensor_test_params dims = std::get<0>(GetParam());
+    uint32_t local_v = std::get<1>(GetParam());
+    float k          = std::get<2>(GetParam());
+    float alpha_val  = std::get<3>(GetParam());
+    float beta_val   = std::get<4>(GetParam());
+
+    SetInputTensor(dims);
+    SetOutputTensor(dims);
+
+    std::map<std::string, std::string> layer_params = {
+        {"alpha",     std::to_string(alpha_val)},
+        {"beta",      std::to_string(beta_val)},
+        {"local-size", std::to_string(local_v)},
+        {"k", std::to_string(k)},
+        {"region", "Same"},
+    };
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Norm").params(layer_params)));
+
+    ASSERT_TRUE(Infer());
+    auto src = _inputMap.begin()->second;
+    auto dst = _outputMap.begin()->second;
+    refInnerLRN(src, _refBlob, local_v, alpha_val, beta_val, k);
+
+    CompareCommonAbsolute(dst, _refBlob, ERROR_BOUND);
+}
+
+static std::vector<Dims> s_LRNTensors = {
+    {{1, 4, 16, 32}},
+    {{1, 8, 20, 36}},
+};
+
+static std::vector<local_size> s_LRNlocal_size = {
+    3, 5, /*1*/ // local_size = 1 is omitted because mvTensor returns "junk" values in some output positions, while InnerLRN returns correct values
+};
+
+static std::vector<alpha> s_LRNalpha = {
+    9.9999997e-05f,
+};
+
+static std::vector<beta> s_LRNbeta = {
+    0.75,
+};
+
+static std::vector<k_val> s_LRN_K = {
+    1, 3, 5, 7
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_lstm_cell.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_lstm_cell.cpp
new file mode 100644 (file)
index 0000000..2eebd19
--- /dev/null
@@ -0,0 +1,177 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_lstm_cell.hpp"
+
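+// Unrolls one LSTMCell over a length-2 sequence via TensorIterator and
+// compares the plugin result against the step-by-step scalar reference
+// lstm_cell() from the header.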
+TEST_F(myriadLayersTests_nightly, LSTMCellSequenceNet) {
+    const size_t input_size = 512;
+    const size_t state_size = 128;
+    const size_t seq_size = 2;
+    const size_t batch_size = 4;
+
+    size_t num_weights = ngates * state_size * (input_size + state_size);
+    size_t num_bias = ngates * state_size;
+
+    /* weights generating */
+    TBlob<uint8_t>::Ptr weightsBlob_for_net(GenWeights((num_weights + num_bias)));
+    ie_fp16 *weights_for_net = static_cast<ie_fp16*>(weightsBlob_for_net->buffer());
+    TBlob<uint8_t>::Ptr weightsBlob_tmp(GenWeights((num_weights + num_bias)));
+    ie_fp16 *weights0 = static_cast<ie_fp16*>(weightsBlob_tmp->buffer());
+    ie_fp16 *weights1 = weights0 + ngates * state_size * input_size;
+    int counter = 0;
+    for (int j = 0; j < ngates * state_size; j++) {
+        for (int i = 0; i < input_size; i++) {
+            weights0[(input_size) * j + i] = PrecisionUtils::f32tof16(((float)(rand() % input_size)) / input_size * 0.01);
+            weights_for_net[counter++] = weights0[(input_size) * j + i];
+        }
+        for (int i = 0; i < state_size; i++) {
+            weights1[(state_size) * j + i] = PrecisionUtils::f32tof16(((float)(rand() % state_size)) / state_size * 0.05f);
+            weights_for_net[counter++] = weights1[(state_size) * j + i];
+        }
+    }
+    ie_fp16 *bias = weights0 + num_weights;
+    for (int i = 0; i < num_bias; i++) {
+        bias[i] = PrecisionUtils::f32tof16((float)((rand() % num_bias)) / num_bias);
+        *(weights_for_net + num_weights + i) = bias[i];
+    }
+
+    InferenceEngine::Core ie;
+    auto full_network = ie.ReadNetwork(tensorIteratorModel, weightsBlob_for_net);
+    full_network.addOutput("RNNOutput", 0);
+
+    InferenceEngine::InputsDataMap networkInputsFull;
+    networkInputsFull = full_network.getInputsInfo();
+    InferenceEngine::OutputsDataMap networkOutputsFull;
+    networkOutputsFull = full_network.getOutputsInfo();
+
+    for (auto& networkInput : networkInputsFull)
+        networkInput.second->setPrecision(InferenceEngine::Precision::FP16);
+    networkOutputsFull.begin()->second->setPrecision(InferenceEngine::Precision::FP16);
+
+    InferenceEngine::IExecutableNetwork::Ptr exeNetworkFull;
+    std::map<std::string, std::string> networkConfig;
+    StatusCode st;
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(exeNetworkFull, full_network, networkConfig, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    InferenceEngine::IInferRequest::Ptr inferRequest;
+    ASSERT_NO_THROW(st = exeNetworkFull->CreateInferRequest(inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    InferenceEngine::Blob::Ptr inputBlob;
+    ASSERT_NO_THROW(st = inferRequest->GetBlob("RNNInput", inputBlob, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    InferenceEngine::Blob::Ptr inputBlobHidden;
+    ASSERT_NO_THROW(st = inferRequest->GetBlob("RNNInput_Hidden", inputBlobHidden, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    InferenceEngine::Blob::Ptr inputBlobCellState;
+    ASSERT_NO_THROW(st = inferRequest->GetBlob("RNNInput_CellState", inputBlobCellState, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    /* input tensors generating */
+    ie_fp16 *src_data_cell_state = static_cast<ie_fp16*>(inputBlobCellState->buffer());
+    for (int i = 0; i < state_size * batch_size; i++) {
+        src_data_cell_state[i] = PrecisionUtils::f32tof16((((float)(rand() % input_size)) / input_size * .1f));
+    }
+    ie_fp16 *src_data_hidden = static_cast<ie_fp16*>(inputBlobHidden->buffer());
+    for (int i = 0; i < state_size * batch_size; i++) {
+        src_data_hidden[i] = PrecisionUtils::f32tof16((((float)(rand() % input_size)) / input_size * .1f));
+    }
+    ie_fp16 *src_data = static_cast<ie_fp16*>(inputBlob->buffer());
+    for (int i = 0; i < input_size * batch_size * seq_size; i++) {
+        src_data[i] = PrecisionUtils::f32tof16((((float)(rand() % input_size)) / input_size * .1f));
+    }
+
+    /* gates repacking for weights for reference function */
+    TBlob<uint8_t>::Ptr weightsBlob_inv_tmp(GenWeights(num_weights + num_bias));
+    ie_fp16 *weights_inv0 = static_cast<ie_fp16*>(weightsBlob_inv_tmp->buffer());
+    ie_fp16 *weights_inv1 = weights_inv0 + ngates * state_size * input_size;
+    ie_fp16 *bias_inv = weights_inv0 + num_weights;
+    {
+        for (int g = 0; g < ngates; g++)
+        {
+            int stride = state_size * input_size;
+            for (int i = 0; i < stride; i++)
+            {
+                weights_inv0[g * stride + i] = weights0[gate_map[g] * stride + i];
+            }
+        }
+        for (int g = 0; g < ngates; g++)
+        {
+            int stride = state_size * state_size;
+            for (int i = 0; i < stride; i++)
+            {
+                weights_inv1[g * stride + i] = weights1[gate_map[g] * stride + i];
+            }
+        }
+        for (int g = 0; g < ngates; g++)
+        {
+            int stride = state_size;
+            for (int i = 0; i < stride; i++)
+            {
+                bias_inv[g * stride + i] = bias[gate_map[g] * stride + i];
+            }
+        }
+    }
+    /* weights repacking */
+    auto weightsBlob0_repacked = make_shared_blob<ie_fp16>({Precision::FP16, {1, ngates * state_size * input_size}, Layout::NC});
+    weightsBlob0_repacked->allocate();
+    auto weightsBlob1_repacked = make_shared_blob<ie_fp16>({Precision::FP16, {1, ngates * state_size * state_size}, Layout::NC});
+    weightsBlob1_repacked->allocate();
+    ie_fp16* weights0_repacked = static_cast<ie_fp16*>(weightsBlob0_repacked->buffer());
+    ie_fp16* weights1_repacked = static_cast<ie_fp16*>(weightsBlob1_repacked->buffer());
+    matrix_copy_transpose(weights_inv0, weights0_repacked, ngates * state_size, input_size);
+    matrix_copy_transpose(weights_inv1, weights1_repacked, ngates * state_size, state_size);
+    /* reference version */
+    auto refOut0 = make_shared_blob<ie_fp16>({Precision::FP16, {seq_size * batch_size, state_size}, Layout::NC});
+    refOut0->allocate();
+    auto refOut1 = make_shared_blob<ie_fp16>({Precision::FP16, {seq_size * batch_size, state_size}, Layout::NC});
+    refOut1->allocate();
+    auto gatesBlob = make_shared_blob<ie_fp16>({Precision::FP16, {1, ngates * state_size}, Layout::NC});
+    gatesBlob->allocate();
+    ie_fp16* h_dst = static_cast<ie_fp16*>(refOut0->buffer());
+    ie_fp16* c_dst = static_cast<ie_fp16*>(refOut1->buffer());
+    ie_fp16* gates = static_cast<ie_fp16*>(gatesBlob->buffer());
+    for (size_t b = 0; b < batch_size; b++)
+    {
+        for (size_t c = 0; c < seq_size; c++)
+        {
+            lstm_cell(input_size,
+                      state_size,
+                    // weights
+                      weights0_repacked,
+                      weights1_repacked,
+                      bias_inv,
+                    // input
+                      src_data + input_size * c + input_size * seq_size * b,
+                      (c == 0)?(src_data_hidden + state_size * b):(h_dst + state_size * (c-1) + state_size * seq_size * b),
+                      (c == 0)?(src_data_cell_state + state_size * b):(c_dst),
+                    // output
+                      h_dst + state_size * c + state_size * seq_size * b,
+                      c_dst,
+                      gates
+            );
+        }
+    }
+
+    ASSERT_NO_THROW(st = inferRequest->SetBlob("RNNInput_Hidden", inputBlobHidden, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NO_THROW(st = inferRequest->SetBlob("RNNInput_CellState", inputBlobCellState, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NO_THROW(st = inferRequest->SetBlob("RNNInput", inputBlob, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    InferenceEngine::Blob::Ptr output;
+    ASSERT_NO_THROW(st = inferRequest->GetBlob("RNNOutput", output, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    CompareCommonAbsolute(output, refOut0, ERROR_BOUND);
+}
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsLSTMCell_nightly,
+                        ::testing::Values<lstmcell_test_params>(MAKE_STRUCT(lstmcell_test_params, 512, 128)));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_lstm_cell.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_lstm_cell.hpp
new file mode 100644 (file)
index 0000000..1491f72
--- /dev/null
@@ -0,0 +1,484 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include <cmath>
+
+using namespace InferenceEngine;
+
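+// Remaps per-gate weight blocks from the order stored in the IR weights to
+// the f, i, o, c order used by the reference lstm_cell() below; the map
+// {0, 1, 3, 2} swaps the last two blocks (suggesting the IR stores f, i, c, o).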
+const int gate_map[] = {0, 1, 3, 2};
+const size_t ngates = 4;
+#define ERROR_BOUND (.01f)
+
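+// IR snippet: a TensorIterator unrolling a single LSTMCell over the sequence
+// axis (input_size = 512, hidden_size = 128, batch = 4, seq_len = 2). The
+// weights blob size 655360 = 4 gates * 128 * (512 + 128) * sizeof(ie_fp16);
+// the biases size 1024 = 4 * 128 * sizeof(ie_fp16).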
+std::string tensorIteratorModel = R"V0G0N(
+<net batch="1" name="ctpn" version="4">
+  <layers>
+    <layer id="0" name="RNNInput_Hidden" precision="FP16" type="Input">
+      <output>
+        <port id="0">
+          <dim>4</dim>
+          <dim>128</dim>
+        </port>
+      </output>
+    </layer>
+    <layer id="1" name="RNNInput_CellState" precision="FP16" type="Input">
+      <output>
+        <port id="0">
+          <dim>4</dim>
+          <dim>128</dim>
+        </port>
+      </output>
+    </layer>
+    <layer id="2" name="RNNInput" precision="FP16" type="Input">
+      <output>
+        <port id="0">
+          <dim>4</dim>
+          <dim>2</dim>
+          <dim>512</dim>
+        </port>
+      </output>
+    </layer>
+    <layer id="38" name="RNNOutput" precision="FP16" type="TensorIterator">
+      <input>
+        <port id="0">
+          <dim>4</dim>
+          <dim>2</dim>
+          <dim>512</dim>
+        </port>
+        <port id="1">
+          <dim>4</dim>
+          <dim>128</dim>
+        </port>
+        <port id="2">
+          <dim>4</dim>
+          <dim>128</dim>
+        </port>
+      </input>
+      <output>
+        <port id="3">
+          <dim>4</dim>
+          <dim>2</dim>
+          <dim>128</dim>
+        </port>
+      </output>
+      <port_map>
+        <input axis="1" external_port_id="0" internal_layer_id="0" internal_port_id="0"/>
+        <input external_port_id="1" internal_layer_id="1" internal_port_id="1"/>
+        <input external_port_id="2" internal_layer_id="1" internal_port_id="2"/>
+        <output axis="1" external_port_id="3" internal_layer_id="2" internal_port_id="1"/>
+      </port_map>
+      <back_edges>
+        <edge from-layer="1" from-port="5" to-layer="1" to-port="1"/>
+        <edge from-layer="1" from-port="6" to-layer="1" to-port="2"/>
+      </back_edges>
+      <body>
+        <layers>
+          <layer id="0" name="lstm_o/bidirectional_rnn/fw/fw/while/TensorArrayReadV3/Output_0/Data_/InputSqueeze" precision="FP16" type="Reshape">
+            <data dim="-1,512"/>
+            <input>
+              <port id="0">
+                <dim>4</dim>
+                <dim>1</dim>
+                <dim>512</dim>
+              </port>
+            </input>
+            <output>
+              <port id="1">
+                <dim>4</dim>
+                <dim>512</dim>
+              </port>
+            </output>
+          </layer>
+          <layer id="1" name="lstm_o/bidirectional_rnn/fw/fw/while/fw/lstm_cell/concat/LSTMCell" precision="FP16" type="LSTMCell">
+            <data hidden_size="128"/>
+            <input>
+              <port id="0">
+                <dim>4</dim>
+                <dim>512</dim>
+              </port>
+              <port id="1">
+                <dim>4</dim>
+                <dim>128</dim>
+              </port>
+              <port id="2">
+                <dim>4</dim>
+                <dim>128</dim>
+              </port>
+            </input>
+            <output>
+              <port id="5">
+                <dim>4</dim>
+                <dim>128</dim>
+              </port>
+              <port id="6">
+                <dim>4</dim>
+                <dim>128</dim>
+              </port>
+            </output>
+            <blobs>
+              <weights offset="0" size="655360"/>
+              <biases offset="655360" size="1024"/>
+            </blobs>
+          </layer>
+          <layer id="2" name="lstm_o/bidirectional_rnn/fw/fw/while/fw/lstm_cell/concat/LSTMCell/Output_0/Data_/OutputUnsqueeze" precision="FP16" type="Reshape">
+            <data dim="-1,1,128"/>
+            <input>
+              <port id="0">
+                <dim>4</dim>
+                <dim>128</dim>
+              </port>
+            </input>
+            <output>
+              <port id="1">
+                <dim>4</dim>
+                <dim>1</dim>
+                <dim>128</dim>
+              </port>
+            </output>
+          </layer>
+        </layers>
+        <edges>
+          <edge from-layer="0" from-port="1" to-layer="1" to-port="0"/>
+          <edge from-layer="1" from-port="5" to-layer="2" to-port="0"/>
+        </edges>
+      </body>
+    </layer>
+  </layers>
+  <edges>
+    <edge from-layer="2" from-port="0" to-layer="38" to-port="0"/>
+    <edge from-layer="0" from-port="0" to-layer="38" to-port="1"/>
+    <edge from-layer="1" from-port="0" to-layer="38" to-port="2"/>
+  </edges>
+</net>
+)V0G0N";
+
+struct lstmcell_test_params {
+    int input_size;
+    int state_size;
+    friend std::ostream& operator<<(std::ostream& os, lstmcell_test_params const& tst)
+    {
+        return os << " input size = " << tst.input_size
+                  << ", state size = " << tst.state_size;
+    }
+};
+typedef myriadLayerTestBaseWithParam<lstmcell_test_params> myriadLayersTestsLSTMCell_nightly;
+
+#define f32Tof16 PrecisionUtils::f32tof16
+#define f16Tof32 PrecisionUtils::f16tof32
+static ie_fp16& at(ie_fp16 *a, int i, int j, int k, int stride0, int stride1)
+{
+    return *(i * stride1 + j * stride0 + k + a);
+}
+static ie_fp16& at(ie_fp16 *a, int i, int j, int stride)
+{
+    return *(i * stride + j + a);
+}
+// float a[m][k], float b[k][n], float c[m][n];
+// c = a * b;
+static void gemm(int m, int n, int k,
+                 ie_fp16 * a, int stride_a,
+                 ie_fp16 * b, int stride_b,
+                 ie_fp16 * c, int stride_c,
+                 ie_fp16 beta) {
+    for (int im = 0; im < m; im++) {
+        for (int in = 0; in < n; in++) {
+            // If beta == 0, start the accumulator at 0 instead of reading c:
+            // multiplying an uninitialized value, even by zero, can yield NaN.
+            ie_fp16 c_elem = (beta == (ie_fp16)0.) ? (ie_fp16)0. : f32Tof16(f16Tof32(at(c, im, in, stride_c)) * f16Tof32(beta));
+            for (int ik = 0; ik < k; ik++) {
+                ie_fp16 a_elem = at(a, im, ik, stride_a);
+                ie_fp16 b_elem = at(b, ik, in, stride_b);
+                c_elem = f32Tof16(f16Tof32(a_elem) * f16Tof32(b_elem) + f16Tof32(c_elem));
+            }
+            at(c, im, in, stride_c) = c_elem;
+        }
+    }
+}
+static float logistic(float x) {
+    return 1.0f / (1.0f + expf(-x));
+}
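+// Applies the LSTM non-linearities in place: logistic sigmoid to the first
+// three gate blocks (f, i, o) and tanh to the last block (candidate c).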
+static void lstm_activation(int dic, int n_gates, int batch, ie_fp16 * a) {
+    for (int ib = 0; ib < batch; ib++) {
+        for (int ig = 0; ig < 3; ig++) {
+            for (int ih = 0; ih < dic; ih++) {
+                *(a + ih + ig * dic + ib * dic * n_gates) = f32Tof16(logistic(f16Tof32(*(a + ih + ig * dic + ib * dic * n_gates))));
+            }
+        }
+        int ig = 3;
+        for (int j = 0; j < dic; j++) {
+            *(a + j + ig * dic + ib * dic * n_gates) = f32Tof16(tanhf(f16Tof32(*(a + j + ig * dic + ib * dic * n_gates))));
+        }
+    }
+}
+
+// src_layer[input_size]
+// src_iter_h[state_size]
+// src_iter_c[state_size]
+// weights_layer[ngates * state_size][input_size]
+// weights_iter_h[ngates * state_size][state_size]
+// bias[ngates][state_size]
+// h_dst[state_size]
+// c_dst[state_size]
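+//
+// One LSTM step (gate blocks ordered f, i, o, c as in ohf/ohi/oho/ohc):
+//     gates = nonlin(src_layer * weights_layer + src_iter_h * weights_iter_h + bias)
+//     c_dst = f .* src_iter_c + i .* c
+//     h_dst = o .* tanh(c_dst)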
+void lstm_cell(int input_size,
+               int state_size,
+               // weights
+               ie_fp16* weights_layer,
+               ie_fp16* weights_iter_h,
+               ie_fp16* bias,
+               // input
+               ie_fp16* src_layer,
+               ie_fp16* src_iter_h,
+               ie_fp16* src_iter_c,
+               // output
+               ie_fp16* h_dst,
+               ie_fp16* c_dst,
+               ie_fp16* gates
+              )
+{
+    const int n_gates = 4;
+    const int ohf = 0; const int ohi = 1; const int oho = 2; const int ohc = 3;
+
+    int num_weights = state_size * (input_size + state_size);
+    int num_bias = state_size;
+
+    /* gates = src_layer * weights_layer */
+    gemm(1, n_gates * state_size, input_size,
+         src_layer,     input_size,
+         weights_layer, n_gates * state_size,
+         gates,         n_gates * state_size,
+         f32Tof16(0.0f));
+
+    /* gates += src_iter_h * weights_iter_h */
+    gemm(1, n_gates * state_size, state_size,
+         src_iter_h,     state_size,
+         weights_iter_h, n_gates * state_size,
+         gates,          n_gates * state_size,
+         f32Tof16(1.0f));
+
+    // add bias
+    for (int i = 0; i < 1; i++) {
+        for (int j = 0; j < n_gates; j++) {
+            for (int k = 0; k < state_size; k++) {
+                *(gates + i * n_gates * state_size + j * state_size + k) =
+                f32Tof16(
+                         f16Tof32(*(gates + i * n_gates * state_size + j * state_size + k))
+                       + f16Tof32(*(bias + j * state_size + k))
+                        );
+            }
+        }
+    }
+    // run the eltwise
+    lstm_activation(state_size, n_gates, 1, gates);
+    // compute C_t_l and H_t_l
+    for (int i = 0; i < 1; i++) {
+        for (int j = 0; j < state_size; j++) {
+            float tmp = f16Tof32(at(gates, i, ohf, j, state_size, state_size * n_gates)) *
+                        f16Tof32(at(src_iter_c, i, j, state_size))
+                      + f16Tof32(at(gates, i, ohi, j, state_size, state_size * n_gates)) *
+                        f16Tof32(at(gates, i, ohc, j, state_size, state_size * n_gates));
+            at(c_dst, i, j, state_size) = f32Tof16(tmp);
+            at(h_dst, i, j, state_size) = f32Tof16(f16Tof32(at(gates, i, oho, j, state_size, state_size * n_gates)) * tanhf(tmp));
+        }
+    }
+}
+/* psrc[m][n] -> pdst[n][m] */
+static void matrix_copy_transpose(const ie_fp16 *psrc, ie_fp16 *pdst, int m, int n)
+{
+    for (int i = 0; i < m; i++) {
+        for (int j = 0; j < n; j++) {
+            pdst[j * m + i] = psrc[i * n + j];
+        }
+    }
+}
+
+/* psrc[m][n][k] -> pdst[k][m][n] */
+static void matrix_copy_transpose_repack(const ie_fp16 *psrc, ie_fp16 *pdst, int m, int n, int k)
+{
+    for (int i = 0; i < m; i++) {
+        for (int j = 0; j < n; j++) {
+            for (int l = 0; l < k; l++) {
+                pdst[l * m * n + i * n + j] = psrc[i * n * k + j * k + l];
+            }
+        }
+    }
+}
+
+TEST_P(myriadLayersTestsLSTMCell_nightly, LSTMCell) {
+    auto param = GetParam();
+    lstmcell_test_params test_params = param;
+
+    size_t input_size = param.input_size;
+    size_t state_size = param.state_size;
+
+    size_t num_weights = ngates * state_size * (input_size + state_size);
+    size_t num_bias = ngates * state_size;
+
+    IN_OUT_desc dims_input;
+    dims_input.resize(3);
+    /* inputs */
+    dims_input[0].resize(2);
+    dims_input[0][0] = 1;
+    dims_input[0][1] = input_size;
+    dims_input[1].resize(2);
+    dims_input[1][0] = 1;
+    dims_input[1][1] = state_size;
+    dims_input[2].resize(2);
+    dims_input[2][0] = 1;
+    dims_input[2][1] = state_size;
+
+    IN_OUT_desc dims_output;
+    dims_output.resize(2);
+    dims_output[0].resize(2);
+    dims_output[0][0] = 1;
+    dims_output[0][1] = state_size;
+    dims_output[1].resize(2);
+    dims_output[1][0] = 1;
+    dims_output[1][1] = state_size;
+
+    SetInputTensors(dims_input);
+    SetOutputTensors(dims_output);
+
+    std::map<std::string, std::string> params {{"hidden_size", std::to_string(state_size)}};
+
+    /* weights generating */
+    TBlob<uint8_t>::Ptr weightsBlob_for_net(GenWeights((num_weights + num_bias)));
+    ie_fp16 *weights_for_net = static_cast<ie_fp16*>(weightsBlob_for_net->buffer());
+
+    TBlob<uint8_t>::Ptr weightsBlob_tmp(GenWeights(num_weights + num_bias));
+    ie_fp16 *weights0 = static_cast<ie_fp16*>(weightsBlob_tmp->buffer());
+    ie_fp16 *weights1 = weights0 + ngates * state_size * input_size;
+
+    TBlob<uint8_t>::Ptr weightsBlob_inv_tmp(GenWeights(num_weights + num_bias));
+    ie_fp16 *weights_inv0 = static_cast<ie_fp16*>(weightsBlob_inv_tmp->buffer());
+    ie_fp16 *weights_inv1 = weights_inv0 + ngates * state_size * input_size;
+
+    int counter = 0;
+    for (int j = 0; j < ngates * state_size; j++) {
+        for (int i = 0; i < input_size; i++) {
+            weights0[(input_size) * j + i] = PrecisionUtils::f32tof16(((float)(rand() % input_size)) / input_size * 0.01);
+            weights_for_net[counter++] = weights0[(input_size) * j + i];
+        }
+        for (int i = 0; i < state_size; i++) {
+            weights1[(state_size) * j + i] = PrecisionUtils::f32tof16(((float)(rand() % state_size)) / state_size * 0.05f);
+            weights_for_net[counter++] = weights1[(state_size) * j + i];
+        }
+    }
+
+    ie_fp16 *bias = weights0 + num_weights;
+    ie_fp16 *bias_inv = weights_inv0 + num_weights;
+    for (int i = 0; i < num_bias; i++) {
+        bias[i] = PrecisionUtils::f32tof16((float)((rand() % num_bias)) / num_bias);
+        *(weights_for_net + num_weights + i) = bias[i];
+    }
+
+    // gates repacking
+    {
+        for (int g = 0; g < ngates; g++)
+        {
+            int stride = state_size * input_size;
+            for (int i = 0; i < stride; i++)
+            {
+                weights_inv0[g * stride + i] = weights0[gate_map[g] * stride + i];
+            }
+        }
+        for (int g = 0; g < ngates; g++)
+        {
+            int stride = state_size * state_size;
+            for (int i = 0; i < stride; i++)
+            {
+                weights_inv1[g * stride + i] = weights1[gate_map[g] * stride + i];
+            }
+        }
+        for (int g = 0; g < ngates; g++)
+        {
+            int stride = state_size;
+            for (int i = 0; i < stride; i++)
+            {
+                bias_inv[g * stride + i] = bias[gate_map[g] * stride + i];
+
+            }
+        }
+    }
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("LSTMCell")
+                                        .params(params)
+                                        .weights(num_weights)
+                                        .biases(num_bias),
+                                        {},
+                                        weightsBlob_for_net));
+
+    /* input tensor generating */
+    auto pInputBlob = _inputMap.begin();
+    Blob::Ptr inputBlob0 = pInputBlob->second;
+    ie_fp16 *src_data0 = static_cast<ie_fp16*>(inputBlob0->buffer());
+    for (int i = 0; i < input_size; i++) {
+        src_data0[i] = PrecisionUtils::f32tof16(( ((float)(rand() % input_size)) / input_size * .1f));
+    }
+    pInputBlob++;
+    Blob::Ptr inputBlob1 = pInputBlob->second;
+    ie_fp16 *src_data1 = static_cast<ie_fp16*>(inputBlob1->buffer());
+    for (int i = 0; i < state_size; i++) {
+        src_data1[i] = PrecisionUtils::f32tof16(( ((float)(rand() % state_size)) / state_size * .2f));
+    }
+    pInputBlob++;
+    Blob::Ptr inputBlob2 = pInputBlob->second;
+    ie_fp16 *src_data2 = static_cast<ie_fp16*>(inputBlob2->buffer());
+    for (int i = 0; i < state_size; i++) {
+        src_data2[i] = PrecisionUtils::f32tof16(( ((float)(rand() % state_size)) / state_size * .3f));
+    }
+
+    /* reference version */
+    auto refOut0 = make_shared_blob<ie_fp16>({Precision::FP16, dims_output[0], Layout::NC});
+    refOut0->allocate();
+    auto refOut1 = make_shared_blob<ie_fp16>({Precision::FP16, dims_output[1], Layout::NC});
+    refOut1->allocate();
+    auto gatesBlob = make_shared_blob<ie_fp16>({Precision::FP16, {1, ngates * state_size}, Layout::NC});
+    gatesBlob->allocate();
+    // num_weights + num_bias
+    auto weightsBlob0_repacked = make_shared_blob<ie_fp16>({Precision::FP16, {ngates * state_size * input_size, 1}, Layout::NC});
+    weightsBlob0_repacked->allocate();
+    auto weightsBlob1_repacked = make_shared_blob<ie_fp16>({Precision::FP16, {ngates * state_size * state_size, 1}, Layout::NC});
+    weightsBlob1_repacked->allocate();
+
+    ie_fp16* h_dst = static_cast<ie_fp16*>(refOut0->buffer());
+    ie_fp16* c_dst = static_cast<ie_fp16*>(refOut1->buffer());
+    ie_fp16* gates = static_cast<ie_fp16*>(gatesBlob->buffer());
+
+    /* weights repacking */
+    ie_fp16* weights0_repacked = static_cast<ie_fp16*>(weightsBlob0_repacked->buffer());
+    ie_fp16* weights1_repacked = static_cast<ie_fp16*>(weightsBlob1_repacked->buffer());
+    matrix_copy_transpose(weights_inv0, weights0_repacked, ngates * state_size, input_size);
+    matrix_copy_transpose(weights_inv1, weights1_repacked, ngates * state_size, state_size);
+
+    lstm_cell(input_size,
+              state_size,
+
+              // weights
+              weights0_repacked,
+              weights1_repacked,
+              bias_inv,
+
+              // input
+              src_data0,
+              src_data1,
+              src_data2,
+
+              // output
+              h_dst,
+              c_dst,
+
+              gates
+             );
+
+    ASSERT_TRUE(Infer());
+
+    /* output tensors comparing */
+    auto pOutputBlob = _outputMap.begin();
+    auto outputBlob0 = pOutputBlob->second;
+    pOutputBlob++;
+    auto outputBlob1 = pOutputBlob->second;
+
+    CompareCommonAbsolute(outputBlob0, refOut0, ERROR_BOUND);
+    CompareCommonAbsolute(outputBlob1, refOut1, ERROR_BOUND);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_mvn_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_mvn_test.cpp
new file mode 100644 (file)
index 0000000..a54ed4a
--- /dev/null
@@ -0,0 +1,100 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_mvn_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsMVN_nightly,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(s_MVNTensors),
+                                ::testing::ValuesIn(s_MVN_acrossChannels),
+                                ::testing::ValuesIn(s_MVN_normalize),
+                                ::testing::ValuesIn(s_MVN_epsilon),
+                                ::testing::ValuesIn(s_MVNCustomConfig)));
+
+TEST_F(myriadLayersTests_nightly, MVN_CHW_Input)
+{
+    std::string model = R"V0G0N(
+        <net name="MVN" version="2" batch="1">
+            <layers>
+                <layer name="data" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>512</dim>
+                            <dim>896</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="mvn" type="MVN" precision="FP16" id="2">
+                    <data across_channels="1" eps="9.999999717180685e-10" normalize_variance="1"/>
+                    <input>
+                        <port id="2">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>512</dim>
+                            <dim>896</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="3">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>512</dim>
+                            <dim>896</dim>
+                        </port>
+                    </output>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="1" to-layer="2" to-port="2"/>
+            </edges>
+        </net>
+    )V0G0N";
+
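+    // Feed the network through an explicitly NCHW (channel-major) blob to
+    // exercise the CHW input path; the reference result is computed with
+    // refMVN(..., isCHW = true).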
+    StatusCode st;
+
+    ASSERT_NO_THROW(readNetwork(model));
+
+    const auto& network = _cnnNetwork;
+
+    _inputsInfo = network.getInputsInfo();
+    _inputsInfo["data"]->setPrecision(Precision::FP16);
+
+    _outputsInfo = network.getOutputsInfo();
+    _outputsInfo["mvn"]->setPrecision(Precision::FP16);
+
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network,
+                                                      {{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)}}, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    auto tensorDesc = TensorDesc(Precision::FP16, _inputsInfo["data"]->getTensorDesc().getDims(), Layout::NCHW);
+    auto inputNCHW = make_shared_blob<ie_fp16>(tensorDesc);
+    ASSERT_NO_THROW(inputNCHW->allocate());
+
+    auto outputNCHW = make_shared_blob<ie_fp16>(tensorDesc);
+    ASSERT_NO_THROW(outputNCHW->allocate());
+
+    auto output_ref = make_shared_blob<ie_fp16>(tensorDesc);
+    ASSERT_NO_THROW(output_ref->allocate());
+
+    ASSERT_NO_THROW(GenRandomData(inputNCHW));
+
+    ASSERT_NO_THROW(st = _inferRequest->SetBlob("data", inputNCHW, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _inferRequest->SetBlob("mvn", outputNCHW, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_FATAL_FAILURE(refMVN(inputNCHW, output_ref, 1, 1, 9.999999717180685e-10, true));
+
+    CompareCommonAbsolute(outputNCHW, output_ref, 0.003);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_mvn_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_mvn_test.hpp
new file mode 100644 (file)
index 0000000..79def37
--- /dev/null
@@ -0,0 +1,184 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+
+using namespace InferenceEngine;
+
+#define ERROR_BOUND 0.2f
+
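+// Reference MVN: subtracts the mean (per channel, or over all channels when
+// across_channels != 0) and, if normalize_variance != 0, divides by
+// (sqrt(variance) + eps). Data is FP16, arithmetic is done in FP32.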
+static void refMVN(const Blob::Ptr src,
+                   Blob::Ptr dst,
+                   int across_channels, int normalize_variance, const float eps, int isCHW) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+    const uint16_t *src_data = src->buffer();
+    uint16_t *dst_data = dst->buffer();
+    ASSERT_NE(src_data, nullptr);
+    ASSERT_NE(dst_data, nullptr);
+
+    int32_t IW = 1;
+    int32_t IH = 1;
+    int32_t IC = 1;
+    int32_t IB = 1;
+    get_dims(src, IW, IH, IC);
+
+    float* mean_buf = new float[IW*IH*IC];
+
+    for (int b = 0; b < IB; b++)
+    {
+        // Calculate mean value
+        if (across_channels)
+        {
+            float mean = 0;
+            for (int c = 0; c < IC; c++) {
+                for (int h = 0; h < IH; h++) {
+                    for (int w = 0; w < IW; w++) {
+                        int ind = isCHW ? c*IH*IW + h*IW + w : h*IW*IC + w*IC + c;
+                        float s = PrecisionUtils::f16tof32(src_data[ind]);
+                        mean += s;
+                    }
+                }
+            }
+            mean /= IC*IH*IW;
+            for (int c = 0; c < IC; c++) {
+                for (int h = 0; h < IH; h++) {
+                    for (int w = 0; w < IW; w++) {
+                        int ind = isCHW ? c*IH*IW + h*IW + w : h*IW*IC + w*IC + c;
+                        float s = PrecisionUtils::f16tof32(src_data[ind]);
+                        mean_buf[ind] = s - mean;
+                        dst_data[ind] = PrecisionUtils::f32tof16(s - mean);
+                    }
+                }
+            }
+        }
+        else {
+            for (int c = 0; c < IC; c++)
+            {
+                float mean = 0;
+                for (int h = 0; h < IH; h++) {
+                    for (int w = 0; w < IW; w++) {
+                        int ind = isCHW ? c*IH*IW + h*IW + w : h*IW*IC + w*IC + c;
+                        float s = PrecisionUtils::f16tof32(src_data[ind]);
+                        mean += s;
+                    }
+                }
+                mean /= IH*IW;
+                for (int h = 0; h < IH; h++) {
+                    for (int w = 0; w < IW; w++) {
+                        int ind = isCHW ? c*IH*IW + h*IW + w : h*IW*IC + w*IC + c;
+                        float s = PrecisionUtils::f16tof32(src_data[ind]);
+                        mean_buf[ind] = s - mean;
+                        dst_data[ind] = PrecisionUtils::f32tof16(s - mean);
+                    }
+                }
+            }
+        }
+    }
+
+    if (normalize_variance)
+    {
+        for (int b = 0; b < IB; b++)
+        {
+            // Calculate variances value
+            if (across_channels)
+            {
+                float variance = 0;
+                for (int c = 0; c < IC; c++) {
+                    for (int h = 0; h < IH; h++) {
+                        for (int w = 0; w < IW; w++) {
+                            int ind = isCHW ? c*IH*IW + h*IW + w : h*IW*IC + w*IC + c;
+                            variance += mean_buf[ind] * mean_buf[ind];
+                        }
+                    }
+                }
+                variance /= IC*IH*IW;
+                variance = sqrtf(variance);
+                variance += eps;
+                for (int c = 0; c < IC; c++) {
+                    for (int h = 0; h < IH; h++) {
+                        for (int w = 0; w < IW; w++) {
+                            int ind = isCHW ? c*IH*IW + h*IW + w : h*IW*IC + w*IC + c;
+                            dst_data[ind] = PrecisionUtils::f32tof16(mean_buf[ind] / variance);
+                        }
+                    }
+                }
+            }
+            else {
+                for (int c = 0; c < IC; c++)
+                {
+                    float variance = 0;
+                    for (int h = 0; h < IH; h++) {
+                        for (int w = 0; w < IW; w++) {
+                            int ind = isCHW ? c*IH*IW + h*IW + w : h*IW*IC + w*IC + c;
+                            variance += mean_buf[ind] * mean_buf[ind];
+                        }
+                    }
+                    variance /= IH*IW;
+                    variance = sqrtf(variance);
+                    variance += eps;
+                    for (int h = 0; h < IH; h++) {
+                        for (int w = 0; w < IW; w++) {
+                            int ind = isCHW ? c*IH*IW + h*IW + w : h*IW*IC + w*IC + c;
+                            dst_data[ind] = PrecisionUtils::f32tof16(mean_buf[ind] / variance);
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    delete[] mean_buf;
+}
+
+PRETTY_PARAM(AcrossChannels, int)
+PRETTY_PARAM(Normalize, int)
+PRETTY_PARAM(Epsilon, float)
+
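+// Parameter tuple: tensor dims, across_channels, normalize_variance, eps, and
+// an optional custom-kernel config path (an empty string uses the built-in
+// implementation).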
+typedef myriadLayerTestBaseWithParam<std::tuple<Dims, AcrossChannels, Normalize, Epsilon, std::string>> myriadLayersTestsMVN_nightly;
+
+TEST_P(myriadLayersTestsMVN_nightly, MVN)
+{
+    tensor_test_params dims  = std::get<0>(GetParam());
+    int acrossChannels       = std::get<1>(GetParam());
+    int normalize            = std::get<2>(GetParam());
+    float eps                = std::get<3>(GetParam());
+    std::string customConfig = std::get<4>(GetParam());
+
+    if (!customConfig.empty() && !CheckMyriadX()) {
+        GTEST_SKIP() << "Custom layers are not supported for MYRIAD2";
+    }
+    _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig;
+
+    SetInputTensor(dims);
+    SetOutputTensor(dims);
+
+    std::map<std::string, std::string> params;
+    params["across_channels"] = std::to_string(acrossChannels);
+    params["normalize_variance"] = std::to_string(normalize);
+    params["eps"] = std::to_string(eps);
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("MVN").params(params)));
+    ASSERT_NO_FATAL_FAILURE(SetFirstInputToRange(0, 256));
+
+    ASSERT_TRUE(Infer());
+
+    ASSERT_NO_FATAL_FAILURE(refMVN(_inputMap.begin()->second, _refBlob, acrossChannels, normalize, eps, false));
+
+    CompareCommonAbsolute(_outputMap.begin()->second, _refBlob, ERROR_BOUND);
+}
+
+static std::vector<Dims> s_MVNTensors = {
+        {{1, 3, 512, 896}}
+};
+
+static std::vector<AcrossChannels> s_MVN_acrossChannels = {0, 1};
+static std::vector<Normalize> s_MVN_normalize = {0, 1};
+static std::vector<Epsilon> s_MVN_epsilon = {1.0e-10f, 1.0e-8f, 1.0e-7f, 1.0e-5f, 1.0e-3f};
+static std::vector<std::string> s_MVNCustomConfig = {
+    "",
+#ifdef VPU_HAS_CUSTOM_KERNELS
+    getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"
+#endif
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_nms_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_nms_test.cpp
new file mode 100644 (file)
index 0000000..d93cfeb
--- /dev/null
@@ -0,0 +1,260 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_nms_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsNonMaxSuppression_nightly,
+    ::testing::Values(
+            MAKE_STRUCT(NMS_testParams,
+                        {6, 1, 1}, // {spatial_dimension, num_classes, num_batches}
+                        1,
+                        {3},
+                        {0.5f},
+                        {0.f},
+                        { // batches
+                                { // spatial_dimension
+                                        {0.5f, 0.5f, 1.0f, 1.0f}, // center_point_box=0: {y1, x1, y2, x2}; center_point_box=1: {x_center, y_center, w, h}
+                                        {0.5f, 0.6f, 1.0f, 1.0f},
+                                        {0.5f, 0.4f, 1.0f, 1.0f},
+                                        {0.5f, 10.5f, 1.0f, 1.0f},
+                                        {0.5f, 10.6f, 1.0f, 1.0f},
+                                        {0.5f, 100.5f, 1.0f, 1.0f},
+                                },
+                        },
+                        { // batches
+                                { // classes
+                                        {0.9f, 0.75f, 0.6f, 0.95f, 0.5f, 0.3f}, // spatial_dimension
+                                },
+                        },
+                        { // num_selected_indices
+                                {0, 0, 3}, // {batch_index, class_index, box_index}
+                                {0, 0, 0},
+                                {0, 0, 5},
+                        }
+            ),
+            MAKE_STRUCT(NMS_testParams,
+                        {6, 1, 1},
+                        0,
+                        {3},
+                        {0.5f},
+                        {0.f},
+                        {
+                                {
+                                        {1.0f, 1.0f, 0.0f, 0.0f},
+                                        {0.0f, 0.1f, 1.0f, 1.1f},
+                                        {0.0f, 0.9f, 1.0f, -0.1f},
+                                        {0.0f, 10.0f, 1.0f, 11.0f},
+                                        {1.0f, 10.1f, 0.0f, 11.1f},
+                                        {1.0f, 101.0f, 0.0f, 100.0f}
+                                }
+                        },
+                        {
+                                {
+                                        {0.9f, 0.75f, 0.6f, 0.95f, 0.5f, 0.3f}
+                                }
+                        },
+                        {
+                                {0, 0, 3},
+                                {0, 0, 0},
+                                {0, 0, 5},
+                        }
+            ),
+            MAKE_STRUCT(NMS_testParams,
+                        {10, 1, 1},
+                        0,
+                        {3},
+                        {0.5f},
+                        {0.f},
+                        {
+                                {
+                                        {0.0f, 0.0f, 1.0f, 1.0f},
+                                        {0.0f, 0.0f, 1.0f, 1.0f},
+                                        {0.0f, 0.0f, 1.0f, 1.0f},
+                                        {0.0f, 0.0f, 1.0f, 1.0f},
+                                        {0.0f, 0.0f, 1.0f, 1.0f},
+                                        {0.0f, 0.0f, 1.0f, 1.0f},
+                                        {0.0f, 0.0f, 1.0f, 1.0f},
+                                        {0.0f, 0.0f, 1.0f, 1.0f},
+                                        {0.0f, 0.0f, 1.0f, 1.0f},
+                                        {0.0f, 0.0f, 1.0f, 1.0f},
+                                }
+                        },
+                        {
+                                {
+                                        {0.9f, 0.9f, 0.9f, 0.9f, 0.9f, 0.9f, 0.9f, 0.9f, 0.9f, 0.9f}
+                                }
+                        },
+                        {
+                                {0, 0, 0},
+                        }
+            ),
+            MAKE_STRUCT(NMS_testParams,
+                        {6, 1, 1},
+                        0,
+                        {2},
+                        {0.5f},
+                        {0.f},
+                        {
+                                {
+                                        {0.0f, 0.0f, 1.0f, 1.0f},
+                                        {0.0f, 0.1f, 1.0f, 1.1f},
+                                        {0.0f, -0.1f, 1.0f, 0.9f},
+                                        {0.0f, 10.0f, 1.0f, 11.0f},
+                                        {0.0f, 10.1f, 1.0f, 11.1f},
+                                        {0.0f, 100.0f, 1.0f, 101.0f},
+                                }
+                        },
+                        {
+                                {
+                                        {0.9f, 0.75f, 0.6f, 0.95f, 0.5f, 0.3f}
+                                }
+                        },
+                        {
+                                {0, 0, 3},
+                                {0, 0, 0},
+                        }
+            ),
+            MAKE_STRUCT(NMS_testParams,
+                        {1, 1, 1},
+                        0,
+                        {3},
+                        {0.5f},
+                        {0.f},
+                        {
+                                {
+                                        {0.0f, 0.0f, 1.0f, 1.0f},
+                                }
+                        },
+                        {
+                                {
+                                        {0.9f}
+                                }
+                        },
+                        {
+                                {0, 0, 0},
+                        }
+            ),
+            MAKE_STRUCT(NMS_testParams,
+                        {6, 1, 1},
+                        0,
+                        {3},
+                        {0.5f},
+                        {0.f},
+                        {
+                                {
+                                        {0.0f, 0.0f, 1.0f, 1.0f},
+                                        {0.0f, 0.1f, 1.0f, 1.1f},
+                                        {0.0f, -0.1f, 1.0f, 0.9f},
+                                        {0.0f, 10.0f, 1.0f, 11.0f},
+                                        {0.0f, 10.1f, 1.0f, 11.1f},
+                                        {0.0f, 100.0f, 1.0f, 101.0f},
+                                }
+                        },
+                        {
+                                {
+                                        {0.9f, 0.75f, 0.6f, 0.95f, 0.5f, 0.3f}
+                                }
+                        },
+                        {
+                                {0, 0, 3},
+                                {0, 0, 0},
+                                {0, 0, 5},
+                        }
+            ),
+            MAKE_STRUCT(NMS_testParams,
+                        {6, 1, 1},
+                        0,
+                        {3},
+                        {0.5f},
+                        {0.4f},
+                        {
+                                {
+                                        {0.0f, 0.0f, 1.0f, 1.0f},
+                                        {0.0f, 0.1f, 1.0f, 1.1f},
+                                        {0.0f, -0.1f, 1.0f, 0.9f},
+                                        {0.0f, 10.0f, 1.0f, 11.0f},
+                                        {0.0f, 10.1f, 1.0f, 11.1f},
+                                        {0.0f, 100.0f, 1.0f, 101.0f},
+                                }
+                        },
+                        {
+                                {
+                                        {0.9f, 0.75f, 0.6f, 0.95f, 0.5f, 0.3f}
+                                }
+                        },
+                        {
+                                {0, 0, 3},
+                                {0, 0, 0},
+                        }
+            ),
+            MAKE_STRUCT(NMS_testParams,
+                        {6, 1, 2},
+                        0,
+                        {2},
+                        {0.5f},
+                        {0.0f},
+                        {
+                                {
+                                        {0.0f, 0.0f, 1.0f, 1.0f},
+                                        {0.0f, 0.1f, 1.0f, 1.1f},
+                                        {0.0f, -0.1f, 1.0f, 0.9f},
+                                        {0.0f, 10.0f, 1.0f, 11.0f},
+                                        {0.0f, 10.1f, 1.0f, 11.1f},
+                                        {0.0f, 100.0f, 1.0f, 101.0f},
+                                },
+                                {
+                                        {0.0f, 0.0f, 1.0f, 1.0f},
+                                        {0.0f, 0.1f, 1.0f, 1.1f},
+                                        {0.0f, -0.1f, 1.0f, 0.9f},
+                                        {0.0f, 10.0f, 1.0f, 11.0f},
+                                        {0.0f, 10.1f, 1.0f, 11.1f},
+                                        {0.0f, 100.0f, 1.0f, 101.0f},
+                                }
+                        },
+                        {
+                                {
+                                        {0.9f, 0.75f, 0.6f, 0.95f, 0.5f, 0.3f}
+                                },
+                                {
+                                        {0.9f, 0.75f, 0.6f, 0.95f, 0.5f, 0.3f}
+                                }
+                        },
+                        {
+                                {0, 0, 3},
+                                {0, 0, 0},
+                                {1, 0, 3},
+                                {1, 0, 0},
+                        }
+            ),
+            MAKE_STRUCT(NMS_testParams,
+                        {6, 2, 1},
+                        0,
+                        {2},
+                        {0.5f},
+                        {0.0f},
+                        {
+                                {
+                                        {0.0f, 0.0f, 1.0f, 1.0f},
+                                        {0.0f, 0.1f, 1.0f, 1.1f},
+                                        {0.0f, -0.1f, 1.0f, 0.9f},
+                                        {0.0f, 10.0f, 1.0f, 11.0f},
+                                        {0.0f, 10.1f, 1.0f, 11.1f},
+                                        {0.0f, 100.0f, 1.0f, 101.0f},
+                                }
+                        },
+                        {
+                                {
+                                        {0.9f, 0.75f, 0.6f, 0.95f, 0.5f, 0.3f},
+                                        {0.9f, 0.75f, 0.6f, 0.95f, 0.5f, 0.3f}
+                                }
+                        },
+                        {
+                                {0, 0, 3},
+                                {0, 0, 0},
+                                {0, 1, 3},
+                                {0, 1, 0},
+                        }
+            )
+    )
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_nms_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_nms_test.hpp
new file mode 100644 (file)
index 0000000..ec6340f
--- /dev/null
@@ -0,0 +1,264 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include "tests_vpu_common.hpp"
+
+using namespace InferenceEngine;
+
+typedef std::vector<int> NMS_Dims;
+typedef std::vector<std::vector<std::vector<float>>> init3DFloat;
+typedef std::vector<int> initIntScalar;
+typedef std::vector<float> initFPScalar;
+typedef std::vector<std::vector<int>> refType;
+struct NMS_testParams {
+    int dims[3]; // {spat_dim, num_classes, num_batches}
+    int centerPointBox;
+    initIntScalar MaxOutBoxesPerClass; // scalar
+    initFPScalar IoUThreshold; // scalar
+    initFPScalar ScoreThreshold; // scalar
+    init3DFloat boxes;
+    init3DFloat scores;
+    refType referenceOutput;
+};
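+
+// Builds an IR v7 network with a single NonMaxSuppression layer; the optional
+// MaxOutputBoxesPerClass, IoUThreshold and ScoreThreshold inputs (and their
+// edges) are emitted only when numOfInputs asks for them (2 to 5 inputs).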
+static std::string getModel(const int numOfInputs, const NMS_Dims &dims, const int center_point_box) {
+    std::string model = R"V0G0N(
+                <net name="testNMS" version="7">
+                    <layers>
+                        <layer id="0" name="boxes" precision="FP16" type="Input">
+                            <output>
+                                <port id="0">
+                                    <dim>__BATCHES__</dim>
+                                    <dim>__SPAT_DIM__</dim>
+                                    <dim>4</dim>
+                                </port>
+                            </output>
+                        </layer>
+                        <layer id="1" name="scores" precision="FP16" type="Input">
+                            <output>
+                                <port id="0">
+                                    <dim>__BATCHES__</dim>
+                                    <dim>__CLASSES__</dim>
+                                    <dim>__SPAT_DIM__</dim>
+                                </port>
+                            </output>
+                        </layer>)V0G0N";
+    if (numOfInputs > 2)
+        model += R"V0G0N(
+                        <layer id="2" name="MaxOutputBoxesPerClass" precision="I32" type="Input">
+                            <output>
+                                <port id="0">
+                                    <dim>1</dim>
+                                </port>
+                            </output>
+                        </layer>)V0G0N";
+    if (numOfInputs > 3)
+        model += R"V0G0N(
+                        <layer id="3" name="IoUThreshold" precision="FP16" type="Input">
+                            <output>
+                                <port id="0">
+                                    <dim>1</dim>
+                                </port>
+                            </output>
+                        </layer>)V0G0N";
+    if (numOfInputs > 4)
+        model += R"V0G0N(
+                        <layer id="4" name="ScoreThreshold" precision="FP16" type="Input">
+                            <output>
+                                <port id="0">
+                                    <dim>1</dim>
+                                </port>
+                            </output>
+                        </layer>)V0G0N";
+    model += R"V0G0N(
+                        <layer id="5" name="NMS" precision="I32" type="NonMaxSuppression">
+                            <data center_point_box="__CPB__"/>
+                            <input>
+                                <port id="0">
+                                    <dim>__BATCHES__</dim>
+                                    <dim>__SPAT_DIM__</dim>
+                                    <dim>4</dim>
+                                </port>
+                                <port id="1">
+                                    <dim>__BATCHES__</dim>
+                                    <dim>__CLASSES__</dim>
+                                    <dim>__SPAT_DIM__</dim>
+                                </port>)V0G0N";
+    if (numOfInputs > 2)
+        model += R"V0G0N(
+                                <port id="2">
+                                    <dim>1</dim>
+                                </port>)V0G0N";
+    if (numOfInputs > 3)
+        model += R"V0G0N(
+                                <port id="3">
+                                    <dim>1</dim>
+                                </port>)V0G0N";
+    if (numOfInputs > 4)
+        model += R"V0G0N(
+                                <port id="4">
+                                    <dim>1</dim>
+                                </port>)V0G0N";
+    model += R"V0G0N(
+                            </input>
+                            <output>
+                                <port id="4">
+                                    <dim>__SPAT_DIM__</dim>
+                                    <dim>3</dim>
+                                </port>
+                            </output>
+                        </layer>
+                    </layers>
+                    <edges>
+                        <edge from-layer="0" from-port="0" to-layer="5" to-port="0"/>
+                        <edge from-layer="1" from-port="0" to-layer="5" to-port="1"/>)V0G0N";
+    if (numOfInputs > 2)
+        model += R"V0G0N(
+                        <edge from-layer="2" from-port="0" to-layer="5" to-port="2"/>)V0G0N";
+    if (numOfInputs > 3)
+        model += R"V0G0N(
+                        <edge from-layer="3" from-port="0" to-layer="5" to-port="3"/>)V0G0N";
+    if (numOfInputs > 4)
+        model += R"V0G0N(
+                        <edge from-layer="4" from-port="0" to-layer="5" to-port="4"/>)V0G0N";
+    model += R"V0G0N(
+                    </edges>
+                </net>
+            )V0G0N";
+
+    REPLACE_WITH_STR(model, "__SPAT_DIM__", std::to_string(dims[0]));
+    REPLACE_WITH_STR(model, "__CLASSES__", std::to_string(dims[1]));
+    REPLACE_WITH_STR(model, "__BATCHES__", std::to_string(dims[2]));
+    REPLACE_WITH_STR(model, "__CPB__", std::to_string(center_point_box));
+
+    return model;
+}
+
+static void copyScalarToBlob(const Blob::Ptr& blob, const initIntScalar& scalar) {
+    auto *data = blob->buffer().as<int32_t *>();
+    data[0] = scalar[0];
+}
+
+static void copyScalarToBlob(const Blob::Ptr& blob, const initFPScalar& scalar) {
+    auto *data = blob->buffer().as<ie_fp16 *>();
+    data[0] = PrecisionUtils::f32tof16(scalar[0]);
+}
+
+static void copy3DToBlob(const Blob::Ptr& blob, const init3DFloat& src) {
+    auto *data = blob->buffer().as<ie_fp16 *>();
+    const auto dims = blob->getTensorDesc().getDims();
+    for (int i = 0; i < dims[0]; i++) {
+        for (int j = 0; j < dims[1]; j++) {
+            for (int k = 0; k < dims[2]; k++) {
+                data[i * dims[1] * dims[2] + j * dims[2] + k] = PrecisionUtils::f32tof16(src[i][j][k]);
+            }
+        }
+    }
+}
+
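+// Copies the expected {batch_index, class_index, box_index} triples into the
+// reference blob and pads the remaining rows with -1, marking unused slots.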
+static void copyReference(const Blob::Ptr& blob, const refType& src) {
+    int32_t *data = blob->buffer().as<int32_t *>();
+    const auto dims = blob->getTensorDesc().getDims();
+
+    size_t boxNum = 0;
+    for (; boxNum < src.size(); boxNum++) {
+        data[boxNum * 3 + 0] = src[boxNum][0];
+        data[boxNum * 3 + 1] = src[boxNum][1];
+        data[boxNum * 3 + 2] = src[boxNum][2];
+    }
+    for (; boxNum < dims[0]; boxNum++) {
+        data[boxNum * 3 + 0] = -1;
+        data[boxNum * 3 + 1] = -1;
+        data[boxNum * 3 + 2] = -1;
+    }
+}
+
+typedef myriadLayerTestBaseWithParam<NMS_testParams> myriadLayersTestsNonMaxSuppression_nightly;
+
+TEST_P(myriadLayersTestsNonMaxSuppression_nightly, NonMaxSuppression) {
+    const auto params = GetParam();
+    const int spatDim = params.dims[0];
+    const int numClasses = params.dims[1];
+    const int numBatches = params.dims[2];
+    const int center_point_box = params.centerPointBox;
+
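+    // Optional inputs are cumulative: providing ScoreThreshold implies that
+    // IoUThreshold and MaxOutputBoxesPerClass are provided as well.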
+    int numOfInputs = 2;
+    if (!params.ScoreThreshold.empty())
+        numOfInputs = 5;
+    else if (!params.IoUThreshold.empty())
+        numOfInputs = 4;
+    else if (!params.MaxOutBoxesPerClass.empty())
+        numOfInputs = 3;
+
+    const auto model = getModel(numOfInputs, {spatDim, numClasses, numBatches}, center_point_box);
+    ASSERT_NO_THROW(readNetwork(model));
+
+    const auto& network = _cnnNetwork;
+    _inputsInfo = network.getInputsInfo();
+    _inputsInfo["boxes"]->setPrecision(Precision::FP16);
+    _inputsInfo["scores"]->setPrecision(Precision::FP16);
+    if (numOfInputs > 2)
+        _inputsInfo["MaxOutputBoxesPerClass"]->setPrecision(Precision::I32);
+    if (numOfInputs > 3)
+        _inputsInfo["IoUThreshold"]->setPrecision(Precision::FP16);
+    if (numOfInputs > 4)
+        _inputsInfo["ScoreThreshold"]->setPrecision(Precision::FP16);
+
+    _outputsInfo = network.getOutputsInfo();
+    _outputsInfo["NMS"]->setPrecision(Precision::I32);
+
+    StatusCode st = OK;
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, _config, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr boxesBlob;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("boxes", boxesBlob, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    copy3DToBlob(boxesBlob, params.boxes);
+
+    Blob::Ptr scoresBlob;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("scores", scoresBlob, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    copy3DToBlob(scoresBlob, params.scores);
+
+    if (numOfInputs > 2) {
+        Blob::Ptr MaxOutputBoxesBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("MaxOutputBoxesPerClass", MaxOutputBoxesBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        copyScalarToBlob(MaxOutputBoxesBlob, params.MaxOutBoxesPerClass);
+    }
+
+    if (numOfInputs > 3) {
+        Blob::Ptr IoUThresholdBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("IoUThreshold", IoUThresholdBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        copyScalarToBlob(IoUThresholdBlob, params.IoUThreshold);
+    }
+
+    if (numOfInputs > 4) {
+        Blob::Ptr ScoreThresholdBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("ScoreThreshold", ScoreThresholdBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        copyScalarToBlob(ScoreThresholdBlob, params.ScoreThreshold);
+    }
+
+    ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr outputBlob;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("NMS", outputBlob, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr refBlob = make_shared_blob<int32_t>(outputBlob->getTensorDesc());
+    refBlob->allocate();
+    copyReference(refBlob, params.referenceOutput);
+
+    if (memcmp(refBlob->cbuffer(), outputBlob->cbuffer(), outputBlob->byteSize()) != 0)
+        FAIL() << "Wrong result compared to the ONNX reference!";
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_nonzero_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_nonzero_test.cpp
new file mode 100644 (file)
index 0000000..a75812d
--- /dev/null
@@ -0,0 +1,8 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_nonzero_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayerTestNonZero_nightly,
+                        ::testing::ValuesIn(inputDims));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_nonzero_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_nonzero_test.hpp
new file mode 100644 (file)
index 0000000..c2af76b
--- /dev/null
@@ -0,0 +1,140 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+
+#include <blob_factory.hpp>
+
+#include <algorithm>
+#include <random>
+
+using namespace InferenceEngine;
+
+class myriadLayerTestNonZero_nightly: public myriadLayersTests_nightly,
+                                      public testing::WithParamInterface<SizeVector> {
+public:
+    void testNonZero(vpu::LayoutPreference preference, Precision precision);
+
+protected:
+    static void GenRandomNonZeroData(Blob::Ptr& blob) {
+        std::mt19937 generator(DEFAULT_SEED_VALUE);
+
+        const auto getRandomValue = [&generator]() {
+            // Each third value will be a zero for test NonZero functionality
+            return generator() % 3 ? float(generator()) / generator.max() * 255.f : 0.f;
+        };
+
+        size_t count = blob->size();
+        if (blob->getTensorDesc().getPrecision() == Precision::U8) {
+            auto blobPtr = InferenceEngine::as<MemoryBlob>(blob)->rwmap().as<uint8_t*>();
+            for (size_t idx = 0; idx < count; ++idx) {
+                blobPtr[idx] = static_cast<uint8_t>(getRandomValue());
+            }
+        } else if (blob->getTensorDesc().getPrecision() == Precision::I32) {
+            auto blobPtr = InferenceEngine::as<MemoryBlob>(blob)->rwmap().as<int32_t*>();
+            for (size_t idx = 0; idx < count; ++idx) {
+                blobPtr[idx] = static_cast<int32_t>(getRandomValue());
+            }
+        } else {
+            auto blobPtr = InferenceEngine::as<MemoryBlob>(blob)->rwmap().as<ie_fp16*>();
+            for (size_t idx = 0; idx < count; ++idx) {
+                blobPtr[idx] = PrecisionUtils::f32tof16(getRandomValue());
+            }
+        }
+    }
+
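+    // Only the first outputDims[0] indices are compared, so the trailing part
+    // of the worst-case-sized index tensor is ignored.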
+    static void CompareNonZero(const InferenceEngine::Blob::Ptr& outputIndicesBlob,
+                               const InferenceEngine::Blob::Ptr& refIndicesBlob,
+                               const InferenceEngine::Blob::Ptr& outputDimsBlob,
+                               const InferenceEngine::Blob::Ptr& refDimsBlob) {
+        const auto outputIndicesPtr = InferenceEngine::as<MemoryBlob>(
+                outputIndicesBlob)->rmap().as<const int32_t*>();
+        const auto refIndicesPtr = InferenceEngine::as<MemoryBlob>(
+                refIndicesBlob)->rmap().as<const int32_t*>();
+        const auto outputDimsPtr = InferenceEngine::as<MemoryBlob>(
+                outputDimsBlob)->rmap().as<const int32_t*>();
+        const auto refDimsPtr = InferenceEngine::as<MemoryBlob>(
+                refDimsBlob)->rmap().as<const int32_t*>();
+
+        ASSERT_EQ(outputDimsPtr[0], refDimsPtr[0]);
+        ASSERT_EQ(outputDimsPtr[1], refDimsPtr[1]);
+
+        const auto totalDimsSize = refIndicesBlob->getTensorDesc().getDims()[1];
+
+        for (int axis = 0; axis < outputDimsPtr[1]; ++axis) {
+            for (int i = 0; i < outputDimsPtr[0]; ++i) {
+                const auto idx = i + axis * totalDimsSize;
+                ASSERT_EQ(outputIndicesPtr[idx], refIndicesPtr[idx]);
+            }
+        }
+    }
+};
+
+void myriadLayerTestNonZero_nightly::testNonZero(vpu::LayoutPreference preference, Precision precision) {
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+    const auto& inputDims = GetParam();
+    const size_t numDims = inputDims.size();
+    const size_t inputTotalSize = std::accumulate(inputDims.begin(), inputDims.end(), 1,
+                                                  std::multiplies<size_t>());
+
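+    // StaticShapeNonZero returns a worst-case [numDims, inputTotalSize] index
+    // tensor plus a two-element tensor holding the actually used output shape.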
+    const SizeVector outIndicesDims = {numDims, inputTotalSize};
+    const SizeVector outDimsDims = {outIndicesDims.size()};
+
+    SetInputTensors({inputDims});
+    SetOutputTensors({outIndicesDims, outDimsDims});
+
+    makeSingleLayerNetwork(LayerInitParams("StaticShapeNonZero"),
+                           NetworkInitParams()
+                                   .inputPrecision(precision)
+                                   .outputPrecision(Precision::I32)
+                                   .layoutPreference(preference));
+
+    auto inputBlob = _inputMap.begin()->second;
+    auto outputIndicesBlob = _outputMap.begin()->second;
+    auto outputDimsBlob = (++_outputMap.begin())->second;
+
+    inputBlob->getTensorDesc().setLayout(
+            vpu::deviceLayout(inputBlob->getTensorDesc().getLayout(), preference));
+    inputBlob->getTensorDesc().setPrecision(precision);
+    GenRandomNonZeroData(inputBlob);
+
+    auto refIndicesBlob = make_blob_with_precision(outputIndicesBlob->getTensorDesc());
+    auto refDimsBlob = make_blob_with_precision(outputDimsBlob->getTensorDesc());
+    refIndicesBlob->allocate();
+    refDimsBlob->allocate();
+    ref_nonZero(inputBlob, refIndicesBlob, refDimsBlob);
+
+    ASSERT_TRUE(Infer());
+
+    CompareNonZero(outputIndicesBlob, refIndicesBlob, outputDimsBlob, refDimsBlob);
+}
+
+TEST_P(myriadLayerTestNonZero_nightly, NonZero) {
+    testNonZero(vpu::LayoutPreference::ChannelMajor, Precision::FP16);
+}
+
+TEST_P(myriadLayerTestNonZero_nightly, NonZeroNHWC) {
+    testNonZero(vpu::LayoutPreference::ChannelMinor, Precision::FP16);
+}
+
+TEST_P(myriadLayerTestNonZero_nightly, NonZeroI32) {
+    testNonZero(vpu::LayoutPreference::ChannelMajor, Precision::I32);
+}
+
+TEST_P(myriadLayerTestNonZero_nightly, NonZeroU8) {
+    testNonZero(vpu::LayoutPreference::ChannelMajor, Precision::U8);
+}
+
+std::vector<InferenceEngine::SizeVector> inputDims = {
+        { 7 },
+        { 1000 },
+        { 3, 5 },
+        { 65, 33 },
+        { 33, 65 },
+        { 1, 1000 },
+        { 223, 217, 21 },
+        { 3, 4, 5, 1 },
+        { 3, 4, 1, 5, 1 }
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_normalize_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_normalize_test.cpp
new file mode 100644 (file)
index 0000000..3527790
--- /dev/null
@@ -0,0 +1,40 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_normalize_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsNormalize_nightly, ::testing::Combine(
+    ::testing::Values<Dims>(
+        // small size, num_channels is not divisible by 8
+        MAKE_STRUCT(tensor_test_params, 1, 33, 1, 1),
+
+        // size used in SSD_VGG topology
+        MAKE_STRUCT(tensor_test_params, 1, 512, 38, 38),
+
+        // size used in a customer topology
+        MAKE_STRUCT(tensor_test_params, 1, 128, 1, 1)
+    ),
+    ::testing::Values<AcrossSpatial>(false, true),
+    ::testing::Values<ChannelSharedNormalize>(false, true),
+    ::testing::Values<EPS>(1e-10f, 1e-9f, 1e-8f, 1e-7f, 1.192093e-07f, 1e-6f, 1e-5f, 1e-4f, 1e-3f, 0.0f)
+));
+
+
+INSTANTIATE_TEST_CASE_P(accuracy_more, myriadLayersTestsNormalize_nightly, ::testing::Combine(
+    ::testing::Values<Dims>(
+        // more tests
+        MAKE_STRUCT(tensor_test_params, 1, 1, 38, 38),
+        MAKE_STRUCT(tensor_test_params, 1, 1, 1, 1),
+        MAKE_STRUCT(tensor_test_params, 1, 1, 8, 8),
+        MAKE_STRUCT(tensor_test_params, 1, 3, 17, 17),
+        MAKE_STRUCT(tensor_test_params, 1, 1, 17, 17),
+        MAKE_STRUCT(tensor_test_params, 1, 1, 32, 32),
+        MAKE_STRUCT(tensor_test_params, 1, 8, 38, 38),
+        MAKE_STRUCT(tensor_test_params, 1, 512, 1, 1),
+        MAKE_STRUCT(tensor_test_params, 1, 512, 8, 8)
+    ),
+    ::testing::Values<AcrossSpatial>(false, true),
+    ::testing::Values<ChannelSharedNormalize>(false, true),
+    ::testing::Values<EPS>(1e-10f, 1e-9f, 1e-8f, 1e-7f, 1.192093e-07f, 1e-6f, 1e-5f, 1e-4f, 1e-3f, 0.0f)
+));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_normalize_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_normalize_test.hpp
new file mode 100644 (file)
index 0000000..e060fab
--- /dev/null
@@ -0,0 +1,143 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <cmath>
+#include "myriad_layers_tests.hpp"
+
+#include <sstream>
+
+template <typename T>
+std::string to_string_with_precision(const T a_value, const int n = 6)
+{
+    std::ostringstream out;
+    out.precision(n);
+    out << std::fixed << a_value;
+    return out.str();
+}
+
+using namespace InferenceEngine;
+
+#define ERROR_BOUND 1e-3f
+
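+// Reference Normalize: L2-normalizes the input (over the whole C*H*W volume
+// when across_spatial != 0, otherwise per pixel across channels) and scales
+// the result by per-channel weights, or one shared weight if channel_shared.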
+static void refNormalize(const Blob::Ptr src,
+                         Blob::Ptr dst,
+                         ie_fp16* weights_data,
+                         int across_spatial,
+                         int channel_shared,
+                         float eps) {
+    ASSERT_EQ(Layout::NHWC, src->getTensorDesc().getLayout());
+
+    auto src_data = src->buffer().as<const uint16_t*>();
+    auto dst_data = dst->buffer().as<uint16_t*>();
+
+    const auto& dims = src->getTensorDesc().getDims();
+    auto N = dims[0];
+    auto C = dims[1];
+    auto H = dims[2];
+    auto W = dims[3];
+
+    for (size_t n = 0; n < N; ++n) {
+        auto psrc = src_data + n * (C * H * W);
+        auto pdst = dst_data + n * (C * H * W);
+
+        if (across_spatial) {
+            float norm = eps;
+            for (size_t i = 0; i < C * H * W; ++i) {
+                auto src_val = PrecisionUtils::f16tof32(psrc[i]);
+                norm += src_val * src_val;
+            }
+            norm = 1.0f / std::sqrt(norm);
+
+            for (size_t hw = 0; hw < H * W; ++hw) {
+                for (size_t c = 0 ; c < C; ++c) {
+                    auto ind = hw * C + c;
+
+                    if (channel_shared) {
+                        auto w = PrecisionUtils::f16tof32(weights_data[0]);
+                        auto dst_val = PrecisionUtils::f16tof32(psrc[ind]) * norm * w;
+                        pdst[ind] = PrecisionUtils::f32tof16(dst_val);
+                    }
+                    else {
+                        auto w = PrecisionUtils::f16tof32(weights_data[c]);
+                        auto dst_val = PrecisionUtils::f16tof32(psrc[ind]) * norm * w;
+                        pdst[ind] = PrecisionUtils::f32tof16(dst_val);
+                    }
+                }
+            }
+        }
+        else {
+            for (size_t hw = 0; hw < H * W; ++hw) {
+                float norm = eps;
+                for (size_t c = 0; c < C; ++c) {
+                    auto ind = hw * C + c;
+                    auto src_val = PrecisionUtils::f16tof32(psrc[ind]);
+                    norm += src_val * src_val;
+                }
+                norm = 1.0f / std::sqrt(norm);
+
+                for (size_t c = 0; c < C; ++c) {
+                    auto ind = hw * C + c;
+
+                    if (channel_shared) {
+                        auto w = PrecisionUtils::f16tof32(weights_data[0]);
+                        auto dst_val = PrecisionUtils::f16tof32(psrc[ind]) * norm * w;
+                        pdst[ind] = PrecisionUtils::f32tof16(dst_val);
+                    }
+                    else {
+                        auto w = PrecisionUtils::f16tof32(weights_data[c]);
+                        auto dst_val = PrecisionUtils::f16tof32(psrc[ind]) * norm * w;
+                        pdst[ind] = PrecisionUtils::f32tof16(dst_val);
+                    }
+                }
+            }
+        }
+    }
+}
+
+PRETTY_PARAM(AcrossSpatial, bool)
+PRETTY_PARAM(ChannelSharedNormalize, bool)
+PRETTY_PARAM(EPS, float)
+
+typedef myriadLayerTestBaseWithParam<std::tuple<Dims, AcrossSpatial, ChannelSharedNormalize, EPS>> myriadLayersTestsNormalize_nightly;
+
+TEST_P(myriadLayersTestsNormalize_nightly, Normalize) {
+    tensor_test_params dims = std::get<0>(GetParam());
+    int across_spatial = std::get<1>(GetParam());
+    int channel_shared = std::get<2>(GetParam());
+    float eps = std::get<3>(GetParam());
+
+    SetInputTensor(dims);
+    SetOutputTensor(dims);
+
+    std::map<std::string, std::string> layer_params = {
+        {"across_spatial",  std::to_string(across_spatial)},
+        {"channel_shared",  std::to_string(channel_shared)},
+        {"eps",             to_string_with_precision(eps, 10)}
+    };
+
+    size_t num_weights = 0;
+    if (channel_shared) {
+        num_weights = 1;
+    }
+    else {
+        num_weights = dims.c;
+    }
+    TBlob<uint8_t>::Ptr weights(GenWeights(num_weights));
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Normalize")
+                                        .params(layer_params)
+                                        .weights(weights->byteSize() / sizeof (uint16_t)),
+                                        {},
+                                        weights));
+
+    ASSERT_TRUE(Infer());
+
+    auto src = _inputMap.begin()->second;
+    auto dst = _outputMap.begin()->second;
+    auto weights_data = weights->data().as<ie_fp16*>();
+
+    refNormalize(src, _refBlob, weights_data, across_spatial, channel_shared, eps);
+
+    CompareCommonAbsolute(dst, _refBlob, ERROR_BOUND);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_oneHot_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_oneHot_test.cpp
new file mode 100644 (file)
index 0000000..3834b2b
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_oneHot_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayerTestOneHot_nightly,
+                        ::testing::Values<oneHot_test_params>(
+                                MAKE_STRUCT(OneHotParams, {64}, 2, {0}, {}, {}),
+                                MAKE_STRUCT(OneHotParams, {64}, 2, {-1}, {}, {}),
+                                MAKE_STRUCT(OneHotParams, {32, 64}, 2, {0}, {}, {}),
+                                MAKE_STRUCT(OneHotParams, {32, 64}, 2, {1}, {}, {}),
+                                MAKE_STRUCT(OneHotParams, {32, 64}, 2, {-1}, {}, {}),
+                                MAKE_STRUCT(OneHotParams, {16, 32, 64}, 2, {0}, {}, {}),
+                                MAKE_STRUCT(OneHotParams, {16, 32, 64}, 2, {1}, {}, {}),
+                                MAKE_STRUCT(OneHotParams, {16, 32, 64}, 2, {-1}, {}, {}),
+                                MAKE_STRUCT(OneHotParams, {8, 16, 32, 64}, 2, {0}, {}, {}),
+                                MAKE_STRUCT(OneHotParams, {8, 16, 32, 64}, 2, {1}, {}, {}),
+                                MAKE_STRUCT(OneHotParams, {8, 16, 32, 64}, 2, {-1}, {}, {}),
+                                MAKE_STRUCT(OneHotParams, {4, 8, 16, 32, 64}, 2, {0}, {}, {}),
+                                MAKE_STRUCT(OneHotParams, {4, 8, 16, 32, 64}, 2, {1}, {}, {}),
+                                MAKE_STRUCT(OneHotParams, {4, 8, 16, 32, 64}, 2, {-1}, {}, {})
+                        ));
+
+INSTANTIATE_TEST_CASE_P(accuracy_add, myriadLayerTestOneHot_nightly,
+                        ::testing::Values<oneHot_test_params>(
+                                MAKE_STRUCT(OneHotParams, {16, 32, 64}, 2, {2}, {}, {}),
+                                MAKE_STRUCT(OneHotParams, {8, 16, 32, 64}, 2, {2}, {}, {}),
+                                MAKE_STRUCT(OneHotParams, {8, 16, 32, 64}, 2, {3}, {}, {}),
+                                MAKE_STRUCT(OneHotParams, {4, 8, 16, 32, 64}, 2, {2}, {}, {}),
+                                MAKE_STRUCT(OneHotParams, {4, 8, 16, 32, 64}, 2, {3}, {}, {}),
+                                MAKE_STRUCT(OneHotParams, {4, 8, 16, 32, 64}, 2, {4}, {}, {})
+                        ));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_oneHot_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_oneHot_test.hpp
new file mode 100644 (file)
index 0000000..a12982f
--- /dev/null
@@ -0,0 +1,99 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include <algorithm>
+
+using namespace InferenceEngine;
+
+static const float ERROR_BOUND = 0.0f;
+
+struct OneHotParams {
+    SizeVector dims;
+    unsigned int depth;
+    std::vector<int> axis;
+    std::vector<float> on_value;
+    std::vector<float> off_value;
+};
+
+PRETTY_PARAM(oneHot_test_params, OneHotParams);
+
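+// Reference OneHot: inserts a new axis of size 'depth' (axis == -1 appends it
+// as the innermost dimension) and writes on_value where the input index
+// equals the one-hot position, off_value everywhere else.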
+void ref_oneHot(const InferenceEngine::Blob::Ptr src,
+                InferenceEngine::Blob::Ptr dst,
+                const unsigned int depth,
+                const int axis,
+                const float onValue,
+                const float offValue)
+{
+    const ie_fp16 onValueFp16 = PrecisionUtils::f32tof16(onValue);
+    const ie_fp16 offValueFp16 = PrecisionUtils::f32tof16(offValue);
+    const auto* srcPtr = src->buffer().as<const int32_t*>();
+    auto* dstPtr = dst->buffer().as<ie_fp16*>();
+    const TensorDesc srcDesc = src->getTensorDesc();
+
+    auto inputDims = srcDesc.getDims();
+    std::reverse(inputDims.begin(), inputDims.end());
+    const int actualAxis = (axis == -1) ? 0 : inputDims.size() - axis;
+
+    const int prefixSize = std::accumulate(inputDims.cbegin(), inputDims.cbegin() + actualAxis, 1, std::multiplies<int>());
+    const int suffixSize = src->size() / prefixSize;
+
+    size_t dstOffset = 0;
+    for (int suffixIdx = 0; suffixIdx < suffixSize; suffixIdx++) {
+        for (unsigned int depthIdx = 0; depthIdx < depth; depthIdx++) {
+            for (int prefixIdx = 0; prefixIdx < prefixSize; prefixIdx++) {
+                const int idx = suffixIdx * prefixSize + prefixIdx;
+                const size_t v = static_cast<size_t>(srcPtr[idx]);
+                dstPtr[dstOffset++] = (v == depthIdx) ? onValueFp16 : offValueFp16;
+            }
+        }
+    }
+}
+
+typedef myriadLayerTestBaseWithParam<oneHot_test_params> myriadLayerTestOneHot_nightly;
+
+TEST_P(myriadLayerTestOneHot_nightly, OneHot) {
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+    OneHotParams testParams = GetParam();
+
+    int axis = -1;
+    unsigned int depth = testParams.depth;
+    float onValue = 1.0f;
+    float offValue = 0.0f;
+    SizeVector inputDims = testParams.dims;
+
+    std::map<std::string, std::string> params;
+    params["depth"] = std::to_string(depth);
+    if (!testParams.axis.empty()) {
+        axis = testParams.axis[0];
+        params["axis"] = std::to_string(axis);
+    }
+    if (!testParams.on_value.empty()) {
+        onValue = testParams.on_value[0];
+        params["on_value"] = std::to_string(onValue);
+    }
+    if (!testParams.off_value.empty()) {
+        offValue = testParams.off_value[0];
+        params["off_value"] = std::to_string(offValue);
+    }
+
+    auto outputDims = inputDims;
+    int actualAxis = axis == -1 ? inputDims.size() : axis;
+    outputDims.insert(outputDims.begin() + actualAxis, depth);
+
+    SetInputTensors({inputDims});
+    SetOutputTensors({outputDims});
+    makeSingleLayerNetwork(
+        LayerInitParams("OneHot").params(params),
+        NetworkInitParams().inputPrecision(Precision::I32).lockLayout(true));
+
+    auto inputBlob = _inputMap.begin()->second;
+    auto outputBlob = _outputMap.begin()->second;
+
+    ASSERT_TRUE(Infer());
+
+    ref_oneHot(inputBlob, _refBlob, depth, axis, onValue, offValue);
+    CompareCommonAbsolute(outputBlob, _refBlob, ERROR_BOUND);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pad_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pad_test.cpp
new file mode 100644 (file)
index 0000000..fbbbf92
--- /dev/null
@@ -0,0 +1,16 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_pad_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayerPad,
+                        ::testing::Combine(
+                            ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 64, 16, 16)),
+                            ::testing::Values<pad_parameters>(MAKE_STRUCT(pad_parameters, 0, 32, 1, 2,  0, 32, 3, 4)),
+                            ::testing::Values<layoutPreference>(vpu::LayoutPreference::ChannelMajor,
+                                                                vpu::LayoutPreference::ChannelMinor),
+                            ::testing::Values<pad_mode>(std::string("constant"), std::string("edge"), std::string("reflect"), std::string("symmetric")),
+                            ::testing::Values<IRVersion>(IRVersion::v7, IRVersion::v10)
+                        )
+                       );
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pad_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pad_test.hpp
new file mode 100644 (file)
index 0000000..82a375c
--- /dev/null
@@ -0,0 +1,170 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include <algorithm>
+
+using std::tuple;
+using std::get;
+
+using namespace InferenceEngine;
+
+struct pad_parameters {
+    size_t padb_begin;
+    size_t padc_begin;
+    size_t padh_begin;
+    size_t padw_begin;
+
+    size_t padb_end;
+    size_t padc_end;
+    size_t padh_end;
+    size_t padw_end;
+
+    friend std::ostream& operator<<(std::ostream& os, pad_parameters const& tst)
+    {
+        return os << " pads_begin=" << tst.padb_begin << ", " << tst.padc_begin << ", " << tst.padh_begin << ", " << tst.padw_begin << "; "
+                  << " pads_end=" << tst.padb_end << ", " << tst.padc_end << ", " << tst.padh_end << ", " << tst.padw_end;
+    }
+};
+
+PRETTY_PARAM(layoutPreference, vpu::LayoutPreference);
+PRETTY_PARAM(pad_mode, std::string);
+
+typedef myriadLayerTestBaseWithParam<std::tuple<DimsInput, pad_parameters, layoutPreference, pad_mode, IRVersion>> myriadLayerPad;
+
+const float pad_value = 42.0f;
+
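+// Reference Pad for interleaved (HWC) data: 'constant' fills the margin with
+// pad_value, 'edge' clamps to the nearest border element, 'reflect' and
+// 'symmetric' mirror the source ('symmetric' repeats the border element,
+// 'reflect' does not).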
+void ref_pad(const Blob::Ptr src,
+             Blob::Ptr dst,
+             pad_parameters pad_params,
+             const std::string mode) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+
+    ie_fp16 *src_data = static_cast<ie_fp16*>(src->buffer());
+    ie_fp16 *dst_data = static_cast<ie_fp16*>(dst->buffer());
+    ASSERT_NE(src_data, nullptr);
+    ASSERT_NE(dst_data, nullptr);
+
+    int32_t padb_begin = pad_params.padb_begin;
+    int32_t padb_end = pad_params.padb_end;
+
+    int32_t padc_begin = pad_params.padc_begin;
+    int32_t padc_end = pad_params.padc_end;
+
+    int32_t padh_begin = pad_params.padh_begin;
+    int32_t padh_end = pad_params.padh_end;
+
+    int32_t padw_begin = pad_params.padw_begin;
+    int32_t padw_end = pad_params.padw_end;
+
+    int32_t IW = 0;
+    int32_t IH = 0;
+    int32_t IC = 0;
+    int32_t OW = 0;
+    int32_t OH = 0;
+    int32_t OC = 0;
+
+    get_dims(src, IW, IH, IC);
+    get_dims(dst, OW, OH, OC);
+
+    for (int32_t oc = 0; oc < OC; oc++) {
+        for (int32_t oh = 0; oh < OH; oh++) {
+            for (int32_t ow = 0; ow < OW; ow++) {
+                int32_t ic = oc - padc_begin;
+                int32_t iw = ow - padw_begin;
+                int32_t ih = oh - padh_begin;
+
+                float v = 0.0f;
+                if (ic >= 0 && ic < IC && iw >= 0 && iw < IW && ih >= 0 && ih < IH)
+                {
+                    int32_t iidx = ic + iw * IC + ih * IC * IW;
+                    ASSERT_LT(iidx, src->size());
+                    v = PrecisionUtils::f16tof32(src_data[iidx]);
+                }
+                else
+                {
+                    if (mode == std::string("constant"))
+                    {
+                        v = pad_value;
+                    }
+                    else if (mode == std::string("edge"))
+                    {
+                        iw = std::min(std::max(iw, 0), IW - 1);
+                        ih = std::min(std::max(ih, 0), IH - 1);
+                        ic = std::min(std::max(ic, 0), IC - 1);
+
+                        int32_t iidx = ic + iw * IC + ih * IC * IW;
+                        ASSERT_LT(iidx, src->size());
+                        v = PrecisionUtils::f16tof32(src_data[iidx]);
+                    }
+                    else if (mode == std::string("reflect") || mode == std::string("symmetric"))
+                    {
+                        int mode_offset = (mode == std::string("symmetric")) ? 1 : 0;
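+                        // e.g. with IW == 5: iw == 6 maps to 2 ("reflect") or 3 ("symmetric");
+                        // iw == -1 maps to 1 ("reflect") or 0 ("symmetric")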
+
+                        if (iw > IW - 1) iw = IW-1 - (iw - (IW-1)) + mode_offset;
+                        if (iw < 0) iw = -iw - mode_offset;
+
+                        if (ih > IH - 1) ih = IH-1 - (ih - (IH-1)) + mode_offset;
+                        if (ih < 0) ih = -ih - mode_offset;
+
+                        if (ic > IC - 1) ic = IC-1 - (ic - (IC-1)) + mode_offset;
+                        if (ic < 0) ic = -ic - mode_offset;
+
+                        int32_t iidx = ic + iw * IC + ih * IC * IW;
+                        ASSERT_LT(iidx, src->size());
+                        v = PrecisionUtils::f16tof32(src_data[iidx]);
+                    }
+                }
+
+                int32_t oidx = oc + ow * OC + oh * OC * OW;
+                ASSERT_LT(oidx, dst->size());
+                dst_data[oidx] = PrecisionUtils::f32tof16(v);
+            }
+        }
+    }
+}
+
+TEST_P(myriadLayerPad, Pad) {
+    tensor_test_params input_dims = get<0>(GetParam());
+    pad_parameters pad_parameter = get<1>(GetParam());
+    auto layoutPreference = get<2>(GetParam());
+    std::string pad_mode = get<3>(GetParam());
+    _irVersion           = get<4>(GetParam());
+
+    int padb_begin = pad_parameter.padb_begin;
+    int padb_end = pad_parameter.padb_end;
+
+    int padc_begin = pad_parameter.padc_begin;
+    int padc_end = pad_parameter.padc_end;
+
+    int padh_begin = pad_parameter.padh_begin;
+    int padh_end = pad_parameter.padh_end;
+
+    int padw_begin = pad_parameter.padw_begin;
+    int padw_end = pad_parameter.padw_end;
+
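+    // Batch stays 1: these cases pad only C/H/W, so padb_begin/padb_end are zero.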
+    tensor_test_params output_dims = {1,
+                                      input_dims.c + padc_begin + padc_end,
+                                      input_dims.h + padh_begin + padh_end,
+                                      input_dims.w + padw_begin + padw_end};
+
+    SetInputTensor(input_dims);
+    SetOutputTensor(output_dims);
+
+    std::map<std::string, std::string> params;
+    params["pads_begin"] = std::to_string(padb_begin)+","+std::to_string(padc_begin)+","+std::to_string(padh_begin)+","+std::to_string(padw_begin);
+    params["pads_end"] = std::to_string(padb_end)+","+std::to_string(padc_end)+","+std::to_string(padh_end)+","+std::to_string(padw_end);
+    params["pad_mode"] = pad_mode;
+    if (pad_mode == std::string("constant"))
+        params["pad_value"] = std::to_string(pad_value);
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Pad").params(params), NetworkInitParams().layoutPreference(layoutPreference)));
+    SetFirstInputToRange(1.0f, 100.0f);
+
+    ASSERT_TRUE(Infer());
+    auto inputBlob = _inputMap.begin()->second;
+    auto outputBlob = _outputMap.begin()->second;
+
+    ref_pad(inputBlob, _refBlob, pad_parameter, pad_mode);
+
+    CompareCommonAbsolute(outputBlob, _refBlob, 0);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_permute_nd_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_permute_nd_test.cpp
new file mode 100644 (file)
index 0000000..8d88946
--- /dev/null
@@ -0,0 +1,70 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_permute_nd_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy_2D, myriadLayersPermuteNDTests_nightly,
+        ::testing::Combine(
+            ::testing::ValuesIn(s_inTensors_2D)
+          , ::testing::ValuesIn(s_permuteTensors_2D)
+          , ::testing::Values<IRVersion>(IRVersion::v7, IRVersion::v10)
+          , ::testing::ValuesIn(s_permutePrecisions)
+));
+
+INSTANTIATE_TEST_CASE_P(accuracy_3D, myriadLayersPermuteNDTests_nightly,
+        ::testing::Combine(
+            ::testing::ValuesIn(s_inTensors_3D)
+          , ::testing::ValuesIn(s_permuteTensors_3D)
+          , ::testing::Values<IRVersion>(IRVersion::v7, IRVersion::v10)
+          , ::testing::ValuesIn(s_permutePrecisions)
+));
+
+INSTANTIATE_TEST_CASE_P(accuracy_4D, myriadLayersPermuteNDTests_nightly,
+        ::testing::Combine(
+            ::testing::ValuesIn(s_inTensors_4D)
+          , ::testing::ValuesIn(s_permuteTensors_4D)
+          , ::testing::Values<IRVersion>(IRVersion::v7, IRVersion::v10)
+          , ::testing::ValuesIn(s_permutePrecisions)
+));
+
+INSTANTIATE_TEST_CASE_P(accuracy_5D, myriadLayersPermuteNDTests_nightly,
+        ::testing::Combine(
+            ::testing::ValuesIn(s_inTensors_5D)
+          , ::testing::ValuesIn(s_permuteTensors_5D)
+          , ::testing::Values<IRVersion>(IRVersion::v7, IRVersion::v10)
+          , ::testing::ValuesIn(s_permutePrecisions)
+));
+
+INSTANTIATE_TEST_CASE_P(fc_to_conv_case, myriadLayersPermuteNDTests_nightly,
+    ::testing::Values(
+        std::make_tuple(
+            SizeVector{8, 50, 256, 7, 7},
+            SizeVector{2, 0, 3, 1, 4},
+            IRVersion::v7,
+            InferenceEngine::Precision::FP16
+        ),
+        std::make_tuple(
+            SizeVector{1024, 8, 1, 50, 1},
+            SizeVector{1, 3, 0, 2, 4},
+            IRVersion::v7,
+            InferenceEngine::Precision::FP16
+        )
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_FasterRCNN, myriadLayersPermuteNDTests_nightly,
+        ::testing::Combine(
+             ::testing::Values<InferenceEngine::SizeVector>({1, 24, 14, 14})
+            ,::testing::Values<InferenceEngine::SizeVector>({0, 2, 3, 1})
+            ,::testing::Values<IRVersion>(IRVersion::v7)
+            ,::testing::ValuesIn(s_permutePrecisions)
+            ));
+
+INSTANTIATE_TEST_CASE_P(accuracy_MaskRCNN, myriadLayersPermuteNDTests_nightly,
+        ::testing::Combine(
+             ::testing::Values<InferenceEngine::SizeVector>({4, 3, 1, 88, 120})
+            ,::testing::Values<InferenceEngine::SizeVector>({0, 3, 4, 1, 2})
+            ,::testing::Values<IRVersion>(IRVersion::v7)
+            ,::testing::ValuesIn(s_permutePrecisions)
+            ));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_permute_nd_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_permute_nd_test.hpp
new file mode 100644 (file)
index 0000000..aac7cc2
--- /dev/null
@@ -0,0 +1,188 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include "vpu/model/data_desc.hpp"
+
+#include <functional>
+#include <iostream>
+#include <numeric>
+
+#define ERROR_BOUND 0
+
+using namespace InferenceEngine;
+
+namespace {
+
+void calculateRefBlob(const Blob::Ptr& src, Blob::Ptr dst, const SizeVector& permutationVector) {
+    const auto precision = src->getTensorDesc().getPrecision();
+    switch (precision)
+    {
+        case InferenceEngine::Precision::I32:
+            ref_Permute<int>(src, dst, permutationVector);
+            break;
+        case InferenceEngine::Precision::FP16:
+            ref_Permute<ie_fp16>(src, dst, permutationVector);
+            break;
+        default: THROW_IE_EXCEPTION << "Unsupported precision";
+    }
+}
+
+template <typename T>
+void genRefData(Blob::Ptr blob) {
+    ASSERT_NE(blob, nullptr);
+
+    const auto  dataPtr    = blob->buffer().as<T*>();
+    const auto& tensorDims = blob->getTensorDesc().getDims();
+    const auto  precision  = blob->getTensorDesc().getPrecision();
+
+    std::function<T(float)> calculate;
+    if (precision == InferenceEngine::Precision::I32) {
+        calculate = [](float counter) { return counter * 4; };
+    } else if (precision == InferenceEngine::Precision::FP16) {
+        calculate = [](float counter) { return PrecisionUtils::f32tof16(counter); };
+    } else {
+        calculate = [](float counter) { return counter; };
+    }
+
+
+    float counter = 0;
+    const auto data_size = std::accumulate(tensorDims.begin(), tensorDims.end(), size_t{1}, std::multiplies<size_t>{});
+    for (size_t i = 0; i < data_size; ++i) {
+        dataPtr[i] = calculate(counter);
+        counter += 0.25f;
+    }
+}
+
+}  // namespace
+
+using PermuteNDParams = std::tuple<InferenceEngine::SizeVector,  // input tensor sizes
+                                   InferenceEngine::SizeVector,  // permutation vector
+                                   IRVersion,
+                                   InferenceEngine::Precision>;
+
+class myriadLayersPermuteNDTests_nightly:
+    public myriadLayersTests_nightly,
+    public testing::WithParamInterface<PermuteNDParams> {
+};
+
+
+TEST_P(myriadLayersPermuteNDTests_nightly, Permute) {
+    const auto& testParams = GetParam();
+    const auto& inputTensorSizes   = std::get<0>(testParams);
+    const auto& permutationVector  = std::get<1>(testParams);
+    _irVersion                     = std::get<2>(testParams);
+    const auto  precision          = std::get<3>(testParams);
+
+    const auto numDims = inputTensorSizes.size();
+    SizeVector outputTensorSizes(numDims);
+    for (size_t i = 0; i < numDims; i++) {
+        outputTensorSizes[i] = inputTensorSizes[permutationVector[i]];
+    }
+
+    const auto order = std::accumulate(std::next(permutationVector.begin()), permutationVector.end(),
+                                       std::to_string(permutationVector.front()),
+                                       [](std::string& res, size_t s) {
+                                           return res + "," + std::to_string(s);
+                                       });
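+    // e.g. permutation {0, 2, 3, 1} produces the IR attribute value "0,2,3,1"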
+
+    const std::map<std::string, std::string> layerParams{{"order", order}};
+
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+    switch (precision)
+    {
+        case InferenceEngine::Precision::I32:
+              _genDataCallback = genRefData<int>;
+              break;
+        case InferenceEngine::Precision::FP16:
+              _genDataCallback = genRefData<ie_fp16>;
+              break;
+        default:
+              VPU_THROW_EXCEPTION << "Unsupported precision";
+    }
+
+    _testNet.addLayer(LayerInitParams(_irVersion == IRVersion::v10 ? "Transpose" : "Permute")
+             .params(layerParams)
+             .in({inputTensorSizes})
+             .out({outputTensorSizes}));
+
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()
+                                    .useHWOpt(CheckMyriadX())
+                                    .runRefGraph(false)
+                                    .inputPrecision(precision)
+                                    .outputPrecision(precision)));
+
+    const auto& inputBlob = _inputMap.begin()->second;
+    const auto& outputBlob = _outputMap.begin()->second;
+
+    calculateRefBlob(inputBlob, _refBlob, permutationVector);
+    CompareCommonAbsolute(outputBlob, _refBlob, ERROR_BOUND);
+}
+
+static const std::vector<InferenceEngine::SizeVector> s_inTensors_2D = {
+    {17, 19},
+    { 7,  8},
+    {12,  2}
+};
+
+static const std::vector<InferenceEngine::SizeVector> s_permuteTensors_2D = {
+    {0, 1},
+    {1, 0},
+};
+
+static const std::vector<InferenceEngine::SizeVector> s_inTensors_3D = {
+    {36, 17, 19},
+    { 2,  7,  8},
+    {196, 12, 2}
+};
+static const std::vector<InferenceEngine::SizeVector> s_permuteTensors_3D = {
+    {0, 1, 2},
+    {0, 2, 1},
+    {2, 1, 0},
+    {1, 0, 2}
+};
+
+static const std::vector<InferenceEngine::SizeVector> s_inTensors_4D = {
+    {1, 36, 17, 19},
+    {1,  2,  7,  8},
+    {1, 196, 12, 2}
+};
+static const std::vector<InferenceEngine::SizeVector> s_permuteTensors_4D = {
+    {0, 1, 2, 3},
+    {0, 1, 3, 2},
+    {0, 2, 1, 3},
+    {0, 2, 3, 1},
+    {0, 3, 1, 2},
+    {0, 3, 2, 1}
+};
+
+static const std::vector<InferenceEngine::SizeVector> s_inTensors_5D = {
+    {1, 36, 17, 19, 23},
+    {1,  2,  7,  8,  9},
+    {1, 196, 12, 2,  5}
+};
+
+static const std::vector<InferenceEngine::SizeVector> s_permuteTensors_5D = {
+    {0, 1, 2, 3, 4},
+    {0, 2, 1, 3, 4},
+    {0, 3, 4, 1, 2},
+    {0, 2, 3, 4, 1},
+    {0, 1, 3, 2, 4},
+    {0, 1, 3, 4, 2},
+    {0, 3, 1, 2, 4},
+    {0, 1, 2, 4, 3},
+    {0, 4, 1, 2, 3},
+    {0, 1, 4, 2, 3}
+};
+
+static const std::vector<InferenceEngine::Precision> s_permutePrecisions = {
+        InferenceEngine::Precision::I32,
+        InferenceEngine::Precision::FP16,
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_permute_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_permute_test.cpp
new file mode 100644 (file)
index 0000000..3638bd2
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_permute_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersPermuteTests_nightly,
+        ::testing::Combine(
+            ::testing::ValuesIn(s_inTensors)
+          , ::testing::ValuesIn(s_permuteTensors)
+));
+
+INSTANTIATE_TEST_CASE_P(accuracyFasterRCNN, myriadLayersPermuteTests_nightly,
+        ::testing::Combine(
+             ::testing::Values<InferenceEngine::SizeVector>({1, 24, 14, 14})
+            ,::testing::Values<InferenceEngine::SizeVector>({0, 2, 3, 1})
+            ));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_permute_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_permute_test.hpp
new file mode 100644 (file)
index 0000000..1711086
--- /dev/null
@@ -0,0 +1,116 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+
+using namespace InferenceEngine;
+
+struct offset_test_params {
+    size_t order0;
+    size_t order1;
+    size_t order2;
+    size_t order3;
+};
+
+PRETTY_PARAM(Offsets, offset_test_params);
+
+// Print the offset test param as decimal integers rather than hexadecimal
+static inline void PrintTo(const offset_test_params& param, ::std::ostream* os)
+{
+    *os << "{ " << param.order0 << ", " << param.order1 << ", " << param.order2 << ", " << param.order3 << "}";
+}
+typedef std::tuple<InferenceEngine::SizeVector, InferenceEngine::SizeVector> PermuteParams;
+
+// Parameters: input tensor, permutation order
+class myriadLayersPermuteTests_nightly: public myriadLayersTests_nightly,
+                                        public testing::WithParamInterface<PermuteParams> {
+};
+
+static void genRefData(InferenceEngine::Blob::Ptr blob) {
+    ASSERT_NE(blob, nullptr);
+    Layout layout = blob->getTensorDesc().getLayout();
+    SizeVector dims = blob->getTensorDesc().getDims();
+
+    ie_fp16* ptr = blob->buffer().as<ie_fp16*>();
+    if (layout == NCHW || layout == NHWC) {
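+        // Row-major offsets: an NCHW element (n, c, h, w) lives at ((n*C + c)*H + h)*W + w,
+        // an NHWC element at ((n*H + h)*W + w)*C + c; both expand to the sums used below.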
+        size_t N = dims[0];
+        size_t C = dims[1];
+        size_t H = dims[2];
+        size_t W = dims[3];
+        float counter = 0.f;
+        for (size_t n = 0; n < N; n++) {
+            for (size_t c = 0; c < C; c++) {
+                for (size_t h = 0; h < H; h++) {
+                    for (size_t w = 0; w < W; w++) {
+                        size_t actualIdx = layout == NCHW ?
+                                           w + h * W + c * W * H + n * W * H * C : c + w * C + h * C * W +
+                                                                                   n * W * H * C;
+                        ptr[actualIdx] = PrecisionUtils::f32tof16(counter);
+                        counter += 0.25f;
+                    }
+                }
+            }
+        }
+    } else {
+        FAIL() << "Unsupported layout";
+    }
+}
+
+TEST_P(myriadLayersPermuteTests_nightly, Permute) {
+    std::map<std::string, std::string> params;
+    InferenceEngine::SizeVector output_tensor;
+    int32_t IW = 0;
+    int32_t IH = 0;
+    int32_t IC = 0;
+    int32_t I_N = 0;
+
+    auto p = ::testing::WithParamInterface<PermuteParams>::GetParam();
+    auto input_tensor = std::get<0>(p);
+    auto order =        std::get<1>(p);
+    get_dims(input_tensor, IW, IH, IC, I_N);
+    if (I_N > 1)
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+    else
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(YES);
+    if (!input_tensor.empty()) {
+        gen_dims(output_tensor, input_tensor.size(), input_tensor[order[3]],
+                                                     input_tensor[order[2]],
+                                                     input_tensor[order[1]],
+                                                     input_tensor[order[0]]);
+    }
+    std::string orderStr;
+    for (size_t i = 0; i + 1 < order.size(); ++i) {
+        orderStr += std::to_string(order[i]);
+        orderStr += ",";
+    }
+    if (!order.empty()) {
+        orderStr += std::to_string(order.back());
+    }
+    std::map<std::string, std::string> layer_params = {
+              {"order", orderStr}
+    };
+    _genDataCallback = genRefData;
+    _testNet.addLayer(LayerInitParams("Permute")
+             .params(layer_params)
+             .in({input_tensor})
+             .out({output_tensor}),
+             ref_permute_wrap);
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams().useHWOpt( CheckMyriadX() )));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), 0.0f);
+}
+
+static const std::vector<InferenceEngine::SizeVector> s_inTensors = {
+    {1, 36, 19, 19},
+    {1, 2, 7, 8},
+    {1, 196, 12, 2}
+};
+
+static const std::vector<InferenceEngine::SizeVector> s_permuteTensors = {
+    {0, 1, 2, 3},
+    {0, 1, 3, 2},
+    {0, 2, 1, 3},
+    {0, 2, 3, 1},
+    {0, 3, 1, 2},
+    {0, 3, 2, 1}
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pool_nd_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pool_nd_test.cpp
new file mode 100644 (file)
index 0000000..a004fb7
--- /dev/null
@@ -0,0 +1,405 @@
+// Copyright (C) 2019-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_pool_nd_test.hpp"
+
+using namespace testing;
+
+//======================================================================
+//
+// 3D, tricky input size, kernel shape, strides, pads
+//
+//======================================================================
+
+INSTANTIATE_TEST_CASE_P(tricky_ncdhw_avg_userpad,
+                        myriadLayersPoolNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 19, 65, 47}),
+        Values(KernelShape {1, 3, 5}),
+        Values(Strides {1, 2, 3}),
+        Values(PadsBegin {0, 1, 1}),
+        Values(PadsEnd {0, 1, 3}),
+        Values(AutoPad("")),
+        Values(PoolingMethod("avg")),
+        Values(RoundingType("")),
+        Values(ExcludePad(false),
+               ExcludePad(true))
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(tricky_ncdhw_max_userpad,
+                        myriadLayersPoolNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 19, 65, 47}),
+        Values(KernelShape {1, 3, 5}),
+        Values(Strides {1, 2, 3}),
+        Values(PadsBegin {0, 1, 1}),
+        Values(PadsEnd {0, 1, 3}),
+        Values(AutoPad("")),
+        Values(PoolingMethod("max")),
+        Values(RoundingType("")),
+        Values(ExcludePad(false))
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(tricky_ncdhw_avg_autopad,
+                        myriadLayersPoolNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 19, 65, 47}),
+        Values(KernelShape {1, 3, 5}),
+        Values(Strides {1, 2, 3}),
+        Values(PadsBegin {}),
+        Values(PadsEnd {}),
+        Values(AutoPad("valid"),
+               AutoPad("same_lower"),
+               AutoPad("same_upper")),
+        Values(PoolingMethod("avg")),
+        Values(RoundingType("")),
+        Values(ExcludePad(false),
+               ExcludePad(true))
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(tricky_ncdhw_max_autopad,
+                        myriadLayersPoolNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 19, 65, 47}),
+        Values(KernelShape {1, 3, 5}),
+        Values(Strides {1, 2, 3}),
+        Values(PadsBegin {}),
+        Values(PadsEnd {}),
+        Values(AutoPad("valid"),
+               AutoPad("same_lower"),
+               AutoPad("same_upper")),
+        Values(PoolingMethod("max")),
+        Values(RoundingType("")),
+        Values(ExcludePad(false))
+    )
+);
+
+//======================================================================
+//
+// 3D, simple input size, kernel shape, strides, pads
+//
+//======================================================================
+
+INSTANTIATE_TEST_CASE_P(simple_ncdhw_avg_userpad,
+                        myriadLayersPoolNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 20, 64, 48}),
+        Values(KernelShape {3, 3, 3}),
+        Values(Strides {2, 2, 2}),
+        Values(PadsBegin {1, 1, 1}),
+        Values(PadsEnd {1, 1, 1}),
+        Values(AutoPad("")),
+        Values(PoolingMethod("avg")),
+        Values(RoundingType("")),
+        Values(ExcludePad(false),
+               ExcludePad(true))
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(simple_ncdhw_max_userpad,
+                        myriadLayersPoolNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 20, 64, 48}),
+        Values(KernelShape {3, 3, 3}),
+        Values(Strides {2, 2, 2}),
+        Values(PadsBegin {1, 1, 1}),
+        Values(PadsEnd {1, 1, 1}),
+        Values(AutoPad("")),
+        Values(PoolingMethod("max")),
+        Values(RoundingType("")),
+        Values(ExcludePad(false))
+    )
+);
+
+//----------------------------------------------------------------------
+//
+// HACK: Exclude the "same_upper" with excludePad=false case,
+//       as 2D pooling for Myriad seems to always exclude pad
+//
+// CVS-25902: does software 2D avg pooling for Myriad support exclude-pad?
+// CVS-15146: HW AvgPool doesn't support the excludePad parameter
+//----------------------------------------------------------------------
+
+INSTANTIATE_TEST_CASE_P(simple_ncdhw_avg_autopad_1,
+                        myriadLayersPoolNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 20, 64, 48}),
+        Values(KernelShape {3, 3, 3}),
+        Values(Strides {2, 2, 2}),
+        Values(PadsBegin {}),
+        Values(PadsEnd {}),
+        Values(AutoPad("valid"),
+               AutoPad("same_lower")),
+        Values(PoolingMethod("avg")),
+        Values(RoundingType("")),
+        Values(ExcludePad(false),
+               ExcludePad(true))
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(simple_ncdhw_avg_autopad_2,
+                        myriadLayersPoolNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 20, 64, 48}),
+        Values(KernelShape {3, 3, 3}),
+        Values(Strides {2, 2, 2}),
+        Values(PadsBegin {}),
+        Values(PadsEnd {}),
+        Values(AutoPad("same_upper")),
+        Values(PoolingMethod("avg")),
+        Values(RoundingType("")),
+        Values(ExcludePad(true))
+    )
+);
+
+//----------------------------------------------------------------------
+
+INSTANTIATE_TEST_CASE_P(simple_ncdhw_max_autopad,
+                        myriadLayersPoolNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 20, 64, 48}),
+        Values(KernelShape {3, 3, 3}),
+        Values(Strides {2, 2, 2}),
+        Values(PadsBegin {}),
+        Values(PadsEnd {}),
+        Values(AutoPad("valid"),
+               AutoPad("same_lower"),
+               AutoPad("same_upper")),
+        Values(PoolingMethod("max")),
+        Values(RoundingType("")),
+        Values(ExcludePad(false))
+    )
+);
+
+//======================================================================
+//
+// 2D, tricky input size, kernel shape, strides, pads
+//
+//======================================================================
+
+INSTANTIATE_TEST_CASE_P(tricky_nchw_avg_userpad,
+                        myriadLayersPoolNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 65, 47}),
+        Values(KernelShape {1, 5}),
+        Values(Strides {1, 3}),
+        Values(PadsBegin {0, 1}),
+        Values(PadsEnd {0, 3}),
+        Values(AutoPad("")),
+        Values(PoolingMethod("avg")),
+        Values(RoundingType("")),
+        Values(ExcludePad(false),
+               ExcludePad(true))
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(tricky_nchw_max_userpad,
+                        myriadLayersPoolNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 65, 47}),
+        Values(KernelShape {1, 5}),
+        Values(Strides {1, 3}),
+        Values(PadsBegin {0, 1}),
+        Values(PadsEnd {0, 3}),
+        Values(AutoPad("")),
+        Values(PoolingMethod("max")),
+        Values(RoundingType("")),
+        Values(ExcludePad(false))
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(tricky_nchw_avg_autopad,
+                        myriadLayersPoolNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 65, 47}),
+        Values(KernelShape {1, 5}),
+        Values(Strides {1, 3}),
+        Values(PadsBegin {}),
+        Values(PadsEnd {}),
+        Values(AutoPad("valid"),
+               AutoPad("same_lower"),
+               AutoPad("same_upper")),
+        Values(PoolingMethod("avg")),
+        Values(RoundingType("")),
+        Values(ExcludePad(false),
+               ExcludePad(true))
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(tricky_nchw_max_autopad,
+                        myriadLayersPoolNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 65, 47}),
+        Values(KernelShape {1, 5}),
+        Values(Strides {1, 3}),
+        Values(PadsBegin {}),
+        Values(PadsEnd {}),
+        Values(AutoPad("valid"),
+               AutoPad("same_lower"),
+               AutoPad("same_upper")),
+        Values(PoolingMethod("max")),
+        Values(RoundingType("")),
+        Values(ExcludePad(false))
+    )
+);
+
+//======================================================================
+//
+// 2D, simple input size, kernel shape, strides, pads
+//
+//======================================================================
+
+INSTANTIATE_TEST_CASE_P(simple_nchw_avg_userpad,
+                        myriadLayersPoolNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 64, 48}),
+        Values(KernelShape {3, 3}),
+        Values(Strides {2, 2}),
+        Values(PadsBegin {1, 1}),
+        Values(PadsEnd {1, 1}),
+        Values(AutoPad("")),
+        Values(PoolingMethod("avg")),
+        Values(RoundingType("")),
+        Values(ExcludePad(false),
+               ExcludePad(true))
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(simple_nchw_max_userpad,
+                        myriadLayersPoolNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 64, 48}),
+        Values(KernelShape {3, 3}),
+        Values(Strides {2, 2}),
+        Values(PadsBegin {1, 1}),
+        Values(PadsEnd {1, 1}),
+        Values(AutoPad("")),
+        Values(PoolingMethod("max")),
+        Values(RoundingType("")),
+        Values(ExcludePad(false))
+    )
+);
+
+//----------------------------------------------------------------------
+//
+// HACK: Exclude the "same_upper" with excludePad=false case,
+//       as 2D pooling for Myriad seems to always exclude pad
+//
+// CVS-25902: does software 2D avg pooling for Myriad support exclude-pad?
+// CVS-15146: HW AvgPool doesn't support the excludePad parameter
+//----------------------------------------------------------------------
+
+INSTANTIATE_TEST_CASE_P(simple_nchw_avg_autopad_1,
+                        myriadLayersPoolNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 64, 48}),
+        Values(KernelShape {3, 3}),
+        Values(Strides {2, 2}),
+        Values(PadsBegin {}),
+        Values(PadsEnd {}),
+        Values(AutoPad("valid"),
+               AutoPad("same_lower")),
+        Values(PoolingMethod("avg")),
+        Values(RoundingType("")),
+        Values(ExcludePad(false),
+               ExcludePad(true))
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(simple_nchw_avg_autopad_2,
+                        myriadLayersPoolNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 64, 48}),
+        Values(KernelShape {3, 3}),
+        Values(Strides {2, 2}),
+        Values(PadsBegin {}),
+        Values(PadsEnd {}),
+        Values(AutoPad("same_upper")),
+        Values(PoolingMethod("avg")),
+        Values(RoundingType("")),
+        Values(ExcludePad(true))
+    )
+);
+
+//----------------------------------------------------------------------
+
+INSTANTIATE_TEST_CASE_P(simple_nchw_max_autopad,
+                        myriadLayersPoolNDTest_nightly,
+    Combine(
+        Values(InputShape {1, 3, 64, 48}),
+        Values(KernelShape {3, 3}),
+        Values(Strides {2, 2}),
+        Values(PadsBegin {}),
+        Values(PadsEnd {}),
+        Values(AutoPad("valid"),
+               AutoPad("same_lower"),
+               AutoPad("same_upper")),
+        Values(PoolingMethod("max")),
+        Values(RoundingType("")),
+        Values(ExcludePad(false))
+    )
+);
+
+//======================================================================
+//
+//  Test cases from the I3D network
+//
+//======================================================================
+
+INSTANTIATE_TEST_CASE_P(i3d_id10,
+                        myriadLayersPoolNDTest_nightly,
+                        Combine(
+                                Values(InputShape {1, 64, 40, 112, 112}),
+                                Values(KernelShape {1, 3, 3}),
+                                Values(Strides {1, 2, 2}),
+                                Values(PadsBegin {}),
+                                Values(PadsEnd {}),
+                                Values(AutoPad("same_upper")),
+                                Values(PoolingMethod("max")),
+                                Values(RoundingType("")),
+                                Values(ExcludePad(true))));
+
+INSTANTIATE_TEST_CASE_P(i3d_id47,
+                        myriadLayersPoolNDTest_nightly,
+                        Combine(
+                                Values(InputShape {1, 192, 40, 28, 28}),
+                                Values(KernelShape {3, 3, 3}),
+                                Values(Strides {1, 1, 1}),
+                                Values(PadsBegin {}),
+                                Values(PadsEnd {}),
+                                Values(AutoPad("same_upper")),
+                                Values(PoolingMethod("max")),
+                                Values(RoundingType("")),
+                                Values(ExcludePad(true))));
+
+INSTANTIATE_TEST_CASE_P(i3d_id247,
+                        myriadLayersPoolNDTest_nightly,
+                        Combine(
+                                Values(InputShape {1, 832, 20, 14, 14}),
+                                Values(KernelShape {2, 2, 2}),
+                                Values(Strides {2, 2, 2}),
+                                Values(PadsBegin {}),
+                                Values(PadsEnd {}),
+                                Values(AutoPad("same_upper")),
+                                Values(PoolingMethod("max")),
+                                Values(RoundingType("")),
+                                Values(ExcludePad(true))));
+
+INSTANTIATE_TEST_CASE_P(i3d_id312,
+                        myriadLayersPoolNDTest_nightly,
+                        Combine(
+                                Values(InputShape {1, 1024, 10, 7, 7}),
+                                Values(KernelShape {2, 7, 7}),
+                                Values(Strides {1, 1, 1}),
+                                Values(PadsBegin {}),
+                                Values(PadsEnd {}),
+                                Values(AutoPad("valid")),
+                                Values(PoolingMethod("avg")),
+                                Values(RoundingType("")),
+                                Values(ExcludePad(true))));
+
+//======================================================================
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pool_nd_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pool_nd_test.hpp
new file mode 100644 (file)
index 0000000..b104393
--- /dev/null
@@ -0,0 +1,782 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "myriad_layers_tests.hpp"
+#include "vpu_tests_config.hpp"
+#include "vpu_case_common.hpp"
+#include "precision_utils.h"
+
+#include <memory>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include <cmath>
+#include <cstdlib>
+#include <cstring>
+#include <limits>
+
+#define DEBUG 0
+
+using namespace InferenceEngine;
+
+using InputShape  = std::vector<int>;
+using KernelShape = std::vector<int>;
+using Strides     = std::vector<int>;
+using PadsBegin   = std::vector<int>;
+using PadsEnd     = std::vector<int>;
+using AutoPad       = std::string;
+using PoolingMethod = std::string;
+using RoundingType  = std::string;
+using ExcludePad    = bool;
+
+using PoolNDTestParams =
+    std::tuple<
+        InputShape,
+        KernelShape,
+        Strides,
+        PadsBegin,
+        PadsEnd,
+        AutoPad,
+        PoolingMethod,
+        RoundingType,
+        ExcludePad>;
+
+class PoolNDTest: public myriadLayerTestBaseWithParam<PoolNDTestParams>
+{
+protected:
+
+    void testPoolND() {
+        SKIP_IF_CURRENT_TEST_IS_DISABLED();
+        DISABLE_IF(!CheckMyriadX() && !CheckMA2085());
+
+        //
+        // Get test parameters
+        //
+
+        const auto& params = GetParam();
+
+        const std::vector<int>  inputShape = std::get<0>(params);
+        const std::vector<int> kernelShape = std::get<1>(params);
+        const std::vector<int>     strides = std::get<2>(params);
+        const std::vector<int>   padsBegin = std::get<3>(params);
+        const std::vector<int>   padsEnd   = std::get<4>(params);
+        const std::string          autoPad = std::get<5>(params);
+        const std::string    poolingMethod = std::get<6>(params);
+        const std::string     roundingType = std::get<7>(params);
+        const bool              excludePad = std::get<8>(params);
+
+        // Only non-interleaved layouts are supported: CHW, NCHW, NCDHW, ...
+        const bool interleaved = false;
+
+        const int     inputNDims =  inputShape.size();
+        const int    kernelNDims = kernelShape.size();
+        const int   stridesNDims =     strides.size();
+        const int padsBeginNDims =   padsBegin.size();
+        const int   padsEndNDims =     padsEnd.size();
+
+        //
+        // Verify test parameters
+        //
+
+        IE_ASSERT(inputNDims >= 3); // CHW, NCHW, NCDHW, ...
+
+        const int channelsNDims = 1;
+        const int batchNDims = inputNDims > 3; // 0 if CHW, 1 if NCHW etc
+        IE_ASSERT(inputNDims == kernelNDims + channelsNDims + batchNDims);
+
+        //
+        // Assume dims order like {N, C, ..., H, W}
+        // where {..., H, W} are spatial dimensions
+        //
+
+        const int channelsDim = batchNDims;
+        const int spacialDimsBegin = channelsDim + 1;
+        const int inputChannels = inputShape[channelsDim];
+
+        IE_ASSERT(inputChannels > 0);
+
+        IE_ASSERT(kernelNDims > 0);
+        IE_ASSERT(kernelNDims == stridesNDims || stridesNDims == 0);
+
+        IE_ASSERT(autoPad == "same_lower" ||
+                  autoPad == "same_upper" ||
+                  autoPad == "valid" ||
+                  autoPad == "");
+
+        if (autoPad == "") {
+            IE_ASSERT(padsBeginNDims == kernelNDims);
+            IE_ASSERT(padsEndNDims == kernelNDims);
+        } else {
+            IE_ASSERT(padsBeginNDims == 0);
+            IE_ASSERT(padsEndNDims == 0);
+        }
+
+        IE_ASSERT(poolingMethod == "avg" ||
+                  poolingMethod == "max");
+
+        IE_ASSERT(roundingType == "floor" ||
+                  roundingType == "ceil" ||
+                  roundingType == "");
+
+        //
+        // Derive other parameters of layer
+        //
+
+        std::vector<int> outputShape(inputNDims);
+        for (int i = 0; i < kernelNDims; i++) {
+            int strides_i = stridesNDims ? strides[i] : 1;
+
+            int remainder_i = inputShape[i + spacialDimsBegin] % strides_i;
+            int pads_i = kernelShape[i] - (remainder_i? remainder_i: strides_i);
+
+            int pads_begin_i, pads_end_i;
+            if (autoPad == "") {
+                pads_begin_i = padsBegin[i];
+                pads_end_i   = padsEnd[i];
+            } else if (autoPad == "valid") {
+                pads_begin_i = 0;
+                pads_end_i   = 0;
+            } else if (autoPad == "same_lower") {
+                pads_end_i   = pads_i / 2;           // floor(pads_i / 2.)
+                pads_begin_i = pads_i - pads_end_i;  //  ceil(pads_i / 2.)
+            } else if (autoPad == "same_upper") {
+                pads_begin_i = pads_i / 2;
+                pads_end_i   = pads_i - pads_begin_i;
+            } else {
+                IE_ASSERT(false); // this must never happen
+            }
+
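+            // e.g. input extent 65, kernel 5, stride 3: remainder = 65 % 3 = 2,
+            // so pads_i = 5 - 2 = 3; "same_upper" splits it into begin 1 / end 2,
+            // giving output extent (65 + 1 + 2 - 5) / 3 + 1 = 22.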
+            outputShape[i + spacialDimsBegin] = (inputShape[i + spacialDimsBegin]
+                                               + pads_begin_i + pads_end_i
+                                               - kernelShape[i]
+                                                ) / strides_i + 1;
+        }
+        outputShape[channelsDim] = inputChannels;
+        if (batchNDims) {
+            outputShape[0] = inputShape[0]; // copy batch size
+        }
+
+        //
+        // Initialize data
+        //
+
+        TBlob<uint8_t>::Ptr inputBlob = createPlainTBlob(inputShape, Precision::FP16);
+        TBlob<uint8_t>::Ptr outputBlob = createPlainTBlob(outputShape, Precision::FP16);
+
+        inputBlob->allocate();
+        outputBlob->allocate();
+
+        int inputNum = getTotalNum(inputShape);
+        uint8_t* inputBlobDataPtr = inputBlob->data();
+
+        // HACK: Fill the input with Gaussian-distributed random data (not uniform).
+        //
+        // WHY: While a uniform distribution is OK for the reference implementation,
+        //      hardware convolution on Myriad X uses tricky quantization that
+        //      is not accurate enough if the input is white noise.
+        //
+        //      Such quantization adjusts to the image's histogram, which Gaussian
+        //      noise simulates more or less adequately.
+        #if 0
+        fulfillUniformly(inputBlobDataPtr, inputNum, Precision::FP16, 0, 255);
+        #else
+        fulfillGaussian(inputBlobDataPtr, inputNum, Precision::FP16, 128, 32);
+        #endif
+
+        //
+        // Initialize network
+        //
+
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+        std::string model = createModel(inputShape,
+                                        outputShape,
+                                        kernelShape,
+                                        strides,
+                                        padsBegin,
+                                        padsEnd,
+                                        autoPad,
+                                        poolingMethod,
+                                        roundingType,
+                                        excludePad);
+        #if DEBUG
+        std::cout << "model:\n" << model << "\n";
+        #endif
+
+        ASSERT_NO_THROW(readNetwork(model));
+
+        const CNNNetwork& network = _cnnNetwork;
+
+        _inputsInfo = network.getInputsInfo();
+        _inputsInfo["input"]->setPrecision(Precision::FP16);
+
+        _outputsInfo = network.getOutputsInfo();
+        _outputsInfo["pooling"]->setPrecision(Precision::FP16);
+
+        //
+        // Infer
+        //
+
+        StatusCode st = OK;
+
+        ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, _config, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+        ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr inputValuesBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("input", inputValuesBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        void* inputValuesBlobDataPtr = inputValuesBlob->buffer();
+        std::memcpy(inputValuesBlobDataPtr, inputBlobDataPtr, inputNum * sizeof(ie_fp16));
+
+        ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr outputValuesBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("pooling", outputValuesBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        //
+        // Check result
+        //
+
+        Blob::Ptr refValuesBlob = make_shared_blob<ie_fp16>(outputValuesBlob->getTensorDesc());
+        refValuesBlob->allocate();
+
+        const ie_fp16 *inputData = inputValuesBlob->cbuffer().as<ie_fp16*>();
+              ie_fp16 *referenceData = refValuesBlob->buffer().as<ie_fp16*>();
+
+        ref_poolnd(inputData,
+                   referenceData,
+                   inputShape,
+                   outputShape,
+                   kernelShape,
+                   strides,
+                   padsBegin,
+                   padsEnd,
+                   autoPad,
+                   interleaved,
+                   poolingMethod,
+                   roundingType,
+                   excludePad);
+
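+        // The relative tolerance scales with the geometric mean of the kernel
+        // extents, i.e. with how many values each output element is reduced from.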
+        float tolerance = std::pow(getTotalNum(kernelShape), 1.f/kernelNDims) / 1000;
+
+        CompareCommonRelative(outputValuesBlob, refValuesBlob, tolerance);
+    }
+
+private:
+
+    // Pooling ND reference for FP16
+    static
+    void ref_poolnd(const ie_fp16           input[],
+                          ie_fp16           output[],
+                    const std::vector<int>& inputShape,
+                    const std::vector<int>& outputShape,
+                    const std::vector<int>& kernelShape,
+                    const std::vector<int>& strides,
+                    const std::vector<int>& padsBegin,
+                    const std::vector<int>& padsEnd,
+                    const std::string       autoPad,
+                    const bool              interleaved,
+                    const std::string       poolingMethod,
+                    const std::string       roundingType,
+                    const bool              excludePad) {
+        //
+        // Convert input into fp32 format
+        //
+
+        int  inputNDims =  inputShape.size();
+        int kernelNDims = kernelShape.size();
+
+        int batchDim    = inputNDims > 3 ? 0 : -1;
+        int channelsDim = interleaved ? inputNDims - 1 : batchDim + 1;
+
+        int  inputChannels =  inputShape[channelsDim];
+        int outputChannels = outputShape[channelsDim];
+        IE_ASSERT(inputChannels == outputChannels);
+
+        size_t  inputsNum = getTotalNum(inputShape);
+        size_t outputsNum = getTotalNum(outputShape);
+
+        std::unique_ptr<float[]>  inputF32(new float  [inputsNum]);
+        std::unique_ptr<float[]> outputF32(new float [outputsNum]);
+
+        copyF16ToF32(input, inputF32.get(), inputsNum);
+
+        //
+        // Execute reference pooling
+        //
+
+        ref_poolnd_common(inputF32.get(),
+                          outputF32.get(),
+                          inputShape,
+                          outputShape,
+                          kernelShape,
+                          strides,
+                          padsBegin,
+                          padsEnd,
+                          autoPad,
+                          interleaved,
+                          poolingMethod,
+                          roundingType,
+                          excludePad);
+
+        //
+        // Convert output to fp16
+        //
+
+        copyF32ToF16(outputF32.get(), output, outputsNum);
+    }
+
+    // Pooling ND: reference for FP32
+    //
+    // Assume dims order like {N, C, ..., H, W}
+    // where {..., H, W} are spatial dimensions
+    //
+    // Either {N, ..., H, W, C} if interleaved
+    //
+    // TODO: move this code into "conv_ref.cpp"
+    static
+    void ref_poolnd_common(const float input[],
+                                 float output[],
+               const std::vector<int>& inputShape,
+               const std::vector<int>& outputShape,
+               const std::vector<int>& kernelShape,
+               const std::vector<int>& strides,
+               const std::vector<int>& padsBegin,
+               const std::vector<int>& padsEnd,
+               const std::string     & autoPad,
+                           const bool  interleaved,
+               const std::string     & poolingMethod,
+               const std::string     & roundingType,
+                           const bool  excludePad) {
+        //
+        // Verify parameters
+        //
+
+        const int     inputNDims =  inputShape.size();
+        const int    outputNDims = outputShape.size();
+        const int    kernelNDims = kernelShape.size();
+        const int   stridesNDims =     strides.size();
+        const int padsBeginNDims =   padsBegin.size();
+        const int   padsEndNDims =     padsEnd.size();
+
+        IE_ASSERT(inputNDims == outputNDims);
+        IE_ASSERT(inputNDims >= 3); // CHW, NCHW, NCDHW, ...
+
+        const int channelsNDims = 1;
+        const int batchNDims = inputNDims > 3; // 0 if CHW, 1 if NCHW etc
+        IE_ASSERT(inputNDims == kernelNDims + channelsNDims + batchNDims);
+
+        const int channelsDim      = interleaved ? inputNDims - 1 : batchNDims;
+        const int spacialDimsBegin = interleaved ? batchNDims     : channelsDim + 1;
+
+        const int  inputChannels =  inputShape[channelsDim];
+        const int outputChannels = outputShape[channelsDim];
+
+        IE_ASSERT(inputChannels > 0);
+        IE_ASSERT(inputChannels == outputChannels);
+
+        IE_ASSERT(kernelNDims > 0);
+        IE_ASSERT(kernelNDims == stridesNDims || stridesNDims == 0);
+
+        IE_ASSERT(autoPad == "same_lower" ||
+                  autoPad == "same_upper" ||
+                  autoPad == "valid" ||
+                  autoPad == "");
+
+        if (autoPad == "") {
+            IE_ASSERT(padsBeginNDims == kernelNDims);
+            IE_ASSERT(padsEndNDims == kernelNDims);
+        } else {
+            IE_ASSERT(padsBeginNDims == 0);
+            IE_ASSERT(padsEndNDims == 0);
+        }
+
+        IE_ASSERT(poolingMethod == "avg" || poolingMethod == "max");
+
+        enum PoolingMethodEnum { Max = 1, Avg = 2 };
+        int pooling_method = poolingMethod == "avg" ? Avg : Max;
+
+        IE_ASSERT(roundingType == "floor" || roundingType == "ceil" || roundingType == "");
+
+        //
+        // Update pads and strides
+        //
+
+        std::vector<int> padsBeginUpdate(kernelNDims);
+        std::vector<int> padsEndUpdate(kernelNDims);
+        std::vector<int> stridesUpdate(kernelNDims);
+
+        for (int i = 0; i < kernelNDims; i++) {
+            stridesUpdate[i] = strides.empty() ? 1 : strides[i];
+
+            int remainder = inputShape[i + spacialDimsBegin] % stridesUpdate[i];
+            int padsTotal = kernelShape[i] - (remainder? remainder: stridesUpdate[i]);
+
+            if (autoPad == "") {
+                padsBeginUpdate[i] = padsBegin[i];
+                padsEndUpdate[i]   = padsEnd[i];
+            } else if (autoPad == "valid") {
+                padsBeginUpdate[i] = 0;
+                padsEndUpdate[i]   = 0;
+            } else if (autoPad == "same_lower") {
+                padsEndUpdate[i]   = padsTotal / 2;
+                padsBeginUpdate[i] = padsTotal - padsEndUpdate[i];
+            } else if (autoPad == "same_upper") {
+                padsBeginUpdate[i] = padsTotal / 2;
+                padsEndUpdate[i]   = padsTotal - padsBeginUpdate[i];
+            } else {
+                IE_ASSERT(false); // this must never happen
+            }
+        }
+
+        for (int i = 0; i < kernelNDims; i++) {
+            int outputShapeExpected = (inputShape[i + spacialDimsBegin]
+                                       + padsBeginUpdate[i] + padsEndUpdate[i]
+                                       - kernelShape[i]
+                                      ) / stridesUpdate[i] + 1;
+            IE_ASSERT(outputShape[i + spacialDimsBegin] == outputShapeExpected);
+
+        }
+
+        int kernel_total = getTotalNum(kernelShape);
+        int kernel_hits;
+
+        //
+        // Cycle over batch dimension (if any)
+        //
+        int N = batchNDims ? inputShape[0] : 1;
+        for (int n = 0; n < N; n++) {
+            std::vector<int> inputIndices(inputNDims);
+            std::vector<int> outputIndices(outputNDims, 0);  // initialize with 0s
+            if (batchNDims) {
+                inputIndices[0] = n;
+                outputIndices[0] = n;
+            }
+
+            //
+            // Cycle over spatial dims of output tensor
+            //
+            do {
+                //
+                // Cycle over output channels
+                //
+                int C = outputChannels;
+                for (int c = 0; c < C; c++) {
+                    inputIndices[channelsDim] = c;
+                    outputIndices[channelsDim] = c;
+
+                    kernel_hits = 0;
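+                    // Max pooling must start from the lowest representable value so
+                    // that negative inputs are reduced correctly; avg starts from zero.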
+                    float result = (pooling_method == Avg) ? 0.f : std::numeric_limits<float>::lowest();
+                    float value;
+
+                    std::vector<int> kernelIndices(kernelNDims, 0);  // init with 0s
+
+                    //
+                    // Cycle over kernel
+                    //
+                    do {
+                        //
+                        // Set up spatial dims of inputIndices
+                        //
+                        for (int i = 0; i < kernelNDims; i++) {
+                            int output_index_i = outputIndices[i + spacialDimsBegin];
+                            int strided_output_index_i = output_index_i * stridesUpdate[i];
+
+                            int index = strided_output_index_i
+                                      + kernelIndices[i]
+                                      - padsBeginUpdate[i];
+
+                            if (index < 0 || index >= inputShape[i + spacialDimsBegin]) {
+                                goto nextKernelIndices;
+                            }
+
+                            inputIndices[i + spacialDimsBegin] = index;
+                        }
+
+                        value = input[offsetByIndex(inputIndices.data(), inputShape.data(), inputNDims)];
+
+                        if (pooling_method == Avg) {
+                            result = result + value;
+                        } else {
+                            result = std::max(result, value);
+                        }
+
+                        kernel_hits++;
+
+                    nextKernelIndices:
+                        continue;
+                    } while (nextIndices(kernelIndices.data(), kernelShape.data(), kernelNDims));
+
+                    if (pooling_method == Avg) {
+                        if (excludePad) {
+                            if (kernel_hits > 0) {
+                                result = result / kernel_hits;
+                            } else {
+                                IE_ASSERT(result == 0);
+                            }
+                        } else {
+                            result = result / kernel_total;
+                        }
+                    }
+
+                    output[offsetByIndex(outputIndices.data(), outputShape.data(), outputNDims)] = result;
+                }
+            } while (nextIndices(&outputIndices[spacialDimsBegin],
+                                 &outputShape[spacialDimsBegin],
+                                  kernelNDims));
+        }
+    }
+
+    static
+    bool nextIndices(int indices[],
+               const int shape[],
+                     int nDims) {
+        // let W's index change quicker than H's:
+        // note that dims order is like ..., H, W
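+        // e.g. for shape {2, 3}: {0,0} -> {0,1} -> {0,2} -> {1,0} -> ... -> {1,2},
+        // then returns false with all indices reset to zero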
+        for (int i = nDims - 1; i >= 0; i--) {
+            if (++indices[i] < shape[i])
+                return true;
+            indices[i] = 0;
+        }
+        return false; // cannot get next indices
+    }
+
+    // Get element offset by ND index
+    static
+    int offsetByIndex(const int index[],
+                      const int shape[],
+                      const int ndims) {
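+        // Row-major offset, e.g. for shape {C, H, W}: offset = (c*H + h)*W + w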
+        int offset = 0;
+        int stride = 1;
+        for (int i = ndims - 1; i >= 0; i--) {
+            offset += index[i] * stride;
+            stride *= shape[i];
+        }
+        return offset;
+    }
+
+    // Count total number of elements in ND tensor
+    static
+    int getTotalNum(const std::vector<int>& shape) {
+        int totalNum = 1;
+        for (int i = 0; i < shape.size(); i++) {
+            totalNum *= shape[i];
+        }
+        return totalNum;
+    }
+
+    // Convert FP16 tensor data into FP32 format
+    static
+    void copyF16ToF32(const ie_fp16 f16Data[],
+                            float   f32Data[],
+                      const int     num) {
+        for (int i = 0; i < num; i++) {
+            f32Data[i] = PrecisionUtils::f16tof32(f16Data[i]);
+        }
+    }
+
+    // Convert FP32 tensor data into FP16 format
+    static
+    void copyF32ToF16(const float   f32Data[],
+                            ie_fp16 f16Data[],
+                      const int     num) {
+        for (int i = 0; i < num; i++) {
+            f16Data[i] = PrecisionUtils::f32tof16(f32Data[i]);
+        }
+    }
+
+    // Fill the data[] array with random numbers
+    // distributed uniformly over the interval [a, b]
+    static
+    void fulfillUniformly(uint8_t* data, int num, Precision precision, double a, double b) {
+        IE_ASSERT(Precision::FP16 == precision);
+        std::srand(1);
+        for (int i = 0; i < num; i++) {
+            double r = std::rand() / (double)RAND_MAX;
+            float v = static_cast<float>(a*(1 - r) + b*r);
+            reinterpret_cast<ie_fp16*>(data)[i] = PrecisionUtils::f32tof16(v);
+        }
+    }
+
+    // Fill the data[] array with random numbers drawn from a
+    // Gaussian distribution with the given mean and standard deviation
+    static
+    void fulfillGaussian(uint8_t* data, int num, Precision precision,
+                         double mean, double stdDev) {
+        IE_ASSERT(Precision::FP16 == precision);
+        std::srand(1);
+        for (int i = 0; i < num; i++) {
+            float value = static_cast<float>(randomGaussian(mean, stdDev));
+            reinterpret_cast<ie_fp16*>(data)[i] = PrecisionUtils::f32tof16(value);
+        }
+    }
+
+    // https://en.wikipedia.org/wiki/Marsaglia_polar_method
+    static double randomGaussian(double mean, double stdDev) {
+        static const double epsilon = std::numeric_limits<double>::min();
+        thread_local static double spare;
+        thread_local static bool hasSpare = false;
+
+        if (hasSpare) {
+            hasSpare = false;
+            return mean + stdDev * spare;
+        }
+
+        double u, v, s;
+        do {
+            u = rand() / static_cast<double>(RAND_MAX);
+            v = rand() / static_cast<double>(RAND_MAX);
+            s = u*u + v*v;
+        } while (s > 1 || s < epsilon);
+        s = std::sqrt(-2. * std::log(s) / s);
+
+        spare = v * s;
+        hasSpare = true;
+        return mean + stdDev * (u * s);
+    }
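+
+    // Why this works (sketch): an accepted (u, v) is uniform over the unit
+    // disk, and the transform above yields two independent N(0, 1) samples,
+    // u * s and v * s; one is returned now, the other is cached in 'spare'
+    // and returned (scaled by stdDev) on the next call.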
+
+    static
+    TBlob<uint8_t>::Ptr createPlainTBlob(const std::vector<int>& shape,
+                                         const Precision& precision)
+    {
+        int ndims = shape.size();
+        int length = 1;
+        for (int i = 0; i < ndims; i++) {
+            length *= shape[i];
+        }
+        SizeVector dims { length * precision.size() };
+        Layout layout = Layout::ANY; // as plain memory
+        TensorDesc tensorDesc(Precision::U8, dims, layout);
+        TBlob<uint8_t>::Ptr blob = std::make_shared<TBlob<uint8_t>>(tensorDesc);
+        return blob;
+    }
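+
+    // Note: the blob is deliberately typeless plain memory, a 1-D U8 tensor
+    // of getTotalNum(shape) * precision.size() bytes that the test then
+    // reinterprets as FP16 data via the copy/fill helpers above.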
+
+    static
+    std::string createModel(const std::vector<int>& inputShape,
+                            const std::vector<int>& outputShape,
+                            const std::vector<int>& kernelShape,
+                            const std::vector<int>& strides,
+                            const std::vector<int>& padsBegin,
+                            const std::vector<int>& padsEnd,
+                            const std::string       autoPad,
+                            const std::string       poolingMethod,
+                            const std::string       roundingType,
+                            const bool              excludePad)
+    {
+        std::string model = R"V0G0N(
+            <?xml version="1.0" ?>
+            <net name="testPoolND" version="6">
+                <layers>
+                    <layer id="0" name="input" type="Input" precision="__PRECISION__">
+                        <output>
+                            <port id="0">
+                                __INPUT_DIMS__
+                            </port>
+                        </output>
+                    </layer>
+                    <layer id="1" name="pooling" type="Pooling" precision="__PRECISION__">
+                        <data kernel="__KERNEL__"
+                              strides="__STRIDES__"
+                              pads_begin="__PADS_BEGIN__"
+                              pads_end="__PADS_END__"
+                              auto_pad="__AUTO_PAD__"
+                              pool-method="__POOLING_METHOD__"
+                              rounding_type="__ROUNDING_TYPE__"
+                              exclude-pad="__EXCLUDE_PAD__"
+                        />
+                        <input>
+                            <port id="0">
+                                __INPUT_DIMS__
+                            </port>
+                        </input>
+                        <output>
+                            <port id="1">
+                                __OUTPUT_DIMS__
+                            </port>
+                        </output>
+                    </layer>
+                </layers>
+                <edges>
+                    <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+                </edges>
+            </net>
+        )V0G0N";
+
+        REPLACE_WITH_STR(model, "__PRECISION__", "FP16");
+
+        const std::string inputDimsStr = shapeToDimsString(inputShape);
+        const std::string outputDimsStr = shapeToDimsString(outputShape);
+        REPLACE_WITH_STR(model, "__INPUT_DIMS__", inputDimsStr);
+        REPLACE_WITH_STR(model, "__OUTPUT_DIMS__", outputDimsStr);
+
+        const std::string kernelShapeStr = shapeToString(kernelShape);
+        REPLACE_WITH_STR(model, "__KERNEL__", kernelShapeStr);
+
+        if (strides.empty()) {
+            REPLACE_WITH_STR(model, "strides=\"__STRIDES__\"", "");
+        } else {
+            const std::string stridesStr = shapeToString(strides);
+            REPLACE_WITH_STR(model, "__STRIDES__", stridesStr);
+        }
+
+        if (autoPad == "") {
+            const std::string padsBeginStr = shapeToString(padsBegin);
+            const std::string padsEndStr = shapeToString(padsEnd);
+            REPLACE_WITH_STR(model, "__PADS_BEGIN__", padsBeginStr);
+            REPLACE_WITH_STR(model, "__PADS_END__", padsEndStr);
+            REPLACE_WITH_STR(model, "auto_pad=\"__AUTO_PAD__\"", "");
+        } else {
+            REPLACE_WITH_STR(model, "pads_begin=\"__PADS_BEGIN__\"", "");
+            REPLACE_WITH_STR(model, "pads_end=\"__PADS_END__\"", "");
+            REPLACE_WITH_STR(model, "__AUTO_PAD__", autoPad);
+        }
+
+        REPLACE_WITH_STR(model, "__POOLING_METHOD__", poolingMethod);
+
+        if (roundingType == "") {
+            REPLACE_WITH_STR(model, "rounding_type=\"__ROUNDING_TYPE__\"", "");
+        } else {
+            REPLACE_WITH_STR(model, "__ROUNDING_TYPE__", roundingType);
+        }
+
+        REPLACE_WITH_STR(model, "__EXCLUDE_PAD__", (excludePad? "true": "false"));
+
+        return model;
+    }
+
+    static
+    std::string shapeToString(const std::vector<int>& shape) {
+        std::string str;
+        for (int i = 0; i < shape.size(); i++) {
+            str += (i? ", ": "");
+            str += std::to_string(shape[i]);
+        }
+        return str;
+    }
+
+    static
+    std::string shapeToDimsString(const std::vector<int>& shape)
+    {
+        std::string str;
+        for (int i = 0; i < shape.size(); i++) {
+            str += (i? " ": "");
+            str += "<dim>" + std::to_string(shape[i]) + "</dim>";
+        }
+        return str;
+    }
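+
+    // Illustrative examples: shapeToString({2, 2}) yields "2, 2" (attribute
+    // form), and shapeToDimsString({1, 3, 224, 224}) yields
+    // "<dim>1</dim> <dim>3</dim> <dim>224</dim> <dim>224</dim>" (port form).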
+};
+
+class myriadLayersPoolNDTest_nightly: public PoolNDTest {};
+
+TEST_P(myriadLayersPoolNDTest_nightly, PoolND) {
+    testPoolND();
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pooling_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pooling_test.cpp
new file mode 100644 (file)
index 0000000..b312c13
--- /dev/null
@@ -0,0 +1,250 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_pooling_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayers_IR3_BatchPoolingTests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<InferenceEngine::SizeVector>({10, 192, 56, 56})
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 3, 3)) /* kernel     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 2, 2)) /* stride     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 0, 0)) /* pads_begin */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)) /* pads_end   */
+                                , ::testing::ValuesIn(s_poolingAutoPad)
+                                , ::testing::ValuesIn(s_poolingExcludePad)
+                                , ::testing::ValuesIn(s_poolingMethod)
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_1, myriadLayers_IR3_BatchPoolingTests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<InferenceEngine::SizeVector>({10, 576, 14, 14})
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 2, 2)) /* kernel     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 2, 2)) /* stride     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 0, 0)) /* pads_begin */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 0, 0)) /* pads_end   */
+                                , ::testing::ValuesIn(s_poolingAutoPad)
+                                , ::testing::ValuesIn(s_poolingExcludePad)
+                                , ::testing::ValuesIn(s_poolingMethod)
+                        )
+);
+
+
+INSTANTIATE_TEST_CASE_P(accuracy_4X4, myriadLayers_IR3_PoolingTests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<InferenceEngine::SizeVector>({10, 1024, 4, 4})
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 4, 4)) /* kernel     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)) /* stride     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 0, 0)) /* pads_begin */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 0, 0)) /* pads_end   */
+                                , ::testing::ValuesIn(s_poolingAutoPad)
+                                , ::testing::ValuesIn(s_poolingExcludePad)
+                                , ::testing::ValuesIn(s_poolingMethod)
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_1X1, myriadLayers_IR3_BatchPoolingTests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<InferenceEngine::SizeVector>({1, 3, 5, 7})
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)) /* kernel     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)) /* stride     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 0, 0)) /* pads_begin */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 0, 0)) /* pads_end   */
+                                , ::testing::ValuesIn(s_poolingAutoPad)
+                                , ::testing::ValuesIn(s_poolingExcludePad)
+                                , ::testing::ValuesIn(s_poolingMethod)
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_2X2p0000, myriadLayers_IR3_BatchPoolingTests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<InferenceEngine::SizeVector>({1, 512, 26, 26})
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 2, 2)) /* kernel     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)) /* stride     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 0, 0)) /* pads_begin */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 0, 0)) /* pads_end   */
+                                , ::testing::ValuesIn(s_poolingAutoPad)
+                                , ::testing::ValuesIn(s_poolingExcludePad)
+                                , ::testing::ValuesIn(s_poolingMethod)
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_2X2p0001, myriadLayers_IR3_BatchPoolingTests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<InferenceEngine::SizeVector>({1, 512, 26, 26})
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 2, 2)) /* kernel     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)) /* stride     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 0, 0)) /* pads_begin */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 0, 1)) /* pads_end   */
+                                , ::testing::ValuesIn(s_poolingAutoPad)
+                                , ::testing::ValuesIn(s_poolingExcludePad)
+                                , ::testing::ValuesIn(s_poolingMethod)
+                        )
+);
+INSTANTIATE_TEST_CASE_P(accuracy_2X2p0011, myriadLayers_IR3_BatchPoolingTests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<InferenceEngine::SizeVector>({1, 512, 26, 26})
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 2, 2)) /* kernel     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)) /* stride     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 0, 0)) /* pads_begin */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)) /* pads_end   */
+                                , ::testing::ValuesIn(s_poolingAutoPad)
+                                , ::testing::ValuesIn(s_poolingExcludePad)
+                                , ::testing::ValuesIn(s_poolingMethod)
+                        )
+);
+INSTANTIATE_TEST_CASE_P(accuracy_2X2p0111, myriadLayers_IR3_BatchPoolingTests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<InferenceEngine::SizeVector>({1, 512, 26, 26})
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 2, 2)) /* kernel     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)) /* stride     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 0, 1)) /* pads_begin */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)) /* pads_end   */
+                                , ::testing::ValuesIn(s_poolingAutoPad)
+                                , ::testing::ValuesIn(s_poolingExcludePad)
+                                , ::testing::ValuesIn(s_poolingMethod)
+                        )
+);
+INSTANTIATE_TEST_CASE_P(accuracy_2X2p1111, myriadLayers_IR3_BatchPoolingTests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<InferenceEngine::SizeVector>({1, 512, 26, 26})
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 2, 2)) /* kernel     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)) /* stride     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)) /* pads_begin */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)) /* pads_end   */
+                                , ::testing::ValuesIn(s_poolingAutoPad)
+                                , ::testing::ValuesIn(s_poolingExcludePad)
+                                , ::testing::ValuesIn(s_poolingMethod)
+                        )
+);
+INSTANTIATE_TEST_CASE_P(accuracy_2X2p1110, myriadLayers_IR3_BatchPoolingTests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<InferenceEngine::SizeVector>({1, 512, 26, 26})
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 2, 2)) /* kernel     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)) /* stride     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)) /* pads_begin */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 0)) /* pads_end   */
+                                , ::testing::ValuesIn(s_poolingAutoPad)
+                                , ::testing::ValuesIn(s_poolingExcludePad)
+                                , ::testing::ValuesIn(s_poolingMethod)
+                        )
+);
+INSTANTIATE_TEST_CASE_P(accuracy_2X2p1100, myriadLayers_IR3_BatchPoolingTests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<InferenceEngine::SizeVector>({1, 512, 26, 26})
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 2, 2)) /* kernel     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)) /* stride     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)) /* pads_begin */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 0, 0)) /* pads_end   */
+                                , ::testing::ValuesIn(s_poolingAutoPad)
+                                , ::testing::ValuesIn(s_poolingExcludePad)
+                                , ::testing::ValuesIn(s_poolingMethod)
+                        )
+);
+INSTANTIATE_TEST_CASE_P(accuracy_2X2p1000, myriadLayers_IR3_BatchPoolingTests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<InferenceEngine::SizeVector>({1, 512, 26, 26})
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 2, 2)) /* kernel     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)) /* stride     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 0)) /* pads_begin */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 0, 0)) /* pads_end   */
+                                , ::testing::ValuesIn(s_poolingAutoPad)
+                                , ::testing::ValuesIn(s_poolingExcludePad)
+                                , ::testing::ValuesIn(s_poolingMethod)
+                        )
+);
+INSTANTIATE_TEST_CASE_P(accuracy_2X2p1101, myriadLayers_IR3_BatchPoolingTests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<InferenceEngine::SizeVector>({1, 512, 26, 26})
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 2, 2)) /* kernel     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)) /* stride     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)) /* pads_begin */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 0, 1)) /* pads_end   */
+                                , ::testing::ValuesIn(s_poolingAutoPad)
+                                , ::testing::ValuesIn(s_poolingExcludePad)
+                                , ::testing::ValuesIn(s_poolingMethod)
+                        )
+);
+INSTANTIATE_TEST_CASE_P(accuracy_2X2p1011, myriadLayers_IR3_BatchPoolingTests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<InferenceEngine::SizeVector>({1, 512, 26, 26})
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 2, 2)) /* kernel     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)) /* stride     */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 0)) /* pads_begin */
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)) /* pads_end   */
+                                , ::testing::ValuesIn(s_poolingAutoPad)
+                                , ::testing::ValuesIn(s_poolingExcludePad)
+                                , ::testing::ValuesIn(s_poolingMethod)
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsMax_nightly,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(g_poolingInput),
+                                ::testing::ValuesIn(g_poolingLayerParamsFull),
+                                ::testing::ValuesIn(g_poolingLayout))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsMaxOverlappedByKernel_nightly,
+                        ::testing::Combine(
+                            ::testing::Values<InferenceEngine::SizeVector>({1, 1024, 6, 6}),
+                            ::testing::Values<param_size>(MAKE_STRUCT(param_size, 7, 7)),
+                            ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)),
+                            ::testing::Values<paddings4>(MAKE_STRUCT(paddings4, 0, 0, 1, 1)),
+                            ::testing::ValuesIn(g_poolingLayout))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsMaxPad4_nightly,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(g_poolingInputPad4),
+                                ::testing::ValuesIn(g_poolingKernelPad4),
+                                ::testing::ValuesIn(g_poolingStridePad4),
+                                ::testing::ValuesIn(g_poolingPad4),
+                                ::testing::ValuesIn(g_poolingLayout))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsAvgPad4_nightly,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(g_poolingInputPad4),
+                                ::testing::ValuesIn(g_poolingKernelPad4),
+                                ::testing::ValuesIn(g_poolingStridePad4),
+                                ::testing::ValuesIn(g_poolingPad4),
+                                ::testing::ValuesIn(g_poolingLayout))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsGlobalMax_nightly,
+                        ::testing::ValuesIn(g_GlobalPoolingInput));
+
+INSTANTIATE_TEST_CASE_P(accuracy_3x3, myriadLayersTestsMax_nightly,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(g_poolingInput),
+                                ::testing::ValuesIn(s_poolingLayerParams_k3x3),
+                                ::testing::ValuesIn(g_poolingLayout))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsAvg_nightly,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(g_poolingInput),
+                                ::testing::ValuesIn(g_poolingLayerParamsFull),
+                                ::testing::ValuesIn(g_poolingLayout))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsAvgOverlappedByKernel_nightly,
+                        ::testing::Combine(
+                            ::testing::Values<InferenceEngine::SizeVector>({1, 1024, 6, 6}),
+                            ::testing::Values<param_size>(MAKE_STRUCT(param_size, 7, 7)),
+                            ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)),
+                            ::testing::Values<paddings4>(MAKE_STRUCT(paddings4, 0, 0, 1, 1)),
+                            ::testing::ValuesIn(g_poolingLayout))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_3x3, myriadLayersTestsAvg_nightly,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(g_poolingInput),
+                                ::testing::ValuesIn(s_poolingLayerParams_k3x3),
+                                ::testing::ValuesIn(g_poolingLayout))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsGlobalAvg_nightly,
+                        ::testing::ValuesIn(g_GlobalPoolingInput));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pooling_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pooling_test.hpp
new file mode 100644 (file)
index 0000000..73123ac
--- /dev/null
@@ -0,0 +1,357 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <cmath>
+#include "myriad_layers_tests.hpp"
+#include "myriad_layers_reference_functions.hpp"
+
+
+#define ERROR_BOUND (1.2e-2f)
+
+using namespace InferenceEngine;
+
+extern const char POOLING_MAX[] = "max";
+extern const char POOLING_AVG[] = "avg";
+
+
+class myriadLayersTestsMax_nightly: public PoolingTest<POOLING_MAX>
+{
+};
+
+class myriadLayersTestsMaxOverlappedByKernel_nightly: public PoolingTestPad4<POOLING_MAX, true>
+{
+};
+
+class myriadLayersTestsMaxPad4_nightly: public PoolingTestPad4<POOLING_MAX>
+{
+};
+
+class myriadLayersTestsGlobalMax_nightly: public GlobalPoolingTest<POOLING_MAX>
+{
+};
+
+class myriadLayersTestsAvg_nightly: public PoolingTest<POOLING_AVG>
+{
+};
+
+class myriadLayersTestsAvgOverlappedByKernel_nightly: public PoolingTestPad4<POOLING_AVG, true>
+{
+};
+
+class myriadLayersTestsAvgPad4_nightly: public PoolingTestPad4<POOLING_AVG>
+{
+};
+
+class myriadLayersTestsGlobalAvg_nightly: public GlobalPoolingTest<POOLING_AVG>
+{
+};
+
+/* IR version 3 tests; the main difference is the changed definition of the padding parameters */
+/* input tensor, kernel, stride, pads_begin, pads_end, auto_pad, exclude_pad, method */
+typedef std::tuple<InferenceEngine::SizeVector, param_size, param_size, param_size, param_size, const char*, const char*, const char*> IR3_PoolParams;
+
+class myriadLayers_IR3_PoolingTests_nightly: public myriadLayersTests_nightly, /* input tensor, kernel, stride, pads_begin, pads_end, auto_pad, exclude_pad, method */
+                                             public testing::WithParamInterface<IR3_PoolParams> {
+};
+
+static void genTestData(InferenceEngine::Blob::Ptr blob) {
+    ASSERT_NE(blob, nullptr);
+    Layout layout = blob->getTensorDesc().getLayout();
+    SizeVector dims = blob->getTensorDesc().getDims();
+
+    ie_fp16* ptr = blob->buffer().as<ie_fp16*>();
+    if (layout == NCHW || layout == NHWC) {
+        size_t N = dims[0];
+        size_t C = dims[1];
+        size_t H = dims[2];
+        size_t W = dims[3];
+
+        float counter = 0.125f;
+        for (size_t n = 0; n < N; n++) {
+            for (size_t c = 0; c < C; c++) {
+                for (size_t h = 0; h < H; h++) {
+                    for (size_t w = 0; w < W; w++) {
+                        size_t actualIdx = layout == NCHW ?
+                                           w + h * W + c * W * H + n * W * H * C : c + w * C + h * C * W +
+                                                                                   n * W * H * C;
+                        ptr[actualIdx] = PrecisionUtils::f32tof16(counter);
+                        counter += 0.025f;
+                        if (counter > 5.0f) {
+                            counter = -0.5f;
+                        }
+                    }
+                }
+            }
+        }
+    } else {
+        ASSERT_TRUE(false);
+    }
+}
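+
+// Note: genTestData fills the blob with a deterministic ramp (0.125, 0.150,
+// 0.175, ..., wrapping back to -0.5 once the value exceeds 5.0), so reference
+// and device results are compared on identical, reproducible inputs.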
+
+
+TEST_P(myriadLayers_IR3_PoolingTests_nightly, Pooling) {
+    std::map<std::string, std::string> params;
+    InferenceEngine::SizeVector output_tensor;
+    int32_t IW = 0;
+    int32_t IH = 0;
+    int32_t IC = 0;
+    int32_t I_N = 0;
+    size_t  group = 0;
+    /* input tensor, kernel, stride, pads_begin, pads_end, auto_pad, exclude_pad, method */
+    auto p = ::testing::WithParamInterface<IR3_PoolParams>::GetParam();
+    auto input_tensor       = std::get<0>(p);
+    param_size kernel       = std::get<1>(p);
+    param_size stride       = std::get<2>(p);
+    param_size pads_begin   = std::get<3>(p);
+    param_size pads_end     = std::get<4>(p);
+    const char* auto_pad    = std::get<5>(p);
+    const std::string exclude_pad = std::get<6>(p);
+    const std::string method      = std::get<7>(p);
+
+    get_dims(input_tensor, IW, IH, IC, I_N);
+    if (I_N > 1)
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+    else
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(YES);
+    int32_t OW = 1;
+    int32_t OH = 1;
+    int32_t OC = 1;
+    int32_t ON = 1;
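+    // Note on the hard-coded output shapes (a reading of the parameter sets,
+    // not a general formula): with auto_pad == "same_upper" the spatial output
+    // is ceil(input / stride); the division by 2 below matches the stride-2
+    // instantiations, and the 4x4-kernel branch afterwards overrides it for
+    // the Faster-RCNN-style case (stride 1, rounding-type "ceil").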
+    if (strcmp(auto_pad, "same_upper") == 0) {
+        OW = input_tensor[3]/2;
+        OH = input_tensor[2]/2;
+        OC = input_tensor[1];
+        ON = input_tensor[0];
+    } else {
+        ASSERT_TRUE(false);
+    }
+    if (kernel.x == 4 && kernel.y == 4) {
+        /* particular case  for Faster-RCNN */
+        OW = input_tensor[3] / kernel.x;
+        OH = input_tensor[2] / kernel.y;
+        OC = input_tensor[1];
+        ON = input_tensor[0];
+    }
+
+    gen_dims(output_tensor, input_tensor.size(), OW, OH, OC, ON);
+
+    std::string padsB   = gen_param(pads_begin);
+    std::string padsE   = gen_param(pads_end);
+    std::string strides = gen_param(stride);
+    std::string kern    = gen_param(kernel);
+
+    std::map<std::string, std::string> layer_params = {
+              {"kernel",      kern}
+            , {"strides",     strides}
+            , {"pads_begin",  padsB}
+            , {"pads_end",    padsE}
+            , {"auto_pad",    auto_pad}
+            , {"exclude_pad", exclude_pad}
+            , {"pool-method",      method}
+    };
+    if (kernel.x == 4 && kernel.y == 4) {
+        layer_params.erase("auto_pad");
+        layer_params["rounding-type"] = "ceil";
+    }
+    _genDataCallback = genTestData;
+
+    _testNet.addLayer(LayerInitParams("Pooling")
+             .params(layer_params)
+             .in({input_tensor})
+             .out({output_tensor}),
+             ref_pooling_wrap);
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams().useHWOpt(CheckMyriadX())));
+    float maxerr = 0.0001f;
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), maxerr);
+}
+
+class myriadLayers_IR3_BatchPoolingTests_nightly: public myriadLayersTests_nightly, /* input tensor, kernel, stride, pads_begin, pads_end, auto_pad, exclude_pad, method */
+                                                  public testing::WithParamInterface<IR3_PoolParams> {
+};
+
+TEST_P(myriadLayers_IR3_BatchPoolingTests_nightly, Pooling) {
+    std::map<std::string, std::string> params;
+    InferenceEngine::SizeVector output_tensor;
+    int32_t IW = 0;
+    int32_t IH = 0;
+    int32_t IC = 0;
+    int32_t I_N = 0;
+    size_t  group = 0;
+    /* input tensor, kernel, stride, pads_begin, pads_end, auto_pad, exclude_pad, method */
+    auto p = ::testing::WithParamInterface<IR3_PoolParams>::GetParam();
+    auto input_tensor       = std::get<0>(p);
+    param_size kernel       = std::get<1>(p);
+    param_size stride       = std::get<2>(p);
+    param_size pads_begin   = std::get<3>(p);
+    param_size pads_end     = std::get<4>(p);
+    const char* auto_pad    = std::get<5>(p);
+    const std::string exclude_pad = std::get<6>(p);
+    const std::string method      = std::get<7>(p);
+
+    get_dims(input_tensor, IW, IH, IC, I_N);
+    if (I_N > 1)
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+    else
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(YES);
+    int32_t OW = 1;
+    int32_t OH = 1;
+    int32_t OC = 1;
+    int32_t ON = 1;
+    if (strcmp(auto_pad, "same_upper") == 0) {
+        OW = input_tensor[3]/2;
+        OH = input_tensor[2]/2;
+        OC = input_tensor[1];
+        ON = input_tensor[0];
+    }
+    if (kernel.x == 1 && kernel.y == 1 &&
+        pads_begin.x == 0 && pads_begin.y == 0 &&
+        pads_end.x == 0 && pads_end.y == 0) {
+        OW = input_tensor[3];
+        OH = input_tensor[2];
+        OC = input_tensor[1];
+        ON = input_tensor[0];
+    }
+    if (kernel.x == 2 && kernel.y == 2 && stride.x == 1 && stride.y == 1) {
+        OW = input_tensor[3];
+        OH = input_tensor[2];
+        OC = input_tensor[1];
+        ON = input_tensor[0];
+    }
+    gen_dims(output_tensor, input_tensor.size(), OW, OH, OC, ON);
+
+    std::string padsB   = gen_param(pads_begin);
+    std::string padsE   = gen_param(pads_end);
+    std::string strides = gen_param(stride);
+    std::string kern    = gen_param(kernel);
+
+    std::map<std::string, std::string> layer_params = {
+              {"kernel",      kern}
+            , {"strides",     strides}
+            , {"pads_begin",  padsB}
+            , {"pads_end",    padsE}
+            , {"auto_pad",    auto_pad}
+            , {"exclude_pad", exclude_pad}
+            , {"pool-method",      method}
+    };
+    _genDataCallback = genTestData;
+    _testNet.addLayer(LayerInitParams("Pooling")
+             .params(layer_params)
+             .in({input_tensor})
+             .out({output_tensor}),
+             ref_pooling_wrap);
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams().useHWOpt(CheckMyriadX())));
+    float maxerr = 0.0001f;
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), maxerr);
+}
+
+static const std::vector<const char*> s_poolingAutoPad = {
+        "same_upper"
+};
+
+static const std::vector<const char*> s_poolingExcludePad = {
+        "true"
+};
+
+static const std::vector<const char*> s_poolingMethod = {
+        "max"
+};
+
+TEST_P(myriadLayersTestsMax_nightly, MaxPooling)
+{
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams().layoutPreference(_layout_preference)));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), ERROR_BOUND);
+}
+
+TEST_P(myriadLayersTestsMaxOverlappedByKernel_nightly, MaxPooling)
+{
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams().layoutPreference(_layout_preference)));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), ERROR_BOUND);
+}
+
+TEST_P(myriadLayersTestsMaxPad4_nightly, MaxPoolingPad4)
+{
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams().layoutPreference(_layout_preference)));
+    auto refBlob = getReferenceOutput();
+    CompareCommonAbsolute(_outputMap.begin()->second, refBlob, ERROR_BOUND);
+}
+
+TEST_P(myriadLayersTestsAvg_nightly, AvgPooling)
+{
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams().layoutPreference(_layout_preference)));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), ERROR_BOUND);
+}
+
+TEST_P(myriadLayersTestsAvgOverlappedByKernel_nightly, AvgPooling)
+{
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams().layoutPreference(_layout_preference)));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), ERROR_BOUND);
+}
+
+TEST_P(myriadLayersTestsAvgPad4_nightly, AvgPoolingPad4)
+{
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams().layoutPreference(_layout_preference)));
+    auto refBlob = getReferenceOutput();
+    CompareCommonAbsolute(_outputMap.begin()->second, refBlob, ERROR_BOUND);
+}
+
+TEST_P(myriadLayersTestsGlobalMax_nightly, GlobalMaxPooling)
+{
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()));
+    auto refBlob = getReferenceOutput();
+    CompareCommonAbsolute(_outputMap.begin()->second, refBlob, ERROR_BOUND);
+}
+
+TEST_P(myriadLayersTestsGlobalAvg_nightly, GlobalAvgPooling)
+{
+    if (_pad_val.x != 0 || _pad_val.y != 0) {
+        GTEST_SKIP() << "paddings should not exist for GlobalAvgPool";
+    }
+
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()));
+    auto refBlob = getReferenceOutput();
+    CompareCommonAbsolute(_outputMap.begin()->second, refBlob, ERROR_BOUND);
+}
+
+static std::vector<pooling_layer_params> s_poolingLayerParams_k3x3 = {
+        {{3, 3}, {1, 1}, {1, 1}},
+};
+
+const std::vector<InferenceEngine::SizeVector> g_poolingInputPad4 = {
+        {{1, 3,  224,  224}}
+};
+
+const std::vector<param_size> g_poolingKernelPad4 = {
+        {4, 4},
+        {6, 6},
+        {8, 8},
+};
+
+const std::vector<param_size> g_poolingStridePad4 = {
+        {1, 1},
+};
+
+const std::vector<paddings4> g_poolingPad4 = {
+        {0, 0, 2, 0},
+        {1, 2, 3, 2},
+        {2, 2, 0, 0},
+};
+
+const std::vector<GlobalPoolingTestParam> g_GlobalPoolingInput = {
+#if 0 // temporarily OFF because of HACKS for rfcn (was: #ifdef MORE_DIMENSIONS // 4DGP)
+        {{2,  8,    7,  7}},
+#endif
+        {GlobalPoolingTestParam{{1,  128,  2,  2}, { 3,  3}, {1, 1}, {2, 2}}},
+        {GlobalPoolingTestParam{{1, 1024, 64, 32}, {32, 64}, {0, 0}, {1, 1}}},
+        {GlobalPoolingTestParam{{1, 2048,  8,  8}, { 8,  8}, {0, 0}, {1, 1}}},
+        {GlobalPoolingTestParam{{1, 2048,  7,  7}, { 7,  7}, {0, 0}, {1, 1}}},
+        {GlobalPoolingTestParam{{1, 1000, 15, 15}, {15, 15}, {0, 0}, {1, 1}}},
+        {GlobalPoolingTestParam{{1, 1000, 14, 14}, {14, 14}, {0, 0}, {1, 1}}},
+        {GlobalPoolingTestParam{{1, 1000, 12, 12}, {12, 12}, {0, 0}, {1, 1}}},
+        {GlobalPoolingTestParam{{1,  8,    7,  7}, { 7,  7}, {0, 0}, {1, 1}}},
+        {GlobalPoolingTestParam{{1,  2,    7,  7}, { 7,  7}, {0, 0}, {1, 1}}},
+        {GlobalPoolingTestParam{{1,  8,    7,  7}, { 7,  7}, {0, 0}, {1, 1}}},
+        {GlobalPoolingTestParam{{1,  1000, 2,  3}, { 3,  2}, {0, 0}, {1, 1}}},
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_power_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_power_test.cpp
new file mode 100644 (file)
index 0000000..5ed5eb6
--- /dev/null
@@ -0,0 +1,10 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_power_test.hpp"
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsPowerParams_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_powerTensors),
+        ::testing::ValuesIn(s_powerParams))
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_power_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_power_test.hpp
new file mode 100644 (file)
index 0000000..ebe4eb9
--- /dev/null
@@ -0,0 +1,96 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include <cmath>
+
+#define BOUND (5.0f)
+#define ERROR_BOUND (1.0f)
+
+using namespace InferenceEngine;
+
+struct pwr_test_params {
+    float power;
+    float scale;
+    float shift;
+    friend std::ostream& operator<<(std::ostream& os, pwr_test_params const& tst)
+    {
+        return os << " power=" << tst.power
+                  << ", scale=" << tst.scale
+                  << ", shift=" << tst.shift;
+    };
+};
+
+static void gen_ref_power(const InferenceEngine::Blob::Ptr src,
+                          InferenceEngine::Blob::Ptr dst,
+                          pwr_test_params& p) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+    ASSERT_EQ(src->getTensorDesc().getDims().size(), dst->getTensorDesc().getDims().size());
+    uint16_t *srcData = src->buffer();
+    uint16_t *dstData = dst->buffer();
+    ASSERT_NE(srcData, nullptr);
+    ASSERT_NE(dstData, nullptr);
+
+    // Compute y = (shift + scale * x) ^ power
+    for (size_t indx = 0; indx < src->size(); indx++) {
+        dstData[indx] = PrecisionUtils::f32tof16(pow((p.shift + p.scale * PrecisionUtils::f16tof32(srcData[indx])), p.power));
+    }
+}
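+
+// Worked example (illustrative): for p = {power = 2, scale = -1.4, shift = 3.1}
+// each element is mapped to y = (3.1 - 1.4 * x)^2, computed in FP32 and then
+// rounded back to FP16.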
+
+typedef myriadLayerTestBaseWithParam<std::tuple<SizeVector, pwr_test_params>> myriadLayersTestsPowerParams_nightly;
+
+TEST_P(myriadLayersTestsPowerParams_nightly, TestsPower) {
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+    auto param = GetParam();
+    SizeVector tensor = std::get<0>(param);
+    pwr_test_params p = std::get<1>(param);
+
+    std::map<std::string, std::string> params;
+    params["power"] = std::to_string(p.power);
+    params["scale"] = std::to_string(p.scale);
+    params["shift"] = std::to_string(p.shift);
+
+    SetInputTensors({tensor});
+    SetOutputTensors({tensor});
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Power").params(params)));
+    /* input data preparation */
+    SetFirstInputToRange(0, BOUND);
+    ASSERT_TRUE(Infer());
+
+    /* output check */
+    auto outputBlob =_outputMap[_outputsInfo.begin()->first];
+    auto inputBlob = _inputMap[_inputsInfo.begin()->first];
+
+    gen_ref_power(inputBlob, _refBlob, p);
+
+    float eps_err = ERROR_BOUND;
+
+    /* for "dst = -src" case results have to be equal */
+    if ((p.power == 1.0f) && ((p.scale == -1.0f) || (p.scale == 1.0f)) && (p.shift == 0.0f))
+        eps_err = 0.0f;
+
+    CompareCommonAbsolute(outputBlob, _refBlob, eps_err);
+}
+
+static std::vector<SizeVector> s_powerTensors = {
+    {{1, 1, 32*10, 16*10}},
+    {{6, 5, 4, 3, 32, 16}},
+};
+
+static std::vector<pwr_test_params> s_powerParams = {
+    {0.f,  1.0f,  0.0f},
+    {1.f,  1.0f,  0.0f},
+    {1.f, -1.0f,  0.0f},
+    {1.f, -1.0f, 0.71f},
+    {2.f, -1.4f,  3.1f},
+    {3.f,  1.1f, -2.1f},
+    {7.f,  0.1f,  1.0f},
+    {-8.f,  0.1f,  1.0f},
+    /* various power */
+    { 3.1f,  0.5f, 3.0f},
+    { 0.50f, 0.50f, 1.0f},
+    {-1.50f, 1.50f, 1.0f},
+    { 10.50f,  0.1f, 0.1f}
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_prelu_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_prelu_test.cpp
new file mode 100644 (file)
index 0000000..3f952ce
--- /dev/null
@@ -0,0 +1,79 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_prelu_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy_PReLU, myriadLayerPReLU_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_PReLUTensors)
+      , ::testing::Values<ChannelSharedPrelu>(0, 1)
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(
+    accuracy, myriadLayerFullyConnectedWithPReLU_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(g_fcTestParamsSubset),
+        ::testing::Values(g_dimensionsFC[0]),
+        ::testing::ValuesIn(g_addBiasFC),
+        ::testing::ValuesIn(s_PReluLayerParams)
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsMaxPoolingWithPReLU_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(g_poolingInput),
+        ::testing::ValuesIn(g_poolingLayerParamsLite),
+        ::testing::ValuesIn(g_poolingLayout),
+        ::testing::ValuesIn(s_PReluLayerParams))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsAvgPoolingWithPReLU_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(g_poolingInput),
+        ::testing::ValuesIn(g_poolingLayerParamsLite),
+        ::testing::ValuesIn(g_poolingLayout),
+        ::testing::ValuesIn(s_PReluLayerParams))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_postop, myriadLayersTestsMaxPoolingWithPReLU_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(g_poolingInput_postOp),
+        ::testing::Values<pooling_layer_params>(MAKE_STRUCT(pooling_layer_params, {3, 3}, {1, 1}, {1, 1})),
+        ::testing::ValuesIn(g_poolingLayout),
+        ::testing::Values<PReLULayerDef>(MAKE_STRUCT(PReLULayerDef, {{{PRELU_PARAM, "0"}}})))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_postop, myriadLayersTestsAvgPoolingWithPReLU_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(g_poolingInput_postOp),
+        ::testing::Values<pooling_layer_params>(MAKE_STRUCT(pooling_layer_params, {3, 3}, {1, 1}, {1, 1})),
+        ::testing::ValuesIn(g_poolingLayout),
+        ::testing::Values<PReLULayerDef>(MAKE_STRUCT(PReLULayerDef, {{{PRELU_PARAM, "0"}}})))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayerConvolutionWithPReLU_nightly,
+        ::testing::Combine(
+            ::testing::ValuesIn(g_convolutionTensors)
+          , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<uint32_t>(16)
+          , ::testing::Values<uint32_t>(1)
+          , ::testing::ValuesIn(s_PReluLayerParams)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_postop, myriadLayerConvolutionWithPReLU_nightly,
+        ::testing::Combine(
+            ::testing::ValuesIn(g_poolingInput_postOp)
+          , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)/*, MAKE_STRUCT(param_size, 2, 2)*/)
+          , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<uint32_t>(32)
+          , ::testing::Values<uint32_t>(1)
+          , ::testing::Values<PReLULayerDef>(MAKE_STRUCT(PReLULayerDef, {{{PRELU_PARAM, "0"}}}))
+          )
+);
+
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_prelu_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_prelu_test.hpp
new file mode 100644 (file)
index 0000000..ca1a14e
--- /dev/null
@@ -0,0 +1,137 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include "myriad_layers_reference_functions.hpp"
+#include <algorithm>
+
+using std::tuple;
+using std::get;
+
+using namespace InferenceEngine;
+
+PRETTY_PARAM(ChannelSharedPrelu, int);
+typedef myriadLayerTestBaseWithParam<tuple<SizeVector, ChannelSharedPrelu >> myriadLayerPReLU_nightly;
+
+TEST_P(myriadLayerPReLU_nightly, PReLU) {
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+    SizeVector dims = get<0>(GetParam());
+    int channel_shared = get<1>(GetParam());
+
+    SetInputTensors({dims});
+    SetOutputTensors({dims});
+
+    int num_weights = channel_shared ? 1 : dims[dims.size() - 3];
+    
+    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(GenWeights(num_weights));
+    uint16_t* weights = weights_ptr->data().as<uint16_t*>();
+
+    std::map<std::string, std::string> layer_params = {{"channel_shared", std::to_string(channel_shared)}};
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("PReLU")
+                                        .params(layer_params)
+                                        .weights(weights_ptr->byteSize() / sizeof(uint16_t)),
+                                        {},
+                                        weights_ptr));
+    SetFirstInputToRange(0, 5.0f);
+    ASSERT_TRUE(Infer());
+
+    auto inputBlob = _inputMap.begin()->second;
+    auto outputBlob = _outputMap.begin()->second;
+
+    ref_PReLU(inputBlob, _refBlob, weights, num_weights);
+    CompareCommonAbsolute(outputBlob, _refBlob, 0);
+}
+
+static std::vector<InferenceEngine::SizeVector> s_PReLUTensors = {
+    {13, 38, 38},
+    {1, 13, 77,  99},
+    {4,  3, 11,   8},
+    {3,  11, 11,  8, 8},
+};
+
+struct PReLULayerDef {
+    ParamsStruct list;
+} PReLULayer;
+
+static std::vector<PReLULayerDef> s_PReluLayerParams = {
+    {{{PRELU_PARAM, "0"}}},
+    {{{PRELU_PARAM, "1"}}}
+};
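+
+// Note: PRELU_PARAM is the "channel_shared" attribute of the PReLU layer;
+// "0" requests one learned slope per channel and "1" a single slope shared
+// across channels, which is why TEST_BODY below sizes the weights as OC or 1.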
+
+class myriadLayerFullyConnectedWithPReLU_nightly: public FCTest<PReLULayerDef>{
+};
+
+#define TEST_BODY \
+    int channel_shared = 0;\
+    if (!extraLayerParams.list.empty()) {\
+        auto iter = extraLayerParams.list.find(PRELU_PARAM);\
+        if (iter != extraLayerParams.list.end()) {\
+             channel_shared = std::stoi(iter->second);\
+        }\
+    }\
+    size_t weightsSize = 1;\
+    if (channel_shared == 0) {\
+        int32_t OW;\
+        int32_t OH;\
+        int32_t OC;\
+        get_dims(_output_tensor, OW, OH, OC);\
+        weightsSize = OC;\
+    }\
+    _testNet.addLayer(LayerInitParams("PReLU")\
+             .params(extraLayerParams.list)\
+             .weights(weightsSize).fillWeights(defaultWeightsRange)\
+             .in({_output_tensor})\
+             .out({_output_tensor}),\
+             ref_PReLU_wrap);\
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()));
+
+TEST_P(myriadLayerFullyConnectedWithPReLU_nightly, TestsFullyConnected)
+{
+    auto p = ::testing::WithParamInterface<std::tuple<fcon_test_params, int32_t, int32_t, PReLULayerDef>>::GetParam();
+    auto extraLayerParams = std::get<3>(p);
+    TEST_BODY;
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), _par.error_bound);
+}
+
+#define ERROR_BOUND_WITH_RELU (4.e-3f)
+
+class myriadLayersTestsMaxPoolingWithPReLU_nightly: public PoolingTest<POOLING_MAX, PReLULayerDef>{
+};
+
+class myriadLayersTestsAvgPoolingWithPReLU_nightly: public PoolingTest<POOLING_AVG, PReLULayerDef>{
+};
+
+TEST_P(myriadLayersTestsMaxPoolingWithPReLU_nightly, TestsMaxPoolingWithPReLU)
+{
+    auto p = ::testing::WithParamInterface<std::tuple<InferenceEngine::SizeVector, pooling_layer_params, vpu::LayoutPreference, PReLULayerDef>>::GetParam();
+    auto extraLayerParams = std::get<3>(p);
+    TEST_BODY;
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), ERROR_BOUND_WITH_RELU);
+}
+
+TEST_P(myriadLayersTestsAvgPoolingWithPReLU_nightly, TestsAvgPoolingWithPReLU)
+{
+    auto p = ::testing::WithParamInterface<std::tuple<InferenceEngine::SizeVector, pooling_layer_params, vpu::LayoutPreference, PReLULayerDef>>::GetParam();
+    auto extraLayerParams = std::get<3>(p);
+    TEST_BODY;
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), ERROR_BOUND_WITH_RELU);
+}
+
+class myriadLayerConvolutionWithPReLU_nightly: public ConvolutionTest<PReLULayerDef>{
+};
+
+TEST_P(myriadLayerConvolutionWithPReLU_nightly, Convolution) {
+    auto p = ::testing::WithParamInterface<std::tuple<InferenceEngine::SizeVector, param_size, param_size, param_size, uint32_t, uint32_t, PReLULayerDef>>::GetParam();
+    auto extraLayerParams = std::get<6>(p);
+    TEST_BODY;
+    float maxerr = 0;
+    if (group == 1)
+        maxerr = 0.00055 * IC * kernel.x * kernel.y;
+    else // TODO: currently depthwise convolution is slightly less accurate
+        maxerr = 0.00066 * (IC / group) * kernel.x * kernel.y;
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), maxerr);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_prior_box_clustered_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_prior_box_clustered_test.cpp
new file mode 100644 (file)
index 0000000..7c0c64b
--- /dev/null
@@ -0,0 +1,260 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <cmath>
+#include "myriad_layers_tests.hpp"
+
+using namespace InferenceEngine;
+
+struct PriorBoxClusteredParams {
+    tensor_test_params in1 = {1, 384, 19, 19};
+    tensor_test_params in2 = {1, 3, 300, 300};
+
+    std::vector<float> widths = {9.4f, 25.1f, 14.7f, 34.7f, 143.0f, 77.4f, 128.8f, 51.1f, 75.6f};
+    std::vector<float> heights = {15.0f, 39.6f, 25.5f, 63.2f, 227.5f, 162.9f, 124.5f, 105.1f, 72.6f};
+    int clip = 0;
+    std::vector<float> variance = {0.1f, 0.1f, 0.2f, 0.2f};
+    int img_h = 0;
+    int img_w = 0;
+    float step = 16.0;
+    float step_h = 0.0;
+    float step_w = 0.0;
+    float offset = 0.5;
+};
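+
+// A reading of the defaults above (informative, not normative): nine prior
+// boxes (width/height pairs) are generated per cell of the 19x19 feature map
+// (in1) of a 300x300 image (in2); step = 16 is roughly the feature-map
+// stride, since 300 / 19 is about 15.8.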
+
+void refPriorBoxClustered(Blob::Ptr dst, const PriorBoxClusteredParams &p) {
+    int num_priors = p.widths.size();
+
+    int layer_width  = p.in1.w;
+    int layer_height = p.in1.h;
+
+    int32_t img_width  = p.img_w == 0 ? p.in2.w : p.img_w;
+    int32_t img_height = p.img_h == 0 ? p.in2.h : p.img_h;
+
+    float step_w = p.step_w == 0 ? p.step : p.step_w;
+    float step_h = p.step_h == 0 ? p.step : p.step_h;
+    if (step_w == 0 || step_h == 0) {
+        step_w = static_cast<float>(img_width) / layer_width;
+        step_h = static_cast<float>(img_height) / layer_height;
+    }
+
+    int offset = dst->getTensorDesc().getDims().back();
+    int var_size = p.variance.size();
+
+    ie_fp16* top_data_0 = static_cast<ie_fp16*>(dst->buffer());
+    ie_fp16* top_data_1 = top_data_0 + offset;
+
+    for (int h = 0; h < layer_height; ++h) {
+        for (int w = 0; w < layer_width;  ++w) {
+            float center_x = (w + p.offset) * step_w;
+            float center_y = (h + p.offset) * step_h;
+
+            for (int s = 0; s < num_priors; ++s) {
+                float box_width  = p.widths[s];
+                float box_height = p.heights[s];
+
+                float xmin = (center_x - box_width  / 2.) / img_width;
+                float ymin = (center_y - box_height / 2.) / img_height;
+                float xmax = (center_x + box_width  / 2.) / img_width;
+                float ymax = (center_y + box_height / 2.) / img_height;
+
+                if (p.clip) {
+                    xmin = std::min(std::max(xmin, 0.0f), 1.0f);
+                    ymin = std::min(std::max(ymin, 0.0f), 1.0f);
+                    xmax = std::min(std::max(xmax, 0.0f), 1.0f);
+                    ymax = std::min(std::max(ymax, 0.0f), 1.0f);
+                }
+
+                top_data_0[h * layer_width * num_priors * 4 + w * num_priors * 4 + s * 4 + 0] = PrecisionUtils::f32tof16(xmin);
+                top_data_0[h * layer_width * num_priors * 4 + w * num_priors * 4 + s * 4 + 1] = PrecisionUtils::f32tof16(ymin);
+                top_data_0[h * layer_width * num_priors * 4 + w * num_priors * 4 + s * 4 + 2] = PrecisionUtils::f32tof16(xmax);
+                top_data_0[h * layer_width * num_priors * 4 + w * num_priors * 4 + s * 4 + 3] = PrecisionUtils::f32tof16(ymax);
+
+                for (int j = 0; j < var_size; j++) {
+                    int index = h * layer_width * num_priors * var_size + w * num_priors * var_size + s * var_size + j;
+                    top_data_1[index] = PrecisionUtils::f32tof16(p.variance[j]);
+                }
+            }
+        }
+    }
+}
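+
+// Sanity check of the output layout: channel 0 holds 4 box corners per prior
+// and channel 1 holds 4 variances per prior, so each channel stores
+// 19 * 19 * 9 * 4 = 12996 values, matching <dim>12996</dim> in the IR below.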
+
+TEST_F(myriadLayersTests_nightly, PriorBoxClustered) {
+    std::string model = R"V0G0N(
+        <net name="PriorBoxClustered" version="2" batch="1">
+            <layers>
+                <layer name="data1" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="11">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="data1_copy" type="Power" precision="FP16" id="2">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="21">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="22">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="data2" type="Input" precision="FP16" id="3">
+                    <output>
+                        <port id="31">
+                            <dim>1</dim>
+                            <dim>384</dim>
+                            <dim>19</dim>
+                            <dim>19</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="data2_copy" type="Power" precision="FP16" id="4">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="41">
+                            <dim>1</dim>
+                            <dim>384</dim>
+                            <dim>19</dim>
+                            <dim>19</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="42">
+                            <dim>1</dim>
+                            <dim>384</dim>
+                            <dim>19</dim>
+                            <dim>19</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="priorboxclustered" type="PriorBoxClustered" precision="FP16" id="5">
+                    <data
+                        min_size="#"
+                        max_size="#"
+                        aspect_ratio="#"
+                        flip="1"
+                        clip="0"
+                        variance="0.100000,0.100000,0.200000,0.200000"
+                        img_size="0"
+                        img_h="0"
+                        img_w="0"
+                        step="16.000000"
+                        step_h="0.000000"
+                        step_w="0.000000"
+                        offset="0.500000"
+                        width="9.400000,25.100000,14.700000,34.700001,143.000000,77.400002,128.800003,51.099998,75.599998"
+                        height="15.000000,39.599998,25.500000,63.200001,227.500000,162.899994,124.500000,105.099998,72.599998"/>
+                    <input>
+                        <port id="51">
+                            <dim>1</dim>
+                            <dim>384</dim>
+                            <dim>19</dim>
+                            <dim>19</dim>
+                        </port>
+                        <port id="52">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="53">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>12996</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="priorboxclustered_copy" type="Power" precision="FP16" id="6">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="61">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>12996</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="62">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>12996</dim>
+                        </port>
+                    </output>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="11" to-layer="2" to-port="21"/>
+                <edge from-layer="3" from-port="31" to-layer="4" to-port="41"/>
+                <edge from-layer="3" from-port="31" to-layer="5" to-port="51"/>
+                <edge from-layer="1" from-port="11" to-layer="5" to-port="52"/>
+                <edge from-layer="5" from-port="53" to-layer="6" to-port="61"/>
+            </edges>
+        </net>
+    )V0G0N";
+    SetSeed(DEFAULT_SEED_VALUE + 6);
+    PriorBoxClusteredParams params;
+
+    StatusCode st;
+
+    ASSERT_NO_THROW(readNetwork(model));
+
+    const auto& network = _cnnNetwork;
+
+    _inputsInfo = network.getInputsInfo();
+    _inputsInfo["data1"]->setPrecision(Precision::FP16);
+    _inputsInfo["data2"]->setPrecision(Precision::FP16);
+
+    _outputsInfo = network.getOutputsInfo();
+    _outputsInfo["data1_copy"]->setPrecision(Precision::FP16);
+    _outputsInfo["data2_copy"]->setPrecision(Precision::FP16);
+    _outputsInfo["priorboxclustered_copy"]->setPrecision(Precision::FP16);
+
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, {}, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr data1;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("data1", data1, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr data2;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("data2", data2, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    GenRandomData(data1);
+    GenRandomData(data2);
+
+    ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr outputBlob;
+    ASSERT_NO_THROW(_inferRequest->GetBlob("priorboxclustered_copy", outputBlob, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    _refBlob = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, outputBlob->getTensorDesc().getDims(), ANY));
+    _refBlob->allocate();
+
+    refPriorBoxClustered(_refBlob, params);
+
+    CompareCommonAbsolute(outputBlob, _refBlob, 0.0);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_prior_box_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_prior_box_test.cpp
new file mode 100644 (file)
index 0000000..34aad16
--- /dev/null
@@ -0,0 +1,1890 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+
+using namespace InferenceEngine;
+
+struct PriorBoxParams {
+    tensor_test_params in1 = {1, 512, 38, 38};
+    tensor_test_params in2 = {1, 3, 300, 300};
+
+    std::vector<float> min_size = {21.0};
+    std::vector<float> max_size = {45.0};
+    std::vector<float> aspect_ratio = {2.0};
+    int flip = 1;
+    int clip = 0;
+    std::vector<float> variance = {0.1f, 0.1f, 0.2f, 0.2f};
+    int img_size = 0;
+    int img_h = 0;
+    int img_w = 0;
+    float step_ = 8.0;
+    float step_h = 0.0;
+    float step_w = 0.0;
+    float offset = 0.5;
+    int scale_all_sizes = 1;
+    std::vector<float> fixed_sizes = {};
+    std::vector<float> fixed_ratios = {};
+    std::vector<float> density = {};
+};
+
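+// Note: the defaults above match the conv4_3_norm branch of an SSD300-style model;
+// the PriorBox_WithConcat test below uses the same attribute values.
+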
+// The reference implementation below was taken from Caffe and adapted to the
+// Inference Engine API.
+void refPriorBox(Blob::Ptr dst, const PriorBoxParams &p) {
+    std::vector<float> aspect_ratios_;
+    aspect_ratios_.reserve(p.aspect_ratio.size() + 1);
+    aspect_ratios_.push_back(1.0f);
+    for (const auto& aspect_ratio : p.aspect_ratio) {
+        bool already_exist = false;
+        for (const auto& aspect_ratio_ : aspect_ratios_) {
+            if (fabsf(aspect_ratio - aspect_ratio_) < 1e-6) {
+                already_exist = true;
+                break;
+            }
+        }
+        if (!already_exist) {
+            aspect_ratios_.push_back(aspect_ratio);
+            if (p.flip) {
+                aspect_ratios_.push_back(1.0 / aspect_ratio);
+            }
+        }
+    }
+
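+    // Priors per cell: each deduplicated aspect ratio is paired with every min_size
+    // (or with every fixed_size when given), plus one extra prior per max_size.
+    // With the defaults (ratios {1, 2, 1/2}, one min_size, one max_size) this is 4.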
+    int num_priors_ = 0;
+    if (p.scale_all_sizes) {
+        num_priors_ = static_cast<int>(aspect_ratios_.size() * p.min_size.size());
+    } else {
+        num_priors_ = static_cast<int>(aspect_ratios_.size() + p.min_size.size() - 1);
+    }
+
+    if (!p.fixed_sizes.empty()) {
+        num_priors_ = static_cast<int>(aspect_ratios_.size() * p.fixed_sizes.size());
+    }
+
+    if (!p.density.empty()) {
+        for (const auto& _density : p.density) {
+            if (!p.fixed_ratios.empty()) {
+                num_priors_ += (p.fixed_ratios.size()) * (static_cast<int>(pow(_density, 2)) - 1);
+            } else {
+                num_priors_ += (aspect_ratios_.size()) * (static_cast<int>(pow(_density, 2)) - 1);
+            }
+        }
+    }
+
+    num_priors_ += p.max_size.size();
+
+    const auto layer_width  = p.in1.w;
+    const auto layer_height = p.in1.h;
+
+    const auto img_width  = p.img_w == 0 ? p.in2.w : p.img_w;
+    const auto img_height = p.img_h == 0 ? p.in2.h : p.img_h;
+    const auto img_width_inv = 1.f / static_cast<float>(img_width);
+    const auto img_height_inv = 1.f / static_cast<float>(img_height);
+
+    auto step_w = p.step_w == 0 ? p.step_ : p.step_w;
+    auto step_h = p.step_h == 0 ? p.step_ : p.step_h;
+
+    if (step_w == 0 || step_h == 0) {
+        step_w = static_cast<float>(img_width) / static_cast<float>(layer_width);
+        step_h = static_cast<float>(img_height) / static_cast<float>(layer_height);
+    }
+
+    std::vector<float> top_data(dst->size());
+    int dim = layer_height * layer_width * num_priors_ * 4;
+
+    float center_x = 0.f;
+    float center_y = 0.f;
+    float box_width = 0.f;
+    float box_height = 0.f;
+
+    size_t idx = 0;
+    for (int h = 0; h < layer_height; ++h) {
+        for (int w = 0; w < layer_width;  ++w) {
+            if (p.step_ == 0) {
+                // Use the per-axis steps (with the image-derived fallback computed above).
+                center_x = (static_cast<float>(w) + 0.5f) * step_w;
+                center_y = (static_cast<float>(h) + 0.5f) * step_h;
+            } else {
+                center_x = (p.offset + static_cast<float>(w)) * p.step_;
+                center_y = (p.offset + static_cast<float>(h)) * p.step_;
+            }
+
+            for (size_t s = 0; s < p.fixed_sizes.size(); ++s) {
+                auto fixed_size_ = static_cast<size_t>(p.fixed_sizes[s]);
+                box_width = box_height = fixed_size_ * 0.5f;
+
+                int density_ = 0;
+                int shift = 0;
+                if (s < p.density.size()) {
+                    density_ = static_cast<int>(p.density[s]);
+                    shift = static_cast<int>(p.fixed_sizes[s] / density_);
+                }
+
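+                // Dense priors: boxes are laid out on a density_ x density_ grid
+                // inside the cell, stepping by `shift` pixels between positions.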
+                if (!p.fixed_ratios.empty()) {
+                    for (const auto& fr : p.fixed_ratios) {
+                        const auto box_width_ratio = p.fixed_sizes[s] * 0.5f * std::sqrt(fr);
+                        const auto box_height_ratio = p.fixed_sizes[s] * 0.5f / std::sqrt(fr);
+
+                        for (int r = 0; r < density_; ++r) {
+                            for (int c = 0; c < density_; ++c) {
+                                const auto center_x_temp = center_x - fixed_size_ / 2 + shift / 2.f + c * shift;
+                                const auto center_y_temp = center_y - fixed_size_ / 2 + shift / 2.f + r * shift;
+
+                                top_data[idx++] = std::fmax((center_x_temp - box_width_ratio) * img_width_inv, 0.f);
+                                top_data[idx++] = std::fmax((center_y_temp - box_height_ratio) * img_height_inv, 0.f);
+                                top_data[idx++] = std::fmin((center_x_temp + box_width_ratio) * img_width_inv, 1.f);
+                                top_data[idx++] = std::fmin((center_y_temp + box_height_ratio) * img_height_inv, 1.f);
+                            }
+                        }
+                    }
+                } else {
+                    if (!p.density.empty()) {
+                        for (int r = 0; r < density_; ++r) {
+                            for (int c = 0; c < density_; ++c) {
+                                const auto center_x_temp = center_x - fixed_size_ / 2 + shift / 2.f + c * shift;
+                                const auto center_y_temp = center_y - fixed_size_ / 2 + shift / 2.f + r * shift;
+
+                                top_data[idx++] = std::fmax((center_x_temp - box_width) * img_width_inv, 0);
+                                top_data[idx++] = std::fmax((center_y_temp - box_height) * img_height_inv, 0);
+                                top_data[idx++] = std::fmin((center_x_temp + box_width) * img_width_inv, 1);
+                                top_data[idx++] = std::fmin((center_y_temp + box_height) * img_height_inv, 1);
+                            }
+                        }
+                    }
+                    // Rest of priors
+                    for (const auto& ar : p.aspect_ratio) {
+                        if (fabs(ar - 1.) < 1e-6) {
+                            continue;
+                        }
+
+                        const auto box_width_ratio = p.fixed_sizes[s] * 0.5f * std::sqrt(ar);
+                        const auto box_height_ratio = p.fixed_sizes[s] * 0.5f / std::sqrt(ar);
+                        for (int r = 0; r < density_; ++r) {
+                            for (int c = 0; c < density_; ++c) {
+                                const auto center_x_temp = center_x - fixed_size_ / 2 + shift / 2.f + c * shift;
+                                const auto center_y_temp = center_y - fixed_size_ / 2 + shift / 2.f + r * shift;
+
+                                top_data[idx++] = std::fmax((center_x_temp - box_width_ratio) * img_width_inv, 0);
+                                top_data[idx++] = std::fmax((center_y_temp - box_height_ratio) * img_height_inv, 0);
+                                top_data[idx++] = std::fmin((center_x_temp + box_width_ratio) * img_width_inv, 1);
+                                top_data[idx++] = std::fmin((center_y_temp + box_height_ratio) * img_height_inv, 1);
+                            }
+                        }
+                    }
+                }
+            }
+
+            for (size_t s = 0; s < p.min_size.size(); ++s) {
+                const auto min_size_ = p.min_size[s];
+
+                // first prior: aspect_ratio = 1, size = min_size
+                box_width = box_height = min_size_;
+                // xmin
+                top_data[idx++] = (center_x - box_width / 2.) * img_width_inv;
+                // ymin
+                top_data[idx++] = (center_y - box_height / 2.) * img_height_inv;
+                // xmax
+                top_data[idx++] = (center_x + box_width / 2.) * img_width_inv;
+                // ymax
+                top_data[idx++] = (center_y + box_height / 2.) * img_height_inv;
+
+                if (!p.max_size.empty()) {
+                    const auto max_size_ = p.max_size[s];
+
+                    // second prior: aspect_ratio = 1, size = sqrt(min_size * max_size)
+                    box_width = box_height = std::sqrt(min_size_ * max_size_);
+                    // xmin
+                    top_data[idx++] = (center_x - box_width / 2.) * img_width_inv;
+                    // ymin
+                    top_data[idx++] = (center_y - box_height / 2.) * img_height_inv;
+                    // xmax
+                    top_data[idx++] = (center_x + box_width / 2.) * img_width_inv;
+                    // ymax
+                    top_data[idx++] = (center_y + box_height / 2.) * img_height_inv;
+                }
+
+                // rest of priors
+                for (const auto& ar : aspect_ratios_) {
+                    if (fabs(ar - 1.) < 1e-6) {
+                        continue;
+                    }
+
+                    box_width = min_size_ * std::sqrt(ar);
+                    box_height = min_size_ / std::sqrt(ar);
+
+                    // xmin
+                    top_data[idx++] = (center_x - box_width / 2.) * img_width_inv;
+                    // ymin
+                    top_data[idx++] = (center_y - box_height / 2.) * img_height_inv;
+                    // xmax
+                    top_data[idx++] = (center_x + box_width / 2.) * img_width_inv;
+                    // ymax
+                    top_data[idx++] = (center_y + box_height / 2.) * img_height_inv;
+                }
+            }
+        }
+    }
+
+    auto output_data = static_cast<ie_fp16*>(dst->buffer());
+
+    // clip the priors' coordinates so that they lie within [0, 1]
+    if (p.clip) {
+        for (int d = 0; d < dim; ++d) {
+            float val = std::min(std::max(top_data[d], 0.0f), 1.0f);
+            output_data[d] = PrecisionUtils::f32tof16(val);
+        }
+    } else {
+        for (int d = 0; d < dim; ++d) {
+            output_data[d] = PrecisionUtils::f32tof16(top_data[d]);
+        }
+    }
+
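+    // The output has two channels: channel 0 holds the box coordinates written above,
+    // channel 1 holds the variances. getDims().back() equals `dim`, so this advances
+    // the pointer to the start of the variance channel.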
+    output_data += dst->getTensorDesc().getDims().back();
+
+    // set the variance.
+    if (p.variance.empty()) {
+        // Set default to 0.1.
+        for (int d = 0; d < dim; ++d) {
+            output_data[d] = PrecisionUtils::f32tof16(0.1f);
+        }
+    } else if (p.variance.size() == 1) {
+        for (int d = 0; d < dim; ++d) {
+            output_data[d] = PrecisionUtils::f32tof16(p.variance[0]);
+        }
+    } else {
+        // Exactly four variance values must be provided.
+        ASSERT_EQ(4u, p.variance.size());
+
+        idx = 0;
+        for (int h = 0; h < layer_height; ++h) {
+            for (int w = 0; w < layer_width; ++w) {
+                for (int i = 0; i < num_priors_; ++i) {
+                    for (int j = 0; j < 4; ++j) {
+                        output_data[idx++] = PrecisionUtils::f32tof16(p.variance[j]);
+                    }
+                }
+            }
+        }
+    }
+}
+
+class myriadLayersPriorBoxTests_nightly : public myriadLayersTests_nightly {
+public:
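+    // Returns the blob in FP16: passed through unchanged if it is already FP16,
+    // otherwise (FP32 only) converted element-wise for comparison with _refBlob.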
+    Blob::Ptr getFp16Blob(const Blob::Ptr& in) {
+        if (in->getTensorDesc().getPrecision() == Precision::FP16)
+            return in;
+
+        auto out = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, in->getTensorDesc().getDims(), in->getTensorDesc().getLayout()));
+        out->allocate();
+
+        if (in->getTensorDesc().getPrecision() == Precision::FP32) {
+            PrecisionUtils::f32tof16Arrays(out->buffer().as<ie_fp16 *>(), in->cbuffer().as<float *>(), in->size());
+        } else {
+            ADD_FAILURE() << "Unsupported precision " << in->getTensorDesc().getPrecision();
+        }
+
+        return out;
+    }
+
+    void RunOnModelWithParams(const std::string& model, const std::string& outputName,
+                              const PriorBoxParams& params, Precision outPrec = Precision::FP16) {
+        SetSeed(DEFAULT_SEED_VALUE + 5);
+
+        StatusCode st;
+
+        ASSERT_NO_THROW(readNetwork(model));
+
+        const auto& network = _cnnNetwork;
+
+        _inputsInfo = network.getInputsInfo();
+        _inputsInfo["data1"]->setPrecision(Precision::FP16);
+        _inputsInfo["data2"]->setPrecision(Precision::FP16);
+
+        _outputsInfo = network.getOutputsInfo();
+        _outputsInfo["data1_copy"]->setPrecision(Precision::FP16);
+        _outputsInfo["data2_copy"]->setPrecision(Precision::FP16);
+        _outputsInfo[outputName]->setPrecision(outPrec);
+
+        ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, {}, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+        ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr data1;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("data1", data1, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr data2;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("data2", data2, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        GenRandomData(data1);
+        GenRandomData(data2);
+
+        ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr outputBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob(outputName.c_str(), outputBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        _refBlob = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, outputBlob->getTensorDesc().getDims(), ANY));
+        _refBlob->allocate();
+
+        refPriorBox(_refBlob, params);
+
+        CompareCommonAbsolute(getFp16Blob(outputBlob), _refBlob, 0.0);
+    }
+
+    void RunOnModel(const std::string& model, const std::string& outputName, Precision outPrec = Precision::FP16) {
+        RunOnModelWithParams(model, outputName, PriorBoxParams(), outPrec);
+    }
+};
+
+TEST_F(myriadLayersPriorBoxTests_nightly, NotLastLayer)
+{
+    std::string model = R"V0G0N(
+        <net name="PriorBox" version="2" batch="1">
+            <layers>
+                <layer name="data1" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="11">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="data1_copy" type="Power" precision="FP16" id="2">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="21">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="22">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="data2" type="Input" precision="FP16" id="3">
+                    <output>
+                        <port id="31">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>38</dim>
+                            <dim>38</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="data2_copy" type="Power" precision="FP16" id="4">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="41">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>38</dim>
+                            <dim>38</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="42">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>38</dim>
+                            <dim>38</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="priorbox" type="PriorBox" precision="FP16" id="5">
+                    <data
+                        min_size="21.000000"
+                        max_size="45.000000"
+                        aspect_ratio="2.000000"
+                        flip="1"
+                        clip="0"
+                        variance="0.100000,0.100000,0.200000,0.200000"
+                        img_size="0"
+                        img_h="0"
+                        img_w="0"
+                        step="8.000000"
+                        step_h="0.000000"
+                        step_w="0.000000"
+                        offset="0.500000"
+                        width="#"
+                        height="#"
+                    />
+                    <input>
+                        <port id="51">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>38</dim>
+                            <dim>38</dim>
+                        </port>
+                        <port id="52">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="53">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>23104</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="priorbox_copy" type="Power" precision="FP16" id="6">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="61">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>23104</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="62">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>23104</dim>
+                        </port>
+                    </output>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="11" to-layer="2" to-port="21"/>
+                <edge from-layer="3" from-port="31" to-layer="4" to-port="41"/>
+                <edge from-layer="3" from-port="31" to-layer="5" to-port="51"/>
+                <edge from-layer="1" from-port="11" to-layer="5" to-port="52"/>
+                <edge from-layer="5" from-port="53" to-layer="6" to-port="61"/>
+            </edges>
+        </net>
+    )V0G0N";
+
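+    // Output size check: 38 * 38 cells, 4 priors per cell (ratios {1, 2, 1/2} with
+    // one min_size, plus one max_size prior), 4 coordinates: 38 * 38 * 4 * 4 = 23104.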
+    RunOnModel(model, "priorbox_copy");
+}
+
+TEST_F(myriadLayersPriorBoxTests_nightly, LastLayer_FP16)
+{
+    std::string model = R"V0G0N(
+        <net name="PriorBox" version="2" batch="1">
+            <layers>
+                <layer name="data1" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="11">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="data1_copy" type="Power" precision="FP16" id="2">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="21">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="22">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="data2" type="Input" precision="FP16" id="3">
+                    <output>
+                        <port id="31">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>38</dim>
+                            <dim>38</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="data2_copy" type="Power" precision="FP16" id="4">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="41">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>38</dim>
+                            <dim>38</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="42">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>38</dim>
+                            <dim>38</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="priorbox" type="PriorBox" precision="FP16" id="5">
+                    <data
+                        min_size="21.000000"
+                        max_size="45.000000"
+                        aspect_ratio="2.000000"
+                        flip="1"
+                        clip="0"
+                        variance="0.100000,0.100000,0.200000,0.200000"
+                        img_size="0"
+                        img_h="0"
+                        img_w="0"
+                        step="8.000000"
+                        step_h="0.000000"
+                        step_w="0.000000"
+                        offset="0.500000"
+                        width="#"
+                        height="#"
+                    />
+                    <input>
+                        <port id="51">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>38</dim>
+                            <dim>38</dim>
+                        </port>
+                        <port id="52">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="53">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>23104</dim>
+                        </port>
+                    </output>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="11" to-layer="2" to-port="21"/>
+                <edge from-layer="3" from-port="31" to-layer="4" to-port="41"/>
+                <edge from-layer="3" from-port="31" to-layer="5" to-port="51"/>
+                <edge from-layer="1" from-port="11" to-layer="5" to-port="52"/>
+            </edges>
+        </net>
+    )V0G0N";
+
+    RunOnModel(model, "priorbox", Precision::FP16);
+}
+
+TEST_F(myriadLayersPriorBoxTests_nightly, LastLayer_FP32)
+{
+    std::string model = R"V0G0N(
+        <net name="PriorBox" version="2" batch="1">
+            <layers>
+                <layer name="data1" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="11">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="data1_copy" type="Power" precision="FP16" id="2">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="21">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="22">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="data2" type="Input" precision="FP16" id="3">
+                    <output>
+                        <port id="31">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>38</dim>
+                            <dim>38</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="data2_copy" type="Power" precision="FP16" id="4">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="41">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>38</dim>
+                            <dim>38</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="42">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>38</dim>
+                            <dim>38</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="priorbox" type="PriorBox" precision="FP16" id="5">
+                    <data
+                        min_size="21.000000"
+                        max_size="45.000000"
+                        aspect_ratio="2.000000"
+                        flip="1"
+                        clip="0"
+                        variance="0.100000,0.100000,0.200000,0.200000"
+                        img_size="0"
+                        img_h="0"
+                        img_w="0"
+                        step="8.000000"
+                        step_h="0.000000"
+                        step_w="0.000000"
+                        offset="0.500000"
+                        width="#"
+                        height="#"
+                    />
+                    <input>
+                        <port id="51">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>38</dim>
+                            <dim>38</dim>
+                        </port>
+                        <port id="52">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="53">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>23104</dim>
+                        </port>
+                    </output>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="11" to-layer="2" to-port="21"/>
+                <edge from-layer="3" from-port="31" to-layer="4" to-port="41"/>
+                <edge from-layer="3" from-port="31" to-layer="5" to-port="51"/>
+                <edge from-layer="1" from-port="11" to-layer="5" to-port="52"/>
+            </edges>
+        </net>
+    )V0G0N";
+
+    RunOnModel(model, "priorbox", Precision::FP32);
+}
+
+TEST_F(myriadLayersTests_nightly, PriorBox_WithConcat)
+{
+    std::string model = R"V0G0N(
+        <net name="PriorBox_WithConcat" version="2" batch="1">
+            <layers>
+                <layer name="input" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="input_copy" type="Power" precision="FP16" id="2">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="2">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="3">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </output>
+                </layer>
+
+                <layer name="conv4_3_norm" type="Input" precision="FP16" id="3">
+                    <output>
+                        <port id="4">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>38</dim>
+                            <dim>38</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="conv4_3_norm_copy" type="Power" precision="FP16" id="4">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="5">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>38</dim>
+                            <dim>38</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="6">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>38</dim>
+                            <dim>38</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="conv4_3_norm_mbox_priorbox" type="PriorBox" precision="FP16" id="5">
+                    <data
+                        min_size="21.000000"
+                        max_size="45.000000"
+                        aspect_ratio="2.000000"
+                        flip="1"
+                        clip="0"
+                        variance="0.100000,0.100000,0.200000,0.200000"
+                        img_size="0"
+                        img_h="0"
+                        img_w="0"
+                        step="8.000000"
+                        step_h="0.000000"
+                        step_w="0.000000"
+                        offset="0.500000"
+                        width="#"
+                        height="#"
+                    />
+                    <input>
+                        <port id="7">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>38</dim>
+                            <dim>38</dim>
+                        </port>
+                        <port id="8">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="9">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>23104</dim>
+                        </port>
+                    </output>
+                </layer>
+
+                <layer name="fc7" type="Input" precision="FP16" id="6">
+                    <output>
+                        <port id="10">
+                            <dim>1</dim>
+                            <dim>1024</dim>
+                            <dim>19</dim>
+                            <dim>19</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="fc7_copy" type="Power" precision="FP16" id="7">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="11">
+                            <dim>1</dim>
+                            <dim>1024</dim>
+                            <dim>19</dim>
+                            <dim>19</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="12">
+                            <dim>1</dim>
+                            <dim>1024</dim>
+                            <dim>19</dim>
+                            <dim>19</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="fc7_mbox_priorbox" type="PriorBox" precision="FP16" id="8">
+                    <data
+                        min_size="45.000000"
+                        max_size="99.000000"
+                        aspect_ratio="2.000000,3.000000"
+                        flip="1"
+                        clip="0"
+                        variance="0.100000,0.100000,0.200000,0.200000"
+                        img_size="0"
+                        img_h="0"
+                        img_w="0"
+                        step="16.000000"
+                        step_h="0.000000"
+                        step_w="0.000000"
+                        offset="0.500000"
+                        width="#"
+                        height="#"
+                    />
+                    <input>
+                        <port id="13">
+                            <dim>1</dim>
+                            <dim>1024</dim>
+                            <dim>19</dim>
+                            <dim>19</dim>
+                        </port>
+                        <port id="14">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="15">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>8664</dim>
+                        </port>
+                    </output>
+                </layer>
+
+                <layer name="conv6_2" type="Input" precision="FP16" id="9">
+                    <output>
+                        <port id="16">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>10</dim>
+                            <dim>10</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="conv6_2_copy" type="Power" precision="FP16" id="10">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="17">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>10</dim>
+                            <dim>10</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="18">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>10</dim>
+                            <dim>10</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="conv6_2_mbox_priorbox" type="PriorBox" precision="FP16" id="11">
+                    <data
+                        min_size="99.000000"
+                        max_size="153.000000"
+                        aspect_ratio="2.000000,3.000000"
+                        flip="1"
+                        clip="0"
+                        variance="0.100000,0.100000,0.200000,0.200000"
+                        img_size="0"
+                        img_h="0"
+                        img_w="0"
+                        step="32.000000"
+                        step_h="0.000000"
+                        step_w="0.000000"
+                        offset="0.500000"
+                        width="#"
+                        height="#"
+                    />
+                    <input>
+                        <port id="19">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>10</dim>
+                            <dim>10</dim>
+                        </port>
+                        <port id="20">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="21">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>2400</dim>
+                        </port>
+                    </output>
+                </layer>
+
+                <layer name="conv7_2" type="Input" precision="FP16" id="12">
+                    <output>
+                        <port id="22">
+                            <dim>1</dim>
+                            <dim>256</dim>
+                            <dim>5</dim>
+                            <dim>5</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="conv7_2_copy" type="Power" precision="FP16" id="13">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="23">
+                            <dim>1</dim>
+                            <dim>256</dim>
+                            <dim>5</dim>
+                            <dim>5</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="24">
+                            <dim>1</dim>
+                            <dim>256</dim>
+                            <dim>5</dim>
+                            <dim>5</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="conv7_2_mbox_priorbox" type="PriorBox" precision="FP16" id="14">
+                    <data
+                        min_size="153.000000"
+                        max_size="207.000000"
+                        aspect_ratio="2.000000,3.000000"
+                        flip="1"
+                        clip="0"
+                        variance="0.100000,0.100000,0.200000,0.200000"
+                        img_size="0"
+                        img_h="0"
+                        img_w="0"
+                        step="64.000000"
+                        step_h="0.000000"
+                        step_w="0.000000"
+                        offset="0.500000"
+                        width="#"
+                        height="#"
+                    />
+                    <input>
+                        <port id="25">
+                            <dim>1</dim>
+                            <dim>256</dim>
+                            <dim>5</dim>
+                            <dim>5</dim>
+                        </port>
+                        <port id="26">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="27">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>600</dim>
+                        </port>
+                    </output>
+                </layer>
+
+                <layer name="conv8_2" type="Input" precision="FP16" id="15">
+                    <output>
+                        <port id="28">
+                            <dim>1</dim>
+                            <dim>256</dim>
+                            <dim>3</dim>
+                            <dim>3</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="conv8_2_copy" type="Power" precision="FP16" id="16">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="29">
+                            <dim>1</dim>
+                            <dim>256</dim>
+                            <dim>3</dim>
+                            <dim>3</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="30">
+                            <dim>1</dim>
+                            <dim>256</dim>
+                            <dim>3</dim>
+                            <dim>3</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="conv8_2_mbox_priorbox" type="PriorBox" precision="FP16" id="17">
+                    <data
+                        min_size="207.000000"
+                        max_size="261.000000"
+                        aspect_ratio="2.000000"
+                        flip="1"
+                        clip="0"
+                        variance="0.100000,0.100000,0.200000,0.200000"
+                        img_size="0"
+                        img_h="0"
+                        img_w="0"
+                        step="100.000000"
+                        step_h="0.000000"
+                        step_w="0.000000"
+                        offset="0.500000"
+                        width="#"
+                        height="#"
+                    />
+                    <input>
+                        <port id="31">
+                            <dim>1</dim>
+                            <dim>256</dim>
+                            <dim>3</dim>
+                            <dim>3</dim>
+                        </port>
+                        <port id="32">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="33">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>144</dim>
+                        </port>
+                    </output>
+                </layer>)V0G0N";
+
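+    // The IR is split across two raw-string literals, presumably to keep each
+    // literal within compiler limits on string-literal length.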
+    model += R"V0G0N(
+                <layer name="conv9_2" type="Input" precision="FP16" id="18">
+                    <output>
+                        <port id="34">
+                            <dim>1</dim>
+                            <dim>256</dim>
+                            <dim>1</dim>
+                            <dim>1</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="conv9_2_copy" type="Power" precision="FP16" id="19">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="35">
+                            <dim>1</dim>
+                            <dim>256</dim>
+                            <dim>1</dim>
+                            <dim>1</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="36">
+                            <dim>1</dim>
+                            <dim>256</dim>
+                            <dim>1</dim>
+                            <dim>1</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="conv9_2_mbox_priorbox" type="PriorBox" precision="FP16" id="20">
+                    <data
+                        min_size="261.000000"
+                        max_size="315.000000"
+                        aspect_ratio="2.000000"
+                        flip="1"
+                        clip="0"
+                        variance="0.100000,0.100000,0.200000,0.200000"
+                        img_size="0"
+                        img_h="0"
+                        img_w="0"
+                        step="300.000000"
+                        step_h="0.000000"
+                        step_w="0.000000"
+                        offset="0.500000"
+                        width="#"
+                        height="#"
+                    />
+                    <input>
+                        <port id="37">
+                            <dim>1</dim>
+                            <dim>256</dim>
+                            <dim>1</dim>
+                            <dim>1</dim>
+                        </port>
+                        <port id="38">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="39">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>16</dim>
+                        </port>
+                    </output>
+                </layer>
+
+                <layer name="mbox_priorbox" type="Concat" precision="FP16" id="21">
+                    <concat_data axis="2"/>
+                    <input>
+                        <port id="40">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>23104</dim>
+                        </port>
+                        <port id="41">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>8664</dim>
+                        </port>
+                        <port id="42">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>2400</dim>
+                        </port>
+                        <port id="43">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>600</dim>
+                        </port>
+                        <port id="44">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>144</dim>
+                        </port>
+                        <port id="45">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>16</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="46">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>34928</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="mbox_priorbox_copy" type="Power" precision="FP16" id="22">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="47">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>34928</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="48">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>34928</dim>
+                        </port>
+                    </output>
+                </layer>
+            </layers>
+
+            <edges>
+                <!-- input > input_copy -->
+                <edge from-layer="1" from-port="1" to-layer="2" to-port="2"/>
+
+                <!-- conv4_3_norm > conv4_3_norm_copy -->
+                <edge from-layer="3" from-port="4" to-layer="4" to-port="5"/>
+
+                <!-- conv4_3_norm > conv4_3_norm_mbox_priorbox -->
+                <edge from-layer="3" from-port="4" to-layer="5" to-port="7"/>
+                <!-- input > conv4_3_norm_mbox_priorbox -->
+                <edge from-layer="1" from-port="1" to-layer="5" to-port="8"/>
+
+                <!-- fc7 > fc7_copy -->
+                <edge from-layer="6" from-port="10" to-layer="7" to-port="11"/>
+
+                <!-- fc7 > fc7_mbox_priorbox -->
+                <edge from-layer="6" from-port="10" to-layer="8" to-port="13"/>
+                <!-- input > fc7_mbox_priorbox -->
+                <edge from-layer="1" from-port="1" to-layer="8" to-port="14"/>
+
+                <!-- conv6_2 > conv6_2_copy -->
+                <edge from-layer="9" from-port="16" to-layer="10" to-port="17"/>
+
+                <!-- conv6_2 > conv6_2_mbox_priorbox -->
+                <edge from-layer="9" from-port="16" to-layer="11" to-port="19"/>
+                <!-- input > conv6_2_mbox_priorbox -->
+                <edge from-layer="1" from-port="1" to-layer="11" to-port="20"/>
+
+                <!-- conv7_2 > conv7_2_copy -->
+                <edge from-layer="12" from-port="22" to-layer="13" to-port="23"/>
+
+                <!-- conv7_2 > conv7_2_mbox_priorbox -->
+                <edge from-layer="12" from-port="22" to-layer="14" to-port="25"/>
+                <!-- input > conv7_2_mbox_priorbox -->
+                <edge from-layer="1" from-port="1" to-layer="14" to-port="26"/>
+
+                <!-- conv8_2 > conv8_2_copy -->
+                <edge from-layer="15" from-port="28" to-layer="16" to-port="29"/>
+
+                <!-- conv8_2 > conv8_2_mbox_priorbox -->
+                <edge from-layer="15" from-port="28" to-layer="17" to-port="31"/>
+                <!-- input > conv8_2_mbox_priorbox -->
+                <edge from-layer="1" from-port="1" to-layer="17" to-port="32"/>
+
+                <!-- conv9_2 > conv9_2_copy -->
+                <edge from-layer="18" from-port="34" to-layer="19" to-port="35"/>
+
+                <!-- conv9_2 > conv9_2_mbox_priorbox -->
+                <edge from-layer="18" from-port="34" to-layer="20" to-port="37"/>
+                <!-- input > conv9_2_mbox_priorbox -->
+                <edge from-layer="1" from-port="1" to-layer="20" to-port="38"/>
+
+                <!-- conv4_3_norm_mbox_priorbox > mbox_priorbox -->
+                <edge from-layer="5" from-port="9" to-layer="21" to-port="40"/>
+                <!-- fc7_mbox_priorbox > mbox_priorbox -->
+                <edge from-layer="8" from-port="15" to-layer="21" to-port="41"/>
+                <!-- conv6_2_mbox_priorbox > mbox_priorbox -->
+                <edge from-layer="11" from-port="21" to-layer="21" to-port="42"/>
+                <!-- conv7_2_mbox_priorbox > mbox_priorbox -->
+                <edge from-layer="14" from-port="27" to-layer="21" to-port="43"/>
+                <!-- conv8_2_mbox_priorbox > mbox_priorbox -->
+                <edge from-layer="17" from-port="33" to-layer="21" to-port="44"/>
+                <!-- conv9_2_mbox_priorbox > mbox_priorbox -->
+                <edge from-layer="20" from-port="39" to-layer="21" to-port="45"/>
+
+                <!-- mbox_priorbox > mbox_priorbox_copy -->
+                <edge from-layer="21" from-port="46" to-layer="22" to-port="47"/>
+            </edges>
+        </net>
+    )V0G0N";
+
+    StatusCode st;
+
+    ASSERT_NO_THROW(readNetwork(model));
+
+    const auto& network = _cnnNetwork;
+
+    _inputsInfo = network.getInputsInfo();
+    _inputsInfo["input"]->setPrecision(Precision::FP16);
+    _inputsInfo["conv4_3_norm"]->setPrecision(Precision::FP16);
+    _inputsInfo["fc7"]->setPrecision(Precision::FP16);
+    _inputsInfo["conv6_2"]->setPrecision(Precision::FP16);
+    _inputsInfo["conv7_2"]->setPrecision(Precision::FP16);
+    _inputsInfo["conv8_2"]->setPrecision(Precision::FP16);
+    _inputsInfo["conv9_2"]->setPrecision(Precision::FP16);
+
+    _outputsInfo = network.getOutputsInfo();
+    _outputsInfo["input_copy"]->setPrecision(Precision::FP16);
+    _outputsInfo["conv4_3_norm_copy"]->setPrecision(Precision::FP16);
+    _outputsInfo["fc7_copy"]->setPrecision(Precision::FP16);
+    _outputsInfo["conv6_2_copy"]->setPrecision(Precision::FP16);
+    _outputsInfo["conv7_2_copy"]->setPrecision(Precision::FP16);
+    _outputsInfo["conv8_2_copy"]->setPrecision(Precision::FP16);
+    _outputsInfo["conv9_2_copy"]->setPrecision(Precision::FP16);
+    _outputsInfo["mbox_priorbox_copy"]->setPrecision(Precision::FP16);
+
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, {}, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    // TODO: uncomment this check once GraphTransformer is updated
+    // to optimize out the extra copies for the PriorBox+Concat pair.
+#if 0
+    {
+        std::map<std::string, InferenceEngineProfileInfo> perfMap;
+        ASSERT_NO_THROW(st = _inferRequest->GetPerformanceCounts(perfMap, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        int count = 0;
+        for (auto p : perfMap) {
+            auto layerName = p.first;
+            auto status = p.second.status;
+            if (layerName.find("mbox_priorbox@copy") == 0) {
+                EXPECT_EQ(InferenceEngineProfileInfo::OPTIMIZED_OUT, status) << layerName;
+                ++count;
+            }
+        }
+        EXPECT_EQ(6, count);
+    }
+#endif
+
+    Blob::Ptr outputBlob;
+    ASSERT_NO_THROW(_inferRequest->GetBlob("mbox_priorbox_copy", outputBlob, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    auto conv4_3_norm_mbox_priorbox = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, {1, 2, 23104}, Layout::ANY));
+    {
+        conv4_3_norm_mbox_priorbox->allocate();
+
+        PriorBoxParams params;
+        params.in1 = {1, 512, 38, 38};
+        params.in2 = {1, 3, 300, 300};
+        params.min_size = {21.0};
+        params.max_size = {45.0};
+        params.aspect_ratio = {2.0};
+        params.flip = 1;
+        params.clip = 0;
+        params.variance = {0.1f, 0.1f, 0.2f, 0.2f};
+        params.img_size = 0;
+        params.img_h = 0;
+        params.img_w = 0;
+        params.step_ = 8.0;
+        params.step_h = 0.0;
+        params.step_w = 0.0;
+        params.offset = 0.5;
+
+        refPriorBox(conv4_3_norm_mbox_priorbox, params);
+    }
+
+    auto fc7_mbox_priorbox = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, {1, 2, 8664}, Layout::ANY));
+    {
+        fc7_mbox_priorbox->allocate();
+
+        PriorBoxParams params;
+        params.in1 = {1, 1024, 19, 19};
+        params.in2 = {1, 3, 300, 300};
+        params.min_size = {45.0};
+        params.max_size = {99.0};
+        params.aspect_ratio = {2.0, 3.0};
+        params.flip = 1;
+        params.clip = 0;
+        params.variance = {0.1f, 0.1f, 0.2f, 0.2f};
+        params.img_size = 0;
+        params.img_h = 0;
+        params.img_w = 0;
+        params.step_ = 16.0;
+        params.step_h = 0.0;
+        params.step_w = 0.0;
+        params.offset = 0.5;
+
+        refPriorBox(fc7_mbox_priorbox, params);
+    }
+
+    auto conv6_2_mbox_priorbox = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, {1, 2, 2400}, Layout::ANY));
+    {
+        conv6_2_mbox_priorbox->allocate();
+
+        PriorBoxParams params;
+        params.in1 = {1, 512, 10, 10};
+        params.in2 = {1, 3, 300, 300};
+        params.min_size = {99.0};
+        params.max_size = {153.0};
+        params.aspect_ratio = {2.0, 3.0};
+        params.flip = 1;
+        params.clip = 0;
+        params.variance = {0.1f, 0.1f, 0.2f, 0.2f};
+        params.img_size = 0;
+        params.img_h = 0;
+        params.img_w = 0;
+        params.step_ = 32.0;
+        params.step_h = 0.0;
+        params.step_w = 0.0;
+        params.offset = 0.5;
+
+        refPriorBox(conv6_2_mbox_priorbox, params);
+    }
+
+    auto conv7_2_mbox_priorbox = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, {1, 2, 600}, Layout::ANY));
+    {
+        conv7_2_mbox_priorbox->allocate();
+
+        PriorBoxParams params;
+        params.in1 = {1, 256, 5, 5};
+        params.in2 = {1, 3, 300, 300};
+        params.min_size = {153.0};
+        params.max_size = {207.0};
+        params.aspect_ratio = {2.0, 3.0};
+        params.flip = 1;
+        params.clip = 0;
+        params.variance = {0.1f, 0.1f, 0.2f, 0.2f};
+        params.img_size = 0;
+        params.img_h = 0;
+        params.img_w = 0;
+        params.step_ = 64.0;
+        params.step_h = 0.0;
+        params.step_w = 0.0;
+        params.offset = 0.5;
+
+        refPriorBox(conv7_2_mbox_priorbox, params);
+    }
+
+    auto conv8_2_mbox_priorbox = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, {1, 2, 144}, Layout::ANY));
+    {
+        conv8_2_mbox_priorbox->allocate();
+
+        PriorBoxParams params;
+        params.in1 = {1, 256, 3, 3};
+        params.in2 = {1, 3, 300, 300};
+        params.min_size = {207.0};
+        params.max_size = {261.0};
+        params.aspect_ratio = {2.0};
+        params.flip = 1;
+        params.clip = 0;
+        params.variance = {0.1f, 0.1f, 0.2f, 0.2f};
+        params.img_size = 0;
+        params.img_h = 0;
+        params.img_w = 0;
+        params.step_ = 100.0;
+        params.step_h = 0.0;
+        params.step_w = 0.0;
+        params.offset = 0.5;
+
+        refPriorBox(conv8_2_mbox_priorbox, params);
+    }
+
+    auto conv9_2_mbox_priorbox = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, {1, 2, 16}, Layout::ANY));
+    {
+        conv9_2_mbox_priorbox->allocate();
+
+        PriorBoxParams params;
+        params.in1 = {1, 256, 1, 1};
+        params.in2 = {1, 3, 300, 300};
+        params.min_size = {261.0};
+        params.max_size = {315.0};
+        params.aspect_ratio = {2.0};
+        params.flip = 1;
+        params.clip = 0;
+        params.variance = {0.1f, 0.1f, 0.2f, 0.2f};
+        params.img_size = 0;
+        params.img_h = 0;
+        params.img_w = 0;
+        params.step_ = 300.0;
+        params.step_h = 0.0;
+        params.step_w = 0.0;
+        params.offset = 0.5;
+
+        refPriorBox(conv9_2_mbox_priorbox, params);
+    }
+
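+    // 34928 = 23104 + 8664 + 2400 + 600 + 144 + 16, the six PriorBox outputs concatenated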
+    _refBlob = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, {1, 2, 34928}, ANY));
+    _refBlob->allocate();
+    {
+        ie_fp16* dst_ptr = _refBlob->buffer().as<ie_fp16*>();
+        int dst_stride = _refBlob->getTensorDesc().getDims().back();
+
+        int dst_offset = 0;
+
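+        // Emulate the Concat layer along the last dimension: copy both rows of
+        // each source blob into the reference at the running column offset.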
+        auto concat = [&](const Blob::Ptr& src) {
+            const ie_fp16* src_ptr = src->cbuffer().as<const ie_fp16*>();
+            int num = src->getTensorDesc().getDims().back();
+
+            for (int y = 0; y < 2; ++y) {
+                for (int x = 0; x < num; ++x) {
+                    dst_ptr[dst_offset + x + y * dst_stride] = src_ptr[x + y * num];
+                }
+            }
+
+            dst_offset += num;
+        };
+
+        concat(conv4_3_norm_mbox_priorbox);
+        concat(fc7_mbox_priorbox);
+        concat(conv6_2_mbox_priorbox);
+        concat(conv7_2_mbox_priorbox);
+        concat(conv8_2_mbox_priorbox);
+        concat(conv9_2_mbox_priorbox);
+    }
+
+    CompareCommonAbsolute(_refBlob, outputBlob, 0.0);
+}
+
+TEST_F(myriadLayersPriorBoxTests_nightly, FaceBoxLayer)
+{
+    std::string model = R"V0G0N(
+        <net name="PriorBox" version="2" batch="1">
+            <layers>
+                <layer name="data1" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="11">
+                            <dim>1</dim>
+                            <dim>128</dim>
+                            <dim>32</dim>
+                            <dim>32</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="data1_copy" type="Power" precision="FP16" id="2">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="21">
+                            <dim>1</dim>
+                            <dim>128</dim>
+                            <dim>32</dim>
+                            <dim>32</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="22">
+                            <dim>1</dim>
+                            <dim>128</dim>
+                            <dim>32</dim>
+                            <dim>32</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="data2" type="Input" precision="FP16" id="3">
+                    <output>
+                        <port id="31">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>1024</dim>
+                            <dim>1024</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="data2_copy" type="Power" precision="FP16" id="4">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="41">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>1024</dim>
+                            <dim>1024</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="42">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>1024</dim>
+                            <dim>1024</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="priorbox" type="PriorBox" precision="FP16" id="5">
+                    <data
+                        aspect_ratio=""
+                        clip="0"
+                        density="4.0,2.0,1.0"
+                        fixed_ratio=""
+                        fixed_size="32.0,64.0,128.0"
+                        flip="1"
+                        max_size=""
+                        min_size=""
+                        offset="0.5"
+                        step="32.0"
+                        variance="0.10000000149011612,0.10000000149011612,0.20000000298023224,0.20000000298023224"
+                    />
+                    <input>
+                        <port id="51">
+                            <dim>1</dim>
+                            <dim>128</dim>
+                            <dim>32</dim>
+                            <dim>32</dim>
+                        </port>
+                        <port id="52">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>1024</dim>
+                            <dim>1024</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="53">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>86016</dim>
+                        </port>
+                    </output>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="11" to-layer="2" to-port="21"/>
+                <edge from-layer="3" from-port="31" to-layer="4" to-port="41"/>
+                <edge from-layer="1" from-port="11" to-layer="5" to-port="51"/>
+                <edge from-layer="3" from-port="31" to-layer="5" to-port="52"/>
+            </edges>
+        </net>
+    )V0G0N";
+
+    PriorBoxParams params;
+    params.in1 = {1, 128, 32, 32};
+    params.in2 = {1, 3, 1024, 1024};
+    params.min_size = {};
+    params.max_size = {};
+    params.aspect_ratio = {};
+    params.flip = 1;
+    params.clip = 0;
+    params.variance = {0.10000000149011612, 0.10000000149011612, 0.20000000298023224, 0.20000000298023224};
+    params.img_size = 0;
+    params.img_h = 0;
+    params.img_w = 0;
+    params.step_ = 32.0;
+    params.step_h = 0.0;
+    params.step_w = 0.0;
+    params.offset = 0.5;
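+    // FaceBox-style prior boxes: fixed sizes with per-size densities instead of
+    // min/max sizes; priors per cell = 4^2 + 2^2 + 1^2 = 21, so the output holds
+    // 32 * 32 * 21 * 4 = 86016 values.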
+    params.density = {4.0, 2.0, 1.0};
+    params.fixed_sizes = {32.0, 64.0, 128.0};
+    params.fixed_ratios = {};
+
+    RunOnModelWithParams(model, "priorbox", params, Precision::FP16);
+}
+
+TEST_F(myriadLayersPriorBoxTests_nightly, TwoPriorBoxLayersWithUnusedInput)
+{
+    std::string model = R"V0G0N(
+        <net name="PriorBox" version="2" batch="1">
+            <layers>
+                <layer name="data1" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="11">
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="data1_reshaped" type="Reshape" precision="FP16" id="2">
+                    <input>
+                        <port id="21">
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="22">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="data1_copy" type="Power" precision="FP16" id="3">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="31">
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="32">
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="data2" type="Input" precision="FP16" id="4">
+                    <output>
+                        <port id="41">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>38</dim>
+                            <dim>38</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="data2_copy" type="Power" precision="FP16" id="5">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="51">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>38</dim>
+                            <dim>38</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="52">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>38</dim>
+                            <dim>38</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="priorbox1" type="PriorBox" precision="FP16" id="6">
+                    <data
+                        min_size="21.000000"
+                        max_size="45.000000"
+                        aspect_ratio="2.000000"
+                        flip="1"
+                        clip="0"
+                        variance="0.100000,0.100000,0.200000,0.200000"
+                        img_size="0"
+                        img_h="0"
+                        img_w="0"
+                        step="8.000000"
+                        step_h="0.000000"
+                        step_w="0.000000"
+                        offset="0.500000"
+                        width="#"
+                        height="#"
+                    />
+                    <input>
+                        <port id="61">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>38</dim>
+                            <dim>38</dim>
+                        </port>
+                        <port id="62">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="63">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>23104</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="priorbox2" type="PriorBox" precision="FP16" id="7">
+                    <data
+                        min_size="21.000000"
+                        max_size="45.000000"
+                        aspect_ratio="2.000000"
+                        flip="1"
+                        clip="0"
+                        variance="0.100000,0.100000,0.200000,0.200000"
+                        img_size="0"
+                        img_h="0"
+                        img_w="0"
+                        step="8.000000"
+                        step_h="0.000000"
+                        step_w="0.000000"
+                        offset="0.500000"
+                        width="#"
+                        height="#"
+                    />
+                    <input>
+                        <port id="71">
+                            <dim>1</dim>
+                            <dim>512</dim>
+                            <dim>38</dim>
+                            <dim>38</dim>
+                        </port>
+                        <port id="72">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>300</dim>
+                            <dim>300</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="73">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>23104</dim>
+                        </port>
+                    </output>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="11" to-layer="2" to-port="21"/>
+                <edge from-layer="1" from-port="11" to-layer="3" to-port="31"/>
+                <edge from-layer="4" from-port="41" to-layer="5" to-port="51"/>
+                <edge from-layer="4" from-port="41" to-layer="6" to-port="61"/>
+                <edge from-layer="4" from-port="41" to-layer="7" to-port="71"/>
+                <edge from-layer="2" from-port="22" to-layer="6" to-port="62"/>
+                <edge from-layer="2" from-port="22" to-layer="7" to-port="72"/>
+            </edges>
+        </net>
+    )V0G0N";
+
+    SetSeed(DEFAULT_SEED_VALUE + 5);
+
+    StatusCode st;
+
+    ASSERT_NO_THROW(readNetwork(model));
+
+    const auto& network = _cnnNetwork;
+
+    _inputsInfo = network.getInputsInfo();
+    _inputsInfo["data1"]->setPrecision(Precision::FP16);
+    _inputsInfo["data2"]->setPrecision(Precision::FP16);
+
+    _outputsInfo = network.getOutputsInfo();
+    _outputsInfo["data1_copy"]->setPrecision(Precision::FP16);
+    _outputsInfo["data2_copy"]->setPrecision(Precision::FP16);
+    _outputsInfo["priorbox1"]->setPrecision(Precision::FP16);
+    _outputsInfo["priorbox2"]->setPrecision(Precision::FP16);
+
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, {}, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr data1;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("data1", data1, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr data2;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("data2", data2, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    GenRandomData(data1);
+    GenRandomData(data2);
+
+    ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr outputBlob1;
+    Blob::Ptr outputBlob2;
+    ASSERT_NO_THROW(_inferRequest->GetBlob("priorbox1", outputBlob1, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NO_THROW(_inferRequest->GetBlob("priorbox2", outputBlob2, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    _refBlob = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, outputBlob1->getTensorDesc().getDims(), ANY));
+    _refBlob->allocate();
+
+    refPriorBox(_refBlob, PriorBoxParams());
+
+    CompareCommonAbsolute(getFp16Blob(outputBlob1), _refBlob, 0.0);
+    CompareCommonAbsolute(getFp16Blob(outputBlob2), _refBlob, 0.0);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_proposal_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_proposal_test.cpp
new file mode 100644 (file)
index 0000000..e70318b
--- /dev/null
@@ -0,0 +1,616 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <cmath>
+#include <gtest/gtest.h>
+#include "myriad_layers_tests.hpp"
+#include <blob_factory.hpp>
+
+using namespace InferenceEngine;
+
+#define PROPOSAL_ELEMENT_SIZE (5)   // batch_num, left, top, right, bottom
+#define OUTPUT_SAMPLING_NUM   (20)  // Validate only top 20 rois
+#define OUTPUT_ROI_MATCH_THRESHOLD   (18)  // At least 18 rois should be matched
+
+class myriadLayersTestsProposal_nightly : public myriadLayersTests_nightly {
+protected:
+    std::string model;
+    std::string cls_prob_file;
+    std::string bbox_pred_file;
+    Blob::Ptr outputBlob;
+
+    // TODO: turn these settings into test parameters (TEST_P)
+    std::string clip_before_nms = "true";
+    std::string clip_after_nms = "false";
+    std::string normalize = "false";
+
+    /**
+     * MKLDNN can be used to generate the reference, but even in HETERO:CPU,CPU
+     * mode it cannot handle FP16 input.
+     */
+    Precision precision = Precision::FP16;  // (or FP32)
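+    /**
+     * A possible way to generate such a reference with the CPU (MKLDNN) plugin -
+     * a sketch only, assuming the IE Core API (the names are illustrative):
+     *   InferenceEngine::Core ie;
+     *   auto cpuExec = ie.LoadNetwork(_cnnNetwork, "CPU");  // feed FP32 inputs
+     *   auto cpuRequest = cpuExec.CreateInferRequest();
+     */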
+
+public:
+    void InferProposalLayer() {
+        SetSeed(DEFAULT_SEED_VALUE + 13);
+
+        StatusCode st;
+
+        REPLACE_WITH_STR(model, "__PRECISION__", precision.name());
+
+        REPLACE_WITH_STR(model, "__CLIP_BEFORE_NMS__", clip_before_nms);
+        REPLACE_WITH_STR(model, "__CLIP_AFTER_NMS__", clip_after_nms);
+        REPLACE_WITH_STR(model, "__NORMALIZE__", normalize);
+
+        ASSERT_NO_THROW(readNetwork(model));
+
+        const auto& network = _cnnNetwork;
+
+        _inputsInfo = network.getInputsInfo();
+        _inputsInfo["rpn_cls_prob_reshape"]->setPrecision(precision);
+        _inputsInfo["rpn_cls_prob_reshape"]->setLayout(NCHW);
+        _inputsInfo["rpn_bbox_pred"]->setPrecision(precision);
+        _inputsInfo["rpn_bbox_pred"]->setLayout(NCHW);
+        _inputsInfo["im_info"]->setPrecision(precision);
+
+        _outputsInfo = network.getOutputsInfo();
+        _outputsInfo["proposal"]->setPrecision(precision);
+        _outputsInfo["proposal"]->setLayout(NC);
+
+        ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, {}, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+        ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr rpn_cls_prob_reshape;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("rpn_cls_prob_reshape", rpn_cls_prob_reshape, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr rpn_bbox_pred;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("rpn_bbox_pred", rpn_bbox_pred, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr img_info;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("im_info", img_info, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        // for rpn_cls_prob_reshape
+        std::string inputTensor1Binary = TestDataHelpers::get_data_path() + cls_prob_file;
+        ASSERT_TRUE(fromBinaryFile(inputTensor1Binary, rpn_cls_prob_reshape));
+
+        // rpn_bbox_pred
+        std::string inputTensor2Binary = TestDataHelpers::get_data_path() + bbox_pred_file;
+        ASSERT_TRUE(fromBinaryFile(inputTensor2Binary, rpn_bbox_pred));
+
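+        // im_info = {input image height, input image width, scale}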
+        if (precision == Precision::FP16) {
+            ie_fp16 *img_info_data = img_info->buffer().as<ie_fp16 *>();
+            img_info_data[0] = PrecisionUtils::f32tof16(224.f);
+            img_info_data[1] = PrecisionUtils::f32tof16(224.f);
+            img_info_data[2] = PrecisionUtils::f32tof16(1.0f);
+        }
+        if (precision == Precision::FP32) {
+            float *img_info_data = img_info->buffer().as<float *>();
+            img_info_data[0] = 224.f;
+            img_info_data[1] = 224.f;
+            img_info_data[2] = 1.0f;
+        }
+
+
+        ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        ASSERT_NO_THROW(_inferRequest->GetBlob("proposal", outputBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    }
+
+    void compareOutputSampleToRef(const std::vector<float>& gt_values, const float error_threshold) {
+        const int gt_values_size = gt_values.size();
+        auto tmpTensorDesc = outputBlob->getTensorDesc();
+        tmpTensorDesc.getDims()[tmpTensorDesc.getDims().size() - 2] = OUTPUT_SAMPLING_NUM;
+
+        // Generate GT
+        _refBlob = make_blob_with_precision(tmpTensorDesc);
+        _refBlob->allocate();
+
+        if (precision != Precision::FP32) {
+            std::transform(std::begin(gt_values), std::end(gt_values), _refBlob->buffer().as<ie_fp16 *>(),
+                           [](float gt) -> ie_fp16 { return PrecisionUtils::f32tof16(gt); });
+        } else {
+            std::copy(std::begin(gt_values), std::end(gt_values), _refBlob->buffer().as<float *>());
+        }
+
+        // Sampling top 20 results from output
+        Blob::Ptr outputSample = make_blob_with_precision(tmpTensorDesc);
+        outputSample->allocate();
+
+        if (precision != Precision::FP32)
+            std::copy_n(outputBlob->cbuffer().as<const ie_fp16*>(), gt_values_size, outputSample->buffer().as<ie_fp16*>());
+        else
+            std::copy_n(outputBlob->cbuffer().as<const float *>(), gt_values_size, outputSample->buffer().as<float *>());
+
+        CompareCommonAbsolute(outputSample, _refBlob, error_threshold);
+    }
+
+    int calcIoU(const std::vector<float>& gt_values) {
+        // Validate top 20 rois with GT
+        const float iou_threshold = 0.7f;
+        const int num_gt_rois = 100;
+        const int num_roi_elem = 4;  // skip the batch index, use only the four box coordinates
+        const auto actual_ptr = outputBlob->cbuffer().as<ie_fp16*>();
+        const auto actual_ptr_float = outputBlob->cbuffer().as<float*>();
+        int matched_count = 0;
+        for (int i = 0; i < OUTPUT_SAMPLING_NUM; i++)  // Verify only Top 20 rois with highest scores
+        {
+            float actual_values[num_roi_elem];
+            for (int j = 0; j < num_roi_elem; j++)
+            {
+                if (precision == Precision::FP16)
+                    actual_values[j] = PrecisionUtils::f16tof32(actual_ptr[i*PROPOSAL_ELEMENT_SIZE + (j+1)]);
+                else
+                    actual_values[j] = actual_ptr_float[i*PROPOSAL_ELEMENT_SIZE + (j+1)];
+            }
+
+            float max_iou = 0.f;
+            for (int j = 0; j < num_gt_rois; j++)
+            {
+                float cur_iou = check_iou(&actual_values[0], &gt_values[j*PROPOSAL_ELEMENT_SIZE+1]);  // start at index 1 to skip the batch index
+                if (cur_iou > max_iou)
+                {
+                    max_iou = cur_iou;
+                }
+            }
+
+            if (max_iou > iou_threshold)
+            {
+                matched_count++;
+            }
+        }
+
+        return matched_count;
+    }
+    /**
+    * "IoU = intersection area / union area" of two boxes A, B
+    *   A, B: 4-dim array (x1, y1, x2, y2)
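+    *   e.g. A = (0,0,9,9), B = (5,5,14,14): intersection 5*5 = 25 (inclusive coordinates),
+    *   union 100 + 100 - 25 = 175, IoU = 25/175 ~ 0.14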
+    */
+    static float check_iou(const float* A, const float* B)
+    {
+        if (A[0] > B[2] || A[1] > B[3] || A[2] < B[0] || A[3] < B[1]) {
+            return 0;
+        }
+        else {
+            // overlapped region (= box)
+            const float x1 = std::max(A[0], B[0]);
+            const float y1 = std::max(A[1], B[1]);
+            const float x2 = std::min(A[2], B[2]);
+            const float y2 = std::min(A[3], B[3]);
+
+            // intersection area
+            const float width = std::max(0.0f, x2 - x1 + 1.0f);
+            const float height = std::max(0.0f, y2 - y1 + 1.0f);
+            const float area = width * height;
+
+            // area of A, B
+            const float A_area = (A[2] - A[0] + 1.0f) * (A[3] - A[1] + 1.0f);
+            const float B_area = (B[2] - B[0] + 1.0f) * (B[3] - B[1] + 1.0f);
+
+            // IoU
+            return area / (A_area + B_area - area);
+        }
+    }
+
+};
+
+std::string caffeModel() {
+    return R"V0G0N(
+        <net name="testProposal" version="2" batch="1">
+            <layers>
+                <layer id="0" name="rpn_cls_prob_reshape" precision="__PRECISION__" type="Input">
+                    <output>
+                        <port id="0">
+                            <dim>1</dim>
+                            <dim>18</dim>
+                            <dim>14</dim>
+                            <dim>14</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer id="1" name="rpn_bbox_pred" precision="__PRECISION__" type="Input">
+                    <output>
+                        <port id="0">
+                            <dim>1</dim>
+                            <dim>36</dim>
+                            <dim>14</dim>
+                            <dim>14</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer id="2" name="im_info" precision="__PRECISION__" type="Input">
+                    <output>
+                        <port id="0">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer id="3" name="proposal" precision="__PRECISION__" type="Proposal">
+                    <data base_size="16" feat_stride="16" min_size="16" nms_thresh="0.7" post_nms_topn="300"
+                     pre_nms_topn="6000" ratio="0.5,1,2" scale="8,16,32" pre_nms_thresh="0.0"
+                     clip_before_nms="__CLIP_BEFORE_NMS__"
+                     clip_after_nms="__CLIP_AFTER_NMS__"
+                     normalize="__NORMALIZE__" />
+                    <input>
+                        <port id="0">
+                            <dim>1</dim>
+                            <dim>18</dim>
+                            <dim>14</dim>
+                            <dim>14</dim>
+                        </port>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>36</dim>
+                            <dim>14</dim>
+                            <dim>14</dim>
+                        </port>
+                        <port id="2">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="3">
+                            <dim>300</dim>
+                            <dim>5</dim>
+                        </port>
+                    </output>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="0" from-port="0" to-layer="3" to-port="0"/>
+                <edge from-layer="1" from-port="0" to-layer="3" to-port="1"/>
+                <edge from-layer="2" from-port="0" to-layer="3" to-port="2"/>
+            </edges>
+        </net>
+    )V0G0N";
+}
+
+TEST_F(myriadLayersTestsProposal_nightly, Caffe) {
+
+    // Verify only the top 20 ranked proposal outputs against the GT values
+    std::vector<float> gt_values = {
+        0.f,     72.363f,  58.942f,  197.141f, 177.96f,  // batch_num, left, top, right, bottom
+        0.f,      6.885f,  75.163f,  50.71f,   144.593f,
+        0.f,     14.197f,  94.857f,  35.119f,  149.677f,
+        0.f,     22.245f,  66.52f,   223.f,    170.073f,
+        0.f,      9.534f,  68.92f,   45.507f,  135.162f,
+        0.f,     20.249f,  83.337f,  43.703f,  141.187f,
+        0.f,     56.778f,  48.152f,  202.341f, 204.147f,
+        0.f,      0.f,     34.429f,  223.f,    195.942f,
+        0.f,     13.293f,  76.712f,  69.431f,  171.435f,
+        0.f,     12.007f,  71.093f,  54.662f,  162.261f,
+        0.f,     68.36f,   90.737f,  210.758f, 175.72f ,
+        0.f,    107.295f,  114.45f,  174.194f, 155.064f,
+        0.f,     22.078f,  84.345f,  136.421f, 217.647f,
+        0.f,     80.583f,  76.407f,  187.699f, 146.992f,
+        0.f,     35.923f,  78.907f,  209.749f, 159.349f,
+        0.f,      0.f,     69.083f,  223.f,    223.f   ,
+        // NOTE: on CPU these two proposal boxes are swapped
+        0.f,     10.249f,  55.264f,  142.18f,  192.755f,
+        0.f,    109.44f,   120.758f, 177.096f, 161.314f,
+
+        0.f,     47.69f,   81.621f,  179.488f, 152.899f,
+        0.f,     24.46f,   52.069f,  73.301f,  151.432f};
+
+
+    cls_prob_file  = "/vpu/proposal_input_rpn_cls_prob_reshape.bin";
+    bbox_pred_file = "/vpu/proposal_input_rpn_bbox_pred.bin";
+    model          = caffeModel();
+
+    ASSERT_NO_FATAL_FAILURE(InferProposalLayer());
+    ASSERT_NO_FATAL_FAILURE(compareOutputSampleToRef(gt_values, 0.26f));
+}
+
+TEST_F(myriadLayersTestsProposal_nightly, CaffeNoClipBeforeNms) {
+
+    // Verify only the top 20 ranked proposal outputs against GT values - reference obtained from the MKLDNN plugin
+    std::vector<float> gt_values = {
+        0, 72.408f,   58.925f,  197.062f, 177.856f,
+        0, 6.907f,    75.193f,  50.726f,  144.589f,
+        0, 14.264f,   94.921f,  35.206f,  149.710f,
+        0, 22.460f,   66.425f,  250.893f, 170.004f,
+        0, 9.483f,    68.801f,  45.493f,  135.213f,
+        0, 20.271f,   83.327f,  43.680f,  141.196f,
+        0, 13.909f,   51.622f,  223.455f, 191.746f,
+        0, 56.799f,   48.177f,  202.286f, 203.900f,
+        0, -72.978f,  34.416f,  343.053f, 195.997f,
+        0, -107.360f, 52.610f,  326.355f, 222.650f,
+        0, -223.833f, 21.986f,  428.385f, 299.044f,
+
+        // swapped on mkldnn
+        0, 13.290f,   76.607f,  69.404f,  171.320f,
+        0, 12.085f,   71.267f,  54.660f,  162.396f,
+
+        0, -361.772f, 56.140f,  531.011f, 291.391f,
+        0, 68.313f,   90.692f,  210.803f, 175.800f,
+        0, 107.404f,  114.274f, 174.122f, 154.990f,
+        0, 22.029f,   84.392f,  136.439f, 217.658f,
+        0, 80.502f,   76.457f,  187.633f, 147.042f,
+        0, -253.904f, 4.927f,   451.118f, 235.572f,
+        0, 36.144f,   78.806f,  209.537f, 159.228f};
+
+
+    // Test settings
+    cls_prob_file   = "/vpu/proposal_input_rpn_cls_prob_reshape.bin";
+    bbox_pred_file  = "/vpu/proposal_input_rpn_bbox_pred.bin";
+    clip_before_nms = "false";
+    model           = caffeModel();
+
+    ASSERT_NO_FATAL_FAILURE(InferProposalLayer());
+    ASSERT_NO_FATAL_FAILURE(compareOutputSampleToRef(gt_values, 0.26f));
+}
+
+TEST_F(myriadLayersTestsProposal_nightly, CaffeClipAfterNms) {
+
+    // Verify only the top 20 ranked proposal outputs against the GT values
+    std::vector<float> gt_values = {
+        0, 72.408f,  58.925f, 197.062f, 177.856f,
+        0, 6.907f,   75.193f, 50.726f, 144.589f,
+        0, 14.264f,  94.921f, 35.206f, 149.710f,
+        0, 22.460f,  66.425f, 223.f, 170.004f,
+        0, 9.483f,   68.801f, 45.493f, 135.213f,
+        0, 20.271f,  83.327f, 43.680f, 141.196f,
+        0, 56.799f,  48.177f, 202.286f, 203.900f,
+        0, 0,        34.416f, 223.f, 195.997f,
+
+        // swapped on CPU
+        0, 13.290f,  76.607f, 69.404f, 171.320f,
+        0, 12.085f,  71.267f, 54.660f, 162.396f,
+
+        0, 68.313f,  90.692f, 210.803f, 175.800f,
+        0, 107.404f, 114.274f, 174.122f, 154.990f,
+        0, 22.029f,  84.392f, 136.439f, 217.658f,
+        0, 80.502f,  76.457f, 187.633f, 147.042f,
+        0, 36.144f,  78.806f, 209.537f, 159.228f,
+        0, 0,        68.821f, 223.f, 223.f,
+
+        // swapped on CPU
+        0, 10.207f,  55.363f, 142.246f, 192.734f,
+        0, 109.509f, 120.650f, 176.948f, 161.247f,
+
+        0, 47.612f,  81.498f, 179.434f, 152.845f,
+        0, 24.402f,  51.990f, 73.238f, 151.443f};
+
+
+    // Test settings
+    cls_prob_file   = "/vpu/proposal_input_rpn_cls_prob_reshape.bin";
+    bbox_pred_file  = "/vpu/proposal_input_rpn_bbox_pred.bin";
+    clip_after_nms  = "true";
+    model           = caffeModel();
+
+    ASSERT_NO_FATAL_FAILURE(InferProposalLayer());
+    ASSERT_NO_FATAL_FAILURE(compareOutputSampleToRef(gt_values, 0.26f));
+}
+
+TEST_F(myriadLayersTestsProposal_nightly, CaffeNormalizedOutput) {
+
+    // Verify only the top 20 ranked proposal outputs against the GT values
+    std::vector<float> gt_values = {
+        0, 0.323f, 0.263f, 0.879f, 0.794f,
+        0, 0.030f, 0.335f, 0.226f, 0.645f,
+        0, 0.063f, 0.423f, 0.157f, 0.668f,
+        0, 0.100f, 0.296f, 0.995f, 0.758f,
+        0, 0.042f, 0.307f, 0.203f, 0.603f,
+        0, 0.090f, 0.371f, 0.195f, 0.630f,
+        0, 0.253f, 0.215f, 0.903f, 0.910f,
+        0, 0,      0.153f, 0.995f, 0.874f,
+
+        // swapped on CPU
+        0, 0.059f, 0.341f, 0.309f, 0.764f,
+        0, 0.053f, 0.318f, 0.244f, 0.724f,
+
+        0, 0.304f, 0.404f, 0.941f, 0.784f,
+        0, 0.479f, 0.510f, 0.777f, 0.691f,
+        0, 0.098f, 0.376f, 0.609f, 0.971f,
+        0, 0.359f, 0.341f, 0.837f, 0.656f,
+        0, 0.161f, 0.351f, 0.935f, 0.710f,
+
+        // swapped on CPU
+        0, 0,      0.307f, 0.995f, 0.995f,
+        0, 0.045f, 0.247f, 0.635f, 0.860f,
+
+        0, 0.488f, 0.538f, 0.789f, 0.719f,
+        0, 0.212f, 0.363f, 0.801f, 0.682f,
+        0, 0.108f, 0.232f, 0.326f, 0.676f};
+
+
+    // Test settings
+    cls_prob_file   = "/vpu/proposal_input_rpn_cls_prob_reshape.bin";
+    bbox_pred_file  = "/vpu/proposal_input_rpn_bbox_pred.bin";
+    normalize       = "true";
+    model           = caffeModel();
+
+    ASSERT_NO_FATAL_FAILURE(InferProposalLayer());
+    ASSERT_NO_FATAL_FAILURE(compareOutputSampleToRef(gt_values, 0.026f));
+}
+
+TEST_F(myriadLayersTestsProposal_nightly, TensorFlow) {
+
+     model = R"V0G0N(
+        <net name="testProposal" version="2" batch="1">
+            <layers>
+                <layer id="0" name="rpn_cls_prob_reshape" precision="__PRECISION__" type="Input">
+                    <output>
+                        <port id="0">
+                            <dim>1</dim>
+                            <dim>24</dim>
+                            <dim>14</dim>
+                            <dim>14</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer id="1" name="rpn_bbox_pred" precision="__PRECISION__" type="Input">
+                    <output>
+                        <port id="0">
+                            <dim>1</dim>
+                            <dim>48</dim>
+                            <dim>14</dim>
+                            <dim>14</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer id="2" name="im_info" precision="__PRECISION__" type="Input">
+                    <output>
+                        <port id="0">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer id="3" name="proposal" precision="__PRECISION__" type="Proposal">
+                    <data base_size="256" box_coordinate_scale="10" box_size_scale="5" feat_stride="16" framework="tensorflow" min_size="10" nms_thresh="0.7" post_nms_topn="100" pre_nms_topn="21474" ratio="0.5,1.0,2.0" scale="0.25,0.5,1.0,2.0"/>
+                    <input>
+                        <port id="0">
+                            <dim>1</dim>
+                            <dim>24</dim>
+                            <dim>14</dim>
+                            <dim>14</dim>
+                        </port>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>48</dim>
+                            <dim>14</dim>
+                            <dim>14</dim>
+                        </port>
+                        <port id="2">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="3">
+                            <dim>100</dim>
+                            <dim>5</dim>
+                        </port>
+                    </output>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="0" from-port="0" to-layer="3" to-port="0"/>
+                <edge from-layer="1" from-port="0" to-layer="3" to-port="1"/>
+                <edge from-layer="2" from-port="0" to-layer="3" to-port="2"/>
+            </edges>
+        </net>
+    )V0G0N";
+
+    // Verify that the top 20 ranked proposal outputs are present in the GT values
+    std::vector<float> gt_values = {
+        0.f, 53.8881f, 29.2742f, 195.151f, 176.956f,  // batch_num, left, top, right, bottom
+        0.f, 56.9245f, 97.0252f, 191.764f, 211.743f,
+        0.f, 53.7577f, 10.861f,  198.255f, 145.085f,
+        0.f, 66.8727f, 19.5093f, 188.542f, 207.571f,
+        0.f, 72.5254f, 17.1535f, 114.375f, 36.867f,
+        0.f, 74.977f,  16.4198f, 115.675f, 31.661f,
+        0.f, 75.7093f, 14.6141f, 129.408f, 30.7164f,
+        0.f, 68.5911f, 2.17764f, 116.854f, 23.2988f,
+        0.f, 44.069f,  119.669f, 187.943f, 207.186f,
+        0.f, 73.7968f, 27.3531f, 120.728f, 43.4836f,
+        0.f, 65.3355f, 23.6799f, 196.25f,  127.208f,
+        0.f, 72.9499f, 24.7654f, 109.859f, 40.0457f,
+        0.f, 73.9328f, 14.3182f, 129.78f,  37.398f,
+        0.f, 71.6726f, 17.6392f, 103.571f, 32.9039f,
+        0.f, 0.f,      0.677025f, 224.f,   224.f,
+        0.f, 80.1852f, 1.5176f,  112.255f, 13.5889f,
+        0.f, 37.3883f, 11.9829f, 202.778f, 213.109f,
+        0.f, 63.1822f, 129.085f, 96.5516f, 149.743f,
+        0.f, 49.5382f, 137.022f, 178.434f, 214.141f,
+        0.f, 90.8469f, 10.7634f, 206.273f, 125.817f,
+        0.f, 52.0368f, 112.601f, 73.9616f, 130.725f,
+        0.f, 60.5855f, 39.8242f, 208.905f, 221.897f,
+        0.f, 46.8725f, 57.9966f, 203.359f, 162.118f,
+        0.f, 42.5881f, 112.609f, 65.114f,  131.432f,
+        0.f, 80.3221f, 37.2223f, 113.974f, 49.9118f,
+        0.f, 93.5535f, 13.3071f, 153.779f, 43.266f,
+        0.f, 70.2708f, 128.093f, 165.02f,  164.439f,
+        0.f, 71.4465f, 20.636f,  86.9464f, 33.2511f,
+        0.f, 67.2406f, 6.66637f, 98.914f,  28.4598f,
+        0.f, 62.9305f, 19.6755f, 175.826f, 160.375f,
+        0.f, 72.9378f, 17.9405f, 88.7149f, 28.9991f,
+        0.f, 55.1827f, 149.366f, 174.829f, 206.638f,
+        0.f, 54.9047f, 132.917f, 84.2464f, 147.54f,
+        0.f, 67.0201f, 162.942f, 96.9201f, 196.988f,
+        0.f, 102.893f, 15.4131f, 205.249f, 191.626f,
+        0.f, 43.3537f, 40.8668f, 190.631f, 142.491f,
+        0.f, 59.8731f, 137.814f, 169.612f, 200.622f,
+        0.f, 86.5134f, 16.734f,  149.479f, 30.3793f,
+        0.f, 65.7841f, 158.012f, 104.35f,  204.026f,
+        0.f, 56.3348f, 130.639f, 101.328f, 150.503f,
+        0.f, 110.973f, 11.6659f, 186.115f, 51.8234f,
+        0.f, 56.8438f, 139.659f, 90.965f,  154.652f,
+        0.f, 50.0912f, 10.1794f, 164.545f, 194.869f,
+        0.f, 50.6326f, 116.361f, 68.4287f, 132.11f,
+        0.f, 52.6678f, 11.2994f, 201.231f, 100.594f,
+        0.f, 22.428f,  0.f,      180.848f, 194.578f,
+        0.f, 121.799f, 10.7808f, 174.07f,  54.3841f,
+        0.f, 61.9675f, 174.689f, 163.129f, 212.789f,
+        0.f, 68.3703f, 86.9163f, 102.297f, 131.793f,
+        0.f, 63.1688f, 141.842f, 144.259f, 209.627f,
+        0.f, 91.0212f, 0.f,      143.93f,  22.4325f,
+        0.f, 85.0419f, 42.8284f, 205.168f, 175.15f,
+        0.f, 67.4256f, 131.442f, 98.0033f, 144.358f,
+        0.f, 69.3273f, 106.276f, 96.8871f, 128.973f,
+        0.f, 104.049f, 4.0801f,  124.051f, 20.6967f,
+        0.f, 71.2328f, 69.3853f, 184.252f, 197.295f,
+        0.f, 72.0265f, 0.426311f, 96.3345f, 12.0004f,
+        0.f, 55.4309f, 138.711f, 84.8696f, 152.147f,
+        0.f, 63.7001f, 133.453f, 123.394f, 157.344f,
+        0.f, 79.3643f, 34.2525f, 105.982f, 49.3115f,
+        0.f, 59.3775f, 99.2007f, 104.55f,  139.959f,
+        0.f, 126.697f, 5.20008f, 224.0f,   196.069f,
+        0.f, 73.7945f, 86.5779f, 100.509f, 117.225f,
+        0.f, 69.5673f, 168.975f, 100.984f, 208.11f,
+        0.f, 67.1213f, 177.04f,  136.411f, 216.875f,
+        0.f, 58.0965f, 147.441f, 81.145f,  158.661f,
+        0.f, 59.6378f, 17.1767f, 109.574f, 50.6673f,
+        0.f, 77.5341f, 19.9996f, 110.322f, 46.983f,
+        0.f, 55.6317f, 49.5741f, 155.302f, 200.88f,
+        0.f, 89.4005f, 126.625f, 147.32f,  147.27f,
+        0.f, 81.515f,  127.61f,  103.404f, 148.08f,
+        0.f, 47.3904f, 89.3484f, 204.269f, 179.917f,
+        0.f, 84.4783f, 177.696f, 173.98f,  211.952f,
+        0.f, 97.373f,  36.2838f, 115.779f, 53.6433f,
+        0.f, 66.7269f, 126.805f, 108.757f, 144.899f,
+        0.f, 53.6802f, 137.088f, 96.0449f, 152.658f,
+        0.f, 39.4418f, 9.47529f, 188.078f, 83.2332f,
+        0.f, 72.0867f, 25.4984f, 90.6464f, 39.8829f,
+        0.f, 110.994f, 5.94071f, 162.54f,  32.1407f,
+        0.f, 76.3537f, 183.958f, 155.425f, 215.032f,
+        0.f, 79.7699f, 102.186f, 105.326f, 129.827f,
+        0.f, 70.6935f, 36.0157f, 97.075f,  48.5611f,
+        0.f, 47.4715f, 9.66474f, 201.332f, 49.1294f,
+        0.f, 111.987f, 26.351f, 144.638f,  39.3359f,
+        0.f, 59.4611f, 141.302f, 87.7403f, 152.782f,
+        0.f, 70.3368f, 150.324f, 97.0087f, 178.19f,
+        0.f, 82.5671f, 103.536f, 124.713f, 130.838f,
+        0.f, 82.0366f, 189.618f, 110.221f, 205.794f,
+        0.f, 64.6987f, 73.8598f, 107.236f, 131.602f,
+        0.f, 100.596f, 192.156f, 125.981f, 207.468f,
+        0.f, 44.5534f, 116.114f, 73.3897f, 137.017f,
+        0.f, 84.5608f, 18.7706f, 140.066f, 39.775f,
+        0.f, 83.5766f, 89.4399f, 103.842f, 112.501f,
+        0.f, 49.7645f, 130.757f, 66.1343f, 145.153f,
+        0.f, 48.9145f, 115.85f,  76.7334f, 134.724f,
+        0.f, 68.791f,  2.68682f, 90.5532f, 20.4226f,
+        0.f, 67.3277f, 175.467f, 108.399f, 215.786f,
+        0.f, 53.4548f, 120.664f, 70.7046f, 139.624f,
+        0.f, 49.5098f, 111.469f, 90.2394f, 136.238f,
+        0.f, 75.1919f, 0.f,      223.661f, 211.529f};
+
+    cls_prob_file = "/vpu/proposal_tf_input_Reshape_Permute_Class.bin";
+    bbox_pred_file = "/vpu/proposal_tf_input_FirstStageBoxPredictor_BoxEncodingPredictor_Conv2D.bin";
+
+    ASSERT_NO_FATAL_FAILURE(InferProposalLayer());
+    ASSERT_GE(calcIoU(gt_values), OUTPUT_ROI_MATCH_THRESHOLD);  // passes if at least 18 of the top 20 rois match the GT rois
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_psroipooling_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_psroipooling_test.cpp
new file mode 100644 (file)
index 0000000..ae8f101
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_psroipooling_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsPSROIPooling_nightly,
+        ::testing::Combine(
+        ::testing::ValuesIn(s_PSROIPoolingLayerInput),
+        ::testing::ValuesIn(s_PSROIPoolingLayerParam),
+        ::testing::ValuesIn(s_PSROIPoolingNumROIs)));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_psroipooling_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_psroipooling_test.hpp
new file mode 100644 (file)
index 0000000..abc9584
--- /dev/null
@@ -0,0 +1,238 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+
+#define ERROR_BOUND (1.2e-2f)
+#define DIV_THEN_CEIL(a, b) (((a) + (b) - 1) / (b))
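+// e.g. DIV_THEN_CEIL(10, 3) == (10 + 3 - 1) / 3 == 4 (integer ceiling division)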
+
+using namespace InferenceEngine;
+
+struct PSROIPoolingParams {
+    int in_width;
+    int in_height;
+    uint32_t group_size;
+    uint32_t output_dim;
+    float spatial_scale;
+};
+
+PRETTY_PARAM(psroipooling_param, PSROIPoolingParams);
+
+static inline void PrintTo(const PSROIPoolingParams& param, ::std::ostream* os)
+{
+    PSROIPoolingParams data = param;
+    *os << "psroipooling_param: " << data.in_width << ", " << data.in_height << ", " << data.group_size << ", " << data.output_dim << "," << data.spatial_scale;
+}
+
+using PSROIPoolingTestParams = std::tuple<Dims, psroipooling_param, uint32_t>;
+
+class myriadLayersTestsPSROIPooling_nightly: public myriadLayerTestBaseWithParam<PSROIPoolingTestParams> {
+public:
+    void genROIs(InferenceEngine::Blob::Ptr rois,
+                 const PSROIPoolingParams& params,
+                 const uint32_t num_rois) {
+        ie_fp16 *roisBlob_data = rois->buffer().as<ie_fp16*>();
+        const int max_range_width = params.in_width * 4 / 5;
+        const int max_range_height = params.in_height * 4 / 5;
+        for (int i = 0; i < num_rois; i++)
+        {
+            int x0 = std::rand() % max_range_width;
+            int x1 = x0 + (std::rand() % (params.in_width - x0 - 1)) + 1;
+            int y0 = std::rand() % max_range_height;
+            int y1 = y0 + (std::rand() % (params.in_height - y0 - 1)) + 1;
+
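+            // ROI format: [batch_index, x0, y0, x1, y1] in input-image coordinates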
+            roisBlob_data[i * 5 + 0] = PrecisionUtils::f32tof16(0);
+            roisBlob_data[i * 5 + 1] = PrecisionUtils::f32tof16(x0);
+            roisBlob_data[i * 5 + 2] = PrecisionUtils::f32tof16(y0);
+            roisBlob_data[i * 5 + 3] = PrecisionUtils::f32tof16(x1);
+            roisBlob_data[i * 5 + 4] = PrecisionUtils::f32tof16(y1);
+        }
+    }
+
+    void refPSROIPooling(const InferenceEngine::Blob::Ptr src,
+                         const InferenceEngine::Blob::Ptr rois,
+                         InferenceEngine::Blob::Ptr dst,
+                         const int num_rois,
+                         const PSROIPoolingParams& params,
+                         const tensor_test_params& in) {
+        const int group_size = params.group_size;
+        const float spatial_scale = params.spatial_scale;
+        const int pooled_height = params.group_size;
+        const int pooled_width = params.group_size;
+
+        const int channels = in.c;
+        const int height = in.h;
+        const int width = in.w;
+
+        const int nn = num_rois;
+        const int nc = params.output_dim;
+        const int nh = params.group_size;
+        const int nw = params.group_size;
+
+        ie_fp16* dst_data = dst->buffer().as<ie_fp16 *>();
+        const ie_fp16* bottom_data_beginning = src->cbuffer().as<ie_fp16 *>();
+        const ie_fp16* bottom_rois_beginning = rois->cbuffer().as<ie_fp16 *>();
+
+        for (int n = 0; n < nn; ++n)
+        {
+            const ie_fp16* bottom_rois = bottom_rois_beginning + n * 5;
+            int roi_batch_ind = static_cast<int>(PrecisionUtils::f16tof32(bottom_rois[0]));
+            float roi_start_w = round(PrecisionUtils::f16tof32(bottom_rois[1])) * spatial_scale;
+            float roi_start_h = round(PrecisionUtils::f16tof32(bottom_rois[2])) * spatial_scale;
+            float roi_end_w = round(PrecisionUtils::f16tof32(bottom_rois[3]) + 1.0f) * spatial_scale;
+            float roi_end_h = round(PrecisionUtils::f16tof32(bottom_rois[4]) + 1.0f) * spatial_scale;
+
+            float roi_width = std::max(roi_end_w - roi_start_w, 0.1f);
+            float roi_height = std::max(roi_end_h - roi_start_h, 0.1f);
+
+            int top_roi_offset = n * nc * nh * nw;
+            for (int c = 0; c < nc; ++c)
+            {
+                int top_plane_offset = top_roi_offset + c * nh * nw;
+                for (int h = 0; h < nh; ++h)
+                {
+                    int top_row_offset = top_plane_offset + h * nw;
+                    for (int w = 0; w < nw; ++w)
+                    {
+                        const int index = top_row_offset + w;
+                        dst_data[index] = 0;
+
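+                        // Map the output bin (h, w) to an input region, clipped to the feature map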
+                        int hstart = std::min(height, std::max(0, static_cast<int>(floor(roi_start_h + (h * roi_height) / pooled_height))));
+                        int hend = std::min(height, std::max(0, static_cast<int>(ceil(roi_start_h + (h + 1) * roi_height / pooled_height))));
+                        int wstart = std::min(width, std::max(0, static_cast<int>(floor(roi_start_w + (w * roi_width) / pooled_width))));
+                        int wend = std::min(width, std::max(0, static_cast<int>(ceil(roi_start_w + (w + 1) * roi_width / pooled_width))));
+
+                        float bin_area = (hend - hstart) * (wend - wstart);
+                        if (bin_area)
+                        {
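+                            // Position-sensitive pooling: bin (h, w) of output
+                            // channel c averages its own input channel group gc.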
+                            int gc = (c * group_size + h) * group_size + w;
+                            const ie_fp16* bottom_data =
+                                    bottom_data_beginning + ((roi_batch_ind * channels + gc) * height * width);
+
+                            float out_sum = 0.0f;
+                            for (int hh = hstart; hh < hend; ++hh)
+                                for (int ww = wstart; ww < wend; ++ww)
+                                    out_sum += PrecisionUtils::f16tof32(bottom_data[hh * width + ww]);
+
+                            dst_data[index] = PrecisionUtils::f32tof16(out_sum / bin_area);
+                        }
+                    }
+                }
+            }
+        }
+    }
+    using myriadLayersTests_nightly::makeSingleLayerNetwork;
+    void makeSingleLayerNetwork(const std::map<std::string, std::string>& params,
+                     const PSROIPoolingParams& test_params,
+                     const uint32_t num_rois) {
+        makeSingleLayerNetwork(LayerInitParams("PSROIPooling").params(params),
+                               NetworkInitParams().createInference(false));
+        createInferRequest(test_params, num_rois);
+    }
+    void createInferRequest(const PSROIPoolingParams& params,
+                            const uint32_t num_rois) {
+        ASSERT_NO_THROW(_inputsInfo = _cnnNetwork.getInputsInfo());
+        ASSERT_TRUE(_inputsInfo.size() == 2);
+        _inputsInfo.begin()->second->setLayout(NHWC);
+        for (auto inputInfo : _inputsInfo) {
+            inputInfo.second->setPrecision(Precision::FP16);
+            if (inputInfo.first == "input0") {
+                inputInfo.second->setLayout(NCHW);
+            }
+        }
+
+        ASSERT_NO_THROW(_outputsInfo = _cnnNetwork.getOutputsInfo());
+        ASSERT_EQ(1u, _outputsInfo.size());
+        for (auto& outputInfo : _outputsInfo) {
+            outputInfo.second->setPrecision(Precision::FP16);
+            outputInfo.second->setLayout(NCHW);
+        }
+
+        InferenceEngine::StatusCode st = InferenceEngine::StatusCode::GENERAL_ERROR;
+        ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, _cnnNetwork, {}, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+        ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        ASSERT_NE(_inferRequest, nullptr) << _resp.msg;
+
+        ASSERT_NO_THROW(_inputsInfo = _cnnNetwork.getInputsInfo());
+        ASSERT_NO_THROW(_outputsInfo = _cnnNetwork.getOutputsInfo());
+        SetSeed(DEFAULT_SEED_VALUE);
+
+        for (auto& inputInfo : _inputsInfo)
+        {
+            InferenceEngine::SizeVector inputDims = inputInfo.second->getTensorDesc().getDims();
+            InferenceEngine::Layout layout = inputInfo.second->getTensorDesc().getLayout();
+
+            Blob::Ptr data;
+            ASSERT_NO_THROW(st = _inferRequest->GetBlob(inputInfo.first.c_str(), data, &_resp));
+            ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+            if (inputInfo.first == _inputsInfo.begin()->first)
+            {
+                GenRandomData(data);
+            }
+            else
+            {
+                genROIs(data, params, num_rois);
+            }
+            _inputMap[inputInfo.first] = data;
+        }
+
+        Blob::Ptr data;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob(_outputsInfo.begin()->first.c_str(), data, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        _outputMap[_outputsInfo.begin()->first] = data;
+    }
+
+};
+
+static std::vector<Dims> s_PSROIPoolingLayerInput = {
+        {{1, 1029, 14, 14}},
+};
+
+static std::vector<psroipooling_param> s_PSROIPoolingLayerParam = {
+        {{224, 224, 7, 21, 0.0625}},
+};
+
+static std::vector<uint32_t> s_PSROIPoolingNumROIs = {
+        1, 10, 30, 50, 100, 300
+};
+
+TEST_P(myriadLayersTestsPSROIPooling_nightly, PSROIPooling) {
+#if defined(_WIN32) || defined(WIN32)
+    SKIP() << "Disabled for Windows. CVS-13239";
+#endif
+    tensor_test_params dims_layer_in = std::get<0>(GetParam());
+    PSROIPoolingParams test_params = std::get<1>(GetParam());
+    const uint32_t num_rois = std::get<2>(GetParam());
+    IN_OUT_desc input_tensors, output_tensors;
+    input_tensors.push_back({1, dims_layer_in.c, dims_layer_in.h, dims_layer_in.w});
+    input_tensors.push_back({num_rois, 5});
+    output_tensors.push_back({num_rois, test_params.output_dim, test_params.group_size, test_params.group_size});
+
+    SetInputTensors(input_tensors);
+    SetOutputTensors(output_tensors);
+
+    std::map<std::string, std::string> layer_params = {
+        {"group_size", std::to_string(test_params.group_size)},
+        {"output_dim", std::to_string(test_params.output_dim)},
+        {"spatial_scale", std::to_string(test_params.spatial_scale)},
+    };
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(layer_params, test_params, num_rois));
+
+    ASSERT_TRUE(Infer());
+
+    auto src = _inputMap.begin()->second;
+    auto rois = std::next(_inputMap.begin())->second;
+    auto dst = _outputMap.begin()->second;
+
+    InferenceEngine::TBlob<ie_fp16>::Ptr _refBlob = make_shared_blob<ie_fp16>(dst->getTensorDesc());
+    _refBlob->allocate();
+
+    refPSROIPooling(src, rois, _refBlob, num_rois, test_params, dims_layer_in);
+
+    CompareCommonAbsolute(dst, _refBlob, ERROR_BOUND);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reduce_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reduce_test.cpp
new file mode 100644 (file)
index 0000000..f9620a3
--- /dev/null
@@ -0,0 +1,45 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_reduce_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsReduceAnd_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_input_dims),
+        ::testing::ValuesIn(s_axes_list),
+        ::testing::ValuesIn(s_data_precision),
+        ::testing::ValuesIn(s_keep_dims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsReduceMin_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_input_dims),
+        ::testing::ValuesIn(s_axes_list),
+        ::testing::ValuesIn(s_data_precision),
+        ::testing::ValuesIn(s_keep_dims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsReduceMax_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_input_dims),
+        ::testing::ValuesIn(s_axes_list),
+        ::testing::ValuesIn(s_data_precision),
+        ::testing::ValuesIn(s_keep_dims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsReduceSum_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_input_dims),
+        ::testing::ValuesIn(s_axes_list),
+        ::testing::ValuesIn(s_data_precision),
+        ::testing::ValuesIn(s_keep_dims))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsReduceMean_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_input_dims),
+        ::testing::ValuesIn(s_axes_list),
+        ::testing::ValuesIn(s_data_precision),
+        ::testing::ValuesIn(s_keep_dims))
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reduce_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reduce_test.hpp
new file mode 100644 (file)
index 0000000..aa49a09
--- /dev/null
@@ -0,0 +1,568 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_reference_functions.hpp"
+#include "myriad_layers_tests.hpp"
+#include "tests_vpu_common.hpp"
+
+#include <algorithm>
+#include <functional>
+#include <string>
+#include <limits>
+
+using namespace InferenceEngine;
+
+extern const char REDUCE_AND[] = "ReduceAnd";
+extern const char REDUCE_MIN[] = "ReduceMin";
+extern const char REDUCE_MAX[] = "ReduceMax";
+extern const char REDUCE_SUM[] = "ReduceSum";
+extern const char REDUCE_MEAN[] = "ReduceMean";
+
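+// ConstantTraits supplies the per-type one/zero constants so the reference
+// reduce kernels below can stay generic over ie_fp16, int32_t and float.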
+template <typename DataType> class ConstantTraits;
+
+template<> class ConstantTraits<ie_fp16>
+{
+public:
+    static ie_fp16 one;
+    static ie_fp16 zero;
+};
+
+template<> class ConstantTraits<int32_t>
+{
+public:
+    static int32_t one;
+    static int32_t zero;
+};
+
+template<> class ConstantTraits<float>
+{
+public:
+    static float one;
+    static float zero;
+};
+
+ie_fp16 ConstantTraits<ie_fp16>::one = PrecisionUtils::f32tof16(1.0f);
+ie_fp16 ConstantTraits<ie_fp16>::zero = PrecisionUtils::f32tof16(0.0f);
+
+int32_t ConstantTraits<int32_t>::one = 1;
+int32_t ConstantTraits<int32_t>::zero = 0;
+
+float ConstantTraits<float>::one = 1.0f;
+float ConstantTraits<float>::zero = 0.0f;
+
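+// toInternal/toExternal convert between the blob storage type and the wider
+// accumulation type; the only non-trivial pair widens FP16 to float and back.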
+template <typename Internal, typename External> Internal toInternal(External value) { return value; }
+template <typename Internal, typename External> External toExternal(Internal value) { return value; }
+template<> float toInternal<float, ie_fp16>(ie_fp16 val) { return PrecisionUtils::f16tof32(val); }
+template<> ie_fp16 toExternal<float, ie_fp16>(float val) { return PrecisionUtils::f32tof16(val); }
+
+template<typename DataType, typename DataTypeInternal=DataType>
+class RefReduceAnd: public IReduceKernel<DataType>
+{
+    typedef ConstantTraits<DataType> Constants;
+    typedef ConstantTraits<DataTypeInternal> InternalTypeConstants;
+public:
+    virtual void init() override
+    { m_val = true; }
+    virtual void accumulate(const DataType& val) override
+    {
+        auto actualVal = toInternal<DataTypeInternal, DataType>(val);
+        m_val &= bool(actualVal != InternalTypeConstants::zero);
+    }
+    virtual DataType result() const override
+    { return (m_val ? Constants::one : Constants::zero); }
+    virtual DataType copy(const DataType& val) const override
+    {
+        auto actualVal = toInternal<DataTypeInternal, DataType>(val);
+        return (actualVal != InternalTypeConstants::zero ? Constants::one : Constants::zero);
+    }
+    static void generateData(Blob::Ptr blob)
+    {
+        GenRandomData(blob);
+
+        DataType* data = blob->buffer().as<DataType*>();
+        const auto dims = blob->getTensorDesc().getDims();
+        const int total = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<int32_t>());
+
+        // For meaningful ReduceAnd coverage, the input must be mostly non-zero, with zeros
+        // scattered sparsely enough that some, but not all, reduced sub-tensors contain one.
+
+        for (int i = 0; i < total; ++i)
+            data[i] = (data[i] == Constants::zero) ? Constants::one : data[i];
+        for (int i = 0; i < total; i += 13)
+            data[i] = Constants::zero;
+    }
+private:
+    bool m_val;
+};
+
+
+template<typename DataType, typename DataTypeInternal=DataType>
+class RefReduceMin: public IReduceKernel<DataType>
+{
+public:
+    virtual void init() override
+    { m_val = std::numeric_limits<DataTypeInternal>::max(); }
+    virtual void accumulate(const DataType& val) override
+    {
+        DataTypeInternal fval = toInternal<DataTypeInternal, DataType>(val);
+        m_val = m_val < fval ? m_val : fval;
+    }
+    virtual DataType result() const override
+    { return toExternal<DataTypeInternal, DataType>(m_val); }
+    virtual DataType copy(const DataType& val) const override
+    { return val;}
+    static void generateData(Blob::Ptr blob)
+    {
+        GenRandomData(blob);
+    }
+private:
+    DataTypeInternal m_val;
+};
+
+
+template<typename DataType, typename DataTypeInternal=DataType>
+class RefReduceMax: public IReduceKernel<DataType>
+{
+public:
+    virtual void init() override
+    {
+        m_val = std::numeric_limits<DataTypeInternal>::lowest();
+    }
+    virtual void accumulate(const DataType& val) override
+    {
+        DataTypeInternal fval = toInternal<DataTypeInternal, DataType>(val);
+        m_val = m_val > fval ? m_val : fval;
+    }
+    virtual DataType result() const override
+    {
+        return toExternal<DataTypeInternal, DataType>(m_val);
+    }
+    virtual DataType copy(const DataType& val) const override
+    { return val;}
+    static void generateData(Blob::Ptr blob) {
+        GenRandomData(blob);
+    }
+private:
+    DataTypeInternal m_val;
+};
+
+template<typename DataType, typename DataTypeInternal=DataType>
+class RefReduceSum: public IReduceKernel<DataType>
+{
+public:
+    virtual void init() override
+    {
+        m_val = 0;
+    }
+    virtual void accumulate(const DataType& val) override
+    {
+        m_val += toInternal<DataTypeInternal, DataType>(val);
+    }
+    virtual DataType result() const override
+    {
+        return toExternal<DataTypeInternal, DataType>(m_val);
+    }
+    virtual DataType copy(const DataType& val) const override
+    { return val;}
+    static void generateData(Blob::Ptr blob) {
+        GenRandomData(blob);
+    }
+private:
+    DataTypeInternal m_val;
+};
+
+
+template<typename DataType, typename DataTypeInternal=DataType>
+class RefReduceMean: public IReduceKernel<DataType>
+{
+public:
+    virtual void init() override
+    {
+        m_val = 0;
+        m_count = 0;
+    }
+    virtual void accumulate(const DataType& val) override
+    {
+        m_val += toInternal<DataTypeInternal, DataType>(val);
+        m_count++;
+    }
+    virtual DataType result() const override
+    {
+        if (m_count == 0) {
+            return toExternal<DataTypeInternal, DataType>(m_val);
+        } else {
+            return toExternal<DataTypeInternal, DataType>(m_val / static_cast<DataTypeInternal>(m_count));
+        }
+    }
+    virtual DataType copy(const DataType& val) const override
+    { return val;}
+    static void generateData(Blob::Ptr blob) {
+        GenRandomData(blob);
+    }
+private:
+    DataTypeInternal m_val;
+    size_t m_count;
+};
+
+static RefReduceAnd<ie_fp16, float> refReduceAndFP16;
+static RefReduceMin<ie_fp16, float> refReduceMinFP16;
+static RefReduceMax<ie_fp16, float> refReduceMaxFP16;
+static RefReduceSum<ie_fp16, float> refReduceSumFP16;
+static RefReduceMean<ie_fp16, float> refReduceMeanFP16;
+
+static RefReduceAnd<int32_t> refReduceAndI32;
+static RefReduceMin<int32_t> refReduceMinI32;
+static RefReduceMax<int32_t> refReduceMaxI32;
+static RefReduceSum<int32_t> refReduceSumI32;
+static RefReduceMean<int32_t> refReduceMeanI32;
+
+typedef void GenData(Blob::Ptr blob);
+
+template <typename DataType>
+struct ReduceOpParams
+{
+    IReduceKernel<DataType>* op;
+    float compare_threshold;
+    GenData* generateData;
+};
+
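+// Note: the maps below are keyed by pointer rather than by string contents; the
+// lookups still work because the same extern char arrays are used as template arguments.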
+static const std::map<const char*, ReduceOpParams<ie_fp16>> refMapFP16 =
+        {
+                {REDUCE_AND, {&refReduceAndFP16, 0.0f, RefReduceAnd<ie_fp16>::generateData}},
+                {REDUCE_MIN, {&refReduceMinFP16, 0.0f, RefReduceMin<ie_fp16>::generateData}},
+                {REDUCE_MAX, {&refReduceMaxFP16, 0.0f, RefReduceMax<ie_fp16>::generateData}},
+                {REDUCE_SUM, {&refReduceSumFP16, 0.01f, RefReduceSum<ie_fp16>::generateData}},
+                {REDUCE_MEAN, {&refReduceMeanFP16, 0.01f, RefReduceMean<ie_fp16>::generateData}},
+        };
+
+static const std::map<const char*, ReduceOpParams<int32_t>> refMapI32 =
+        {
+                {REDUCE_AND, {&refReduceAndI32,  0.0f,  RefReduceAnd<int32_t>::generateData}},
+                {REDUCE_MIN, {&refReduceMinI32,  0.0f,  RefReduceMin<int32_t>::generateData}},
+                {REDUCE_MAX, {&refReduceMaxI32,  0.0f,  RefReduceMax<int32_t>::generateData}},
+                {REDUCE_SUM, {&refReduceSumI32,  0.0f,  RefReduceSum<int32_t>::generateData}},
+                {REDUCE_MEAN, {&refReduceMeanI32, 0.0f, RefReduceMean<int32_t>::generateData}},
+        };
+
+using ReduceTestParams = std::tuple<SizeVector, SizeVector, Precision, bool>;
+
+static const Precision axesPrecision = Precision::I32;
+
+class ReduceUtils
+{
+public:
+    static std::string getModel(const SizeVector& inputDims, const SizeVector& axesList,
+                                const SizeVector& outputDims, const std::string& reduceType,
+                                const Precision dataPrecision, int keep_dims)
+    {
+        std::string model = R"V0G0N(
+                <net name="testReduce" version="5">
+                    <layers>
+                        <layer id="0" name="reduce_input" precision="__DATA_PRECISION__" type="Input">
+                            <output>
+                                <port id="0">__INPUT_DIMS__</port>
+                            </output>
+                        </layer>
+                        <layer id="1" name="reduce_axes" precision="__AXES_PRECISION__" type="Const">
+                            <output>
+                                <port id="1">__AXES_DIMS__</port>
+                            </output>
+                            <blobs>
+                                <custom offset="0" size="__AXES_SIZE__"/>
+                            </blobs>
+                        </layer>
+                        <layer id="2" name="reduce" precision="__DATA_PRECISION__" type="__REDUCE_TYPE__">
+                            <data keep_dims="__KEEP_DIMS__"/>
+                            <input>
+                                <port id="0">__INPUT_DIMS__</port>
+                                <port id="1">__AXES_DIMS__</port>
+                            </input>
+                            <output>
+                                <port id="2">__OUTPUT_DIMS__</port>
+                            </output>
+                        </layer>
+                    </layers>
+                    <edges>
+                        <edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
+                        <edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
+                    </edges>
+                </net>
+            )V0G0N";
+
+        const SizeVector axesDims = { axesList.size() };
+
+        std::string input_dims = dimsToString(inputDims);
+        std::string axes_dims = dimsToString(axesDims);
+        std::string output_dims = dimsToString(outputDims);
+        size_t axes_size = axesList.size() * sizeof(int32_t);
+
+        REPLACE_WITH_STR(model, "__REDUCE_TYPE__", reduceType);
+        REPLACE_WITH_STR(model, "__DATA_PRECISION__", dataPrecision.name());
+        REPLACE_WITH_STR(model, "__AXES_PRECISION__", axesPrecision.name());
+        REPLACE_WITH_STR(model, "__INPUT_DIMS__", input_dims);
+        REPLACE_WITH_STR(model, "__AXES_DIMS__", axes_dims);
+        REPLACE_WITH_NUM(model, "__AXES_SIZE__", axes_size);
+        REPLACE_WITH_STR(model, "__OUTPUT_DIMS__", output_dims);
+        REPLACE_WITH_STR(model, "__KEEP_DIMS__", (keep_dims ? "True" : "False"));
+
+        return model;
+    }
+    static std::string dimsToString(const SizeVector& dims)
+    {
+        std::string str;
+        for (auto& d : dims)
+            str += "<dim>" + std::to_string(d) + "</dim>";
+        return str;
+    }
+    static SizeVector calcOutputDims(const SizeVector& inputDims, const SizeVector& axesList, int keep_dims)
+    {
+        auto mask = list2mask(inputDims.size(), axesList);
+        if (keep_dims)
+        {
+            SizeVector outputDims(inputDims.size(), 0);
+            for (int i = 0; i < (int)inputDims.size(); ++i)
+            {
+                if (mask & (1 << i))
+                    outputDims[i] = 1;
+                else
+                    outputDims[i] = inputDims[i];
+            }
+            return outputDims;
+        }
+        else
+        {
+            SizeVector outputDims;
+            for (int i = 0; i < (int)inputDims.size(); ++i)
+            {
+                if (!(mask & (1 << i)))
+                    outputDims.push_back(inputDims[i]);
+            }
+            if (outputDims.empty()) // 0D -> 1D
+                outputDims.push_back(1);
+            return outputDims;
+        }
+    }
+    static unsigned list2mask(int ndims, const SizeVector& list)
+    {
+        unsigned mask = 0;
+        for (int i : list)
+        {
+            if (i < 0) // convert negative axis indices
+                i += ndims;
+            EXPECT_TRUE((i >= 0) && (i < ndims));
+            mask |= (1 << i);
+        }
+        return mask;
+    }
+    static Layout defaultLayout(int ndims)
+    {
+        switch (ndims)
+        {
+        case 5: return NCDHW;
+        case 4: return NCHW;
+        case 3: return CHW;
+        case 2: return NC;
+        case 1: return C;
+        }
+        return ANY;
+    }
+    static void getAxesBlob(const SizeVector& axesList, TBlob<uint8_t>::Ptr& weightsBlob, TBlob<int32_t>::Ptr& axesBlob)
+    {
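+        // The weights blob backs the "reduce_axes" Const layer referenced by offset/size
+        // in the IR; the axes blob feeds the reference implementation directly.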
+        size_t axes_size = axesList.size();
+        size_t weights_size = axesList.size() * sizeof(int32_t);
+
+        TBlob<uint8_t>* weights_raw = new TBlob<uint8_t>(TensorDesc(Precision::U8, {weights_size}, C));
+        weights_raw->allocate();
+        int32_t* weightsData = weights_raw->data().as<int32_t*>();
+
+        TBlob<int32_t>* axes_raw = new TBlob<int32_t>(TensorDesc(Precision::I32, {axes_size}, C));
+        axes_raw->allocate();
+        int32_t* axesData = axes_raw->data().as<int32_t*>();
+
+        for (size_t index = 0; index < axesList.size(); ++index) {
+            weightsData[index] = axesList[index];
+            axesData[index] = axesList[index];
+        }
+
+        weightsBlob = TBlob<uint8_t>::Ptr(weights_raw);
+        axesBlob = TBlob<int32_t>::Ptr(axes_raw);
+    }
+};
+
+template <const char* ReduceType>
+class ReduceTest: public myriadLayerTestBaseWithParam<ReduceTestParams>
+{
+protected:
+    void testReduce()
+    {
+        DISABLE_IF(!CheckMyriadX());
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+        const auto params = GetParam();
+        const auto inputDims = std::get<0>(params);
+        const auto axesList = std::get<1>(params);
+        const auto dataPrecision = std::get<2>(params);
+        const int keepDims = std::get<3>(params) ? 1 : 0;
+
+        const auto outputDims = ReduceUtils::calcOutputDims(inputDims, axesList, keepDims);
+        const auto model = ReduceUtils::getModel(inputDims, axesList, outputDims, ReduceType, dataPrecision, keepDims);
+
+        TBlob<uint8_t>::Ptr weightsBlob;
+        TBlob<int32_t>::Ptr axesBlob;
+        ReduceUtils::getAxesBlob(axesList, weightsBlob, axesBlob);
+        ASSERT_NE(weightsBlob, nullptr);
+
+        ASSERT_NO_THROW(readNetwork(model, weightsBlob));
+
+        const auto& network = _cnnNetwork;
+
+        _inputsInfo = network.getInputsInfo();
+        _inputsInfo["reduce_input"]->setPrecision(dataPrecision);
+        _inputsInfo["reduce_input"]->setLayout(ReduceUtils::defaultLayout(inputDims.size()));
+
+        _outputsInfo = network.getOutputsInfo();
+        _outputsInfo["reduce"]->setPrecision(dataPrecision);
+        _outputsInfo["reduce"]->setLayout(ReduceUtils::defaultLayout(outputDims.size()));
+
+        StatusCode st = OK;
+
+        ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, _config, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+        ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr inputBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("reduce_input", inputBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr outputBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("reduce", outputBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr refBlob = nullptr;
+        float compareThreshold = 0.0f;
+        if (dataPrecision == Precision::FP16) {
+            auto opIt = refMapFP16.find(ReduceType);
+            ASSERT_TRUE(opIt != refMapFP16.end());
+            compareThreshold = opIt->second.compare_threshold;
+            auto reduceOp = opIt->second.op;
+            auto generateData = opIt->second.generateData;
+            generateData(inputBlob);
+
+            ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+            ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+            refBlob = make_shared_blob<ie_fp16>(outputBlob->getTensorDesc());
+            refBlob->allocate();
+            ref_reduce(inputBlob, axesBlob, refBlob, keepDims, reduceOp);
+            CompareCommonAbsolute(outputBlob, refBlob, compareThreshold);
+        } else if (dataPrecision == Precision::I32) {
+            auto opIt = refMapI32.find(ReduceType);
+            ASSERT_TRUE(opIt != refMapI32.end());
+            auto reduceOp = opIt->second.op;
+            auto generateData = opIt->second.generateData;
+            generateData(inputBlob);
+
+            ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+            ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+            refBlob = make_shared_blob<int32_t>(outputBlob->getTensorDesc());
+            refBlob->allocate();
+            ref_reduce(inputBlob, axesBlob, refBlob, keepDims, reduceOp);
+            CompareCommonExact(outputBlob, refBlob);
+        }
+    }
+};
+
+class myriadTestsReduceAnd_nightly: public ReduceTest<REDUCE_AND>
+{
+};
+
+class myriadTestsReduceMin_nightly: public ReduceTest<REDUCE_MIN>
+{
+};
+
+class myriadTestsReduceMax_nightly: public ReduceTest<REDUCE_MAX>
+{
+};
+
+class myriadTestsReduceSum_nightly: public ReduceTest<REDUCE_SUM>
+{
+};
+
+class myriadTestsReduceMean_nightly: public ReduceTest<REDUCE_MEAN>
+{
+};
+
+// Tests are disabled due to hang: #-28315
+
+TEST_P(myriadTestsReduceAnd_nightly, And)
+{
+    testReduce();
+}
+TEST_P(myriadTestsReduceMin_nightly, Min)
+{
+    testReduce();
+}
+TEST_P(myriadTestsReduceMax_nightly, Max)
+{
+    testReduce();
+}
+TEST_P(myriadTestsReduceSum_nightly, Sum)
+{
+    testReduce();
+}
+TEST_P(myriadTestsReduceMean_nightly, Mean)
+{
+    testReduce();
+}
+
+static const std::vector<SizeVector> s_input_dims =
+        {
+                {1, 3, 2, 14, 32},
+                {2, 2, 2, 14, 32},
+                {3, 5, 4, 8, 16},
+                {4, 2, 16, 16, 8},
+
+                {3, 2, 14, 32},
+                {2, 2, 14, 32},
+                {5, 4, 8, 16},
+                {2, 16, 16, 8},
+
+                {3, 2, 14},
+                {2, 2, 14},
+                {5, 4, 8},
+                {2, 16, 16},
+
+                {7, 3, 5, 1, 7, 11, 12},
+        };
+
+static const std::vector<SizeVector> s_axes_list =
+        {
+                {1},
+                {0, 2},
+                {0, 1, 2},
+        };
+
+static const std::vector<Precision> s_data_precision =
+        {
+                Precision::FP16,
+                Precision::I32
+        };
+
+static const std::vector<bool> s_keep_dims =
+        {
+                false,
+                true,
+        };
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_region_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_region_test.cpp
new file mode 100644 (file)
index 0000000..cad6b54
--- /dev/null
@@ -0,0 +1,36 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_region_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        accuracy, myriadLayerRegionYolo_nightly,
+        ::testing::ValuesIn(s_regionData)
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsRegion_CHW_HW_nightly,
+        ::testing::Combine(
+            ::testing::Values<InferenceEngine::SizeVector>({1, 125, 13, 13})
+          , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<uint32_t>(125)
+          , ::testing::Values<uint32_t>(1)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsRegion_CHW_HW_80cl_nightly,
+        ::testing::Combine(
+            ::testing::Values<InferenceEngine::SizeVector>({1, 425, 13, 13})
+          , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<uint32_t>(425)
+          , ::testing::Values<uint32_t>(1)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayerRegionYolo_CHW_nightly,
+        ::testing::ValuesIn(s_classes)
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_region_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_region_test.hpp
new file mode 100644 (file)
index 0000000..768aba5
--- /dev/null
@@ -0,0 +1,303 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include "myriad_layers_tests.hpp"
+#include <math.h>
+
+using namespace InferenceEngine;
+
+struct region_test_params {
+    tensor_test_params in;
+    int coords;
+    int classes;
+    int num;
+    int maskSize;
+    int doSoftMax;
+    std::string customLayers;
+    friend std::ostream& operator<<(std::ostream& os, region_test_params const& tst)
+    {
+        return os << "tensor (" << tst.in
+                  << "), coords=" << tst.coords
+                  << ", classes=" << tst.classes
+                  << ", num=" << tst.num
+                  << ", maskSize=" << tst.maskSize
+                  << ", doSoftMax=" << tst.doSoftMax
+                  << ", by using custom layer=" << (tst.customLayers.empty() ? "no" : "yes");
+    }
+};
+
+class myriadLayerRegionYolo_nightly: public myriadLayersTests_nightly,
+                             public testing::WithParamInterface<region_test_params> {
+};
+
+TEST_P(myriadLayerRegionYolo_nightly, BaseTestsRegion) {
+    region_test_params p = ::testing::WithParamInterface<region_test_params>::GetParam();
+
+    // TODO: M2 mode is not working for OpenCL compiler
+    if (!p.customLayers.empty() && !CheckMyriadX()) {
+        GTEST_SKIP() << "Custom layers are not supported for MYRIAD2";
+    }
+
+    std::map<std::string, std::string> params;
+
+    params["coords"] = std::to_string(p.coords);
+    params["classes"] = std::to_string(p.classes);
+    params["num"] = std::to_string(p.num);
+    params["mask"] = "0,1,2";
+    params["do_softmax"] = std::to_string(p.doSoftMax);
+
+    InferenceEngine::SizeVector tensor;
+    tensor.resize(4);
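+    // dims are filled in NCHW order: {1, c, h, w}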
+    tensor[3] = p.in.w;
+    tensor[2] = p.in.h;
+    tensor[1] = p.in.c;
+    tensor[0] = 1;
+    _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = p.customLayers;
+    _testNet.addLayer(LayerInitParams("RegionYolo")
+             .params(params)
+             .in({tensor})
+             .out({tensor}),
+             ref_RegionYolo_wrap);
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams().layoutPreference(vpu::LayoutPreference::ChannelMinor)));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), 0.0025);
+}
+
+static std::vector<region_test_params> s_regionData = {
+    region_test_params{{1, (4+20+1)*5, 13, 13}, 4, 20, 5, 3, 1, ""},
+    region_test_params{{1, (4+80+1)*5, 13, 13}, 4, 80, 5, 3, 1, ""},
+    region_test_params{{1, (4+20+1)*3, 13, 13}, 4, 20, 9, 3, 0, ""},
+    region_test_params{{1, (4+80+1)*3, 13, 13}, 4, 80, 9, 3, 0, ""},
+
+#ifdef VPU_HAS_CUSTOM_KERNELS
+   region_test_params{{1, (4+20+1)*5, 13, 13}, 4, 20, 5, 3, 1, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+   region_test_params{{1, (4+80+1)*5, 13, 13}, 4, 80, 5, 3, 1, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+   region_test_params{{1, (4+20+1)*3, 13, 13}, 4, 20, 9, 3, 0, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+   region_test_params{{1, (4+80+1)*3, 13, 13}, 4, 80, 9, 3, 0, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
+#endif
+};
+
+/* An HW-executed network is needed to test the influence of strides on the */
+/* RegionYolo input, so a convolution layer is added as the first layer.    */
+class myriadLayersTestsRegion_CHW_HW_nightly: public ConvolutionTest<>{
+};
+
+/* 80 input classes */
+class myriadLayersTestsRegion_CHW_HW_80cl_nightly: public ConvolutionTest<>{
+};
+
+/* identity weights let the convolution pass the "original" data through */
+template<size_t width>
+void constWeightsRange(uint16_t* ptr, size_t weightsSize) {
+    ASSERT_NE(ptr, nullptr);
+    ASSERT_EQ(weightsSize, width * width);
+    std::memset(ptr, 0, sizeof(uint16_t) * (weightsSize));
+    for (int i = 0; i < weightsSize/width; ++i) {
+        ptr[i * width + i] = PrecisionUtils::f32tof16(1.0f);
+    }
+}
+
+void constBiasesRange(uint16_t* ptr, size_t weightsSize) {
+    std::memset(ptr, 0, sizeof(uint16_t) * (weightsSize));
+}
+
+/* Both YoloV2 inputs are loaded the same way; only the reference binary differs. */
+static void loadDataFromFile(InferenceEngine::Blob::Ptr blob, const std::string& fileName) {
+    /* input blob has predefined size and CHW layout */
+    ASSERT_NE(blob, nullptr);
+    auto inDims = blob->getTensorDesc().getDims();
+    InferenceEngine::Blob::Ptr inputBlobRef =
+            InferenceEngine::make_shared_blob<float>({InferenceEngine::Precision::FP32, inDims, InferenceEngine::NCHW});
+    inputBlobRef->allocate();
+    const float* ref_values = inputBlobRef->buffer();
+
+    std::string inputTensorBinary = TestDataHelpers::get_data_path() + fileName;
+    ASSERT_TRUE(fromBinaryFile(inputTensorBinary, inputBlobRef));
+    uint16_t *inputBlobRawDataFp16 = static_cast<uint16_t *>(blob->buffer());
+    ASSERT_NE(inputBlobRawDataFp16, nullptr);
+
+    switch (blob->getTensorDesc().getLayout()) {
+    case InferenceEngine::NCHW:
+        for (size_t indx = 0; indx < blob->size(); indx++) {
+            inputBlobRawDataFp16[indx] = PrecisionUtils::f32tof16(ref_values[indx]);
+        }
+        break;
+    case InferenceEngine::NHWC:
+        /* repack the CHW reference values into HWC order */
+        for (int h = 0 ; h < inDims[2]; ++h) {
+            for (int w = 0 ; w < inDims[3]; ++w) {
+                for (int c = 0 ; c < inDims[1]; ++c) {
+                    int src_i = w + inDims[3] * h + inDims[3] * inDims[2] * c;
+                    int dst_i = c + inDims[1] * w + inDims[3] * inDims[1] * h;
+                    inputBlobRawDataFp16[dst_i] = PrecisionUtils::f32tof16(ref_values[src_i]);
+                }
+            }
+        }
+        break;
+    default:
+        FAIL() << "unsupported layout: " << blob->getTensorDesc().getLayout();
+    }
+}
+
+void loadData(InferenceEngine::Blob::Ptr blob) {
+    loadDataFromFile(blob, "/vpu/InputYoLoV2Tiny.bin");
+}
+
+void loadData_80cl(InferenceEngine::Blob::Ptr blob) {
+    loadDataFromFile(blob, "/vpu/InputYoLoV2_80cl.bin");
+}
+
+TEST_P(myriadLayersTestsRegion_CHW_HW_nightly, RegionYolo) {
+    std::map<std::string, std::string> params;
+    params["coords"] = "4";
+    params["classes"] = "20";
+    params["num"] = "5";
+    params["mask"] = std::string("0,1,2");
+    params["do_softmax"] = "1";
+    _testNet.addLayer(LayerInitParams("RegionYolo")
+             .params(params)
+             .in({_output_tensor})
+             .out({{1, _output_tensor[0] * _output_tensor[1] * _output_tensor[2] * _output_tensor[3]}}),
+             ref_RegionYolo_wrap);
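+    // constWeightsRange<125> builds a 125x125 identity (125 = (4 + 20 + 1) * 5), so the HW convolution passes data to RegionYolo unchanged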
+    _testNet.setWeightsCallbackForLayer(0, constWeightsRange<125>);
+    _testNet.setBiasesCallbackForLayer(0, constBiasesRange);
+    _genDataCallback = loadData;
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams().useHWOpt(true)));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), 0.0035);
+}
+
+TEST_P(myriadLayersTestsRegion_CHW_HW_80cl_nightly, RegionYolo) {
+    std::map<std::string, std::string> params;
+    params["coords"] = "4";
+    params["classes"] = "80";
+    params["num"] = "5";
+    params["mask"] = std::string("0,1,2");
+    params["do_softmax"] = "1";
+    _testNet.addLayer(LayerInitParams("RegionYolo")
+             .params(params)
+             .in({_output_tensor})
+             .out({{1, _output_tensor[0] * _output_tensor[1] * _output_tensor[2] * _output_tensor[3]}}),
+             ref_RegionYolo_wrap);
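+    // constWeightsRange<425> builds a 425x425 identity (425 = (4 + 80 + 1) * 5), so the HW convolution passes data to RegionYolo unchanged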
+    _testNet.setWeightsCallbackForLayer(0, constWeightsRange<425>);
+    _testNet.setBiasesCallbackForLayer(0, constBiasesRange);
+    _genDataCallback = loadData_80cl;
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams().useHWOpt(true)));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), 0.0060);
+}
+
+class myriadLayerRegionYolo_CHW_nightly: public myriadLayersTests_nightly,
+                             public testing::WithParamInterface<int> {
+};
+
+TEST_P(myriadLayerRegionYolo_CHW_nightly, TestsRegion) {
+    auto classes = GetParam();
+    InferenceEngine::SizeVector input_dims = {1, 125, 13, 13};
+    if (classes == 80) {
+        input_dims[1] = 425;
+    }
+    IN_OUT_desc input_tensor;
+    input_tensor.push_back(input_dims);
+
+    std::map<std::string, std::string> params;
+    params["coords"] = "4";
+    params["classes"] = std::to_string(classes);
+    params["num"] = "5";
+    params["mask"] = std::string("0,1,2");
+    params["do_softmax"] = "1";
+    _testNet.addLayer(LayerInitParams("RegionYolo")
+             .params(params)
+             .in(input_tensor)
+             .out({{1, input_dims[0] * input_dims[1] * input_dims[2] * input_dims[3]}}),
+             ref_RegionYolo_wrap);
+    _genDataCallback = loadData;
+    if (classes == 80) {
+        _genDataCallback = loadData_80cl;
+    }
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()));
+    /* The error bound is rather high; it was set to pass on MYRIAD2 */
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), 0.006);
+}
+
+TEST_P(myriadLayerRegionYolo_CHW_nightly, Test_CHW_HWC_Compare) {
+    auto classes = GetParam();
+    IN_OUT_desc input_tensor;
+    InferenceEngine::SizeVector input_dims = {1, 125, 13, 13};
+    if (classes == 80) {
+        input_dims[1] = 425;
+    }
+
+    input_tensor.push_back(input_dims);
+
+    std::map<std::string, std::string> params;
+    params["coords"] = "4";
+    params["classes"] = std::to_string(classes);
+    params["num"] = "5";
+    params["mask"] = std::string("0,1,2");
+    params["do_softmax"] = "1";
+    _testNet.addLayer(LayerInitParams("RegionYolo")
+             .params(params)
+             .in(input_tensor)
+             .out({{1, input_dims[0] * input_dims[1] * input_dims[2] * input_dims[3]}}),
+             ref_RegionYolo_wrap);
+    if (classes == 80) {
+        _genDataCallback = loadData_80cl;
+    }
+    _config[VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION)] = CONFIG_VALUE(NO);
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams().useHWOpt(false).runRefGraph(false)));
+    /* Create an NHWC version of the network.            */
+    /* We cannot reuse the same generateNetAndInfer call */
+    /* due to an IE bug.                                 */
+    InferenceEngine::InputsDataMap           inputsInfo;
+    InferenceEngine::BlobMap                 outputMap;
+    InferenceEngine::OutputsDataMap          outputsInfo;
+    InferenceEngine::IExecutableNetwork::Ptr exeNetwork;
+    InferenceEngine::IInferRequest::Ptr      inferRequest;
+
+    _inputsInfo.begin()->second->setLayout(NHWC);
+    _outputsInfo.begin()->second->setLayout(NC);
+
+    InferenceEngine::StatusCode st = InferenceEngine::StatusCode::GENERAL_ERROR;
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(exeNetwork, _cnnNetwork, _config, &_resp));
+    ASSERT_NE(exeNetwork, nullptr) << _resp.msg;
+    ASSERT_NO_THROW(exeNetwork->CreateInferRequest(inferRequest, &_resp)) << _resp.msg;
+    ASSERT_NE(inferRequest, nullptr) << _resp.msg;
+    ASSERT_NO_THROW(inputsInfo = _cnnNetwork.getInputsInfo());
+    for (auto& in : _inputsInfo) {
+        Blob::Ptr inpt;
+        ASSERT_NO_THROW(_inferRequest->GetBlob(in.first.c_str(), inpt, &_resp));
+        ASSERT_NO_THROW(inferRequest->SetBlob(in.first.c_str(), inpt, &_resp));
+    }
+    ASSERT_NO_THROW(outputsInfo = _cnnNetwork.getOutputsInfo());
+    auto outIt = _outputsInfo.begin();
+    for (auto outputInfo : outputsInfo) {
+        outputInfo.second->setPrecision(outIt->second->getTensorDesc().getPrecision());
+        InferenceEngine::SizeVector outputDims = outputInfo.second->getTensorDesc().getDims();
+        Blob::Ptr outputBlob = nullptr;
+        Layout layout = outIt->second->getTensorDesc().getLayout();
+        // reuse the precision and layout taken from the first (CHW) run's output descriptor
+        switch (outputInfo.second->getPrecision()) {
+        case Precision::FP16:
+            outputBlob = InferenceEngine::make_shared_blob<ie_fp16>({Precision::FP16, outputDims, layout});
+            break;
+        case Precision::FP32:
+            outputBlob = InferenceEngine::make_shared_blob<float>({Precision::FP32, outputDims, layout});
+            break;
+        default:
+            THROW_IE_EXCEPTION << "Unsupported output precision; only FP16 and FP32 are supported";
+        }
+        outputBlob->allocate();
+        st = inferRequest->SetBlob(outputInfo.first.c_str(), outputBlob, &_resp);
+        outputMap[outputInfo.first] = outputBlob;
+        ASSERT_EQ((int) InferenceEngine::StatusCode::OK, st) << _resp.msg;
+        ++outIt;
+    }
+    ASSERT_EQ(inferRequest->Infer(&_resp), InferenceEngine::OK);
+    /* The error bound is suspiciously high; investigation TBD */
+    CompareCommonAbsolute(_outputMap.begin()->second, outputMap.begin()->second, 0.001);
+}
+
+const std::vector<int> s_classes = {20, 80};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_relu_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_relu_test.cpp
new file mode 100644 (file)
index 0000000..63218e5
--- /dev/null
@@ -0,0 +1,296 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_relu_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayerReLU_nightly,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(s_copyTensors),
+                                ::testing::ValuesIn(s_reluLayerParams)
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(
+        accuracy, myriadLayerFullyConnectedWithReLU_nightly,
+        ::testing::Combine(
+                ::testing::ValuesIn(g_fcTestParamsSubset),
+                ::testing::Values(g_dimensionsFC[0]),
+                ::testing::ValuesIn(g_addBiasFC),
+                ::testing::ValuesIn(s_reluLayerParams)
+        )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsMaxPoolingWithReLU_nightly,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(g_poolingInput),
+                                ::testing::ValuesIn(g_poolingLayerParamsLite),
+                                ::testing::ValuesIn(g_poolingLayout),
+                                ::testing::ValuesIn(s_reluLayerParams))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsAvgPoolingWithReLU_nightly,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(g_poolingInput),
+                                ::testing::ValuesIn(g_poolingLayerParamsLite),
+                                ::testing::ValuesIn(g_poolingLayout),
+                                ::testing::ValuesIn(s_reluLayerParams))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_postop, myriadLayersTestsMaxPoolingWithReLU_nightly,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(g_poolingInput_postOp),
+                                ::testing::Values<pooling_layer_params>(MAKE_STRUCT(pooling_layer_params, {3, 3}, {1, 1}, {1, 1})),
+                                ::testing::ValuesIn(g_poolingLayout),
+                                ::testing::Values<ReLULayerDef>(MAKE_STRUCT(ReLULayerDef, {{{"negative_slope", "0.0"}}})))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_postop, myriadLayersTestsAvgPoolingWithReLU_nightly,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(g_poolingInput_postOp),
+                                ::testing::Values<pooling_layer_params>(MAKE_STRUCT(pooling_layer_params, {3, 3}, {1, 1}, {1, 1})),
+                                ::testing::ValuesIn(g_poolingLayout),
+                                ::testing::Values<ReLULayerDef>(MAKE_STRUCT(ReLULayerDef, {{{"negative_slope", "0.0"}}})))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayerConvolutionWithReLU_nightly,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(g_convolutionTensors)
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<uint32_t>(16)
+                                , ::testing::Values<uint32_t>(1)
+                                , ::testing::ValuesIn(s_reluLayerParams)
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy_postop, myriadLayerConvolutionWithReLU_nightly,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(g_poolingInput_postOp)
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1)/*, MAKE_STRUCT(param_size, 2, 2)*/)
+                                , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<uint32_t>(32)
+                                , ::testing::Values<uint32_t>(32)
+                                , ::testing::Values<ReLULayerDef>(MAKE_STRUCT(ReLULayerDef, {{{"negative_slope", "0.0"}}}))
+                        )
+);
+
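+// Regression test: LoadNetwork must not throw when a convolution output feeds
+// both a ReLU and a grouped Deconvolution.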
+TEST_F(myriadLayersTests_nightly, graphTransformerNotThrowExceptionIfConvOutputIsInputForReLUAndGroupDeconv) {
+    const std::string model = R"V0G0N(
+    <net name="multi_hcp01" version="2" batch="1">
+            <layers>
+                <layer name="input" type="Input" precision="FP16" id="0">
+                    <output>
+                        <port id="0">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>23</dim>
+                            <dim>40</dim>
+                        </port>
+                    </output>
+                </layer>
+               <layer name="conv1" type="Convolution" precision="FP16" id="1">
+                    <convolution_data stride-x="1" stride-y="1" pad-x="0" pad-y="0" kernel-x="1" kernel-y="1" output="3" group="1"/>
+                    <input>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>23</dim>
+                            <dim>40</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="2">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>23</dim>
+                            <dim>40</dim>
+                        </port>
+                    </output>
+                    <weights offset="0" size="18"/>
+                    <biases offset="18" size="6"/>
+                </layer>
+                <layer name="conv1/relu" type="ReLU" precision="FP16" id="2">
+                    <data negative_slope="0.000000" engine="caffe.ReLUParameter.DEFAULT"/>
+                    <input>
+                        <port id="3">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>23</dim>
+                            <dim>40</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="4">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>23</dim>
+                            <dim>40</dim>
+                        </port>
+                    </output>
+                </layer>
+            <layer name="deconv" type="Deconvolution" precision="FP16" id="3">
+                <deconvolution_data stride-x="2" stride-y="2" pad-x="1" pad-y="1" kernel-x="4" kernel-y="4" output="3" group="3"/>
+                <input>
+                    <port id="5">
+                        <dim>1</dim>
+                        <dim>3</dim>
+                        <dim>23</dim>
+                        <dim>40</dim>
+                    </port>
+                </input>
+                <output>
+                    <port id="6">
+                        <dim>1</dim>
+                        <dim>3</dim>
+                        <dim>46</dim>
+                        <dim>80</dim>
+                    </port>
+                </output>
+                <weights offset="24" size="96"/>
+                <biases offset="120" size="0"/>
+            </layer>
+        </layers>
+            <edges>
+                <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
+                <edge from-layer="1" from-port="2" to-layer="2" to-port="3"/>
+                <edge from-layer="1" from-port="2" to-layer="3" to-port="5"/>
+            </edges>
+        </net>
+        )V0G0N";
+
+    TBlob<uint8_t>::Ptr weightsBlob(GenWeights(120));
+    StatusCode st = StatusCode::GENERAL_ERROR;
+
+    ASSERT_NO_THROW(readNetwork(model, weightsBlob));
+
+    const auto& network = _cnnNetwork;
+
+    _inputsInfo = network.getInputsInfo();
+    _inputsInfo["input"]->setPrecision(Precision::FP16);
+
+    _outputsInfo = network.getOutputsInfo();
+    _outputsInfo["conv1/relu"]->setPrecision(Precision::FP16);
+    _outputsInfo["deconv"]->setPrecision(Precision::FP16);
+
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, {}, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+}
+
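+// Here ReLU cannot be fused into the convolution as a post-op, because the
+// convolution output also feeds the Power layer; loading must still succeed.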
+TEST_F(myriadLayersTests_nightly, ReLU_PostOp_Conflict) {
+    const std::string model = R"V0G0N(
+        <Net name="ReLU_PostOp_Conflict" version="2" batch="1">
+            <layers>
+                <layer name="input" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>64</dim>
+                            <dim>64</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="conv" type="Convolution" precision="FP16" id="2">
+                    <convolution_data
+                        stride-x="1"
+                        stride-y="1"
+                        pad-x="1"
+                        pad-y="1"
+                        kernel-x="3"
+                        kernel-y="3"
+                        output="16"
+                        group="1"/>
+                    <input>
+                        <port id="2">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>64</dim>
+                            <dim>64</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="3">
+                            <dim>1</dim>
+                            <dim>16</dim>
+                            <dim>64</dim>
+                            <dim>64</dim>
+                        </port>
+                    </output>
+                    <weights offset="0" size="864"/>
+                    <biases offset="864" size="32"/>
+                </layer>
+                <layer name="relu" type="ReLU" precision="FP16" id="3">
+                    <data negative_slope="0.0" engine="caffe.ReLUParameter.DEFAULT"/>
+                    <input>
+                        <port id="4">
+                            <dim>1</dim>
+                            <dim>16</dim>
+                            <dim>64</dim>
+                            <dim>64</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="5">
+                            <dim>1</dim>
+                            <dim>16</dim>
+                            <dim>64</dim>
+                            <dim>64</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="power" type="Power" precision="FP16" id="4">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="6">
+                            <dim>1</dim>
+                            <dim>16</dim>
+                            <dim>64</dim>
+                            <dim>64</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="7">
+                            <dim>1</dim>
+                            <dim>16</dim>
+                            <dim>64</dim>
+                            <dim>64</dim>
+                        </port>
+                    </output>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="1" to-layer="2" to-port="2"/>
+                <edge from-layer="2" from-port="3" to-layer="3" to-port="4"/>
+                <edge from-layer="2" from-port="3" to-layer="4" to-port="6"/>
+            </edges>
+        </Net>
+    )V0G0N";
+
+    size_t num_weights = 432;
+    size_t num_bias = 16;
+
+    TBlob<uint8_t>::Ptr weights(GenWeights(num_weights + num_bias));
+
+    StatusCode st = StatusCode::GENERAL_ERROR;
+
+    ASSERT_NO_THROW(readNetwork(model, weights));
+
+    const auto& network = _cnnNetwork;
+
+    _inputsInfo = network.getInputsInfo();
+    _inputsInfo["input"]->setPrecision(Precision::FP16);
+
+    _outputsInfo = network.getOutputsInfo();
+    _outputsInfo["relu"]->setPrecision(Precision::FP16);
+    _outputsInfo["power"]->setPrecision(Precision::FP16);
+
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, {}, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_relu_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_relu_test.hpp
new file mode 100644 (file)
index 0000000..eb1e2ce
--- /dev/null
@@ -0,0 +1,171 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <precision_utils.h>
+#include "myriad_layers_tests.hpp"
+#include "myriad_layers_reference_functions.hpp"
+
+using namespace InferenceEngine;
+
+const std::string relu_param = "negative_slope";
+
+class myriadLayersTestsReLUMergeWithBias_nightly : public myriadLayersTests_nightly {
+public:
+    void RunTest(const std::string& model, size_t num_weights, size_t num_bias) {
+        StatusCode st = StatusCode::GENERAL_ERROR;
+
+        TBlob<uint8_t>::Ptr weights(GenWeights(num_weights + num_bias));
+
+        ASSERT_NO_THROW(readNetwork(model, weights));
+
+        const auto& network = _cnnNetwork;
+
+        _inputsInfo = network.getInputsInfo();
+        _inputsInfo["input"]->setPrecision(Precision::FP16);
+
+        _outputsInfo = network.getOutputsInfo();
+        _outputsInfo["relu"]->setPrecision(Precision::FP16);
+
+        ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, { {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)},
+                                                                                  {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(NO)} },
+                                                          &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+        ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        std::map<std::string, InferenceEngineProfileInfo> perfMap;
+        ASSERT_NO_THROW(st = _inferRequest->GetPerformanceCounts(perfMap, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        {
+            auto reluAndBiasLayerIt = perfMap.find("relu+Bias");
+            ASSERT_TRUE(reluAndBiasLayerIt != perfMap.end());
+            EXPECT_EQ(InferenceEngineProfileInfo::EXECUTED, reluAndBiasLayerIt->second.status);
+        }
+    }
+};
+
+#define ERROR_BOUND (1.e-4f)
+
+struct ReLULayerDef {
+    ParamsStruct list;
+};
+
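+// negative_slope == 0.0 is plain ReLU; a non-zero slope exercises LeakyReLU.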
+static std::vector<ReLULayerDef> s_reluLayerParams = {
+    {{{"negative_slope", "0.0"}}},
+    {{{"negative_slope", "0.1"}}},
+};
+
+typedef myriadLayerTestBaseWithParam<std::tuple<InferenceEngine::SizeVector, ReLULayerDef>> myriadLayerReLU_nightly;
+
+TEST_P(myriadLayerReLU_nightly, ReLU) {
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+    auto input_dims = std::get<0>(GetParam());
+    auto extraLayerParams = std::get<1>(GetParam());
+    IN_OUT_desc input_tensor;
+    input_tensor.push_back(input_dims);
+
+    /* A Copy layer is added first so that the output buffer gets filled */
+    _testNet.addLayer(LayerInitParams("Copy")
+             .in(input_tensor)
+             .out(input_tensor),
+             ref_copy_wrap);
+
+    _testNet.addLayer(LayerInitParams("ReLU")
+             .params(extraLayerParams.list)
+             .in({input_tensor})
+             .out({input_tensor}),
+             ref_ReLU_wrap);
+
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), ERROR_BOUND);
+}
+
+static std::vector<InferenceEngine::SizeVector> s_copyTensors = {
+    {
+        {16, 18},
+        {1, 8, 16, 32},
+        {12, 32, 64, 32, 12},
+        {24, 32, 16},
+    },
+};
+
+class myriadLayerFullyConnectedWithReLU_nightly: public FCTest<ReLULayerDef>{
+};
+
+TEST_P(myriadLayerFullyConnectedWithReLU_nightly, TestsFullyConnected)
+{
+    auto p = ::testing::WithParamInterface<std::tuple<fcon_test_params, int32_t, int32_t, ReLULayerDef>>::GetParam();
+    auto extraLayerParams = std::get<3>(p);
+    _testNet.addLayer(LayerInitParams("ReLU")
+             .params(extraLayerParams.list)
+             .in({_output_tensor})
+             .out({_output_tensor}),
+             ref_ReLU_wrap);
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), _par.error_bound);
+}
+
+#define ERROR_BOUND_WITH_RELU (4.e-3f)
+
+class myriadLayersTestsMaxPoolingWithReLU_nightly: public PoolingTest<POOLING_MAX, ReLULayerDef>{
+};
+
+class myriadLayersTestsAvgPoolingWithReLU_nightly: public PoolingTest<POOLING_AVG, ReLULayerDef>{
+};
+
+TEST_P(myriadLayersTestsMaxPoolingWithReLU_nightly, TestsMaxPoolingWithReLU)
+{
+    auto p = ::testing::WithParamInterface<std::tuple<InferenceEngine::SizeVector, pooling_layer_params, vpu::LayoutPreference, ReLULayerDef>>::GetParam();
+    auto extraLayerParams = std::get<3>(p);
+    _testNet.addLayer(LayerInitParams("ReLU")
+             .params(extraLayerParams.list)
+             .in({_output_tensor})
+             .out({_output_tensor}),
+             ref_ReLU_wrap);
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), ERROR_BOUND_WITH_RELU);
+}
+
+TEST_P(myriadLayersTestsAvgPoolingWithReLU_nightly, TestsAvgPoolingWithReLU)
+{
+    auto p = ::testing::WithParamInterface<std::tuple<InferenceEngine::SizeVector, pooling_layer_params, vpu::LayoutPreference, ReLULayerDef>>::GetParam();
+    auto extraLayerParams = std::get<3>(p);
+    _testNet.addLayer(LayerInitParams("ReLU")
+             .params(extraLayerParams.list)
+             .in({_output_tensor})
+             .out({_output_tensor}),
+             ref_ReLU_wrap);
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), ERROR_BOUND_WITH_RELU);
+}
+
+class myriadLayerConvolutionWithReLU_nightly: public ConvolutionTest<ReLULayerDef>{
+};
+
+TEST_P(myriadLayerConvolutionWithReLU_nightly, Convolution) {
+    auto p = ::testing::WithParamInterface<std::tuple<InferenceEngine::SizeVector, param_size, param_size, param_size, uint32_t, uint32_t, ReLULayerDef>>::GetParam();
+    auto ReLUParam = std::get<6>(p);
+    _testNet.addLayer(LayerInitParams("ReLU")
+             .params(ReLUParam.list)
+             .in({_output_tensor})
+             .out({_output_tensor}),
+             ref_ReLU_wrap);
+
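+    // The FP16 error bound scales with the number of accumulated products per
+    // output element: input channels (per group) times the kernel area.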
+    float maxerr = 0;
+    if (group == 1)
+        maxerr = 0.00055 * IC * kernel.x * kernel.y;
+    else // TODO: depthwise convolution is currently slightly less accurate
+        maxerr = 0.00066 * (IC / group) * kernel.x * kernel.y;
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), maxerr);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reorg_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reorg_test.cpp
new file mode 100644 (file)
index 0000000..bd604f4
--- /dev/null
@@ -0,0 +1,32 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_reorg_test.hpp"
+
+static std::vector<std::string> s_CustomConfig = {
+    "",
+#ifdef VPU_HAS_CUSTOM_KERNELS
+    getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"
+#endif
+};
+
+static std::vector<layoutPreference> layoutPreferences = {
+    vpu::LayoutPreference::ChannelMajor,
+#ifndef VPU_HAS_CUSTOM_KERNELS
+    vpu::LayoutPreference::ChannelMinor
+#endif
+};
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsReorg_nightly, ::testing::Combine(
+    ::testing::Values<DimsInput>(
+        MAKE_STRUCT(tensor_test_params, 1, 64, 26, 26),
+        MAKE_STRUCT(tensor_test_params, 1, 192, 6 * 26, 6 * 26),
+        MAKE_STRUCT(tensor_test_params, 1,  4,  6,  6)
+    ),
+    ::testing::Values<ScaleOutput>(2),
+    ::testing::Values<Stride>(2),
+    ::testing::ValuesIn(layoutPreferences),
+    ::testing::ValuesIn(s_CustomConfig),
+    ::testing::Values<IRVersion>(IRVersion::v7, IRVersion::v10)
+));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reorg_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reorg_test.hpp
new file mode 100644 (file)
index 0000000..be5cdad
--- /dev/null
@@ -0,0 +1,125 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include "myriad_layers_tests.hpp"
+
+using std::tuple;
+using std::get;
+
+using namespace InferenceEngine;
+
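+// Reference ReorgYolo: stride x stride spatial blocks are gathered into the
+// channel dimension, turning [C, H, W] into [C*stride^2, H/stride, W/stride].
+// The input is FP16 stored as raw shorts; the output is FP32, so each element
+// is converted with f16tof32 on the fly.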
+static void reorg_calculate(short *inp, int w, int h, int c, int batch, int stride, float *out)
+{
+    int out_c = c / (stride*stride);
+
+    int oc = c * (stride*stride);
+    int oh = h / stride;
+    int ow = w / stride;
+
+    for(int b = 0; b < batch; ++b)
+    {
+        for(int k = 0; k < c; ++k)
+        {
+            for(int j = 0; j < h; ++j)
+            {
+                for(int i = 0; i < w; ++i)
+                {
+                    int in_index = i + w * (j + h * (k + c * b));
+
+                    int new_z = in_index / (oh*ow);
+                    int new_y = (in_index %(oh*ow)) / ow;
+                    int new_x = (in_index %(oh*ow)) % ow;
+                    int new_index = new_z + new_x * oc + new_y * oc * ow;
+
+                    int c2 = k % out_c;
+                    int offset = k / out_c;
+                    int w2 = i*stride + offset % stride;
+                    int h2 = j*stride + offset / stride;
+                    int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
+
+                    out[new_index] = PrecisionUtils::f16tof32(inp[out_index]);
+                }
+            }
+        }
+    }
+}
+
+PRETTY_PARAM(Stride, int);
+PRETTY_PARAM(ScaleOutput, int);
+PRETTY_PARAM(layoutPreference, vpu::LayoutPreference);
+
+
+typedef myriadLayerTestBaseWithParam<tuple<DimsInput, ScaleOutput, Stride, layoutPreference, std::string, IRVersion>> myriadLayersTestsReorg_nightly;
+
+TEST_P(myriadLayersTestsReorg_nightly, TestsReorg) {
+
+    // TODO: M2 mode does not work with the OpenCL compiler
+    if (!get<4>(GetParam()).empty() && !CheckMyriadX()) {
+        GTEST_SKIP() << "Custom layers are not supported on MYRIAD2";
+    }
+
+    tensor_test_params dimsInput = get<0>(GetParam());
+
+    int scaleOutput = get<1>(GetParam());
+    tensor_test_params dimsOutput = {dimsInput.n, dimsInput.c * (scaleOutput * scaleOutput), dimsInput.h / scaleOutput, dimsInput.w / scaleOutput};
+
+    int stride = get<2>(GetParam());
+    auto layoutPreference = get<3>(GetParam());
+    _irVersion = get<5>(GetParam());
+    std::map<std::string, std::string> params;
+    std::string type = "ReorgYolo";
+
+    params["stride"] = std::to_string(stride);
+    SetInputTensor(dimsInput);
+    SetOutputTensor(dimsOutput);
+    _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = get<4>(GetParam());
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams(type)
+                                                   .params(params),
+                                                   NetworkInitParams().layoutPreference(layoutPreference)
+                                                   .outputPrecision(InferenceEngine::Precision::FP32)));
+    /* input data preparation */
+    SetInputInOrder();
+
+    ASSERT_TRUE(Infer());
+    InferenceEngine::SizeVector inputDims = _inputsInfo.begin()->second->getTensorDesc().getDims();
+    InferenceEngine::Blob::Ptr inputBlobRef =
+            InferenceEngine::make_shared_blob<short>({InferenceEngine::Precision::FP16, inputDims, InferenceEngine::NHWC});
+    inputBlobRef->allocate();
+    short *inputBlobRefRawData = inputBlobRef->buffer();
+
+    int c = inputDims[1];
+    int h = inputDims[2];
+    int w = inputDims[3];
+
+    auto inputBlob = _inputMap[_inputsInfo.begin()->first];
+    short* inputBlob_data = inputBlob->buffer();
+
+    /* Preliminary repacking: rearrange the NHWC input into planar (NCHW) order for the reference */
+    for(int k = 0; k < c; k++)
+    {
+        for(int j = 0; j < h; j++)
+        {
+            for(int i = 0; i < w; i++)
+            {
+                int dst_index = i + w * j + w * h * k;
+                int src_index = k + c * i + c * w * j;
+
+                inputBlobRefRawData[dst_index] = inputBlob_data[src_index];
+            }
+        }
+    }
+
+    auto outputBlob = _outputMap[_outputsInfo.begin()->first];
+    InferenceEngine::SizeVector outputDims = _outputsInfo.begin()->second->getTensorDesc().getDims();
+
+    InferenceEngine::TBlob<float>::Ptr outputBlobRef =
+                InferenceEngine::make_shared_blob<float>(TensorDesc(InferenceEngine::Precision::FP32, outputDims, InferenceEngine::NCHW));
+    outputBlobRef->allocate();
+    float *outputBlobRefRawData = outputBlobRef->buffer();
+
+    reorg_calculate(inputBlobRefRawData, w, h, c, 1, stride, outputBlobRefRawData);
+
+    compare(outputBlob->buffer(), outputBlobRef->buffer(), outputBlob->size(), 0.0);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_resample_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_resample_test.cpp
new file mode 100644 (file)
index 0000000..7528199
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_resample_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(myriad, myriadResampleLayerTests_nightly,
+        ::testing::Combine(
+        ::testing::Values(CONFIG_VALUE(NO), CONFIG_VALUE(YES)),
+        ::testing::ValuesIn(s_ResampleCustomConfig),
+        ::testing::ValuesIn(s_ResampleAntialias)));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_resample_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_resample_test.hpp
new file mode 100644 (file)
index 0000000..40b2e76
--- /dev/null
@@ -0,0 +1,210 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <cmath>
+#include "myriad_layers_tests.hpp"
+
+using namespace InferenceEngine;
+
+#define ERROR_BOUND 1e-3
+
+static inline float triangleCoeff(float x)
+{
+    return (1.0f - fabsf(x));
+}
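+
+// Reference Resample: nearest-neighbor when antialias == 0, otherwise a
+// triangle-filter (tent) weighted average over a window of radius rx/ry.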
+void refResample(const Blob::Ptr src, Blob::Ptr dst, int antialias) {
+    ie_fp16 *src_data = static_cast<ie_fp16*>(src->buffer());
+    ie_fp16 *output_sequences = static_cast<ie_fp16*>(dst->buffer());
+    ASSERT_NE(src_data, nullptr);
+    ASSERT_NE(output_sequences, nullptr);
+
+    const auto& src_dims = src->getTensorDesc().getDims();
+    const auto& dst_dims = dst->getTensorDesc().getDims();
+    int OH = dst_dims[2];
+    int OW = dst_dims[3];
+
+    int C  = src_dims[1];
+    int IH = src_dims[2];
+    int IW = src_dims[3];
+
+    if (IH == OH && IW == OW)
+    {
+        for (int c = 0; c < C; c++)
+            for (int h = 0; h < IH; h++)
+                for (int w = 0; w < IW; w++) {
+                    int dst_index = w + IW * h + IW * IH * c;
+                    output_sequences[dst_index] = src_data[dst_index];
+                }
+        return;
+    }
+
+    const float fy = static_cast<float>(IH) / static_cast<float>(OH);
+    const float fx = static_cast<float>(IW) / static_cast<float>(OW);
+
+    float ax = 1.0f / fx;
+    float ay = 1.0f / fy;
+
+    int rx = (fx < 1.0f) ? 2 : ceil((1.0f)/ax);
+    int ry = (fy < 1.0f) ? 2 : ceil((1.0f)/ay);
+
+    for (int c = 0; c < C; c++)
+    {
+        const ie_fp16* in_ptr = src_data + IW*IH*c;
+        ie_fp16* out_ptr = output_sequences + OW*OH*c;
+
+        for (int oy = 0; oy < OH; oy++)
+        {
+            for (int ox = 0; ox < OW; ox++)
+            {
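+                // Map the output pixel center back to input coordinates
+                // (half-pixel alignment).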
+                float ix = ox*fx + fx / 2.0f - 0.5f;
+                float iy = oy*fy + fy / 2.0f - 0.5f;
+
+                int ix_r = (int)(round(ix));
+                int iy_r = (int)(round(iy));
+
+                float sum=0;
+                float wsum=0;
+
+                if (antialias) {
+                    for (int y = iy_r - ry; y <= iy_r + ry; y++)
+                    {
+                        for (int x = ix_r - rx; x <= ix_r + rx; x++)
+                        {
+                            if (y < 0 || x < 0) continue;
+                            if (y >= (int)IH || x >= (int)IW) continue;
+
+                            float dx = ix - x;
+                            float dy = iy - y;
+
+                            float w = ax*triangleCoeff(ax*dx) * ay*triangleCoeff(ay*dy);
+
+                            sum += w * PrecisionUtils::f16tof32(in_ptr[y*IW + x]);
+                            wsum += w;
+                        }
+                    }
+                    out_ptr[oy * OW + ox] = PrecisionUtils::f32tof16((!wsum) ? 0.0f : (sum / wsum));
+                }
+                else {
+                    out_ptr[oy * OW + ox] = in_ptr[iy_r * IW + ix_r];
+                }
+            }
+        }
+    }
+}
+
+PRETTY_PARAM(hwAcceleration, std::string);
+PRETTY_PARAM(customConfig, std::string);
+PRETTY_PARAM(Antialias, int);
+
+typedef myriadLayerTestBaseWithParam<std::tuple<std::string, std::string, Antialias>> myriadResampleLayerTests_nightly;
+
+TEST_P(myriadResampleLayerTests_nightly, Resample) {
+    std::string model = R"V0G0N(
+       <net name="Resample" version="2" batch="1">
+           <layers>
+            <layer id="0" name="data" precision="FP16" type="Input">
+                <output>
+                    <port id="0">
+                        <dim>1</dim>
+                        <dim>128</dim>
+                        <dim>26</dim>
+                        <dim>26</dim>
+                    </port>
+                </output>
+            </layer>
+               <layer id="1" name="detector/yolo-v3/ResizeNearestNeighbor" precision="FP16" type="Resample">
+                  <data antialias="@TEST@" factor="2.0" type="caffe.ResampleParameter.NEAREST" fx="0.5" fy="0.5"/>
+                <input>
+                    <port id="1">
+                        <dim>1</dim>
+                        <dim>128</dim>
+                        <dim>26</dim>
+                        <dim>26</dim>
+                    </port>
+                </input>
+                <output>
+                    <port id="2">
+                        <dim>1</dim>
+                        <dim>128</dim>
+                        <dim>52</dim>
+                        <dim>52</dim>
+                    </port>
+                </output>
+            </layer>
+           </layers>
+           <edges>
+               <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
+           </edges>
+       </net>
+   )V0G0N";
+
+    SetSeed(DEFAULT_SEED_VALUE + 6);
+
+    std::string HWConfigValue = std::get<0>(GetParam());
+    std::string customConfig = std::get<1>(GetParam());
+    int antialias = std::get<2>(GetParam());
+
+    model.replace(model.find("@TEST@"), sizeof("@TEST@") - 1, std::to_string(antialias));
+    if ((customConfig != "") || (antialias != 1)) {
+        if (!customConfig.empty() && !CheckMyriadX()) {
+            GTEST_SKIP() << "Custom layers are not supported on MYRIAD2";
+        }
+        _config[VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION)] = HWConfigValue;
+        _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig;
+        StatusCode st;
+
+        ASSERT_NO_THROW(readNetwork(model));
+
+        const auto& network = _cnnNetwork;
+
+        _inputsInfo = network.getInputsInfo();
+        _inputsInfo["data"]->setPrecision(Precision::FP16);
+        _inputsInfo["data"]->setLayout(NCHW);
+
+        _outputsInfo = network.getOutputsInfo();
+        _outputsInfo["detector/yolo-v3/ResizeNearestNeighbor"]->setPrecision(Precision::FP16);
+
+        ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network,
+                                                        {{VPU_CONFIG_KEY(CUSTOM_LAYERS), customConfig}, {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), HWConfigValue}}, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+        ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr data;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("data", data, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        GenRandomData(data);
+
+        ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr outputBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("detector/yolo-v3/ResizeNearestNeighbor", outputBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        _refBlob = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, outputBlob->getTensorDesc().getDims(), NCHW));
+        _refBlob->allocate();
+
+        refResample(data, _refBlob, antialias);
+
+        CompareCommonAbsolute(outputBlob, _refBlob, ERROR_BOUND);
+    }
+}
+
+static std::vector<std::string> s_ResampleCustomConfig = {
+    "",
+#ifdef VPU_HAS_CUSTOM_KERNELS
+    getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"
+#endif
+};
+
+static std::vector<Antialias> s_ResampleAntialias = {
+        {0, 1}
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reshape_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reshape_test.cpp
new file mode 100644 (file)
index 0000000..3a9f302
--- /dev/null
@@ -0,0 +1,324 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "ngraph_functions/subgraph_builders.hpp"
+#include "myriad_layers_reshape_test.hpp"
+
+TEST_F(myriadEliminateReshapeTests_nightly, SplitConvConcat) {
+    ASSERT_NO_THROW(_cnnNetwork = InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSplitConvConcat()));
+
+    StatusCode st;
+
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, _cnnNetwork,
+                                                        {
+                                                            {
+                                                                VPU_CONFIG_KEY(PERF_REPORT_MODE),
+                                                                VPU_CONFIG_VALUE(PER_STAGE)
+                                                            },
+                                                            {
+                                                                VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION),
+                                                                CONFIG_VALUE(NO)
+                                                            },
+                                                            {
+                                                                CONFIG_KEY(PERF_COUNT),
+                                                                CONFIG_VALUE(YES)
+                                                            }
+                                                        },
+                                                      &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    std::map<std::string, InferenceEngineProfileInfo> perfMap;
+    ASSERT_NO_THROW(st = _inferRequest->GetPerformanceCounts(perfMap, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    auto layerInfo = perfMap["MobilenetV1/Logits/SpatialSqueeze"];
+    ASSERT_EQ(InferenceEngineProfileInfo::NOT_RUN, layerInfo.status);
+}
+
+TEST_F(myriadLayersTests_nightly, ReshapeAfterConcat_Eliminate) {
+    std::string model = R"V0G0N(
+        <net name="ReshapeAfterConcat_Eliminate" version="2" batch="1">
+            <layers>
+                <layer name="input1" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>30</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="input2" type="Input" precision="FP16" id="2">
+                    <output>
+                        <port id="2">
+                            <dim>1</dim>
+                            <dim>20</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="input3" type="Input" precision="FP16" id="3">
+                    <output>
+                        <port id="3">
+                            <dim>1</dim>
+                            <dim>10</dim>
+                        </port>
+                    </output>
+                </layer>
+
+                <layer name="input1_copy" type="Power" precision="FP16" id="4">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="4">
+                            <dim>1</dim>
+                            <dim>30</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="5">
+                            <dim>1</dim>
+                            <dim>30</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="input2_copy" type="Power" precision="FP16" id="5">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="6">
+                            <dim>1</dim>
+                            <dim>20</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="7">
+                            <dim>1</dim>
+                            <dim>20</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="input3_copy" type="Power" precision="FP16" id="6">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="8">
+                            <dim>1</dim>
+                            <dim>10</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="9">
+                            <dim>1</dim>
+                            <dim>10</dim>
+                        </port>
+                    </output>
+                </layer>
+
+                <layer name="concat" type="Concat" precision="FP16" id="7">
+                    <concat_data axis="1"/>
+                    <input>
+                        <port id="10">
+                            <dim>1</dim>
+                            <dim>30</dim>
+                        </port>
+                        <port id="11">
+                            <dim>1</dim>
+                            <dim>20</dim>
+                        </port>
+                        <port id="12">
+                            <dim>1</dim>
+                            <dim>10</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="13">
+                            <dim>1</dim>
+                            <dim>60</dim>
+                        </port>
+                    </output>
+                </layer>
+
+                <layer name="reshape" type="Reshape" precision="FP16" id="8">
+                    <data dim="0,-1,30" axis="0" num_axes="-1"/>
+                    <input>
+                        <port id="14">
+                            <dim>1</dim>
+                            <dim>60</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="15">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>30</dim>
+                        </port>
+                    </output>
+                </layer>
+
+                <layer name="reshape_copy" type="Power" precision="FP16" id="9">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="16">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>30</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="17">
+                            <dim>1</dim>
+                            <dim>2</dim>
+                            <dim>30</dim>
+                        </port>
+                    </output>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="1" to-layer="4" to-port="4"/>
+                <edge from-layer="2" from-port="2" to-layer="5" to-port="6"/>
+                <edge from-layer="3" from-port="3" to-layer="6" to-port="8"/>
+
+                <edge from-layer="4" from-port="5" to-layer="7" to-port="10"/>
+                <edge from-layer="5" from-port="7" to-layer="7" to-port="11"/>
+                <edge from-layer="6" from-port="9" to-layer="7" to-port="12"/>
+
+                <edge from-layer="7" from-port="13" to-layer="8" to-port="14"/>
+
+                <edge from-layer="8" from-port="15" to-layer="9" to-port="16"/>
+            </edges>
+        </net>
+    )V0G0N";
+
+    StatusCode st;
+
+    ASSERT_NO_THROW(readNetwork(model));
+
+    const auto& network = _cnnNetwork;
+
+    _inputsInfo = network.getInputsInfo();
+    _inputsInfo["input1"]->setPrecision(Precision::FP16);
+    _inputsInfo["input2"]->setPrecision(Precision::FP16);
+    _inputsInfo["input3"]->setPrecision(Precision::FP16);
+
+    _outputsInfo = network.getOutputsInfo();
+    _outputsInfo["reshape_copy"]->setPrecision(Precision::FP16);
+
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, { {VPU_CONFIG_KEY(PERF_REPORT_MODE), VPU_CONFIG_VALUE(PER_STAGE)},
+                                                                              {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(NO)},
+                                                                              {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)} }, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr input1;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("input1", input1, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    GenRandomData(input1);
+
+    Blob::Ptr input2;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("input2", input2, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    GenRandomData(input2);
+
+    Blob::Ptr input3;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("input3", input3, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    GenRandomData(input3);
+
+    ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr output;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("reshape_copy", output, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    _refBlob = make_shared_blob<ie_fp16>({Precision::FP16, output->getTensorDesc().getDims(), Layout::ANY});
+    _refBlob->allocate();
+    {
+        ie_fp16* dst_ptr = _refBlob->buffer().as<ie_fp16*>();
+        int dst_offset = 0;
+
+        auto concat = [&](const Blob::Ptr& src) {
+            const ie_fp16* src_ptr = src->cbuffer().as<const ie_fp16*>();
+            int num = src->getTensorDesc().getDims().back();
+            std::copy_n(src_ptr, num, dst_ptr + dst_offset);
+            dst_offset += num;
+        };
+
+        concat(input1);
+        concat(input2);
+        concat(input3);
+    }
+
+    CompareCommonAbsolute(output, _refBlob, 0);
+
+    std::map<std::string, InferenceEngineProfileInfo> perfMap;
+    ASSERT_NO_THROW(st = _inferRequest->GetPerformanceCounts(perfMap, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
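+    // The Reshape after Concat must be optimized out, so its stage is NOT_RUN.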
+    auto layerInfo = perfMap["reshape"];
+    EXPECT_EQ(InferenceEngineProfileInfo::NOT_RUN, layerInfo.status);
+}
+
+TEST_F(myriadLayerReshapeFasterRCNN_nightly, Reshape) {
+    InferenceEngine::SizeVector input_tensor = {1, 14, 14, 24};
+    InferenceEngine::SizeVector output_tensor = {1, 2352, 2};
+    std::map<std::string, std::string> layer_params = {
+              {"axis", "0"}
+            ,{"dim", "0,-1,2"}
+            ,{"num_axes", std::to_string(-1)}
+    };
+    _testNet.addLayer(LayerInitParams("Reshape")
+             .params(layer_params)
+             .in({input_tensor})
+             .out({output_tensor}),
+            ref_reshape_wrap);
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams().useHWOpt(CheckMyriadX())));
+}
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayerReshape_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_reshapeInParams),
+        ::testing::ValuesIn(s_reshapeOutParams))
+);
+
+INSTANTIATE_TEST_CASE_P(fc_to_conv_case, myriadLayerReshape_nightly,
+    ::testing::Values(
+        std::make_tuple(
+            SizeVector{400, 12544},
+            SizeVector{8, 50, 256, 7, 7}
+        ),
+        std::make_tuple(
+            SizeVector{256, 8, 7, 50, 7},
+            SizeVector{1, 256, 56, 350}
+        ),
+        std::make_tuple(
+            SizeVector{1, 1024, 8, 50},
+            SizeVector{1024, 8, 1, 50, 1}
+        ),
+        std::make_tuple(
+            SizeVector{8, 50, 1024, 1, 1},
+            SizeVector{400, 1024}
+        )
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsReshapeBeforeFC_nightly,
+        ::testing::Values(CONFIG_VALUE(YES), CONFIG_VALUE(NO))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsReshapeFasterRCNN_nightly,
+        ::testing::Combine(
+            ::testing::ValuesIn(s_convTensor)
+          , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 0, 0))
+          , ::testing::Values<uint32_t>(24)
+          , ::testing::Values<uint32_t>(1)
+          )
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reshape_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reshape_test.hpp
new file mode 100644 (file)
index 0000000..22fa254
--- /dev/null
@@ -0,0 +1,284 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include "ie_layouts.h"
+#include "myriad_layers_tests.hpp"
+#include <vpu/private_plugin_config.hpp>
+#include "myriad_layers_reference_functions.hpp"
+
+using namespace InferenceEngine;
+
+using myriadEliminateReshapeTests_nightly = myriadLayersTests_nightly;
+
+typedef myriadLayerTestBaseWithParam<std::tuple<InferenceEngine::SizeVector, InferenceEngine::SizeVector>> myriadLayerReshape_nightly;
+
+TEST_P(myriadLayerReshape_nightly, Reshape) {
+    auto input_tensor = std::get<0>(GetParam());
+    auto output_tensor = std::get<1>(GetParam());
+
+    std::string shape = std::to_string(output_tensor[0]);
+    for (size_t i = 1; i < output_tensor.size(); ++i) {
+        shape += "," + std::to_string(output_tensor[i]);
+    }
+
+    std::map<std::string, std::string> params;
+    params["dim"] = shape;
+
+    _testNet.addLayer(LayerInitParams("Reshape")
+             .params(params)
+             .in({input_tensor})
+             .out({output_tensor}),
+            ref_reshape_wrap);
+
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams().useHWOpt(CheckMyriadX()).layoutPreference(vpu::LayoutPreference::ChannelMinor)));
+}
+
+
+typedef myriadLayersTests_nightly myriadLayerReshapeFasterRCNN_nightly;
+
+static std::vector<InferenceEngine::SizeVector> s_reshapeInParams = {
+    {{1, 4, 2, 16}},
+    {{1, 2, 4, 16}},
+    {{1, 4, 16, 2}},
+    {{1, 16, 4, 2}},
+    {{1, 8,  4,  4}},
+};
+
+static std::vector<InferenceEngine::SizeVector> s_reshapeOutParams = {
+    {{1, 16, 2, 4}},
+    {{1, 4, 16, 2}},
+    {{1, 4, 2, 16}},
+    {{1, 4, 4,  8}},
+    {{1, 4, 8,  4}},
+    {{1, 2, 4, 16}},
+    {{1, 2, 16, 4}},
+    {{1, 64, 2, 1}},
+};
+
+std::string MODEL_WITH_FLATTEN = R"V0G0N(
+    <net name="MODEL_WITH_FLATTEN" version="2" batch="1">
+        <layers>
+            <layer id="0" name="input" precision="FP16" type="Input">
+                <output>
+                    <port id="0">
+                        <dim>1</dim>
+                        <dim>16</dim>
+                        <dim>6</dim>
+                        <dim>6</dim>
+                    </port>
+                </output>
+            </layer>
+            <layer id="1" name="pool5" precision="FP16" type="Pooling">
+                <data exclude-pad="false" kernel-x="2" kernel-y="2" pad-x="0" pad-y="0" pool-method="max" stride="1,1,2,2" stride-x="2" stride-y="2"/>
+                <input>
+                    <port id="0">
+                        <dim>1</dim>
+                        <dim>16</dim>
+                        <dim>6</dim>
+                        <dim>6</dim>
+                    </port>
+                </input>
+                <output>
+                    <port id="1">
+                        <dim>1</dim>
+                        <dim>16</dim>
+                        <dim>3</dim>
+                        <dim>3</dim>
+                    </port>
+                </output>
+            </layer>
+            <layer id="2" name="flatten_0" precision="FP16" type="Reshape">
+                <data axis="1" dim="1,144" num_axes="-1" />
+                <input>
+                    <port id="0">
+                        <dim>1</dim>
+                        <dim>16</dim>
+                        <dim>3</dim>
+                        <dim>3</dim>
+                    </port>
+                </input>
+                <output>
+                    <port id="1">
+                        <dim>1</dim>
+                        <dim>144</dim>
+                    </port>
+                </output>
+            </layer>
+            <layer id="3" name="fc6" precision="FP16" type="FullyConnected">
+                <data out-size="32"/>
+                <input>
+                    <port id="0">
+                        <dim>1</dim>
+                        <dim>144</dim>
+                    </port>
+                </input>
+                <output>
+                    <port id="3">
+                        <dim>1</dim>
+                        <dim>32</dim>
+                    </port>
+                </output>
+                <blobs>
+                    <weights offset="0" size="9216"/>
+                    <biases offset="9216" size="64"/>
+                </blobs>
+            </layer>
+        </layers>
+        <edges>
+            <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+            <edge from-layer="1" from-port="1" to-layer="2" to-port="0"/>
+            <edge from-layer="2" from-port="1" to-layer="3" to-port="0"/>
+        </edges>
+    </net>
+)V0G0N";
+
+std::string MODEL_WITHOUT_FLATTEN = R"V0G0N(
+    <net name="MODEL_WITHOUT_FLATTEN" version="2" batch="1">
+        <layers>
+            <layer id="0" name="input" precision="FP16" type="Input">
+                <output>
+                    <port id="0">
+                        <dim>1</dim>
+                        <dim>16</dim>
+                        <dim>6</dim>
+                        <dim>6</dim>
+                    </port>
+                </output>
+            </layer>
+            <layer id="1" name="pool5" precision="FP16" type="Pooling">
+                <data exclude-pad="false" kernel-x="2" kernel-y="2" pad-x="0" pad-y="0" pool-method="max" stride="1,1,2,2" stride-x="2" stride-y="2"/>
+                <input>
+                    <port id="0">
+                        <dim>1</dim>
+                        <dim>16</dim>
+                        <dim>6</dim>
+                        <dim>6</dim>
+                    </port>
+                </input>
+                <output>
+                    <port id="1">
+                        <dim>1</dim>
+                        <dim>16</dim>
+                        <dim>3</dim>
+                        <dim>3</dim>
+                    </port>
+                </output>
+            </layer>
+            <layer id="2" name="fc6" precision="FP16" type="FullyConnected">
+                <data out-size="32"/>
+                <input>
+                    <port id="0">
+                        <dim>1</dim>
+                        <dim>16</dim>
+                        <dim>3</dim>
+                        <dim>3</dim>
+                    </port>
+                </input>
+                <output>
+                    <port id="1">
+                        <dim>1</dim>
+                        <dim>32</dim>
+                    </port>
+                </output>
+                <blobs>
+                    <weights offset="0" size="9216"/>
+                    <biases offset="9216" size="64"/>
+                </blobs>
+            </layer>
+        </layers>
+        <edges>
+            <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+            <edge from-layer="1" from-port="1" to-layer="2" to-port="0"/>
+        </edges>
+    </net>
+)V0G0N";
+
+
+typedef myriadLayerTestBaseWithParam<std::string> myriadLayersTestsReshapeBeforeFC_nightly;
+
+TEST_P(myriadLayersTestsReshapeBeforeFC_nightly, OptimizeReshapeIfItIsPlacedBeforeFC) {
+    std::string HWConfigValue = GetParam();
+    if (!CheckMyriadX() && HWConfigValue == CONFIG_VALUE(YES)) {
+        std::cout << "Disable for non-MyriadX devices" << std::endl;
+        return;
+    }
+
+    std::string outputName = "fc6";
+    StatusCode st = InferenceEngine::OK;
+    InferenceEngine::ResponseDesc resp;
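+    // 9280 bytes = 9216 bytes of FC weights + 64 bytes of biases, matching the
+    // <blobs> section of MODEL_WITH_FLATTEN.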
+    TBlob<uint8_t>::Ptr weights(GenWeights(9280 / sizeof(ie_fp16)));
+
+    Core ie;
+    auto network = ie.ReadNetwork(MODEL_WITH_FLATTEN, weights);
+
+    auto inputsInfo = network.getInputsInfo();
+    inputsInfo["input"]->setPrecision(Precision::FP16);
+
+    auto outputsInfo = network.getOutputsInfo();
+    outputsInfo[outputName]->setPrecision(Precision::FP16);
+
+    InferenceEngine::IExecutableNetwork::Ptr exeNetwork;
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(exeNetwork, network,
+                                                      { {VPU_CONFIG_KEY(PERF_REPORT_MODE), VPU_CONFIG_VALUE(PER_STAGE)},
+                                                        {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), HWConfigValue},
+                                                        {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES) }}, &resp));
+    ASSERT_EQ(StatusCode::OK, st) << resp.msg;
+
+    InferenceEngine::IInferRequest::Ptr inferRequest;
+    ASSERT_NO_THROW(st = exeNetwork->CreateInferRequest(inferRequest, &resp));
+    ASSERT_EQ(StatusCode::OK, st) << resp.msg;
+
+    Blob::Ptr input;
+    ASSERT_NO_THROW(st = inferRequest->GetBlob("input", input, &resp));
+    ASSERT_EQ(StatusCode::OK, st) << resp.msg;
+
+    ASSERT_NO_THROW(st = inferRequest->Infer(&resp));
+    ASSERT_EQ(StatusCode::OK, st) << resp.msg;
+
+    std::map<std::string, InferenceEngineProfileInfo> perfMap;
+    ASSERT_NO_THROW(st = inferRequest->GetPerformanceCounts(perfMap, &resp));
+    ASSERT_EQ(StatusCode::OK, st) << resp.msg;
+
+    auto layerInfo = perfMap["flatten_0"];
+    EXPECT_EQ(InferenceEngineProfileInfo::NOT_RUN, layerInfo.status);
+}
+
+class myriadLayersTestsReshapeFasterRCNN_nightly: public ConvolutionTest<>{
+};
+
+// FIXME: rewrite the test (it doesn't use Convolution); avoid the HWC layout for 3D tensors in the reference code
+TEST_P(myriadLayersTestsReshapeFasterRCNN_nightly, DISABLED_Convolution) {
+    std::map<std::string, std::string> permute_params = {
+              {"order", "0,2,3,1"}
+    };
+    std::map<std::string, std::string> reshape_params = {
+                {"axis", "0"}
+              , {"dim", "0,-1,2"}
+              , {"num_axes", "-1"}
+    };
+    InferenceEngine::SizeVector perm_out = {1, 14, 14, 24};
+    _testNet.addLayer(LayerInitParams("Permute")
+             .params(permute_params)
+             .in({_output_tensor})
+             .out({perm_out}),
+             ref_permute_wrap);
+
+    _testNet.addLayer(LayerInitParams("Reshape")
+             .params(reshape_params)
+             .in({perm_out})
+             .out({{1, 2352, 2}}),
+             ref_reshape_wrap);
+
+    float maxerr = 0.00066 * IC * kernel.x * kernel.y;
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), maxerr);
+}
+
+static const std::vector<InferenceEngine::SizeVector> s_convTensor = {
+    {{1, 512, 14, 14}}
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reverse_sequence_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reverse_sequence_test.cpp
new file mode 100644 (file)
index 0000000..2a9dfab
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_reverse_sequence_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayerReverseSequence_nightly,
+    ::testing::Combine(
+        ::testing::Values<ReverseSequence>(
+                MAKE_STRUCT(reverse_sequence_test_params, {5, 6, 18}, 0, 0)
+              , MAKE_STRUCT(reverse_sequence_test_params, {1,  2, 5, 2, 5}, 3, 4)
+              , MAKE_STRUCT(reverse_sequence_test_params, {5, 6, 18}, 0, 1)
+              , MAKE_STRUCT(reverse_sequence_test_params, {5, 6, 18}, 0, 2)
+              , MAKE_STRUCT(reverse_sequence_test_params, {1,  4, 2, 5}, 2, 3)
+              , MAKE_STRUCT(reverse_sequence_test_params, {16, 1, 1024}, 0, 1)
+              , MAKE_STRUCT(reverse_sequence_test_params, {20, 1, 1000}, 0, 1)
+              , MAKE_STRUCT(reverse_sequence_test_params, {5, 6, 18}, 2, 2)
+              , MAKE_STRUCT(reverse_sequence_test_params, {3, 4, 6}, 2, 1)
+              , MAKE_STRUCT(reverse_sequence_test_params, {1,  1, 4, 2, 5}, 3, 4)
+              , MAKE_STRUCT(reverse_sequence_test_params, {1,  4, 2, 5}, 2, 3)
+              , MAKE_STRUCT(reverse_sequence_test_params, {12, 44, 23, 15}, 0, 3)
+              , MAKE_STRUCT(reverse_sequence_test_params, {3, 4, 3, 1}, 2, 3)
+              , MAKE_STRUCT(reverse_sequence_test_params, {100, 1, 1, 1}, 0, 3)
+              , MAKE_STRUCT(reverse_sequence_test_params, {100, 1, 1, 1}, 0, 2)
+              , MAKE_STRUCT(reverse_sequence_test_params, {100, 1, 1, 1}, 0, 0)
+              , MAKE_STRUCT(reverse_sequence_test_params, {103, 1, 1, 1}, 0, 0)
+              , MAKE_STRUCT(reverse_sequence_test_params, {100, 10, 24}, 0, 0)
+              , MAKE_STRUCT(reverse_sequence_test_params, {100, 10, 24}, 0, 1)
+              , MAKE_STRUCT(reverse_sequence_test_params, {100, 10, 24}, 0, 2)
+              , MAKE_STRUCT(reverse_sequence_test_params, {100, 10, 24}, 1, 2)
+              , MAKE_STRUCT(reverse_sequence_test_params, {100, 10, 24}, 1, 1)
+        ),
+        ::testing::Values<IRVersion>(IRVersion::v7, IRVersion::v10)
+    )
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reverse_sequence_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_reverse_sequence_test.hpp
new file mode 100644 (file)
index 0000000..3481c9a
--- /dev/null
@@ -0,0 +1,147 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include <algorithm>
+
+using std::tuple;
+using std::get;
+
+#define f32Tof16 PrecisionUtils::f32tof16
+#define f16Tof32 PrecisionUtils::f16tof32
+
+using namespace InferenceEngine;
+
+struct reverse_sequence_test_params {
+    SizeVector dims;
+    int seq_axis;
+    int batch_axis;
+    friend std::ostream& operator<<(std::ostream& os, reverse_sequence_test_params const& tst)
+    {
+        os << "dims = (";
+        for (int i = 0; i < tst.dims.size()-1; i++)
+            os << tst.dims[i] << ", ";
+        os << tst.dims[tst.dims.size()-1] << ")";
+        return os << ", " <<
+                  " sequence axis = " << tst.seq_axis
+                  << ", batch axis = " << tst.batch_axis;
+    }
+};
+
+PRETTY_PARAM(ReverseSequence, reverse_sequence_test_params);
+typedef myriadLayerTestBaseWithParam<std::tuple<ReverseSequence, IRVersion>> myriadLayerReverseSequence_nightly;
+
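+// Maps a linear index in NCHW order to the matching linear index in NHWC
+// storage for a 4D tensor (used when the plugin returns NHWC data).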
+static int nchw_to_nhwc(InferenceEngine::SizeVector dims, int ind)
+{
+    int ind3 = ind % dims[3];
+    int ind2 = (ind / dims[3]) % dims[2];
+    int ind1 = ((ind / dims[3]) / dims[2]) % dims[1];
+    int ind0 = ((ind / dims[3]) / dims[2]) / dims[1];
+    return ind1 + ind3 * dims[1] + ind2 * dims[1] * dims[3] + ind0 * dims[1] * dims[3] * dims[2];
+}
+
+static void ref_reverse_sequence(
+        const Blob::Ptr& src,
+        const Blob::Ptr& seq_lengths,
+        Blob::Ptr& dst,
+        int seq_axis,
+        int batch_axis
+) {
+    const ie_fp16* src_data = src->cbuffer().as<const ie_fp16*>();
+    const ie_fp16* seq_lengths_data = static_cast<ie_fp16*>(seq_lengths->cbuffer().as<ie_fp16*>());
+    ie_fp16* dst_data = static_cast<ie_fp16*>(dst->cbuffer().as<ie_fp16*>());
+
+    InferenceEngine::SizeVector src_dims = src->getTensorDesc().getDims();
+    InferenceEngine::SizeVector srcStrides = src->getTensorDesc().getBlockingDesc().getStrides();
+
+    if (seq_axis < 0)
+        seq_axis += src_dims.size();
+
+    if (seq_axis < 0 || seq_axis >= src_dims.size())
+        FAIL() << "Incorrect 'seq_axis' parameters dimensions and axis number!";
+
+    if (batch_axis < 0)
+        batch_axis += src_dims.size();
+
+    if (batch_axis < 0 || batch_axis >= src_dims.size())
+        FAIL() << "Incorrect 'batch_axis' parameters dimensions and axis number!";
+
+    for (size_t i = 0; i < src_dims[batch_axis]; i++) {
+        if (f16Tof32(seq_lengths_data[i]) > src_dims[seq_axis])
+        {
+            FAIL() << "Incorrect input 'seq_lengths' values!";
+        }
+    }
+    size_t work_amount_dst = srcStrides[0] * src_dims[0];
+    InferenceEngine::SizeVector counters(src_dims.size(), 0);
+    Layout layout = src->getTensorDesc().getLayout();
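+    // Walk the whole tensor with per-dimension counters; along seq_axis,
+    // indices below the batch's sequence length are mirrored
+    // (idx -> len - idx - 1), which reverses that prefix of the sequence.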
+    for (size_t iwork = 0; iwork < work_amount_dst; ++iwork) {
+        size_t srcStride = 1;
+        int i;
+        size_t src_idx;
+        for (i = src_dims.size() - 1, src_idx = 0; i >= 0; i--) {
+            size_t idx = counters[i];
+
+            if (i == seq_axis && idx < f16Tof32(seq_lengths_data[counters[batch_axis]])) {
+                idx = f16Tof32(seq_lengths_data[counters[batch_axis]]) - idx - 1;
+            }
+
+            src_idx += idx * srcStride;
+
+            srcStride *= src_dims[i];
+        }
+
+        if (layout == NHWC)
+            dst_data[nchw_to_nhwc(src_dims, iwork)] = src_data[nchw_to_nhwc(src_dims, src_idx)];
+        else
+            dst_data[iwork] = src_data[src_idx];
+
+        for (int j = src_dims.size() - 1; j >= 0; j--) {
+            counters[j] = (counters[j] + 1) % src_dims[j];
+            if (counters[j] != 0) break;
+        }
+    }
+}
+
+TEST_P(myriadLayerReverseSequence_nightly, ReverseSequence) {
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+    reverse_sequence_test_params input_dims = std::get<0>(GetParam());
+    _irVersion = std::get<1>(GetParam());
+    auto dims = input_dims.dims;
+    auto seq_axis = input_dims.seq_axis;
+    auto batch_axis = input_dims.batch_axis;
+
+    SetInputTensors({dims, {dims[batch_axis]}});
+    SetOutputTensors({dims});
+
+    std::map<std::string, std::string> layer_params = {
+              {"seq_axis", std::to_string(seq_axis)}
+            , {"batch_axis", std::to_string(batch_axis)}
+    };
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("ReverseSequence").params(layer_params)));
+
+    /* input tensor generating */
+    auto pInputBlob = _inputMap.begin();
+    Blob::Ptr inputBlob = pInputBlob->second;
+    ie_fp16 *src_data = static_cast<ie_fp16*>(inputBlob->buffer());
+    for (int i = 0; i < inputBlob->size(); i++) {
+        src_data[i] = f32Tof16(float(i % 10000));
+    }
+    pInputBlob++;
+    Blob::Ptr lengthSequenceBlob = pInputBlob->second;
+    ie_fp16* len_sequence = static_cast<ie_fp16*>(lengthSequenceBlob->buffer());
+
+    for (int i = 0; i < dims[batch_axis]; i++) {
+        len_sequence[i] = f32Tof16(static_cast<float>(rand() % ((dims[seq_axis]) + 1)));
+    }
+
+    ref_reverse_sequence(inputBlob, lengthSequenceBlob, _refBlob, seq_axis, batch_axis);
+
+    ASSERT_TRUE(Infer());
+
+    auto outputBlob = _outputMap.begin()->second;
+
+    CompareCommonAbsolute(outputBlob, _refBlob, 0);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_rfcn_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_rfcn_test.cpp
new file mode 100644 (file)
index 0000000..627b459
--- /dev/null
@@ -0,0 +1,848 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include "myriad_layers_tests.hpp"
+
+#define ERROR_BOUND (1.2e-3f)
+
+using namespace InferenceEngine;
+
+static const int numCLASSes = 21;
+static const int numROIs = 300;
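+// The IR snippets below are R"V0G0N(...)" raw strings spliced together with
+// std::to_string so one model text can be parameterized by numCLASSes/numROIs.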
+static const std::string model_to_psroipooling = R"V0G0N(
+    <net name="RFCN_TEST" version="2" batch="1">
+        <layers>
+            <layer name="input0" type="Input" precision="FP16" id="0">
+                <output>
+                     <port id="0">
+                         <dim>1</dim>
+                         <dim>1029</dim>
+                         <dim>14</dim>
+                         <dim>14</dim>
+                     </port>
+                </output>
+            </layer>
+            <layer name="input1" type="Input" precision="FP16" id="1">
+                        <output>
+                     <port id="1">
+                         <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>5</dim>
+                     </port>
+                </output>
+            </layer>
+            <layer name="PSROIPooling" type="PSROIPooling" precision="FP16" id="2">
+                <data group_size="7" spatial_scale="0.062500" output_dim="
+)V0G0N"
+                            + std::to_string(numCLASSes) +
+R"V0G0N(
+                " />
+                <input>
+                     <port id="2">
+                         <dim>1</dim>
+                         <dim>1029</dim>
+                         <dim>14</dim>
+                         <dim>14</dim>
+                     </port>
+                     <port id="3">
+                         <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>5</dim>
+                     </port>
+                </input>
+                <output>
+                     <port id="4">
+                         <dim>1</dim>
+                         <dim>
+)V0G0N"
+                            + std::to_string(numCLASSes * numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>7</dim>
+                         <dim>7</dim>
+                     </port>
+                </output>
+            </layer>
+        </layers>
+        <edges>
+           <edge from-layer="0" from-port="0" to-layer="2" to-port="2"/>
+           <edge from-layer="1" from-port="1" to-layer="2" to-port="3"/>
+        </edges>
+    </net>
+)V0G0N";
+
+static const std::string model_to_pooling = R"V0G0N(
+    <net name="RFCN_TEST" version="2" batch="1">
+        <layers>
+            <layer name="input0" type="Input" precision="FP16" id="0">
+                <output>
+                     <port id="0">
+                         <dim>1</dim>
+                         <dim>1029</dim>
+                         <dim>14</dim>
+                         <dim>14</dim>
+                     </port>
+                </output>
+            </layer>
+            <layer name="input1" type="Input" precision="FP16" id="1">
+                        <output>
+                     <port id="1">
+                         <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>5</dim>
+                     </port>
+                </output>
+            </layer>
+            <layer name="PSROIPooling" type="PSROIPooling" precision="FP16" id="2">
+                <data group_size="7" spatial_scale="0.062500" output_dim="
+)V0G0N"
+                            + std::to_string(numCLASSes) +
+R"V0G0N(
+                " />
+                <input>
+                     <port id="2">
+                         <dim>1</dim>
+                         <dim>1029</dim>
+                         <dim>14</dim>
+                         <dim>14</dim>
+                     </port>
+                     <port id="3">
+                         <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>5</dim>
+                     </port>
+                </input>
+                <output>
+                     <port id="4">
+                         <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>
+)V0G0N"
+                            + std::to_string(numCLASSes) +
+R"V0G0N(
+                         </dim>
+                         <dim>7</dim>
+                         <dim>7</dim>
+                     </port>
+                </output>
+            </layer>
+            <layer name="ave_cls_score_rois" type="Pooling" precision="FP16" id="3">
+                <data exclude-pad="false" kernel-x="7" kernel-y="7" pad-x="0" pad-y="0" pool-method="avg" rounding_type="ceil" stride="1,1,7,7" stride-x="7" stride-y="7"/>
+                <input>
+                    <port id="5">
+                         <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>
+)V0G0N"
+                            + std::to_string(numCLASSes) +
+R"V0G0N(
+                         </dim>
+                        <dim>7</dim>
+                        <dim>7</dim>
+                    </port>
+                </input>
+                <output>
+                    <port id="6">
+                         <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>
+)V0G0N"
+                            + std::to_string(numCLASSes) +
+R"V0G0N(
+                         </dim>
+                        <dim>1</dim>
+                        <dim>1</dim>
+                    </port>
+                </output>
+            </layer>
+        </layers>
+        <edges>
+           <edge from-layer="0" from-port="0" to-layer="2" to-port="2"/>
+           <edge from-layer="1" from-port="1" to-layer="2" to-port="3"/>
+           <edge from-layer="2" from-port="4" to-layer="3" to-port="5"/>
+        </edges>
+    </net>
+)V0G0N";
+
+static const std::string model_to_softmax = R"V0G0N(
+    <net name="RFCN_TEST" version="2" batch="1">
+        <layers>
+            <layer name="input0" type="Input" precision="FP16" id="0">
+                <output>
+                     <port id="0">
+                         <dim>1</dim>
+                         <dim>1029</dim>
+                         <dim>14</dim>
+                         <dim>14</dim>
+                     </port>
+                </output>
+            </layer>
+            <layer name="input1" type="Input" precision="FP16" id="1">
+                <output>
+                     <port id="1">
+                         <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>5</dim>
+                     </port>
+                </output>
+            </layer>
+            <layer name="PSROIPooling" type="PSROIPooling" precision="FP16" id="2">
+                <data group_size="7" spatial_scale="0.062500" output_dim="
+)V0G0N"
+                            + std::to_string(numCLASSes) +
+R"V0G0N(
+                " />
+                <input>
+                     <port id="2">
+                         <dim>1</dim>
+                         <dim>1029</dim>
+                         <dim>14</dim>
+                         <dim>14</dim>
+                     </port>
+                     <port id="3">
+                         <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>5</dim>
+                     </port>
+                </input>
+                <output>
+                     <port id="4">
+                         <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>
+)V0G0N"
+                            + std::to_string(numCLASSes) +
+R"V0G0N(
+                         </dim>
+                         <dim>7</dim>
+                         <dim>7</dim>
+                     </port>
+                </output>
+            </layer>
+            <layer name="ave_cls_score_rois" type="Pooling" precision="FP16" id="3">
+                <data exclude-pad="false" kernel-x="7" kernel-y="7" pad-x="0" pad-y="0" pool-method="avg" rounding_type="ceil" stride="1,1,7,7" stride-x="7" stride-y="7"/>
+                <input>
+                    <port id="5">
+                         <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>
+)V0G0N"
+                            + std::to_string(numCLASSes) +
+R"V0G0N(
+                         </dim>
+                        <dim>7</dim>
+                        <dim>7</dim>
+                    </port>
+                </input>
+                <output>
+                    <port id="6">
+                         <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>
+)V0G0N"
+                            + std::to_string(numCLASSes) +
+R"V0G0N(
+                         </dim>
+                        <dim>1</dim>
+                        <dim>1</dim>
+                    </port>
+                </output>
+            </layer>
+            <layer name="cls_prob" type="SoftMax" precision="FP16" id="4">
+                <data axis="1"/>
+                <input>
+                    <port id="7">
+                         <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>
+)V0G0N"
+                            + std::to_string(numCLASSes) +
+R"V0G0N(
+                         </dim>
+                        <dim>1</dim>
+                        <dim>1</dim>
+                    </port>
+                </input>
+                <output>
+                    <port id="8">
+                         <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>
+)V0G0N"
+                            + std::to_string(numCLASSes) +
+R"V0G0N(
+                         </dim>
+                        <dim>1</dim>
+                        <dim>1</dim>
+                    </port>
+                </output>
+            </layer>
+        </layers>
+        <edges>
+           <edge from-layer="0" from-port="0" to-layer="2" to-port="2"/>
+           <edge from-layer="1" from-port="1" to-layer="2" to-port="3"/>
+           <edge from-layer="2" from-port="4" to-layer="3" to-port="5"/>
+           <edge from-layer="3" from-port="6" to-layer="4" to-port="7"/>
+        </edges>
+    </net>
+)V0G0N";
+
+static const std::string model_to_reshape = R"V0G0N(
+    <net name="RFCN_TEST" version="2" batch="1">
+        <layers>
+            <layer name="input0" type="Input" precision="FP16" id="0">
+                <output>
+                     <port id="0">
+                         <dim>1</dim>
+                         <dim>1029</dim>
+                         <dim>14</dim>
+                         <dim>14</dim>
+                     </port>
+                </output>
+            </layer>
+            <layer name="input1" type="Input" precision="FP16" id="1">
+                <output>
+                     <port id="1">
+                         <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>5</dim>
+                     </port>
+                </output>
+            </layer>
+            <layer name="PSROIPooling" type="PSROIPooling" precision="FP16" id="2">
+                <data group_size="7" spatial_scale="0.062500" output_dim="
+)V0G0N"
+                            + std::to_string(numCLASSes) +
+R"V0G0N(
+                " />
+                <input>
+                     <port id="2">
+                         <dim>1</dim>
+                         <dim>1029</dim>
+                         <dim>14</dim>
+                         <dim>14</dim>
+                     </port>
+                     <port id="3">
+                         <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>5</dim>
+                     </port>
+                </input>
+                <output>
+                     <port id="4">
+                         <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>
+)V0G0N"
+                            + std::to_string(numCLASSes) +
+R"V0G0N(
+                         </dim>
+                         <dim>7</dim>
+                         <dim>7</dim>
+                     </port>
+                </output>
+            </layer>
+            <layer name="ave_cls_score_rois" type="Pooling" precision="FP16" id="3">
+                <data exclude-pad="false" kernel-x="7" kernel-y="7" pad-x="0" pad-y="0" pool-method="avg" rounding_type="ceil" stride="1,1,7,7" stride-x="7" stride-y="7"/>
+                <input>
+                    <port id="5">
+                         <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>
+)V0G0N"
+                            + std::to_string(numCLASSes) +
+R"V0G0N(
+                         </dim>
+                        <dim>7</dim>
+                        <dim>7</dim>
+                    </port>
+                </input>
+                <output>
+                    <port id="6">
+                         <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>
+)V0G0N"
+                            + std::to_string(numCLASSes) +
+R"V0G0N(
+                         </dim>
+                        <dim>1</dim>
+                        <dim>1</dim>
+                    </port>
+                </output>
+            </layer>
+            <layer name="cls_prob" type="SoftMax" precision="FP16" id="4">
+                <data axis="1"/>
+                <input>
+                    <port id="7">
+                         <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>
+)V0G0N"
+                            + std::to_string(numCLASSes) +
+R"V0G0N(
+                         </dim>
+                        <dim>1</dim>
+                        <dim>1</dim>
+                    </port>
+                </input>
+                <output>
+                    <port id="8">
+                         <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>
+)V0G0N"
+                            + std::to_string(numCLASSes) +
+R"V0G0N(
+                         </dim>
+                        <dim>1</dim>
+                        <dim>1</dim>
+                    </port>
+                </output>
+            </layer>
+            <layer name="cls_prob_reshape" type="Reshape" precision="FP16" id="5">
+                <data axis="0" num_axes="-1" dim="-1,
+)V0G0N"
+                            + std::to_string(numCLASSes) +
+R"V0G0N(
+                                                       "/>
+                <input>
+                    <port id="9">
+                        <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                        </dim>
+                        <dim>
+)V0G0N"
+                            + std::to_string(numCLASSes) +
+R"V0G0N(
+                        </dim>
+                        <dim>1</dim>
+                        <dim>1</dim>
+                    </port>
+                </input>
+                <output>
+                    <port id="10">
+                         <dim>
+)V0G0N"
+                            + std::to_string(numROIs) +
+R"V0G0N(
+                         </dim>
+                         <dim>
+)V0G0N"
+                            + std::to_string(numCLASSes) +
+R"V0G0N(
+                         </dim>
+                    </port>
+                </output>
+            </layer>
+        </layers>
+        <edges>
+           <edge from-layer="0" from-port="0" to-layer="2" to-port="2"/>
+           <edge from-layer="1" from-port="1" to-layer="2" to-port="3"/>
+           <edge from-layer="2" from-port="4" to-layer="3" to-port="5"/>
+           <edge from-layer="3" from-port="6" to-layer="4" to-port="7"/>
+           <edge from-layer="4" from-port="8" to-layer="5" to-port="9"/>
+        </edges>
+    </net>
+)V0G0N";
+
+// This reference function is a modified version of ref_soft_max from myriad_layers_softmax_test.cpp.
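+// It computes a numerically stable softmax along the selected axis:
+//     softmax(x_i) = exp(x_i - max_j x_j) / sum_j exp(x_j - max_j x_j)
+// Subtracting the per-slice maximum before exponentiation avoids FP16/FP32
+// overflow and cancels out, leaving the result unchanged.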
+static void ref_soft_max(const Blob::Ptr& src, Blob::Ptr& dst, int axis) {
+    const ie_fp16 *src_data = src->cbuffer().as<const ie_fp16*>();
+    ie_fp16 *dst_data = dst->buffer().as<ie_fp16*>();
+
+    const auto& dims = src->getTensorDesc().getDims();
+    int32_t dimx, dimy, dimz;
+    dimy = dims[2]; // H:1
+    dimz = dims[1]; // C:numCLASS
+    dimx = dims[0]; // N:numROI
+    // arg axis == 1: softmax over numCLASS (dimz = channels)
+
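+    // Remap the caller-visible axis for lower-rank tensors; in this test the
+    // blob is always 4D [numROI, numCLASS, 1, 1], so axis == 1 is used as-is.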
+    switch (src->getTensorDesc().getDims().size()) {
+    case 2:
+        axis += 2;
+        break;
+    case 3:
+        axis++;
+        break;
+    }
+
+    int dim0, dim1, dim2;
+    int stride0, stride1, stride2;
+    switch (axis) {
+    case 1:
+        /* channels */
+        dim0 = dimy; stride0 = dimx * dimz;
+        dim1 = dimx; stride1 = dimz;
+        dim2 = dimz; stride2 = 1;
+        break;
+    case 2:
+        /* height */
+        dim0 = dimx; stride0 = dimz;
+        dim1 = dimz; stride1 = 1;
+        dim2 = dimy; stride2 = dimx * dimz;
+        break;
+    case 3:
+        /* width */
+        dim0 = dimy; stride0 = dimx * dimz;
+        dim1 = dimz; stride1 = 1;
+        dim2 = dimx; stride2 = dimz;
+        break;
+    default:
+        FAIL() << "Unsupported axis value = " << axis;
+    }
+
+    std::vector<float> temp(dim2);
+    for (int i0 = 0; i0 < dim0; ++i0) {
+        for (int i1 = 0; i1 < dim1; ++i1) {
+            float largest = std::numeric_limits<float>::lowest();
+            for (int i2 = 0; i2 < dim2; ++i2) {
+                int ind = i0 * stride0 + i1 * stride1 + i2 * stride2;
+                float val = PrecisionUtils::f16tof32(src_data[ind]);
+                largest = std::max(val, largest);
+            }
+
+            float sum = 0.0f;
+            for (int i2 = 0; i2 < dim2; ++i2) {
+                int ind = i0 * stride0 + i1 * stride1 + i2 * stride2;
+                float val = PrecisionUtils::f16tof32(src_data[ind]);
+                temp[i2] = std::exp(val - largest);
+                sum += temp[i2];
+            }
+
+            for (int i2 = 0; i2 < dim2; ++i2) {
+                int ind = i0 * stride0 + i1 * stride1 + i2 * stride2;
+                dst_data[ind] = PrecisionUtils::f32tof16(temp[i2] / sum);
+            }
+        }
+    }
+}
+
+static void refGlobalAvgPooling7x7Rfcn(const Blob::Ptr src,
+                                       Blob::Ptr dst) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+    const uint16_t *src_data = src->buffer();
+    uint16_t *dst_data = dst->buffer();
+    ASSERT_NE(src_data, nullptr);
+    ASSERT_NE(dst_data, nullptr);
+
+    param_size kernel = {7, 7};
+    param_size stride = {1, 1};
+    param_size pad    = {0, 0};
+
+    auto src_dims = src->getTensorDesc().getDims();
+    int32_t IW =   src_dims[3];   // 7
+    int32_t IH =   src_dims[2];   // 7
+    int32_t IC =   src_dims[1];   // numROI*numCLASS
+    int32_t INUM = src_dims[0]; // 1
+
+    auto dst_dims = dst->getTensorDesc().getDims();
+    int32_t OW =   dst_dims[3];   // 1
+    int32_t OH =   dst_dims[2];   // 1
+    int32_t OC =   dst_dims[1];   // numCLASS
+    int32_t ONUM = dst_dims[0]; // numROI
+
+    // Compare the element counts as products, since the input shape might be 3D (IC then already folds in INUM)
+    ASSERT_EQ(IC * INUM, OC * ONUM);
+
+    for (int32_t n = 0; n < ONUM; n++) {
+        for (int32_t c = 0; c < OC; c++) {
+            for (int32_t oh = 0; oh < OH; oh++) {
+                for (int32_t ow = 0; ow < OW; ow++) {
+                    size_t oidx = c + ow * OC + oh * OC * OW + n * OC * OW * OH; // Default layout is NHWC
+                    float out_ref = 0.0f;
+                    size_t count = 0;
+
+                    for (uint32_t kh = 0; kh < kernel.y; kh++) {
+                        for (uint32_t kw = 0; kw < kernel.x; kw++) {
+                            int32_t iw = ow * stride.x - pad.x + kw;
+                            int32_t ih = oh * stride.y - pad.y + kh;
+                            if (iw < 0 || iw >= IW || ih < 0 || ih >= IH)
+                                continue;
+
+                            // If PSROIPooling is the network output, its layout is ZYX (NCHW). Use OC instead of IC, because for a 3D input IC actually holds INUM*IC
+                            size_t iidx = iw + IW * (ih + c * IH) + n * OC * IW * IH;
+
+                            float d = PrecisionUtils::f16tof32(src_data[iidx]);
+                            out_ref += d;
+                            count++;
+                        }
+                    }
+                    if (pad.x || pad.y) {
+                        dst_data[oidx] = PrecisionUtils::f32tof16(out_ref / (kernel.y * kernel.x));
+                    } else {
+                        dst_data[oidx] = PrecisionUtils::f32tof16(out_ref / count);
+                    }
+                }
+            }
+        }
+    }
+}
+
+class myriadLayersRfcnTests_nightly: public myriadLayersTests_nightly {
+public:
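+    // Fills `rois` with num_rois random boxes in the 5-element PSROIPooling
+    // row format [batch_index, x0, y0, x1, y1]; every box is kept inside the
+    // in_width x in_height frame, and batch_index is always 0 here.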
+    void GenROIs(InferenceEngine::Blob::Ptr rois,
+                 const uint32_t in_width, const uint32_t in_height,
+                 const uint32_t num_rois) {
+        ie_fp16 *roisBlob_data = rois->buffer().as<ie_fp16*>();
+        const int max_range = in_width * 4 / 5;
+        std::srand(std::time(nullptr));
+        for (int i = 0; i < num_rois; i++)
+        {
+            int x0 = std::rand() % max_range;
+            int x1 = x0 + (std::rand() % (in_width - x0 - 1)) + 1;
+            int y0 = std::rand() % max_range;
+            int y1 = y0 + (std::rand() % (in_height - y0 - 1)) + 1;
+
+            roisBlob_data[i * 5 + 0] = PrecisionUtils::f32tof16(0);
+            roisBlob_data[i * 5 + 1] = PrecisionUtils::f32tof16(x0);
+            roisBlob_data[i * 5 + 2] = PrecisionUtils::f32tof16(y0);
+            roisBlob_data[i * 5 + 3] = PrecisionUtils::f32tof16(x1);
+            roisBlob_data[i * 5 + 4] = PrecisionUtils::f32tof16(y1);
+        }
+    }
+
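+    // The RFCN tests are chained: PrepareInputAndReference() runs a shorter
+    // "prior" model on random inputs and keeps its output blob, then
+    // RunNetwork() runs the same model extended by one extra layer on the
+    // same inputs, so a host-side reference for just the appended layer can
+    // be computed from the prior output.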
+    void PrepareInputAndReference(const std::string& model_prior_network, const std::string& output_layer, StatusCode& st)
+    {
+        SetSeed(DEFAULT_SEED_VALUE);
+
+        // Prior-part of network to generate reference
+        Core ie;
+        auto network_part = ie.ReadNetwork(model_prior_network, Blob::CPtr());
+
+        auto inputsInfo = network_part.getInputsInfo();
+        inputsInfo["input0"]->setPrecision(Precision::FP16);
+        inputsInfo["input0"]->setLayout(NCHW); // PSROIPooling expects NCHW input, the same as in the standalone PSROIPooling test
+        inputsInfo["input1"]->setPrecision(Precision::FP16);
+
+        auto outputsInfo = network_part.getOutputsInfo();
+        outputsInfo[output_layer]->setPrecision(Precision::FP16);
+        if (output_layer == "PSROIPooling")
+            outputsInfo[output_layer]->setLayout(NCHW);
+
+        // Disable HW pooling
+        std::map<std::string, std::string> networkConfig;
+        networkConfig["VPU_HW_STAGES_OPTIMIZATION"] = "NO";
+
+        IExecutableNetwork::Ptr exeNetwork;
+        ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(exeNetwork, network_part, networkConfig, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        ASSERT_NE(exeNetwork, nullptr) << _resp.msg;
+
+        IInferRequest::Ptr inferRequest;
+        ASSERT_NO_THROW(st = exeNetwork->CreateInferRequest(inferRequest, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr input0;
+        ASSERT_NO_THROW(st = inferRequest->GetBlob("input0", input0, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr input1;
+        ASSERT_NO_THROW(st = inferRequest->GetBlob("input1", input1, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        // Allocate shared buffers so the same random inputs can be replayed in RunNetwork()
+        input0_share = make_shared_blob<ie_fp16>({Precision::FP16, input0->getTensorDesc().getDims(), ANY});
+        input0_share->allocate();
+        input1_share = make_shared_blob<ie_fp16>({Precision::FP16, input1->getTensorDesc().getDims(), ANY});
+        input1_share->allocate();
+
+        // Generate random input
+        GenRandomData(input0_share);
+        GenROIs(input1_share, 224, 224, numROIs);
+
+        ASSERT_EQ(input0->size(), input0_share->size());
+        ASSERT_EQ(input1->size(), input1_share->size());
+
+        ie_fp16 *input0_data = static_cast<ie_fp16*>(input0->buffer());
+        ie_fp16 *input0_share_data = static_cast<ie_fp16*>(input0_share->buffer());
+        ie_fp16 *input1_data = static_cast<ie_fp16*>(input1->buffer());
+        ie_fp16 *input1_share_data = static_cast<ie_fp16*>(input1_share->buffer());
+        std::copy(input0_share_data, input0_share_data + input0_share->size(), input0_data);
+        std::copy(input1_share_data, input1_share_data + input1_share->size(), input1_data);
+
+        ASSERT_NO_THROW(st = inferRequest->Infer(&_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        ASSERT_NO_THROW(st = inferRequest->GetBlob(output_layer.c_str(), prior_network_output, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    }
+
+    void RunNetwork(const std::string& model, const std::string& output_layer, StatusCode& st)
+    {
+        ASSERT_NO_THROW(readNetwork(model));
+
+        const auto& network = _cnnNetwork;
+
+        _inputsInfo = network.getInputsInfo();
+        _inputsInfo["input0"]->setPrecision(Precision::FP16);
+        _inputsInfo["input0"]->setLayout(NCHW); // PSROIPooling expects NCHW input, the same as in the standalone PSROIPooling test
+        _inputsInfo["input1"]->setPrecision(Precision::FP16);
+
+        _outputsInfo = network.getOutputsInfo();
+        _outputsInfo[output_layer]->setPrecision(Precision::FP16);
+
+        // Disable HW pooling
+        std::map<std::string, std::string> networkConfig;
+        networkConfig["VPU_HW_STAGES_OPTIMIZATION"] = "NO";
+
+        ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, networkConfig, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+        ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr input0;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("input0", input0, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr input1;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("input1", input1, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob(output_layer.c_str(), outputBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        _refBlob = make_shared_blob<ie_fp16>({Precision::FP16, outputBlob->getTensorDesc().getDims(), ANY});
+        _refBlob->allocate();
+
+        // Set input to run test
+        ASSERT_EQ(input0->size(), input0_share->size());
+        ASSERT_EQ(input1->size(), input1_share->size());
+
+        ie_fp16 *input0_data = static_cast<ie_fp16*>(input0->buffer());
+        ie_fp16 *input0_share_data = static_cast<ie_fp16*>(input0_share->buffer());
+        ie_fp16 *input1_data = static_cast<ie_fp16*>(input1->buffer());
+        ie_fp16 *input1_share_data = static_cast<ie_fp16*>(input1_share->buffer());
+        std::copy(input0_share_data, input0_share_data + input0_share->size(), input0_data);
+        std::copy(input1_share_data, input1_share_data + input1_share->size(), input1_data);
+
+        ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    }
+
+    Blob::Ptr input0_share;
+    Blob::Ptr input1_share;
+    Blob::Ptr prior_network_output;
+    Blob::Ptr outputBlob;
+};
+
+TEST_F(myriadLayersRfcnTests_nightly, ReshapeRfcn)
+{
+    StatusCode st = GENERAL_ERROR;
+
+    std::string prior_network_output_layer = "cls_prob";
+    std::string test_network_output_layer = "cls_prob_reshape";
+
+    ASSERT_NO_THROW(PrepareInputAndReference(model_to_softmax, prior_network_output_layer, st));
+    ASSERT_EQ(StatusCode::OK, st) << "PrepareInputAndReference failed";
+    ASSERT_NO_THROW(RunNetwork(model_to_reshape, test_network_output_layer, st));
+    ASSERT_EQ(StatusCode::OK, st) << "RunNetwork failed";
+
+    ASSERT_EQ(outputBlob->size(), prior_network_output->size());
+    CompareCommonAbsolute(outputBlob, prior_network_output, 0.0f);
+}
+
+TEST_F(myriadLayersRfcnTests_nightly, SoftmaxRfcn)
+{
+    StatusCode st = GENERAL_ERROR;
+
+    std::string prior_network_output_layer = "ave_cls_score_rois";
+    std::string test_network_output_layer = "cls_prob";
+
+    ASSERT_NO_THROW(PrepareInputAndReference(model_to_pooling, prior_network_output_layer, st));
+    ASSERT_EQ(StatusCode::OK, st) << "PrepareInputAndReference failed";
+    ASSERT_NO_THROW(RunNetwork(model_to_softmax, test_network_output_layer, st));
+    ASSERT_EQ(StatusCode::OK, st) << "RunNetwork failed";
+
+    int param_axis = 1;
+    ref_soft_max(prior_network_output, _refBlob, param_axis);
+
+    CompareCommonAbsolute(outputBlob, _refBlob, ERROR_BOUND);
+}
+
+TEST_F(myriadLayersRfcnTests_nightly, GlobalAvgPooling7x7Rfcn)
+{
+    StatusCode st = GENERAL_ERROR;
+
+    std::string prior_network_output_layer = "PSROIPooling";
+    std::string test_network_output_layer = "ave_cls_score_rois";
+
+    ASSERT_NO_THROW(PrepareInputAndReference(model_to_psroipooling, prior_network_output_layer, st));
+    ASSERT_EQ(StatusCode::OK, st) << "PrepareInputAndReference failed";
+    ASSERT_NO_THROW(RunNetwork(model_to_pooling, test_network_output_layer, st));
+    ASSERT_EQ(StatusCode::OK, st) << "RunNetwork failed";
+
+    refGlobalAvgPooling7x7Rfcn(prior_network_output, _refBlob);
+
+    CompareCommonAbsolute(outputBlob, _refBlob, ERROR_BOUND);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_align_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_align_test.cpp
new file mode 100644 (file)
index 0000000..84649ea
--- /dev/null
@@ -0,0 +1,13 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_roi_align_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsROIAlign_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_ROIAlignLayerInput),
+        ::testing::ValuesIn(s_ROIAlignLayerParam),
+        ::testing::ValuesIn(s_ROIAlignNumROIs),
+        ::testing::ValuesIn(s_ROIAlignMode))
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_align_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_align_test.hpp
new file mode 100644 (file)
index 0000000..0326e83
--- /dev/null
@@ -0,0 +1,258 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include "myriad_layers_reference_functions.hpp"
+#include "myriad_layers_tests.hpp"
+
+#include <random>
+
+using namespace InferenceEngine;
+
+#define ERROR_BOUND (2.5e-3f)
+
+struct roi_align_param {
+    int         in_net_w;
+    int         in_net_h;
+    uint32_t    pooled_w;
+    uint32_t    pooled_h;
+    int         sampling_ratio;
+    float       spatial_scale;
+
+    friend std::ostream& operator<<(std::ostream& os, roi_align_param const& tst)
+    {
+        return os << "input net width = " << tst.in_net_w
+                  << ", input net height = " << tst.in_net_h
+                  << ", pooled_w = " << tst.pooled_w
+                  << ", pooled_h = " << tst.pooled_h
+                  << ", sampling_ratio = " << tst.sampling_ratio
+                  << ", spatial_scale = " << tst.spatial_scale;
+    };
+};
+
+PRETTY_PARAM(roi_align_mode, std::string);
+PRETTY_PARAM(number_rois, uint32_t);
+
+using ROIAlignTestParams = std::tuple<Dims, roi_align_param, number_rois, roi_align_mode>;
+typedef myriadLayerTestBaseWithParam<ROIAlignTestParams> myriadLayersTestsROIAlign_nightly;
+
+const int roi_cols = 4;
+
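+// Generates num_rois random boxes as [x0, y0, x1, y1] rows; the pixel
+// coordinates are divided by the network input width/height, so the values
+// written to the blob lie in [0, 1].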
+static void genROIs(InferenceEngine::Blob::Ptr rois,
+                    const roi_align_param& params,
+                    const uint32_t num_rois) {
+    auto roisBlob_data = rois->buffer().as<ie_fp16*>();
+    const int max_range_width = params.in_net_w * 4 / 5;
+    const int max_range_height = params.in_net_h * 4 / 5;
+
+    float scale_width  = (float)params.in_net_w;
+    float scale_height = (float)params.in_net_h;
+
+    std::mt19937 gen(145781);
+
+    std::uniform_int_distribution<> dis_x0(0, max_range_width - 1);
+    std::uniform_int_distribution<> dis_y0(0, max_range_height - 1);
+    for (int i = 0; i < num_rois; i++) {
+        int x0 = dis_x0(gen);
+        std::uniform_int_distribution<> dis_x1(0, (params.in_net_w - x0 - 1) - 1);
+        int x1 = x0 + dis_x1(gen) + 1;
+
+        int y0 = dis_y0(gen);
+        std::uniform_int_distribution<> dis_y1(0, (params.in_net_h - y0 - 1) - 1);
+        int y1 = y0 + dis_y1(gen) + 1;
+
+        roisBlob_data[i * roi_cols + 0] = PrecisionUtils::f32tof16(x0 / scale_width);
+        roisBlob_data[i * roi_cols + 1] = PrecisionUtils::f32tof16(y0 / scale_height);
+        roisBlob_data[i * roi_cols + 2] = PrecisionUtils::f32tof16(x1 / scale_width);
+        roisBlob_data[i * roi_cols + 3] = PrecisionUtils::f32tof16(y1 / scale_height);
+    }
+}
+
+static void genBatchIndices(InferenceEngine::Blob::Ptr batch_indices,
+                            const uint32_t num_rois,
+                            const uint32_t num_batches) {
+    int32_t* batch_indices_data = batch_indices->buffer().as<int32_t*>();
+
+    std::mt19937 gen(145781);
+    std::uniform_int_distribution<> dis_index(0, num_batches - 1);
+    for (int i = 0; i < num_rois; i++) {
+        batch_indices_data[i] = dis_index(gen);
+    }
+}
+
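+// Builds the IR v7 XML for a single ROIAlign layer by substituting the
+// __PLACEHOLDER__ tokens in the template below; REPLACE_WITH_STR is the
+// string-replace helper from the common test utilities.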
+static std::string getModel(const int batches, const int channels, const int height, const int width,
+                            const int pooled_h, const int pooled_w, const float spatial_scale,
+                            const int sampling_ratio, const int num_rois, const std::string mode) {
+    std::string model = R"V0G0N(
+                <net name="testROIAlign" version="7">
+                    <layers>
+                        <layer id="0" name="feature_map" precision="FP16" type="Input">
+                            <output>
+                                <port id="0">
+                                    <dim>__BATCHES__</dim>
+                                    <dim>__CHANNELS__</dim>
+                                    <dim>__HEIGHT__</dim>
+                                    <dim>__WIDTH__</dim>
+                                </port>
+                            </output>
+                        </layer>
+                        <layer id="1" name="boxes" precision="FP16" type="Input">
+                            <output>
+                                <port id="0">
+                                    <dim>__NUM_ROIS__</dim>
+                                    <dim>4</dim>
+                                </port>
+                            </output>
+                        </layer>
+                        <layer id="2" name="batch_indices" precision="I32" type="Input">
+                            <output>
+                                <port id="0">
+                                    <dim>__NUM_ROIS__</dim>
+                                </port>
+                            </output>
+                        </layer>
+                        <layer id="3" name="roi_align" type="ROIAlign">
+                            <data pooled_h="__POOLED_H__" pooled_w="__POOLED_W__" spatial_scale="__SPATIAL_SCALE__" sampling_ratio="__SAMPLING_RATIO__" mode="__MODE__"/>
+                            <input>
+                                <port id="0">
+                                    <dim>__BATCHES__</dim>
+                                    <dim>__CHANNELS__</dim>
+                                    <dim>__HEIGHT__</dim>
+                                    <dim>__WIDTH__</dim>
+                                </port>
+                                <port id="1">
+                                    <dim>__NUM_ROIS__</dim>
+                                    <dim>4</dim>
+                                </port>
+                                <port id="2">
+                                    <dim>__NUM_ROIS__</dim>
+                                </port>
+                            </input>
+                            <output>
+                                <port id="0">
+                                    <dim>__NUM_ROIS__</dim>
+                                    <dim>__CHANNELS__</dim>
+                                    <dim>__POOLED_H__</dim>
+                                    <dim>__POOLED_W__</dim>
+                                </port>
+                            </output>
+                        </layer>
+                    </layers>
+                    <edges>
+                        <edge from-layer="0" from-port="0" to-layer="3" to-port="0"/>
+                        <edge from-layer="1" from-port="0" to-layer="3" to-port="1"/>
+                        <edge from-layer="2" from-port="0" to-layer="3" to-port="2"/>
+                    </edges>
+                </net>
+            )V0G0N";
+
+    REPLACE_WITH_STR(model, "__NUM_ROIS__", std::to_string(num_rois));
+    REPLACE_WITH_STR(model, "__BATCHES__",  std::to_string(batches));
+    REPLACE_WITH_STR(model, "__CHANNELS__", std::to_string(channels));
+    REPLACE_WITH_STR(model, "__POOLED_H__", std::to_string(pooled_h));
+    REPLACE_WITH_STR(model, "__POOLED_W__", std::to_string(pooled_w));
+    REPLACE_WITH_STR(model, "__HEIGHT__",   std::to_string(height));
+    REPLACE_WITH_STR(model, "__WIDTH__",    std::to_string(width));
+    REPLACE_WITH_STR(model, "__SPATIAL_SCALE__",  std::to_string(spatial_scale));
+    REPLACE_WITH_STR(model, "__SAMPLING_RATIO__", std::to_string(sampling_ratio));
+    REPLACE_WITH_STR(model, "__MODE__", mode);
+
+    return model;
+}
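+
+// For instance, the parameter vectors at the bottom of this file yield
+// getModel(5, 256, 160, 157, 9, 7, 1.4f, 2, 53, "avg"): a 5x256x160x157
+// feature map pooled to 9x7 (h x w) over 53 ROIs.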
+
+TEST_P(myriadLayersTestsROIAlign_nightly, ROIAlign) {
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+    const tensor_test_params dims_layer_in = std::get<0>(GetParam());
+    const roi_align_param test_params      = std::get<1>(GetParam());
+    const uint32_t num_rois                = std::get<2>(GetParam());
+    const std::string mode_str             = std::get<3>(GetParam());
+
+    const uint32_t num_batches = dims_layer_in.n;
+    const uint32_t pooled_h = test_params.pooled_h;
+    const uint32_t pooled_w = test_params.pooled_w;
+    const float spatial_scale = test_params.spatial_scale;
+
+    const auto model = getModel(num_batches, dims_layer_in.c, dims_layer_in.h, dims_layer_in.w,
+                                pooled_h, pooled_w, spatial_scale,
+                                test_params.sampling_ratio, num_rois, mode_str);
+
+    ASSERT_NO_THROW(readNetwork(model));
+
+    const auto& network = _cnnNetwork;
+    _inputsInfo = network.getInputsInfo();
+    _inputsInfo["boxes"]->setPrecision(Precision::FP16);
+    _inputsInfo["feature_map"]->setPrecision(Precision::FP16);
+    _inputsInfo["batch_indices"]->setPrecision(Precision::I32);
+
+    _outputsInfo = network.getOutputsInfo();
+    _outputsInfo["roi_align"]->setPrecision(Precision::FP16);
+
+    StatusCode st = OK;
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, _config, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr roisBlob;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("boxes", roisBlob, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    genROIs(roisBlob, test_params, num_rois);
+
+    Blob::Ptr featureMapBlob;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("feature_map", featureMapBlob, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    GenRandomData(featureMapBlob);
+
+    Blob::Ptr batchIndicesBlob;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("batch_indices", batchIndicesBlob, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    genBatchIndices(batchIndicesBlob, num_rois, num_batches);
+
+    ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr outputBlob;
+    ASSERT_NO_THROW(st = _inferRequest->GetBlob("roi_align", outputBlob, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    Blob::Ptr refOutputBlob = make_shared_blob<float>({Precision::FP32,
+                                                      outputBlob->getTensorDesc().getDims(),
+                                                      outputBlob->getTensorDesc().getLayout()});
+    refOutputBlob->allocate();
+
+    ref_ROIAlign(featureMapBlob, roisBlob, batchIndicesBlob,
+                 refOutputBlob,
+                 test_params.sampling_ratio, pooled_h, pooled_w,
+                 num_rois, spatial_scale, mode_str);
+
+    CompareCommonAbsolute(refOutputBlob, outputBlob, ERROR_BOUND);
+}
+
+static std::vector<Dims> s_ROIAlignLayerInput = {
+    {{5, 256, 160, 157}},
+};
+
+static std::vector<roi_align_param> s_ROIAlignLayerParam = {
+    {{640, 640, 7, 9, 2, 1.4f}},
+};
+
+static std::vector<number_rois> s_ROIAlignNumROIs = {
+    53
+};
+
+static std::vector<roi_align_mode> s_ROIAlignMode = {
+    std::string("avg"),
+    std::string("max")
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_feature_extractor_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_feature_extractor_test.cpp
new file mode 100644 (file)
index 0000000..bfe1307
--- /dev/null
@@ -0,0 +1,12 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_roi_feature_extractor_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsROIFeatureExtractor_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_ROIFeatureExtractorLayerInput),
+        ::testing::ValuesIn(s_ROIFeatureExtractorLayerParam),
+        ::testing::ValuesIn(s_ROIFeatureExtractorNumROIs))
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_feature_extractor_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_feature_extractor_test.hpp
new file mode 100644 (file)
index 0000000..6ad35a6
--- /dev/null
@@ -0,0 +1,148 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include "myriad_layers_reference_functions.hpp"
+#include "myriad_layers_tests.hpp"
+
+using namespace InferenceEngine;
+
+#define NUM_ELEM_ROIS (4)
+#define ERROR_BOUND (2.5e-3f)
+
+struct roi_feature_extractor_param {
+    int         in_net_w;
+    int         in_net_h;
+    uint32_t    output_size;
+    int         sampling_ratio;
+
+    friend std::ostream& operator<<(std::ostream& os, roi_feature_extractor_param const& tst)
+    {
+        return os << "input net width = " << tst.in_net_w
+                  << ", input net height = " << tst.in_net_h
+                  << ", output_size = " << tst.output_size
+                  << ", sampling_ratio = " << tst.sampling_ratio;
+    };
+};
+
+PRETTY_PARAM(number_rois, uint32_t);
+
+using ROIFeatureExtractorTestParams = std::tuple<Dims, roi_feature_extractor_param, number_rois>;
+
+typedef myriadLayerTestBaseWithParam<ROIFeatureExtractorTestParams> myriadLayersTestsROIFeatureExtractor_nightly;
+
+static void genROIs(InferenceEngine::Blob::Ptr rois,
+                    const roi_feature_extractor_param& params,
+                    const uint32_t num_rois) {
+    ie_fp16 *roisBlob_data = rois->buffer().as<ie_fp16*>();
+    const int max_range_width = params.in_net_w * 4 / 5;
+    const int max_range_height = params.in_net_h * 4 / 5;
+
+    float scale_width = (float)params.in_net_w;
+    float scale_height = (float)params.in_net_h;
+
+    for (int i = 0; i < num_rois; i++) {
+        int x0 = std::rand() % max_range_width;
+        int x1 = x0 + (std::rand() % (params.in_net_w - x0 - 1)) + 1;
+        int y0 = std::rand() % max_range_height;
+        int y1 = y0 + (std::rand() % (params.in_net_h - y0 - 1)) + 1;
+
+        roisBlob_data[i * NUM_ELEM_ROIS + 0] = PrecisionUtils::f32tof16(x0);
+        roisBlob_data[i * NUM_ELEM_ROIS + 1] = PrecisionUtils::f32tof16(y0);
+        roisBlob_data[i * NUM_ELEM_ROIS + 2] = PrecisionUtils::f32tof16(x1);
+        roisBlob_data[i * NUM_ELEM_ROIS + 3] = PrecisionUtils::f32tof16(y1);
+    }
+}
+
+TEST_P(myriadLayersTestsROIFeatureExtractor_nightly, ROIFeatureExtractor) {
+    tensor_test_params dims_layer_in = std::get<0>(GetParam());
+    roi_feature_extractor_param test_params = std::get<1>(GetParam());
+    const uint32_t num_rois = std::get<2>(GetParam());
+
+    bool use_output_rois = true;
+    const int levels_num = 4;
+
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+    IN_OUT_desc input_tensors, output_tensors;
+    input_tensors.push_back({num_rois, NUM_ELEM_ROIS});
+    for (int i = 0; i < levels_num; i++) {
+        input_tensors.push_back({1, dims_layer_in.c, dims_layer_in.h / (1 << i), dims_layer_in.w / (1 << i)});
+    }
+    output_tensors.push_back({num_rois, dims_layer_in.c, test_params.output_size, test_params.output_size});
+    // adding output ROIs
+    if (use_output_rois)
+        output_tensors.push_back({num_rois, NUM_ELEM_ROIS});
+
+    SetInputTensors(input_tensors);
+    SetOutputTensors(output_tensors);
+
+    std::vector<int> pyramid_scales = {4, 8, 16, 32, 64};
+    std::string pyramid_scales_str = "";
+    for (size_t i = 0; i < pyramid_scales.size(); i++) {
+        pyramid_scales_str += std::to_string(pyramid_scales[i]);
+        if (i != pyramid_scales.size() - 1) pyramid_scales_str += ",";
+    }
+
+    std::map<std::string, std::string> layer_params = {
+        {"output_size",     std::to_string(test_params.output_size)},
+        {"sampling_ratio",  std::to_string(test_params.sampling_ratio)},
+        {"pyramid_scales",  pyramid_scales_str},
+        {"distribute_rois_between_levels", "1"},
+        {"preserve_rois_order", "1"},
+        {"image_id", "0"},
+    };
+
+    makeSingleLayerNetwork(LayerInitParams("ExperimentalDetectronROIFeatureExtractor").params(layer_params));
+
+    /* Input data generating */
+    for (auto blob : _inputMap) {
+        if (blob.second == _inputMap.begin()->second) {
+            genROIs(blob.second, test_params, num_rois);
+        } else {
+            GenRandomData(blob.second);
+        }
+    }
+
+    std::vector<InferenceEngine::Blob::Ptr> refInputBlobs;
+    std::vector<InferenceEngine::Blob::Ptr> refOutputBlobs;
+    for (auto blob : _inputMap) {
+        refInputBlobs.push_back(blob.second);
+    }
+    for (auto blob : _outputMap) {
+        auto refOutputBlob = make_shared_blob<float>({Precision::FP32,
+                                                      blob.second->getTensorDesc().getDims(),
+                                                      blob.second->getTensorDesc().getLayout()});
+        refOutputBlob->allocate();
+        refOutputBlobs.push_back(refOutputBlob);
+    }
+    ref_ROIFeatureExtractor(refInputBlobs,
+                            refOutputBlobs[0],
+                            use_output_rois ? refOutputBlobs[1] : nullptr,
+                            pyramid_scales,
+                            test_params.sampling_ratio,
+                            test_params.output_size,
+                            test_params.output_size);
+
+    ASSERT_TRUE(Infer());
+
+    auto dst0 = _outputMap.begin()->second;
+    CompareCommonAbsolute(dst0, refOutputBlobs[0], ERROR_BOUND);
+    if (use_output_rois) {
+        auto dst1 = (++_outputMap.begin())->second;
+        CompareCommonAbsolute(dst1, refOutputBlobs[1], ERROR_BOUND);
+    }
+}
+
+static std::vector<Dims> s_ROIFeatureExtractorLayerInput = {
+    {{1, 256, 160, 160}},
+};
+
+static std::vector<roi_feature_extractor_param> s_ROIFeatureExtractorLayerParam = {
+    {{640, 640, 7, 2}},
+};
+
+static std::vector<number_rois> s_ROIFeatureExtractorNumROIs = {
+    50
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_pooling_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_pooling_test.cpp
new file mode 100644 (file)
index 0000000..8f875a1
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_roi_pooling_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsROIPooling_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_ROIPoolingLayerInput),
+        ::testing::ValuesIn(s_ROIPoolingLayerParam),
+        ::testing::ValuesIn(s_ROIPoolingNumRois),
+        ::testing::ValuesIn(s_ROIPoolingMethod),
+        ::testing::Values<IRVersion>(IRVersion::v7, IRVersion::v10))
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_pooling_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_pooling_test.hpp
new file mode 100644 (file)
index 0000000..6b8d988
--- /dev/null
@@ -0,0 +1,318 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include "myriad_layers_tests.hpp"
+
+using namespace InferenceEngine;
+
+#define NUM_ELEM_ROIS (5)
+#define ERROR_BOUND (2.5e-3f)
+#define DIV_THEN_CEIL(x, y)  (((x) + (y) - 1) / (y))
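+// e.g. DIV_THEN_CEIL(7, 2) == 4 and DIV_THEN_CEIL(6, 2) == 3 (integer ceil-division)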
+
+struct ROIPoolingParams {
+    int         in_net_w;
+    int         in_net_h;
+    uint32_t    pooled_w;
+    uint32_t    pooled_h;
+    float       spatial_scales;
+};
+
+PRETTY_PARAM(roi_pooling_param, ROIPoolingParams);
+
+static inline void PrintTo(const ROIPoolingParams& param, ::std::ostream* os)
+{
+    ROIPoolingParams data = param;
+    *os << "roi_pooling_param: " << data.in_net_w << ", " << data.in_net_h << ", " << data.pooled_w << ", " << data.pooled_h << ", " << data.spatial_scales;
+}
+
+typedef enum {
+    roi_pooling_max = 0,
+    roi_pooling_bilinear = 1
+} t_ROIPooling_method;
+
+PRETTY_PARAM(roi_pooling_method, t_ROIPooling_method);
+
+static inline void PrintTo(const t_ROIPooling_method& param, ::std::ostream* os)
+{
+    t_ROIPooling_method data = param;
+    *os << "roi_pooling_method: " << (data == roi_pooling_bilinear ? "bilinear" : "max");
+}
+
+using ROIPoolingTestParams = std::tuple<Dims, roi_pooling_param, uint32_t, roi_pooling_method, IRVersion>;
+
+class myriadLayersTestsROIPooling_nightly: public myriadLayerTestBaseWithParam<ROIPoolingTestParams> {
+public:
+    void genROIs(InferenceEngine::Blob::Ptr rois,
+                 const ROIPoolingParams& params,
+                 const uint32_t num_rois,
+                 const t_ROIPooling_method method) {
+
+        ie_fp16 *roisBlob_data = rois->buffer().as<ie_fp16*>();
+        const int max_range_width = params.in_net_w * 4 / 5;
+        const int max_range_height = params.in_net_h * 4 / 5;
+
+        float scale_width = 1.0f;
+        float scale_height = 1.0f;
+        if (method == roi_pooling_bilinear) {
+            scale_width  = static_cast<float>(params.in_net_w);
+            scale_height = static_cast<float>(params.in_net_h);
+        }
+
+        for (int i = 0; i < num_rois; i++)
+        {
+            int x0 = std::rand() % max_range_width;
+            int x1 = x0 + (std::rand() % (params.in_net_w - x0 - 1)) + 1;
+            int y0 = std::rand() % max_range_height;
+            int y1 = y0 + (std::rand() % (params.in_net_h - y0 - 1)) + 1;
+
+            roisBlob_data[i * NUM_ELEM_ROIS + 0] = PrecisionUtils::f32tof16(0);
+            roisBlob_data[i * NUM_ELEM_ROIS + 1] = PrecisionUtils::f32tof16(x0 / scale_width);
+            roisBlob_data[i * NUM_ELEM_ROIS + 2] = PrecisionUtils::f32tof16(y0 / scale_height);
+            roisBlob_data[i * NUM_ELEM_ROIS + 3] = PrecisionUtils::f32tof16(x1 / scale_width);
+            roisBlob_data[i * NUM_ELEM_ROIS + 4] = PrecisionUtils::f32tof16(y1 / scale_height);
+        }
+    }
+
+    void refROIPooling(const InferenceEngine::Blob::Ptr src,
+                                const InferenceEngine::Blob::Ptr rois,
+                                InferenceEngine::Blob::Ptr dst,
+                                const int num_rois,
+                                const ROIPoolingParams& params,
+                                const tensor_test_params& in,
+                                const t_ROIPooling_method method) {
+        const ie_fp16* bottom3d = src->cbuffer().as<ie_fp16 *>();
+        const ie_fp16* roi2d = rois->cbuffer().as<ie_fp16 *>();
+        ie_fp16* top4d = dst->buffer().as<ie_fp16 *>();
+        const int R = num_rois;
+        const int C = in.c;
+        const int H = in.h;
+        const int W = in.w;
+        const int pooled_h = params.pooled_h;
+        const int pooled_w = params.pooled_w;
+        const float spatial_scale = params.spatial_scales;
+        const int top_area = pooled_h * pooled_w;
+        const int top_volume = C * pooled_h * pooled_w;
+        if (method == roi_pooling_max) //  generate GT for roi_pooling_max
+        {
+            for (int r = 0; r < R; ++r) {
+                // RoI in the bottom plane
+                const int x1 = std::round(PrecisionUtils::f16tof32(roi2d[r * NUM_ELEM_ROIS + 1]) * spatial_scale);
+                const int y1 = std::round(PrecisionUtils::f16tof32(roi2d[r * NUM_ELEM_ROIS + 2]) * spatial_scale);
+                const int x2 = std::round(PrecisionUtils::f16tof32(roi2d[r * NUM_ELEM_ROIS + 3]) * spatial_scale);
+                const int y2 = std::round(PrecisionUtils::f16tof32(roi2d[r * NUM_ELEM_ROIS + 4]) * spatial_scale);
+                const int roi_W = x2 - x1 + 1;
+                const int roi_H = y2 - y1 + 1;
+
+                for (int h = 0; h < pooled_h; ++h) {
+                    for (int w = 0; w < pooled_w; ++w) {
+                        const int hb_start  = std::min(H-1, std::max(0, y1 + (h * roi_H) / pooled_h));
+                        const int hb_end    = std::min(H-1, std::max(0, y1 + DIV_THEN_CEIL((h + 1) * roi_H, pooled_h)));
+                        const int wb_start  = std::min(W-1, std::max(0, x1 + (w * roi_W) / pooled_w));
+                        const int wb_end    = std::min(W-1, std::max(0, x1 + DIV_THEN_CEIL((w + 1) * roi_W, pooled_w)));
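+                        // e.g. roi_H = 5, pooled_h = 2 gives row bins [0,3) and
+                        // [2,5) relative to y1: adjacent bins may overlap by one pixel.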
+
+                        // Usually Myriad data order is top[h][w][r][c]
+                        // But the roipooling output data order is top[r][c][h][w]
+                        const int plane = pooled_w * pooled_h;
+                        const int top_index = (h * pooled_w) + (w) + (r * C * plane);
+
+                        // if the bottom region is empty,
+                        if (hb_start >= hb_end || wb_start >= wb_end) {
+                            for (int c = 0; c < C; ++c) {
+                                top4d[top_index + c * plane] = 0;
+                            }
+                            continue;
+                        }
+
+                        // if the bottom region is not empty,
+                        //   top[r][c][h][w] = "max in the region"
+                        for (int c = 0; c < C; ++c) {
+                            // Myriad data order is different: bottom[h][w][c]
+                            const ie_fp16* p_bottom3d = bottom3d + c;
+                            int max_idx = hb_start * W * C + wb_start * C;
+                            for (int hb = hb_start; hb < hb_end; ++hb) {
+                                for (int wb = wb_start; wb < wb_end; ++wb) {
+                                    // Data order is different
+                                    const int this_idx = hb * W * C + wb * C;
+                                    float this_value = PrecisionUtils::f16tof32(p_bottom3d[this_idx]);
+                                    float max_value = PrecisionUtils::f16tof32(p_bottom3d[max_idx]);
+                                    max_idx = (this_value > max_value) ? this_idx : max_idx;
+                                }
+                            }
+                            top4d[top_index + c * plane] = p_bottom3d[max_idx];
+                        } // endfor c
+                    }
+                } // endfor h, w
+            } // endfor r
+        } else { //  generate GT for roi_pooling_bilinear
+            for (int r = 0; r < R; ++r) {
+                float roi_start_w_ = PrecisionUtils::f16tof32(roi2d[r * NUM_ELEM_ROIS + 1]); // normalized coordinates
+                float roi_start_h_ = PrecisionUtils::f16tof32(roi2d[r * NUM_ELEM_ROIS + 2]);
+                float roi_end_w_   = PrecisionUtils::f16tof32(roi2d[r * NUM_ELEM_ROIS + 3]);
+                float roi_end_h_   = PrecisionUtils::f16tof32(roi2d[r * NUM_ELEM_ROIS + 4]);
+
+                float height_scale = (roi_end_h_ - roi_start_h_) * (H - 1) / (pooled_h - 1);
+                float width_scale  = (roi_end_w_ - roi_start_w_) * (W - 1) / (pooled_w - 1);
+
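+                // Each pooled cell (ph, pw) maps to the fractional source point
+                // (in_y, in_x); the output is the bilinear interpolation of its
+                // four integer neighbours, with out-of-range points producing 0.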
+                for (int c = 0; c < C; ++c) {
+                    const ie_fp16* p_bottom3d = bottom3d + c;
+                    for (int ph = 0; ph < pooled_h; ++ph) {
+                        for (int pw = 0; pw < pooled_w; ++pw) {
+                            float in_y = (ph * height_scale + roi_start_h_ * (H - 1));
+                            float in_x = (pw * width_scale  + roi_start_w_ * (W - 1));
+
+                            // Usually Myriad data order is top[h][w][r][c]
+                            // But the roipooling output data order is top[r][c][h][w]
+                            const int top_index = (pw) + (ph * pooled_w) + (c * top_area) + (r * C * top_area);
+                            if (in_y < 0 || in_y > H - 1 || in_x < 0 || in_x > W - 1) {
+                                top4d[top_index] = 0;
+                            } else {
+                                int top_y_index    = static_cast<int>(floorf(in_y));
+                                int bottom_y_index = static_cast<int>(ceilf(in_y));
+                                int left_x_index   = static_cast<int>(floorf(in_x));
+                                int right_x_index  = static_cast<int>(ceilf(in_x));
+
+                                if (right_x_index > W - 1)
+                                    right_x_index = W - 1;
+
+                                if (bottom_y_index > H - 1)
+                                    bottom_y_index = H - 1;
+
+                                const float top_left     = PrecisionUtils::f16tof32(p_bottom3d[top_y_index * W * C + left_x_index * C]);
+                                const float top_right    = PrecisionUtils::f16tof32(p_bottom3d[top_y_index * W * C + right_x_index * C]);
+                                const float bottom_left  = PrecisionUtils::f16tof32(p_bottom3d[bottom_y_index * W * C + left_x_index * C]);
+                                const float bottom_right = PrecisionUtils::f16tof32(p_bottom3d[bottom_y_index * W * C + right_x_index * C]);
+
+                                const float top    = top_left + (top_right - top_left) * (in_x - left_x_index);
+                                const float bottom = bottom_left + (bottom_right - bottom_left) * (in_x - left_x_index);
+
+                                top4d[top_index] = PrecisionUtils::f32tof16(top + (bottom - top) * (in_y - top_y_index));
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
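+
+    // Re-expose the base-class overloads that the wrapper below would
+    // otherwise hide.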
+    using myriadLayersTests_nightly::makeSingleLayerNetwork;
+    void makeSingleLayerNetwork(const std::map<std::string, std::string>& params,
+                     const ROIPoolingParams& test_params,
+                     const uint32_t num_rois,
+                     const t_ROIPooling_method method)
+    {
+        makeSingleLayerNetwork(LayerInitParams("ROIPooling").params(params),
+                               NetworkInitParams().createInference(false));
+        createInferRequest(test_params, num_rois, method);
+    }
+    void createInferRequest(const ROIPoolingParams& params,
+                            const uint32_t num_rois,
+                            const t_ROIPooling_method method)
+    {
+        ASSERT_NO_THROW(_inputsInfo = _cnnNetwork.getInputsInfo());
+        ASSERT_EQ(_inputsInfo.size(), 2);
+        _inputsInfo.begin()->second->setLayout(NHWC);
+        for (auto inputInfo : _inputsInfo) {
+            inputInfo.second->setPrecision(Precision::FP16);
+        }
+
+        ASSERT_NO_THROW(_outputsInfo = _cnnNetwork.getOutputsInfo());
+        ASSERT_EQ(_outputsInfo.size(), 1);
+        for (auto outputInfo : _outputsInfo) {
+            outputInfo.second->setPrecision(Precision::FP16);
+            outputInfo.second->setLayout(NCHW);
+        }
+
+        InferenceEngine::StatusCode st = InferenceEngine::StatusCode::GENERAL_ERROR;
+        ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, _cnnNetwork, {}, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+        ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        ASSERT_NE(_inferRequest, nullptr) << _resp.msg;
+
+        ASSERT_NO_THROW(_inputsInfo = _cnnNetwork.getInputsInfo());
+        ASSERT_NO_THROW(_outputsInfo = _cnnNetwork.getOutputsInfo());
+        for (auto inpt : _inputsInfo)
+        {
+            InferenceEngine::Layout layout = inpt.second->getTensorDesc().getLayout();
+
+            Blob::Ptr data;
+            ASSERT_NO_THROW(st = _inferRequest->GetBlob(inpt.first.c_str(), data, &_resp));
+            ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+            SetSeed(3);
+
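+            // The first input is the feature map; the second input holds the ROIs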
+            if (inpt.first == _inputsInfo.begin()->first)
+            {
+                GenRandomData(data);
+            }
+            else
+            {
+                genROIs(data, params, num_rois, method);
+            }
+            _inputMap[inpt.first] = data;
+        }
+
+        Blob::Ptr data;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob(_outputsInfo.begin()->first.c_str(), data, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        _outputMap[_outputsInfo.begin()->first] = data;
+    }
+};
+
+TEST_P(myriadLayersTestsROIPooling_nightly, ROIPooling) {
+    tensor_test_params dims_layer_in = std::get<0>(GetParam());
+    ROIPoolingParams test_params = std::get<1>(GetParam());
+    const uint32_t num_rois = std::get<2>(GetParam());
+    const t_ROIPooling_method method = (t_ROIPooling_method)(std::get<3>(GetParam()));
+    _irVersion = std::get<4>(GetParam());
+    IN_OUT_desc input_tensors, output_tensors;
+    input_tensors.push_back({1, dims_layer_in.c, dims_layer_in.h, dims_layer_in.w});
+    input_tensors.push_back({num_rois, NUM_ELEM_ROIS});
+    output_tensors.push_back({num_rois, dims_layer_in.c, test_params.pooled_h, test_params.pooled_w});
+
+    SetInputTensors(input_tensors);
+    SetOutputTensors(output_tensors);
+
+    std::map<std::string, std::string> layer_params = {
+        {"pooled_w",      std::to_string(test_params.pooled_w)},
+        {"pooled_h",      std::to_string(test_params.pooled_h)},
+        {"spatial_scale", std::to_string(test_params.spatial_scales)},
+        {"method",        (method == roi_pooling_bilinear ? "bilinear" : "max")},
+    };
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(layer_params, test_params, num_rois, method));
+
+    ASSERT_TRUE(Infer());
+
+    // Verify result
+    auto src  = _inputMap.begin()->second;
+    auto rois = std::next(_inputMap.begin())->second;
+    auto dst  = _outputMap.begin()->second;
+
+    auto _refBlob = make_shared_blob<ie_fp16>(dst->getTensorDesc());
+    _refBlob->allocate();
+
+    refROIPooling(src, rois, _refBlob, num_rois, test_params, dims_layer_in, method);
+
+    CompareCommonAbsolute(dst, _refBlob, ERROR_BOUND);
+}
+
+static std::vector<Dims> s_ROIPoolingLayerInput = {
+    {{1, 1,   14, 14}},
+    {{1, 2,   14, 14}},
+    {{1, 256, 14, 14}},
+};
+
+static std::vector<roi_pooling_param> s_ROIPoolingLayerParam = {
+    {{224, 224, 7, 7, 0.0625}},
+};
+
+static std::vector<uint32_t> s_ROIPoolingNumRois = {
+    1, 10, 30, 50, 100
+};
+
+static std::vector<roi_pooling_method> s_ROIPoolingMethod = {
+        roi_pooling_max,
+        roi_pooling_bilinear,
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_scale_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_scale_test.cpp
new file mode 100644 (file)
index 0000000..05cedf0
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_scale_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        accuracy, myriadLayersTestsScale_nightly,
+        ::testing::Combine(
+            ::testing::ValuesIn(s_inputScaleTensors),
+            ::testing::ValuesIn(s_inputBiasScale)));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_scale_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_scale_test.hpp
new file mode 100644 (file)
index 0000000..cdad5d7
--- /dev/null
@@ -0,0 +1,141 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include "myriad_layers_tests.hpp"
+#include "vpu/model/data_desc.hpp"
+
+#define ERROR_BOUND (5.e-3f)
+
+using namespace InferenceEngine;
+
+namespace {
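+    // Advance the multi-index `in` within the bounds `out` (dimension 0 changes fastest);
+    // returns false once the whole index space has been traversed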
+    bool iter(SizeVector& in, SizeVector& out)
+    {
+        bool flag = true;
+        for (int i = 0; i < out.size(); i++) {
+            if (in[i] < out[i] - 1) {
+                in[i]++;
+                break;
+            } else {
+                if (i == out.size() - 1) {
+                    flag = false;
+                    break;
+                }
+                in[i] = 0;
+            }
+        }
+        return flag;
+    }
+
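+    // Linear offset of the multi-index `in` inside a tensor of shape `out`
+    // (the last dimension is the innermost one)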
+    int calcOffset(SizeVector& in, SizeVector& out)
+    {
+        int offset = in.back();
+        for (int i = in.size() - 2; i >= 0; i--) {
+            int mul = in[i];
+            for (int j = i + 1; j < out.size(); j++)
+                mul *= out[j];
+            offset += mul;
+        }
+        return offset;
+    }
+}
+
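+// Reference ScaleShift: dst = src * weights[c] (+ biases[c] when bias is set),
+// applied per channel c; the biases follow the scales in the weights buffer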
+void ref_scale(const InferenceEngine::Blob::Ptr src,
+                      const uint16_t *weights,
+                      InferenceEngine::Blob::Ptr dst,
+                      bool bias) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+
+    SizeVector in_size;
+    SizeVector out_size;
+    in_size = src->getTensorDesc().getDims();
+    out_size = dst->getTensorDesc().getDims();
+    Layout layout = src->getTensorDesc().getLayout();
+    int dims = in_size.size();
+    int dimC = dimToIeInd(vpu::Dim::C, dims);
+    SizeVector curr_size(dims);
+    const uint16_t *src_data = src->buffer();
+    const uint16_t *bias_data = weights + in_size[dimC];
+    uint16_t *dst_data = dst->buffer();
+    // TODO: investigate this case
+    if (layout == NCHW || layout == NHWC) {
+        size_t N1 = out_size[0];
+        size_t C1 = out_size[1];
+        size_t H1 = out_size[2];
+        size_t W1 = out_size[3];
+        for (size_t n = 0; n < N1; n++) {
+            for (size_t c = 0; c < C1; c++) {
+                float val = 0.0f;
+                if (bias)
+                    val = PrecisionUtils::f16tof32(bias_data[c]);
+                for (size_t h = 0; h < H1; h++) {
+                    for (size_t w = 0; w < W1; w++) {
+                        size_t iidx = layout == NCHW ?
+                                           w + h * W1 + c * W1 * H1 + n * W1 * H1 * C1 :
+                                           c + w * C1 + h * C1 * W1 + n * W1 * H1 * C1;
+                        float res = val + PrecisionUtils::f16tof32(src_data[iidx]) *
+                                PrecisionUtils::f16tof32(weights[c]);
+                        dst_data[iidx] = PrecisionUtils::f32tof16(res);
+                    }
+                }
+            }
+        }
+    } else {
+        do {
+            float val = 0.0f;
+            if (bias)
+                val = PrecisionUtils::f16tof32(bias_data[curr_size[dimC]]);
+            float res = val + PrecisionUtils::f16tof32(src_data[calcOffset(curr_size, in_size)]) *
+                              PrecisionUtils::f16tof32(weights[curr_size[dimC]]);
+            dst_data[calcOffset(curr_size, out_size)] = PrecisionUtils::f32tof16(res);
+        } while(iter(curr_size, out_size));
+    }
+}
+
+typedef std::tuple<SizeVector, bool> TestScaleShift;
+
+class myriadLayersTestsScale_nightly: public myriadLayersTests_nightly,
+                              public testing::WithParamInterface<TestScaleShift> {
+};
+
+TEST_P(myriadLayersTestsScale_nightly, TestsScale)
+{
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+    SizeVector p = std::get<0>(::testing::WithParamInterface<TestScaleShift>::GetParam());
+    bool biasAdd = std::get<1>(::testing::WithParamInterface<TestScaleShift>::GetParam());
+    auto dims = p.size();
+    int dimC = dimToIeInd(vpu::Dim::C, dims);
+    size_t sz_weights = p[dimC];
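+    // sz_bias collapses to 0 when biasAdd is false, so the single weights
+    // blob then carries only the scale values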
+    size_t sz_bias = p[dimC] * biasAdd;
+    size_t sz = sz_weights + sz_bias;
+    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(GenWeights(sz));
+    uint16_t* weights = weights_ptr->data().as<uint16_t*>();
+    IN_OUT_desc inpt = {p};
+    SetInputTensors(inpt);
+    SetOutputTensors(inpt);
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("ScaleShift")
+                                        .weights(sz_weights)
+                                        .biases(sz_bias),
+                                        {},
+                                        weights_ptr));
+    ASSERT_TRUE(Infer());
+    ref_scale(_inputMap.begin()->second, weights, _refBlob, biasAdd);
+    CompareCommonAbsolute(_outputMap.begin()->second, _refBlob, ERROR_BOUND);
+}
+
+static std::vector<SizeVector> s_inputScaleTensors = {
+    {{1, 16, 8}},              //     CHW
+    {{2, 4, 8, 16}},           //    NCHW
+    {{2, 2, 44, 88, 16}},      //   NCDHW
+    {{2, 2, 2, 16, 32, 32}},   //  6D tensor
+    {{3, 4, 3, 2, 12, 7, 7}},  //  7D tensor
+};
+
+static std::vector<bool> s_inputBiasScale = {
+    false,
+    true
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_scatter_elements_update_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_scatter_elements_update_test.cpp
new file mode 100644 (file)
index 0000000..8ecc122
--- /dev/null
@@ -0,0 +1,64 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_scatter_elements_update_test.hpp"
+
+using namespace testing;
+
+static const std::vector<DataType> dataTypeList = { "FP16", "I32" };
+
+static const std::vector<DataShape> dataShapeList_ndTensors = {
+    // tiny `data` tensor
+    { 10 },
+    { 3, 3 },
+    { 2, 3, 2 },
+    { 2, 2, 2, 2 },
+
+    // small `data` tensor
+    { 100 },
+    { 10, 10 },
+    { 5, 5, 5 },
+    { 3, 3, 3, 3 },
+
+    // medium-size `data` tensor
+    { 1000 },
+    { 32, 33 },
+    { 10, 10, 10 },
+    { 5, 5, 5, 8 },
+    { 3, 5, 4, 5, 3 },
+    { 3, 3, 3, 3, 3, 4 },
+    { 2, 3, 3, 3, 3, 3, 2 },
+    { 3, 3, 3, 2, 2, 2, 2, 2 },
+
+    // large `data` tensor
+    { 100000 },
+    { 351, 299 },
+    { 48, 55, 39 },
+    { 23, 14, 19, 17 },
+    { 10, 9, 11, 8, 13 },
+    { 9, 5, 11, 7, 5, 6 },
+    { 7, 6, 5, 7, 6, 3, 4 },
+    { 5, 3, 5, 7, 3, 4, 6, 3 },
+};
+
+static const std::vector<DataShape> dataShapeList_useCases = {
+    // from Mask R-CNN: N = 1000, C = 256, HxW = 7x7
+    { 1000, 256, 7, 7 },
+
+    // large 1D copy: N=1 (hidden), C=64, D=40, H = W = 112
+    { 64, 40, 112, 112 },
+
+    // many planes for 3D copy: N=16, C=512, H=W=56
+    { 16, 512, 56, 56 },
+};
+
+INSTANTIATE_TEST_CASE_P(nd_tensors, myriadLayersScatterElementsUpdateTest_nightly,
+                        Combine(
+                            ValuesIn(dataShapeList_ndTensors),
+                            ValuesIn(dataTypeList)));
+
+INSTANTIATE_TEST_CASE_P(use_cases, myriadLayersScatterElementsUpdateTest_nightly,
+                        Combine(
+                            ValuesIn(dataShapeList_useCases),
+                            ValuesIn(dataTypeList)));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_scatter_elements_update_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_scatter_elements_update_test.hpp
new file mode 100644 (file)
index 0000000..acfa1b6
--- /dev/null
@@ -0,0 +1,397 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include "myriad_layers_reference_functions.hpp"
+
+#include "vpu_case_common.hpp"
+#include "precision_utils.h"
+
+#include <algorithm>
+#include <iostream>
+#include <random>
+#include <vector>
+#include <string>
+
+//----------------------------------------------------------
+
+static
+std::ostream& operator << (std::ostream& out,
+                           const std::vector<int>& shape) {
+    out << "{";
+    const int ndims = shape.size();
+    for (int i = 0; i < ndims; i++) {
+        if (i > 0) {
+            out << ", ";
+        }
+        out << shape[i];
+    }
+    out << "}";
+    return out;
+}
+
+//----------------------------------------------------------
+
+using namespace InferenceEngine;
+
+using DataShape = std::vector<int>;
+using DataType  = std::string;  // "FP16", "I32"
+
+using ScatterElementsUpdateTestParams = std::tuple<DataShape,
+                                                   DataType>;
+
+class myriadLayersScatterElementsUpdateTest_nightly :
+    public myriadLayerTestBaseWithParam<ScatterElementsUpdateTestParams> {
+protected:
+
+    void testScatterElementsUpdate() {
+        SKIP_IF_CURRENT_TEST_IS_DISABLED();
+
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+        //
+        // Parse test parameters
+        //
+
+        const ScatterElementsUpdateTestParams& params = GetParam();
+        const std::vector<int>& dataShape = std::get<0>(params);
+        const std::string     & dataType  = std::get<1>(params);
+
+        IE_ASSERT(dataType == "I32" ||
+                  dataType == "FP16");
+
+        const int dataNDims = dataShape.size();
+        IE_ASSERT(dataNDims > 0);
+
+        //
+        // Random axis
+        //
+
+        std::uniform_int_distribution<int> axisDistr(0, dataNDims - 1);
+        const int axis = axisDistr(m_gen);
+
+        //
+        // Random shape for indices and updates
+        //
+
+        std::vector<int> updatesShape(dataNDims);
+        for (int i = 0; i < dataNDims; i++) {
+            std::uniform_int_distribution<int> distr(1, dataShape[i]);
+            updatesShape[i] = distr(m_gen);
+        }
+
+        //
+        // Skip if data is too large
+        //
+
+        const int dataTotal = getTotal(dataShape);
+        const int updatesTotal = getTotal(updatesShape);
+
+        const int bpp = dataType == "I32" ? sizeof(int32_t) : sizeof(ie_fp16);
+        const int dataByteLength = dataTotal * bpp;
+
+        const int dataByteLengthThreshold = 30 * (1 << 20);  // 30 MB
+
+        const bool tooLarge = dataByteLength > dataByteLengthThreshold;
+
+        // Disable large-data tests entirely, even for PrismCreek (ma2085). See:
+        // #-30792 [VPU] re-enable Scatter Elements Update tests for Prism Creek
+        DISABLE_IF(tooLarge);  // TODO: fix tests and re-enable if CheckMA2085()
+
+        //
+        // Initialize 1-layer network
+        //
+
+        std::string model = createModel(dataType, dataShape, updatesShape);
+
+        ASSERT_NO_THROW(readNetwork(model));
+
+        Precision precision = dataType == "I32" ? Precision::I32 : Precision::FP16;
+
+        const auto& network = _cnnNetwork;
+
+        _inputsInfo = network.getInputsInfo();
+        _inputsInfo["input"]->setPrecision(precision);
+        _inputsInfo["updates"]->setPrecision(precision);
+        _inputsInfo["indices"]->setPrecision(Precision::I32);
+        _inputsInfo["axis"]->setPrecision(Precision::I32);
+
+        _outputsInfo = network.getOutputsInfo();
+        _outputsInfo["scatter"]->setPrecision(precision);
+
+        //
+        // Create infer request and get its blobs pointers
+        //
+
+        StatusCode st = OK;
+
+        ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, _config, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+        ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr inputBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("input", inputBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr indicesBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("indices", indicesBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr updatesBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("updates", updatesBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr axisBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("axis", axisBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr outputBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("scatter", outputBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr referenceBlob;
+        if (dataType == "I32") {
+            referenceBlob = make_shared_blob<int32_t>(outputBlob->getTensorDesc());
+        } else {
+            referenceBlob = make_shared_blob<ie_fp16>(outputBlob->getTensorDesc());
+        }
+        referenceBlob->allocate();
+
+        //
+        // Initialize blobs: `input`, `indices`, `updates` and `axis`
+        //
+
+        void* inputBlobData = inputBlob->buffer();
+        ASSERT_NE(inputBlobData, nullptr);
+
+        void* indicesBlobData = indicesBlob->buffer();
+        ASSERT_NE(indicesBlobData, nullptr);
+
+        void* updatesBlobData = updatesBlob->buffer();
+        ASSERT_NE(updatesBlobData, nullptr);
+
+        void* axisBlobData = axisBlob->buffer();
+        ASSERT_NE(axisBlobData, nullptr);
+
+        const int indicesLimit = dataShape[axis] - 1;
+
+        fillUniformly(inputBlobData, dataTotal, precision, 0, 50000, m_gen);
+        fillUniformly(updatesBlobData, updatesTotal, precision, 0, 50000, m_gen);
+        fillUniformly(indicesBlobData, updatesTotal, Precision::I32, 0, indicesLimit, m_gen);
+
+        reinterpret_cast<int32_t*>(axisBlobData)[0] = axis;
+
+        //
+        // Infer
+        //
+
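+        // Re-tag all blobs with the device (channel-major) layout before inference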
+        const auto layoutPreference = vpu::LayoutPreference::ChannelMajor;
+
+        const auto inputLayout = inputBlob->getTensorDesc().getLayout();
+        const auto outputLayout = outputBlob->getTensorDesc().getLayout();
+        const auto indicesLayout = indicesBlob->getTensorDesc().getLayout();
+        const auto updatesLayout = updatesBlob->getTensorDesc().getLayout();
+
+        inputBlob->getTensorDesc().setLayout(vpu::deviceLayout(inputLayout, layoutPreference));
+        indicesBlob->getTensorDesc().setLayout(vpu::deviceLayout(indicesLayout, layoutPreference));
+        updatesBlob->getTensorDesc().setLayout(vpu::deviceLayout(updatesLayout, layoutPreference));
+        outputBlob->getTensorDesc().setLayout(vpu::deviceLayout(outputLayout, layoutPreference));
+        referenceBlob->getTensorDesc().setLayout(vpu::deviceLayout(outputLayout, layoutPreference));
+
+        ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        //
+        // Check result
+        //
+
+        ref_scatter_elements_update(inputBlob, indicesBlob, updatesBlob, axis,
+                                    referenceBlob);
+
+        // CompareCommonExact(outputBlob, referenceBlob) is deliberately not used here:
+        // the element-wise loop below makes failures far easier to debug
+
+        int errors = 0;
+
+        const void* outputData = outputBlob->cbuffer();
+        const void* referenceData = referenceBlob->cbuffer();
+
+        const int outputSize = outputBlob->size();
+
+        for (int i = 0; i < outputSize; i++) {
+            double outputValue, referenceValue;
+
+            if (precision == Precision::I32) {
+                outputValue = reinterpret_cast<const int32_t*>(outputData)[i];
+                referenceValue = reinterpret_cast<const int32_t*>(referenceData)[i];
+            } else /* if (precision == Precision::FP16) */ {
+                outputValue = PrecisionUtils::f16tof32(reinterpret_cast<const ie_fp16*>(outputData)[i]);
+                referenceValue = PrecisionUtils::f16tof32(reinterpret_cast<const ie_fp16*>(referenceData)[i]);
+            }
+
+            if (outputValue != referenceValue) {
+                if (errors++ < 25) {
+                    std::cout << "error: index=" << index1DtoND(i, dataShape)
+                              << ", outputValue=" << outputValue
+                              << ", referenceValue=" << referenceValue
+                              << std::endl;
+                }
+            }
+        }
+
+        ASSERT_EQ(errors, 0);
+    }
+
+private:
+
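+    // Convert a linear element index into ND coordinates within `shape`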
+    static
+    std::vector<int> index1DtoND(const int index1D,
+                                 const std::vector<int>& shape) {
+        int value = index1D;
+        const int ndims = shape.size();
+        std::vector<int> indexND(ndims);
+        for (int i = ndims - 1; i >= 0; i--) {
+            const int digit = value % shape[i];
+                      value = value / shape[i];
+            indexND[i] = digit;
+        }
+        return indexND;
+    }
+
+    // Count total number of elements in ND tensor
+    static
+    int getTotal(const std::vector<int>& shape) {
+        return std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>());
+    }
+
+    // Fill data[] array with random numbers
+    // distributed uniformly in the interval [a,b]
+    static
+    void fillUniformly(void* data,
+                       const int num,
+                       const Precision& precision,
+                       const double a,
+                       const double b,
+                       std::mt19937& gen) {
+        if (Precision::FP16 == precision) {
+            std::uniform_real_distribution<float> uniform(a, b);
+            for (int i = 0; i < num; i++) {
+                const float v = uniform(gen);
+                reinterpret_cast<ie_fp16*>(data)[i] = PrecisionUtils::f32tof16(v);
+            }
+        } else if (Precision::I32 == precision) {
+            const int ia = static_cast<int>(std::round(a));
+            const int ib = static_cast<int>(std::round(b));
+            std::uniform_int_distribution<int> uniform(ia, ib);
+            for (int i = 0; i < num; i++) {
+                const int v = uniform(gen);
+                reinterpret_cast<int32_t*>(data)[i] = v;
+            }
+        } else {
+            IE_ASSERT(precision == Precision::I32 ||
+                      precision == Precision::FP16);
+        }
+    }
+
+    // Note: the IR version is v7 (ideally it would be v10) because the
+    // readNetwork() method refuses to parse IR v10 models without a weights tensor
+    static
+    std::string createModel(const std::string     & dataType,
+                            const std::vector<int>& dataShape,
+                            const std::vector<int>& updatesShape) {
+        std::string model = R"V0G0N(
+            <?xml version="1.0" ?>
+            <net name="testScatterElementsUpdate" version="7">
+                <layers>
+                    <layer id="0" name="input" type="Input">
+                        <output>
+                            <port id="0" precision="__TYPE__">
+                                __INPUT_DIMS__
+                            </port>
+                        </output>
+                    </layer>
+                    <layer id="1" name="indices" type="Input">
+                        <output>
+                            <port id="0" precision="I32">
+                                __INDICES_DIMS__
+                            </port>
+                        </output>
+                    </layer>
+                    <layer id="2" name="updates" type="Input">
+                        <output>
+                            <port id="0" precision="__TYPE__">
+                                __UPDATES_DIMS__
+                            </port>
+                        </output>
+                    </layer>
+                    <layer id="3" name="axis" type="Input">
+                        <output>
+                            <port id="0" precision="I32">
+                                <dim>1</dim>
+                            </port>
+                        </output>
+                    </layer>
+                    <layer id="4" name="scatter" type="ScatterElementsUpdate">
+                        <input>
+                            <port id="0" precision="__TYPE__">
+                                __INPUT_DIMS__
+                            </port>
+                            <port id="1" precision="I32">
+                                __INDICES_DIMS__
+                            </port>
+                            <port id="2" precision="__TYPE__">
+                                __UPDATES_DIMS__
+                            </port>
+                            <port id="3" precision="I32">
+                                <dim>1</dim>
+                            </port>
+                        </input>
+                        <output>
+                            <port id="4" precision="__TYPE__">
+                                __OUTPUT_DIMS__
+                            </port>
+                        </output>
+                    </layer>
+                </layers>
+                <edges>
+                    <edge from-layer="0" from-port="0" to-layer="4" to-port="0"/>
+                    <edge from-layer="1" from-port="0" to-layer="4" to-port="1"/>
+                    <edge from-layer="2" from-port="0" to-layer="4" to-port="2"/>
+                    <edge from-layer="3" from-port="0" to-layer="4" to-port="3"/>
+                </edges>
+            </net>
+        )V0G0N";
+
+        const std::string dataDimsStr = shapeToDimsString(dataShape);
+        const std::string updatesDimsStr = shapeToDimsString(updatesShape);
+        REPLACE_WITH_STR(model, "__INPUT_DIMS__", dataDimsStr);
+        REPLACE_WITH_STR(model, "__OUTPUT_DIMS__", dataDimsStr);
+        REPLACE_WITH_STR(model, "__INDICES_DIMS__", updatesDimsStr);
+        REPLACE_WITH_STR(model, "__UPDATES_DIMS__", updatesDimsStr);
+        REPLACE_WITH_STR(model, "__TYPE__", dataType);
+
+        return model;
+    }
+
+    static
+    std::string shapeToDimsString(const std::vector<int>& shape)
+    {
+        std::string str;
+        for (int i = 0; i < shape.size(); i++) {
+            str += (i? " ": "");
+            str += "<dim>" + std::to_string(shape[i]) + "</dim>";
+        }
+        return str;
+    }
+
+private:
+    std::mt19937 m_gen;
+};
+
+TEST_P(myriadLayersScatterElementsUpdateTest_nightly, accuracy) {
+    testScatterElementsUpdate();
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_scatter_update_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_scatter_update_test.cpp
new file mode 100644 (file)
index 0000000..1d3d7af
--- /dev/null
@@ -0,0 +1,69 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_scatter_update_test.hpp"
+
+using namespace testing;
+
+//----------------------------------------------------------------------
+//
+// Multi-dimensional input/output and other tensors
+//
+//----------------------------------------------------------------------
+
+INSTANTIATE_TEST_CASE_P(
+    nd_tensors,
+    myriadLayersScatterUpdateTest_nightly,
+    Values(
+        //  1-dimensional `indices`
+        ScatterUpdateTestParams { { 1000 }, { 100000 } },
+        ScatterUpdateTestParams { { 105 },  { 351, 299 } },
+        ScatterUpdateTestParams { { 17 },   { 48, 55, 39 } },
+        ScatterUpdateTestParams { { 10 },   { 23, 14, 19, 17 } },
+        ScatterUpdateTestParams { { 7 },    { 10, 9, 11, 8, 13 } },
+        ScatterUpdateTestParams { { 6 },    { 9, 5, 11, 7, 5, 6 } },
+        ScatterUpdateTestParams { { 5 },    { 7, 6, 5, 7, 6, 3, 4 } },
+        ScatterUpdateTestParams { { 3 },    { 5, 3, 5, 7, 3, 4, 6, 3 } },
+        //  2-dimensional `indices`
+        ScatterUpdateTestParams { { 35, 29 }, { 100000 } },
+        ScatterUpdateTestParams { { 13, 9 },  { 351, 299 } },
+        ScatterUpdateTestParams { { 5, 3 },   { 48, 55, 39 } },
+        ScatterUpdateTestParams { { 3, 3 },   { 23, 14, 19, 17 } },
+        ScatterUpdateTestParams { { 3, 2 },   { 10, 9, 11, 8, 13 } },
+        ScatterUpdateTestParams { { 3, 2 },   { 9, 5, 11, 7, 5, 6 } },
+        ScatterUpdateTestParams { { 2, 2 },   { 7, 6, 5, 7, 6, 3, 4 } },
+        //  3-dimensional `indices`
+        ScatterUpdateTestParams { { 13, 11, 7 }, { 100000 } },
+        ScatterUpdateTestParams { { 5, 7, 3 },   { 351, 299 } },
+        ScatterUpdateTestParams { { 5, 2, 2 },   { 48, 55, 39 } },
+        ScatterUpdateTestParams { { 3, 2, 2 },   { 23, 14, 19, 17 } },
+        ScatterUpdateTestParams { { 2, 2, 2 },   { 10, 9, 11, 8, 13 } },
+        ScatterUpdateTestParams { { 2, 2, 2 },   { 9, 5, 11, 7, 5, 6 } }
+    )
+);
+
+//----------------------------------------------------------------------
+//
+// Real-life (or similar) test cases
+//
+//----------------------------------------------------------------------
+
+INSTANTIATE_TEST_CASE_P(
+    use_cases,
+    myriadLayersScatterUpdateTest_nightly,
+    Values(
+        // use case from Mask R-CNN: N = 1000, C = 256, HxW = 7x7
+        ScatterUpdateTestParams { { 32 },      { 1000, 256, 7, 7} },
+        ScatterUpdateTestParams { { 5, 6 },    { 1000, 256, 7, 7} },
+        ScatterUpdateTestParams { { 5, 3, 2 }, { 1000, 256, 7, 7} },
+        // large 1D copy: N=1 (hidden), C=64, D=40, H = W = 112
+        ScatterUpdateTestParams { { 32 },      { 64, 40, 112, 112 } },
+        ScatterUpdateTestParams { { 5, 6 },    { 64, 40, 112, 112 } },
+        ScatterUpdateTestParams { { 5, 3, 2 }, { 64, 40, 112, 112 } },
+        // many planes for 3D copy: N=16, C=512, H=W=56
+        ScatterUpdateTestParams { { 12 },      { 16, 512, 56, 56 } },
+        ScatterUpdateTestParams { { 3, 4 },    { 16, 512, 56, 56 } },
+        ScatterUpdateTestParams { { 3, 2, 2 }, { 16, 512, 56, 56 } }
+    )
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_scatter_update_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_scatter_update_test.hpp
new file mode 100644 (file)
index 0000000..6685bbe
--- /dev/null
@@ -0,0 +1,464 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+//
+//  The Scatter Update layer is similar to the TensorFlow scatter_update
+//  operation, except that we allow an additional `axis` parameter:
+//
+//  https://tensorflow.org/versions/r1.15/api_docs/python/tf/scatter_update
+//
+//  Currently only axis == 0 is supported for our Scatter Update layer,
+//  so it literally implements this TensorFlow operation.
+//
+//  For example, the tensor shapes could be:
+//  -    {N, C, H, W} for `input` and `output`
+//  - {I, J, C, H, W} for `updates` tensor
+//  - {I, J} for `indices`
+//
+//  Given some (i, j), the Scatter Update would copy the subtensor
+//  `updates(i, j, :, :, :)` into the `output(n, :, :, :)`, where
+//  `n = indices(i, j)`.
+//
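+//  For example, with `indices` of shape {2, 3}, `updates` must have shape
+//  {2, 3, C, H, W}, and the six subtensors `updates(i, j, :, :, :)` overwrite
+//  the corresponding rows of `output`.
+//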
+
+#include <myriad_layers_tests.hpp>
+#include <vpu_case_common.hpp>
+
+#include <algorithm>
+#include <random>
+#include <vector>
+
+#include <cmath>
+#include <cstring>
+
+#define DEBUG 0
+
+using namespace InferenceEngine;
+
+using InputShape   = SizeVector;
+using IndicesShape = SizeVector;
+
+using ScatterUpdateTestParams = std::tuple<IndicesShape,
+                                           InputShape>;
+
+class myriadLayersScatterUpdateTest_nightly:
+    public myriadLayerTestBaseWithParam<ScatterUpdateTestParams>
+{
+protected:
+
+    void testScatterUpdate() {
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+        //
+        // Get and verify test parameters, and deduce other parameters
+        //
+
+        const auto& params = GetParam();
+
+        const SizeVector indicesShape = std::get<0>(params);
+        const SizeVector   inputShape = std::get<1>(params);
+
+        const int indicesNDims = indicesShape.size();
+        const int   inputNDims =   inputShape.size();
+
+        const int axis = 0;
+
+        IE_ASSERT(inputNDims > 0);
+        IE_ASSERT(indicesNDims > 0);
+
+        // Exclude the test if the input tensor is too large for a device with
+        // less than 2 GB of RAM, i.e. for any board except the 2085
+        bool tooLarge = getTotal(inputShape) > 25 * 1000 * 1000;
+        DISABLE_IF(tooLarge && !CheckMA2085());
+
+        SizeVector outputShape = inputShape;  // copy
+        const int outputNDims = inputNDims;
+
+        SizeVector axisShape = { 1 };
+        const int axisNDims = 1;
+
+        // E.g.:
+        //    {N, C, H, W} could be shape of `input` and `output`
+        // {I, J, C, H, W} could be shape of `update` tensor
+        // {I, J}          could be shape of `indices`
+        SizeVector updatesShape = indicesShape;
+        for (int i = 0; i < outputNDims - 1; i++) {
+            updatesShape.push_back(outputShape[i + 1]);
+        }
+        const int updatesNDims = updatesShape.size();
+
+        //
+        // Initialize input tensors, and compute reference output
+        //
+
+        const int inputTotal = getTotal(inputShape);
+        const int outputTotal = getTotal(outputShape);
+        const int indicesTotal = getTotal(indicesShape);
+        const int updatesTotal = getTotal(updatesShape);
+        const int axisTotal = getTotal(axisShape);
+
+        std::vector<ie_fp16> inputData(inputTotal);
+        std::vector<ie_fp16> outputData(outputTotal);
+        std::vector<int32_t> indicesData(indicesTotal);
+        std::vector<ie_fp16> updatesData(updatesTotal);
+        std::vector<int32_t> axisData(axisTotal);
+
+        std::mt19937 gen;
+
+        fillUniformly(inputData.data(), inputTotal, Precision::FP16, 0, 255, gen);
+        fillUniformly(updatesData.data(), updatesTotal, Precision::FP16, -1, +1, gen);
+
+        const int indicesLimit = outputShape[0] - 1;
+        fillUniformly(indicesData.data(), indicesTotal, Precision::I32, 0, indicesLimit, gen);
+
+        axisData[0] = 0;  // only axis == 0 is supported so far
+
+        referenceScatterUpdate(inputShape,
+                               outputShape,
+                               indicesShape,
+                               updatesShape,
+                               axisShape,
+                               inputData,
+                               outputData,
+                               indicesData,
+                               updatesData,
+                               axisData);
+
+        //
+        // Initialize 1-layer network, and infer
+        //
+
+        std::string model = createModel(inputShape,
+                                        outputShape,
+                                        indicesShape,
+                                        updatesShape);
+        #if DEBUG
+        std::cout << "model:\n" << model << "\n";
+        #endif
+
+        ASSERT_NO_THROW(readNetwork(model));
+
+        const auto& network = _cnnNetwork;
+
+        _inputsInfo = network.getInputsInfo();
+        _inputsInfo["input"]->setPrecision(Precision::FP16);
+        _inputsInfo["indices"]->setPrecision(Precision::I32);
+        _inputsInfo["updates"]->setPrecision(Precision::FP16);
+        _inputsInfo["axis"]->setPrecision(Precision::I32);
+
+        _outputsInfo = network.getOutputsInfo();
+        _outputsInfo["scatter_update"]->setPrecision(Precision::FP16);
+
+        StatusCode st = OK;
+
+        ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, _config, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+        ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr inputBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("input", inputBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        void* inputBlobData = inputBlob->buffer();
+        ASSERT_NE(inputBlobData, nullptr);
+        std::copy(inputData.cbegin(), inputData.cend(), reinterpret_cast<ie_fp16*>(inputBlobData));
+
+        Blob::Ptr indicesBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("indices", indicesBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        void* indicesBlobData = indicesBlob->buffer();
+        ASSERT_NE(indicesBlobData, nullptr);
+        std::copy(indicesData.cbegin(), indicesData.cend(), reinterpret_cast<int32_t*>(indicesBlobData));
+
+        Blob::Ptr updatesBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("updates", updatesBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        void* updatesBlobData = updatesBlob->buffer();
+        ASSERT_NE(updatesBlobData, nullptr);
+        std::copy(updatesData.cbegin(), updatesData.cend(), reinterpret_cast<ie_fp16*>(updatesBlobData));
+
+        Blob::Ptr axisBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("axis", axisBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        void* axisBlobData = axisBlob->buffer();
+        ASSERT_NE(axisBlobData, nullptr);
+        std::copy(axisData.cbegin(), axisData.cend(), reinterpret_cast<int32_t*>(axisBlobData));
+
+        ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr outputBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("scatter_update", outputBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        const void* outputBlobDataPtr = outputBlob->cbuffer();
+        const ie_fp16* outputBlobData = reinterpret_cast<const ie_fp16*>(outputBlobDataPtr);
+        ASSERT_NE(outputBlobData, nullptr);
+
+        //
+        // Check result
+        //
+
+        int errors = 0;
+
+        // cycle over `output` coordinates
+        SizeVector outputCoord(outputNDims, 0);
+        do {
+            const int outputOffset = offsetByCoord(outputCoord.data(), outputShape.data(), outputNDims);
+
+            const float result = PrecisionUtils::f16tof32(outputBlobData[outputOffset]);
+            const float reference = PrecisionUtils::f16tof32(outputData[outputOffset]);
+            const float diff = result - reference;
+
+            if (diff != 0) {
+                if (errors++ < 25) {
+                    std::cout << "error:"
+                        << " outputCoord=" << to_string(outputCoord)
+                        << " result=" << result
+                        << " reference=" << reference
+                        << " diff=" << diff
+                        << std::endl;
+                }
+            }
+        } while (nextCoord(outputCoord.data(), outputShape.data(), outputNDims));
+
+        if (errors > 0) {
+            std::cout << "errors: " << errors << std::endl;
+        }
+
+        ASSERT_EQ(errors, 0);
+    }
+
+private:
+
+    static
+    void referenceScatterUpdate(const      SizeVector     & inputShape,
+                                const      SizeVector     & outputShape,
+                                const      SizeVector     & indicesShape,
+                                const      SizeVector     & updatesShape,
+                                const      SizeVector     & axisShape,
+                                const std::vector<ie_fp16>& inputData,
+                                      std::vector<ie_fp16>& outputData,
+                                const std::vector<int32_t>& indicesData,
+                                const std::vector<ie_fp16>& updatesData,
+                                const std::vector<int32_t>& axisData) {
+        // only axis == 0 is supported so far
+        IE_ASSERT(axisShape.size() == 1);
+        IE_ASSERT(axisShape[0] == 1);
+        IE_ASSERT(axisData[0] == 0);
+
+        // copy `input` to `output`
+        const int inputTotal = getTotal(inputShape);
+        const int outputTotal = getTotal(outputShape);
+        IE_ASSERT(inputTotal == outputTotal);
+        std::copy(inputData.cbegin(), inputData.cend(), outputData.begin());
+
+        const int outputNDims = outputShape.size();
+        SizeVector outputCoord(outputNDims, 0);
+
+        // cycle over indices of `updates` tensor
+        const int updatesNDims = updatesShape.size();
+        SizeVector updatesCoord(updatesNDims, 0);
+        do {
+            const int indicesNDims = indicesShape.size();
+            const size_t* indicesCoord = updatesCoord.data();
+            const int indicesOffset = offsetByCoord(indicesCoord, indicesShape.data(), indicesNDims);
+            const int n = indicesData[indicesOffset];
+
+            const int axis = 0;
+            IE_ASSERT(0 <= n && n < outputShape[axis]);
+
+            for (int i = 0; i < outputNDims - 1; i++) {
+                outputCoord[i + 1] = updatesCoord[i + indicesNDims];
+            }
+            outputCoord[0] = n;
+
+            const int outputOffset = offsetByCoord(outputCoord.data(), outputShape.data(), outputNDims);
+            const int updatesOffset = offsetByCoord(updatesCoord.data(), updatesShape.data(), updatesNDims);
+
+            const ie_fp16 value = updatesData[updatesOffset];
+            outputData[outputOffset] = value;
+        } while (nextCoord(updatesCoord.data(), updatesShape.data(), updatesNDims));
+    }
+
+    static
+    std::string to_string(const SizeVector& v) {
+        std::stringstream s;
+        s << "{";
+        for (int i = 0; i < v.size(); i++) {
+            s << (i? ", ": "") << v[i];
+        }
+        s << "}";
+        return s.str();
+    }
+
+    static
+    bool nextCoord(size_t coord[],
+             const size_t shape[],
+                   int    nDims) {
+        // let W's index change quicker than H's:
+        // note that dims order is like ..., H, W
+        for (int i = nDims - 1; i >= 0; i--) {
+            if (++coord[i] < shape[i])
+                return true;
+            coord[i] = 0;
+        }
+        return false; // cannot get next indices
+    }
+
+    // Get element offset by ND coordinates
+    static
+    int offsetByCoord(const size_t coord[],
+                      const size_t shape[],
+                      const int    ndims) {
+        int offset = 0;
+        int stride = 1;
+        for (int i = ndims - 1; i >= 0; i--) {
+            offset += coord[i] * stride;
+            stride *= shape[i];
+        }
+        return offset;
+    }
+
+    // Count total number of elements in ND tensor
+    static
+    int getTotal(const SizeVector& shape) {
+        return std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>());
+    }
+
+    // Fill data[] array with random numbers
+    // distributed uniformly in the interval [a,b]
+    static
+    void fillUniformly(void* data,
+                       const int num,
+                       const Precision& precision,
+                       const double a,
+                       const double b,
+                       std::mt19937& gen) {
+        if (Precision::FP16 == precision) {
+            std::uniform_real_distribution<float> uniform(a, b);
+            for (int i = 0; i < num; i++) {
+                const float v = uniform(gen);
+                reinterpret_cast<ie_fp16*>(data)[i] = PrecisionUtils::f32tof16(v);
+            }
+        } else
+        if (Precision::I32 == precision) {
+            const int ia = static_cast<int>(std::round(a));
+            const int ib = static_cast<int>(std::round(b));
+            std::uniform_int_distribution<int> uniform(ia, ib);
+            for (int i = 0; i < num; i++) {
+                const int v = uniform(gen);
+                reinterpret_cast<int32_t*>(data)[i] = v;
+            }
+        } else {
+            IE_ASSERT(Precision::FP16 == precision ||
+                        Precision::I32  == precision);
+        }
+    }
+
+    // Note: the IR version is v7 (ideally it would be v10) because the
+    // readNetwork() method refuses to parse IR v10 models without a weights tensor
+    static
+    std::string createModel(const SizeVector& inputShape,
+                            const SizeVector& outputShape,
+                            const SizeVector& indicesShape,
+                            const SizeVector& updatesShape) {
+        std::string model = R"V0G0N(
+            <?xml version="1.0" ?>
+            <net name="testScatterUpdate" version="7">
+                <layers>
+                    <layer id="0" name="input" type="Input">
+                        <output>
+                            <port id="0" precision="FP16">
+                                __INPUT_DIMS__
+                            </port>
+                        </output>
+                    </layer>
+                    <layer id="1" name="indices" type="Input">
+                        <output>
+                            <port id="0" precision="I32">
+                                __INDICES_DIMS__
+                            </port>
+                        </output>
+                    </layer>
+                    <layer id="2" name="updates" type="Input">
+                        <output>
+                            <port id="0" precision="FP16">
+                                __UPDATES_DIMS__
+                            </port>
+                        </output>
+                    </layer>
+                    <layer id="3" name="axis" type="Input">
+                        <output>
+                            <port id="0" precision="I32">
+                                <dim>1</dim>
+                            </port>
+                        </output>
+                    </layer>
+                    <layer id="4" name="scatter_update" type="ScatterUpdate">
+                        <input>
+                            <port id="0" precision="FP16">
+                                __INPUT_DIMS__
+                            </port>
+                            <port id="1" precision="I32">
+                                __INDICES_DIMS__
+                            </port>
+                            <port id="2" precision="FP16">
+                                __UPDATES_DIMS__
+                            </port>
+                            <port id="3" precision="I32">
+                                <dim>1</dim>
+                            </port>
+                        </input>
+                        <output>
+                            <port id="4" precision="FP16">
+                                __OUTPUT_DIMS__
+                            </port>
+                        </output>
+                    </layer>
+                </layers>
+                <edges>
+                    <edge from-layer="0" from-port="0" to-layer="4" to-port="0"/>
+                    <edge from-layer="1" from-port="0" to-layer="4" to-port="1"/>
+                    <edge from-layer="2" from-port="0" to-layer="4" to-port="2"/>
+                    <edge from-layer="3" from-port="0" to-layer="4" to-port="3"/>
+                </edges>
+            </net>
+        )V0G0N";
+
+        const std::string inputDimsStr = shapeToDimsString(inputShape);
+        const std::string outputDimsStr = shapeToDimsString(outputShape);
+        const std::string indicesDimsStr = shapeToDimsString(indicesShape);
+        const std::string updatesDimsStr = shapeToDimsString(updatesShape);
+        REPLACE_WITH_STR(model, "__INPUT_DIMS__", inputDimsStr);
+        REPLACE_WITH_STR(model, "__OUTPUT_DIMS__", outputDimsStr);
+        REPLACE_WITH_STR(model, "__INDICES_DIMS__", indicesDimsStr);
+        REPLACE_WITH_STR(model, "__UPDATES_DIMS__", updatesDimsStr);
+
+        return model;
+    }
+
+    static
+    std::string shapeToDimsString(const SizeVector& shape)
+    {
+        std::string str;
+        for (int i = 0; i < shape.size(); i++) {
+            str += (i? " ": "");
+            str += "<dim>" + std::to_string(shape[i]) + "</dim>";
+        }
+        return str;
+    }
+};
+
+TEST_P(myriadLayersScatterUpdateTest_nightly, accuracy) {
+    testScatterUpdate();
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_select_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_select_test.cpp
new file mode 100644 (file)
index 0000000..d5db411
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_select_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsSelect_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_eltwiseTensors),
+        ::testing::ValuesIn(s_eltwiseDims))
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_select_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_select_test.hpp
new file mode 100644 (file)
index 0000000..560728b
--- /dev/null
@@ -0,0 +1,140 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include <functional>
+#include <algorithm>
+#include <string>
+#include "myriad_layers_reference_functions.hpp"
+
+using namespace InferenceEngine;
+
+PRETTY_PARAM(NDims, nd_tensor_test_params);
+
+auto refSelect = [](const float a, const float b, const float c) noexcept {
+    return (a != 0) ? b : c;
+};
+
+typedef float (*kernel)(const float a, const float b, const float c);
+
+void genRandomDataLogic(Blob::Ptr blob);
+void getCoord(uint32_t nSubspace, SizeVector dims, uint32_t subspaceCoord[]);
+int getNum(uint32_t subspaceDims[], SizeVector dims);
+SizeVector convertDims(SizeVector dims);
+
+class myriadLayersTestsSelectBase: public myriadLayersTests_nightly {
+protected:
+    void RefSelect()
+    {
+        auto itr = _inputMap.begin();
+        int coeff_num = 0;
+        const uint16_t *srcData = itr->second->buffer().as<const uint16_t*>();
+        uint16_t *dstData = _refBlob->buffer().as<uint16_t*>();
+        uint32_t src_coords[4];
+        SizeVector refDims = convertDims(_refBlob->getTensorDesc().getDims());
+        SizeVector itrDims = convertDims(itr->second->getTensorDesc().getDims());
+
+        itr++;
+        ASSERT_NE(itr, _inputMap.end());
+        const uint16_t *src1Data = itr->second->buffer().as<const uint16_t*>();
+        SizeVector itr1Dims = convertDims(itr->second->getTensorDesc().getDims());
+        itr++;
+        ASSERT_NE(itr, _inputMap.end());
+        const uint16_t *src2Data = itr->second->buffer().as<const uint16_t*>();
+        SizeVector itr2Dims = convertDims(itr->second->getTensorDesc().getDims());
+        itr++;
+        ASSERT_EQ(itr, _inputMap.end());
+
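+        // Emulate broadcasting per element: a coordinate that exceeds an
+        // input's dimension is reset to 0 for that input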
+        for (int i = 0; i < _refBlob->size(); i++) {
+            getCoord(i, refDims, src_coords);
+
+            uint32_t src1_coords[4], src2_coords[4];
+            for (int c = 0; c < refDims.size(); c++) {
+                src2_coords[c] = src1_coords[c] = src_coords[c];
+                if (src_coords[c] >= itrDims[c])
+                    src_coords[c] = 0;
+                if (src1_coords[c] >= itr1Dims[c])
+                    src1_coords[c] = 0;
+                if (src2_coords[c] >= itr2Dims[c])
+                    src2_coords[c] = 0;
+            }
+
+            int src_i = getNum(src_coords, itrDims);
+            int src1_i = getNum(src1_coords, itr1Dims);
+            int src2_i = getNum(src2_coords, itr2Dims);
+
+            float val = refSelect(PrecisionUtils::f16tof32(srcData[src_i]),
+                                  PrecisionUtils::f16tof32(src1Data[src1_i]),
+                                  PrecisionUtils::f16tof32(src2Data[src2_i]));
+            dstData[i] = PrecisionUtils::f32tof16(val);
+        }
+    }
+
+    nd_tensor_test_params _p;
+    std::map<std::string, std::string> _params;
+
+};
+
+class SelectTest : public myriadLayersTestsSelectBase,
+                   public testing::WithParamInterface<std::tuple<NDims, int>> {
+protected:
+    virtual void InitBody()
+    {
+        const float ERROR_BOUND = 8.4e-3f;
+
+        _params.clear();
+        auto params = GetParam();
+        _p = std::get<0>(params);
+        int ndims = std::get<1>(params);
+        int count = 3; // Select supports exactly 3 inputs
+
+        InferenceEngine::SizeVector dims;
+        dims.resize(ndims);
+        for (int i = 0; i < ndims; i++)
+            dims[i] = _p.dims[i];
+
+        IN_OUT_desc inpt(count);
+        for (int i = 0; i < count; ++i) {
+            inpt[i] = dims;
+        }
+
+        SetInputTensors(inpt);
+        SetOutputTensors({dims});
+
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+        ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Select").params(_params)));
+        ASSERT_TRUE(Infer());
+
+        ASSERT_NO_FATAL_FAILURE(RefSelect());
+        ASSERT_EQ(_outputMap.size(), 1);
+
+        CompareCommonAbsolute(_outputMap.begin()->second, _refBlob, ERROR_BOUND);
+    }
+};
+
+class myriadTestsSelect_nightly: public SelectTest
+{
+    void SetUp() override {
+        SelectTest::SetUp();
+        _genDataCallback0 = genRandomDataLogic;
+    }
+};
+
+TEST_P(myriadTestsSelect_nightly, Select)
+{
+    InitBody();
+}
+
+static std::vector<NDims> s_eltwiseTensors = {
+        {{3, 2, 14, 32}},
+        {{5, 4, 8, 16}},
+        {{2, 16, 16, 8}},
+};
+
+static std::vector<int> s_eltwiseDims = {
+        2, 3, 4
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_sigmoid_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_sigmoid_test.cpp
new file mode 100644 (file)
index 0000000..8d79d80
--- /dev/null
@@ -0,0 +1,44 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_sigmoid_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        accuracy, myriadLayersTestsSigmoid_nightly,
+        ::testing::ValuesIn(s_sigmoidParams));
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsMaxPoolingWithSigmoid_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(g_poolingInput),
+        ::testing::ValuesIn(g_poolingLayerParamsLite),
+        ::testing::ValuesIn(g_poolingLayout))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsAvgPoolingWithSigmoid_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(g_poolingInput),
+        ::testing::ValuesIn(g_poolingLayerParamsLite),
+        ::testing::ValuesIn(g_poolingLayout))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayerConvolutionWithSigmoid_nightly,
+        ::testing::Combine(
+            ::testing::ValuesIn(g_convolutionTensors)
+          , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<uint32_t>(16)
+          , ::testing::Values<uint32_t>(1)
+          , ::testing::Values<IRVersion>(IRVersion::v7, IRVersion::v10)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(
+    accuracy, myriadLayerFullyConnectedWithSigmoid_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(g_fcTestParamsSubset),
+        ::testing::Values(g_dimensionsFC[0]),
+        ::testing::ValuesIn(g_addBiasFC)
+    )
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_sigmoid_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_sigmoid_test.hpp
new file mode 100644 (file)
index 0000000..dc25cb6
--- /dev/null
@@ -0,0 +1,111 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include "myriad_layers_reference_functions.hpp"
+#include <cmath>
+#include <algorithm>
+
+#define BOUND (10.0f)
+#define ERROR_BOUND (1.e-3f)
+#define ERROR_BOUND_WITH_SIGMOID (1.e-3f)
+
+using namespace InferenceEngine;
+
+class myriadLayersTestsSigmoid_nightly: public myriadLayersTests_nightly,
+                           public testing::WithParamInterface<InferenceEngine::SizeVector> {
+public:
+};
+
+TEST_P(myriadLayersTestsSigmoid_nightly, TestsSigmoid)
+{
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+    SizeVector p = GetParam();
+    SetInputTensors({p});
+    SetOutputTensors({p});
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Sigmoid")));
+    SetFirstInputToRange(-BOUND, BOUND);
+    ASSERT_TRUE(Infer());
+
+    /* output check */
+    ref_sigmoid(_inputMap.begin()->second, _refBlob);
+    CompareCommonAbsolute(_outputMap.begin()->second, _refBlob, ERROR_BOUND);
+}
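+
+// Hedged illustration of what ref_sigmoid computes per element (the real
+// reference operates on FP16 blobs and is declared in
+// myriad_layers_reference_functions.hpp): the logistic function.
+static inline float sigmoid_scalar_sketch(float x) {
+    return 1.0f / (1.0f + std::exp(-x));  // 1 / (1 + e^-x)
+}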
+
+static std::vector<InferenceEngine::SizeVector> s_sigmoidParams = {
+    {{3, 1, 16, 16}},
+    {{3, 2, 16, 16}},
+    {{3, 3, 16, 16}},
+    {{3, 1, 53, 16}},
+    {{3, 2, 53, 16}},
+    {{3, 3, 53, 16}},
+    {{4, 4, 1, 224, 224}},
+    {{4, 4, 2, 224, 224}},
+    {{4, 4, 3, 224, 224}},
+    {{1, 224, 235}},
+    {{2, 224, 235}},
+    {{3, 224, 235}},
+    {{1, 1, 277, 230}},
+    {{1, 2, 277, 230}},
+    {{1, 3, 277, 230}}
+};
+
+class myriadLayersTestsMaxPoolingWithSigmoid_nightly: public PoolingTest<POOLING_MAX>{
+};
+
+class myriadLayersTestsAvgPoolingWithSigmoid_nightly: public PoolingTest<POOLING_AVG>{
+};
+
+TEST_P(myriadLayersTestsMaxPoolingWithSigmoid_nightly, TestsMaxPoolingWithSigmoid)
+{
+    _testNet.addLayer(LayerInitParams("Sigmoid")
+             .in({_output_tensor})
+             .out({_output_tensor}),
+             ref_sigmoid_wrap);
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), ERROR_BOUND_WITH_SIGMOID);
+}
+
+TEST_P(myriadLayersTestsAvgPoolingWithSigmoid_nightly, TestsAvgPoolingWithSigmoid)
+{
+    _testNet.addLayer(LayerInitParams("Sigmoid")
+             .in({_output_tensor})
+             .out({_output_tensor}),
+             ref_sigmoid_wrap);
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), ERROR_BOUND_WITH_SIGMOID);
+}
+
+class myriadLayerConvolutionWithSigmoid_nightly: public ConvolutionTest<IRVersion>{
+};
+
+TEST_P(myriadLayerConvolutionWithSigmoid_nightly, Convolution) {
+    _irVersion = std::get<6>(GetParam());
+    _testNet.addLayer(LayerInitParams("Sigmoid")
+             .in({_output_tensor})
+             .out({_output_tensor}),
+             ref_sigmoid_wrap);
+
+    float maxerr = 0;
+    if (group == 1)
+        maxerr = 0.00055 * IC * kernel.x * kernel.y;
+    else // TODO: depthwise convolution (group > 1) is currently slightly less accurate
+        maxerr = 0.00066 * (IC / group) * kernel.x * kernel.y;
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), maxerr);
+}
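+
+// Worked example of the bound above (illustrative numbers only): with
+// group = 1, IC = 16 input channels and a 3x3 kernel,
+// maxerr = 0.00055 * 16 * 3 * 3 = 0.0792, i.e. the tolerance scales with
+// the number of products accumulated per output element.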
+
+class myriadLayerFullyConnectedWithSigmoid_nightly: public FCTest<>{
+};
+
+TEST_P(myriadLayerFullyConnectedWithSigmoid_nightly, TestsFullyConnected)
+{
+    _testNet.addLayer(LayerInitParams("Sigmoid")
+             .in({_output_tensor})
+             .out({_output_tensor}),
+             ref_sigmoid_wrap);
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), _par.error_bound);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_slice_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_slice_test.cpp
new file mode 100644 (file)
index 0000000..71eae36
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_slice_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsSlice_nightly,
+                        ::testing::Values<SliceTestParams>(
+                                MAKE_STRUCT(SliceParams, {4, 8, 16, 32, 64}, {{4, 8, 16, 10, 64}, {4, 8, 16, 22, 64}}, 3),
+                                MAKE_STRUCT(SliceParams, {4, 8, 16, 32}, {{4, 8, 2, 32}, {4, 8, 14, 32}}, 2))
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_slice_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_slice_test.hpp
new file mode 100644 (file)
index 0000000..6f79476
--- /dev/null
@@ -0,0 +1,60 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+
+using namespace InferenceEngine;
+
+static const float ERROR_BOUND = 0.0f;
+
+struct SliceParams {
+    SizeVector inputDims;
+    IN_OUT_desc outputs;
+    int axis;
+};
+
+PRETTY_PARAM(SliceTestParams, SliceParams);
+
+typedef myriadLayerTestBaseWithParam<SliceTestParams> myriadLayersTestsSlice_nightly;
+
+TEST_P(myriadLayersTestsSlice_nightly, Slice) {
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+    const SliceParams testParams = GetParam();
+
+    const auto inputDims = testParams.inputDims;
+    const auto outputs = testParams.outputs;
+    const auto axis = testParams.axis;
+    std::map<std::string, std::string> layerParams;
+    layerParams["axis"] = std::to_string(axis);
+
+    SetInputTensors({inputDims});
+
+    SetOutputTensors(outputs);
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Slice").params(layerParams)));
+
+    BlobMap refBlobMap;
+    const auto inputBlob = _inputMap.begin()->second;
+
+    const auto layout = vpu::deviceLayout(
+            inputBlob->getTensorDesc().getLayout(),
+            vpu::LayoutPreference::ChannelMajor);
+    inputBlob->getTensorDesc().setLayout(layout);
+
+    for (const auto& item : _outputMap) {
+        item.second->getTensorDesc().setLayout(layout);
+        refBlobMap[item.first] = make_shared_blob<ie_fp16>({
+           Precision::FP16,
+           item.second->getTensorDesc().getDims(),
+           item.second->getTensorDesc().getLayout()
+        });
+        refBlobMap[item.first]->allocate();
+    }
+
+    ASSERT_TRUE(Infer());
+    ref_Split(inputBlob, refBlobMap, axis);
+
+    for (const auto& item : _outputMap)
+        CompareCommonAbsolute(item.second, refBlobMap[item.first], ERROR_BOUND);
+}
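+
+// Worked example from the instantiation in myriad_layers_slice_test.cpp:
+// input {4, 8, 16, 32, 64} sliced along axis 3 into {4, 8, 16, 10, 64} and
+// {4, 8, 16, 22, 64}; the axis-3 extents (10 + 22) sum back to 32 while
+// every other dimension is unchanged.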
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_softmax_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_softmax_test.cpp
new file mode 100644 (file)
index 0000000..236329a
--- /dev/null
@@ -0,0 +1,13 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_softmax_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+    accuracy, myriadLayersTestsSoftMax_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_softMaxTensors)
+      , ::testing::Values<IRVersion>(IRVersion::v7, IRVersion::v10)
+      )
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_softmax_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_softmax_test.hpp
new file mode 100644 (file)
index 0000000..3f27293
--- /dev/null
@@ -0,0 +1,71 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include "myriad_layers_reference_functions.hpp"
+
+using namespace InferenceEngine;
+
+#define ERROR_BOUND (1.e-3f)
+
+typedef struct {
+    int axis;
+    SizeVector sizes;
+} SoftmaxAxisSizes;
+
+void PrintTo(const SoftmaxAxisSizes& p, std::ostream* os) {
+    *os << "axis=" << p.axis << ", sizes=" << testing::PrintToString(p.sizes);
+}
+
+using myriadLayersTestsSoftMaxParams_nightly = myriadLayerTestBaseWithParam<std::tuple<SoftmaxAxisSizes, IRVersion>>;
+
+class myriadLayersTestsSoftMax_nightly: public myriadLayersTestsSoftMaxParams_nightly {
+protected:
+    SoftmaxAxisSizes _testingInput;
+
+    void SetUp() override {
+        myriadLayersTestsSoftMaxParams_nightly::SetUp();
+        _testingInput = std::get<0>(GetParam());
+        _irVersion = std::get<1>(GetParam());
+    }
+};
+
+TEST_P(myriadLayersTestsSoftMax_nightly, TestsSoftMax)
+{
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+    SetInputTensors({_testingInput.sizes});
+    SetOutputTensors({_testingInput.sizes});
+
+    std::map<std::string, std::string> params;
+    params["axis"] = std::to_string(_testingInput.axis);
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Softmax").params(params)));
+
+    ASSERT_TRUE(Infer());
+
+    ASSERT_NO_FATAL_FAILURE(
+        ref_softMax(_inputMap.begin()->second, _refBlob, _testingInput.axis)
+    );
+
+    CompareCommonAbsolute(_outputMap.begin()->second, _refBlob, ERROR_BOUND);
+}
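+
+// Illustrative sketch, not the harness's ref_softMax (which handles FP16
+// blobs and an arbitrary axis): a numerically stable softmax over one
+// contiguous row of n floats, assuming n >= 1.
+#include <algorithm>
+#include <cmath>
+static inline void softmax_row_sketch(const float* in, float* out, size_t n) {
+    float maxVal = *std::max_element(in, in + n);
+    float sum = 0.0f;
+    for (size_t i = 0; i < n; ++i) {
+        out[i] = std::exp(in[i] - maxVal);  // subtract max to avoid overflow
+        sum += out[i];
+    }
+    for (size_t i = 0; i < n; ++i)
+        out[i] /= sum;
+}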
+
+static std::vector<SoftmaxAxisSizes> s_softMaxTensors = {
+        {0, {  10,   91}},
+        {1, {  10,   91}},
+        {0, {5000}},
+        {1, {   1, 1000, 1, 1}},
+        {1, {   1, 1024, 7, 7}},
+        {3, {   1,    7, 7, 1024}},
+        {2, {   1,    1, 32, 32}},
+        {0, {   8,   16, 16}},
+        {1, {   4,   16,  8}},
+        {2, {   3,    2, 16}},
+        {0, {2268,   21}},
+        {1, {  10,   10, 10, 10, 16, 16}},
+        {5, {  10,   10, 10, 10, 16, 16}},
+        {5, {   9,   10, 11, 12, 13,  5, 6}},
+        {6, {   9,   10, 11, 12, 13,  5, 6}},
+        {0, {   9,   10, 11, 12, 13,  5, 6}},
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_split_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_split_test.cpp
new file mode 100644 (file)
index 0000000..45bdd62
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_split_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsSplit_nightly,
+                        ::testing::Values<SplitTestParams>(
+                                MAKE_STRUCT(SplitParams, {4, 8, 16, 32, 64}, 2, 6),
+                                MAKE_STRUCT(SplitParams, {4, 8, 16, 32}, 2, 6),
+                                MAKE_STRUCT(SplitParams, {4, 8, 16}, 1, 6),
+                                MAKE_STRUCT(SplitParams, {4, 8}, 0, 3),
+                                MAKE_STRUCT(SplitParams, {4}, 0, 3)
+                        ));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_split_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_split_test.hpp
new file mode 100644 (file)
index 0000000..bba4236
--- /dev/null
@@ -0,0 +1,71 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+
+using namespace InferenceEngine;
+
+static const float ERROR_BOUND = 0.0f;
+
+struct SplitParams {
+    SizeVector dims;
+    int axis;
+    int numSplit;
+};
+
+PRETTY_PARAM(SplitTestParams, SplitParams);
+
+typedef myriadLayerTestBaseWithParam<SplitTestParams> myriadLayersTestsSplit_nightly;
+
+TEST_P(myriadLayersTestsSplit_nightly, Split) {
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+    const SplitParams testParams = GetParam();
+
+    const auto inputDims = testParams.dims;
+    const auto axis = testParams.axis;
+    const auto numSplit = testParams.numSplit;
+    std::map<std::string, std::string> layerParams;
+    layerParams["axis"] = std::to_string(axis);
+    layerParams["num_split"] = std::to_string(numSplit);
+
+    SetInputTensors({inputDims});
+
+    IN_OUT_desc output;
+    auto dims = inputDims;
+    for (int i = 0; i < numSplit; ++i) {
+        const int begin = (i + 0) * inputDims[axis] / numSplit;
+        const int end   = (i + 1) * inputDims[axis] / numSplit;
+        const int dimSize = end - begin;
+        dims[axis] = dimSize;
+        output.push_back(dims);
+    }
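+    // Note: the integer begin/end arithmetic above spreads any remainder
+    // across the chunks, e.g. splitting an axis of size 16 into
+    // numSplit = 6 pieces gives boundaries 0,2,5,8,10,13,16, i.e. chunk
+    // sizes 2,3,3,2,3,3 (matching the {4, 8, 16, 32} case instantiated in
+    // myriad_layers_split_test.cpp).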
+    SetOutputTensors(output);
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Split").params(layerParams)));
+
+    BlobMap refBlobMap;
+
+    const auto inputBlob = _inputMap.begin()->second;
+    const auto layout = vpu::deviceLayout(
+            inputBlob->getTensorDesc().getLayout(),
+            vpu::LayoutPreference::ChannelMajor);
+    inputBlob->getTensorDesc().setLayout(layout);
+
+    for (const auto& item : _outputMap) {
+        item.second->getTensorDesc().setLayout(layout);
+        refBlobMap[item.first] = make_shared_blob<ie_fp16>(
+                {
+                        Precision::FP16,
+                        item.second->getTensorDesc().getDims(),
+                        item.second->getTensorDesc().getLayout()
+                });
+        refBlobMap[item.first]->allocate();
+    }
+
+    ASSERT_TRUE(Infer());
+    ref_Split(inputBlob, refBlobMap, axis);
+
+    for (const auto& item : _outputMap)
+        CompareCommonAbsolute(item.second, refBlobMap[item.first], ERROR_BOUND);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_squeeze_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_squeeze_test.cpp
new file mode 100644 (file)
index 0000000..c8c2eee
--- /dev/null
@@ -0,0 +1,50 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_squeeze_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsSqueezeTC1,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_squeezeTensorsTC1),
+        ::testing::ValuesIn(s_squeezeIndicesTC1),
+        ::testing::ValuesIn(s_squeezeKeepAtLeast1D),
+        ::testing::ValuesIn(s_squeezeLayoutPreference)
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsSqueezeTC2,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_squeezeTensorsTC2),
+        ::testing::ValuesIn(s_squeezeIndicesTC2),
+        ::testing::ValuesIn(s_squeezeKeepAtLeast1D),
+        ::testing::ValuesIn(s_squeezeLayoutPreference)
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsSqueezeTC3,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_squeezeTensorsTC3),
+        ::testing::ValuesIn(s_squeezeIndicesTC3),
+        ::testing::ValuesIn(s_squeezeKeepAtLeast1D),
+        ::testing::ValuesIn(s_squeezeLayoutPreference)
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsSqueezeTC4,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_squeezeTensorsTC4),
+        ::testing::ValuesIn(s_squeezeIndicesTC4),
+        ::testing::ValuesIn(s_squeezeKeepAtLeast1D),
+        ::testing::ValuesIn(s_squeezeLayoutPreference)
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsSqueezeTC5,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_squeezeTensorsTC5),
+        ::testing::ValuesIn(s_squeezeIndicesTC5),
+        ::testing::ValuesIn(s_squeezeKeepAtLeast1D),
+        ::testing::ValuesIn(s_squeezeLayoutPreference)
+    )
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_squeeze_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_squeeze_test.hpp
new file mode 100644 (file)
index 0000000..fc510eb
--- /dev/null
@@ -0,0 +1,263 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include "ie_layouts.h"
+#include "myriad_layers_tests.hpp"
+#include <vpu/private_plugin_config.hpp>
+#include "myriad_layers_reference_functions.hpp"
+#include "ie_memcpy.h"
+#include "tests_vpu_common.hpp"
+
+using namespace InferenceEngine;
+
+typedef std::vector<int32_t> IndicesVector;
+
+static void ref_squeeze(const InferenceEngine::Blob::Ptr src,
+                        InferenceEngine::Blob::Ptr dst,
+                        const SizeVector output) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+
+    ASSERT_EQ(src->size(), dst->size());
+
+    const ie_fp16 *src_data = src->buffer().as<ie_fp16*>();
+    ie_fp16 *dst_data = dst->buffer().as<ie_fp16*>();
+
+    size_t src_size = src->size();
+    size_t dst_size = dst->size();
+
+    ASSERT_NE(src_data, nullptr);
+    ASSERT_NE(dst_data, nullptr);
+    ASSERT_EQ(src_size, dst_size);
+
+    dst->getTensorDesc().setDims(output);
+    ie_memcpy(dst_data, dst_size * sizeof(ie_fp16), src_data, src_size * sizeof(ie_fp16));
+}
+
+PRETTY_PARAM(layoutPreference, vpu::LayoutPreference)
+
+static void GenerateOutput(SizeVector& output, const IndicesVector indices,
+                           const SizeVector input, const int32_t keep_at_least_1d) {
+    auto indicesCopy = indices;
+    for (auto &index : indicesCopy) {
+        if (index < 0)
+            index += input.size();
+        ASSERT_GE(index, 0);
+        ASSERT_LT(static_cast<size_t>(index), input.size());
+        ASSERT_EQ(input[index], 1);
+    }
+
+    for (size_t k = 0; k < input.size(); k++) {
+        if (std::find(indicesCopy.begin(), indicesCopy.end(), k) == indicesCopy.end()) {
+            output.push_back(input[k]);
+        }
+    }
+
+    if (output.size() == 0) {
+        if (keep_at_least_1d) {
+            output.push_back({ 1 });
+        } else {
+            output.push_back({ 0 });
+        }
+    }
+}
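+
+// Worked example: input {3, 1, 2, 1} with indices {1, 3} (or the negative
+// equivalents {-3, -1}) drops both size-1 axes, producing output {3, 2}.
+// Only if every axis were squeezed away does keep_at_least_1d matter,
+// selecting between an output of {1} and a degenerate {0}.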
+
+static std::string DimToString(SizeVector dimVector) {
+    std::string outString;
+    for (auto dim : dimVector) {
+        outString += "<dim>" + std::to_string(dim) + "</dim>\n";
+    }
+    return outString;
+}
+
+static std::string GenerateSqueezeNN(const SizeVector& inputDims, const SizeVector& outputDims,
+                                     const std::vector<int32_t>& indices, const int keep_at_least_1d) {
+    std::string model = R"V0G0N(
+        <net name="SQUEEZE_MODEL" version="2" batch="1">
+            <layers>
+                <layer id="0" name="input" precision="FP16" type="Input">
+                <output>
+                    <port id="0">
+                    __IN_DIMS__
+                    </port>
+                </output>
+                </layer>
+                <layer id="1" name="indices" precision="FP16" type="Const">
+                    <output>
+                        <port id="1">
+                            <dim>
+                            __IND_SIZE__
+                            </dim>
+                        </port>
+                    </output>
+                    <blobs>
+                        <custom offset="0" size="__IND_SIZE_OFFSET__"/>
+                    </blobs>
+                </layer>
+                <layer id="2" name="squeeze" precision="FP16" type="Squeeze">
+                    <data keep_at_least_1d="__KEEP_1D__"/>
+                    <input>
+                        <port id="0">
+                        __IN_DIMS__
+                        </port>
+                        <port id="1">
+                            <dim>
+                            __IND_SIZE__
+                            </dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="1">
+                        __OUT_DIMS__
+                        </port>
+                    </output>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
+                <edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
+            </edges>
+        </net>
+)V0G0N";
+    REPLACE_WITH_STR(model, "__IN_DIMS__", DimToString(inputDims));
+    REPLACE_WITH_STR(model, "__OUT_DIMS__", DimToString(outputDims));
+    REPLACE_WITH_STR(model, "__IND_SIZE__", std::to_string(indices.size()));
+    REPLACE_WITH_STR(model, "__IND_SIZE_OFFSET__", std::to_string(indices.size() * sizeof(ie_fp16)));
+    REPLACE_WITH_STR(model, "__KEEP_1D__", std::to_string(keep_at_least_1d));
+
+    return model;
+}
+
+static InferenceEngine::TBlob<uint8_t>* GenerateWeightBlob(const IndicesVector& indices) {
+    InferenceEngine::TBlob<uint8_t> *weights_raw = new InferenceEngine::TBlob<uint8_t>(
+            {InferenceEngine::Precision::U8,
+                    {indices.size() * sizeof(ie_fp16)},
+                    InferenceEngine::Layout::C});
+    weights_raw->allocate();
+    ie_fp16 *inputBlobRawDataFp16 = weights_raw->data().as<ie_fp16 *>();
+    for (size_t index = 0; index < indices.size(); ++index) {
+        inputBlobRawDataFp16[index] = InferenceEngine::PrecisionUtils::f32tof16(indices[index]);
+    }
+    return weights_raw;
+}
+
+class myriadLayersTestsSqueezeBase : public
+        myriadLayerTestBaseWithParam<std::tuple<InferenceEngine::SizeVector, IndicesVector, int32_t, layoutPreference>>
+{
+protected:
+    virtual void InitBody()
+    {
+        auto input = std::get<0>(GetParam());
+        auto indices = std::get<1>(GetParam());
+        auto keep_at_least_1d = std::get<2>(GetParam());
+        auto layoutPreference = std::get<3>(GetParam());
+
+        SizeVector output;
+        GenerateOutput(output, indices, input, keep_at_least_1d);
+        TBlob<uint8_t>::Ptr weights(GenerateWeightBlob(indices));
+        std::string SQUEEZE_MODEL_FORMATTED = GenerateSqueezeNN(input, output, indices, keep_at_least_1d);
+
+        ASSERT_NO_THROW(readNetwork(SQUEEZE_MODEL_FORMATTED, weights));
+        createInferRequest(NetworkInitParams().useHWOpt(true).layoutPreference(layoutPreference).lockLayout(true));
+
+        ASSERT_TRUE(Infer());
+
+        ref_squeeze(_inputMap.begin()->second, _refBlob, output);
+        CompareCommonAbsolute(_outputMap.begin()->second, _refBlob, 0);
+    }
+};
+
+class myriadLayersTestsSqueezeTC1 : public myriadLayersTestsSqueezeBase
+{
+};
+
+class myriadLayersTestsSqueezeTC2 : public myriadLayersTestsSqueezeBase
+{
+};
+
+class myriadLayersTestsSqueezeTC3 : public myriadLayersTestsSqueezeBase
+{
+};
+
+class myriadLayersTestsSqueezeTC4 : public myriadLayersTestsSqueezeBase
+{
+};
+
+class myriadLayersTestsSqueezeTC5 : public myriadLayersTestsSqueezeBase
+{
+};
+
+TEST_P(myriadLayersTestsSqueezeTC1, Squeeze) {
+    DISABLE_IF(!CheckMyriadX());
+    InitBody();
+}
+
+TEST_P(myriadLayersTestsSqueezeTC2, Squeeze) {
+    DISABLE_IF(!CheckMyriadX());
+    InitBody();
+}
+
+TEST_P(myriadLayersTestsSqueezeTC3, Squeeze) {
+    DISABLE_IF(!CheckMyriadX());
+    InitBody();
+}
+
+TEST_P(myriadLayersTestsSqueezeTC4, Squeeze) {
+    DISABLE_IF(!CheckMyriadX());
+    InitBody();
+}
+
+TEST_P(myriadLayersTestsSqueezeTC5, Squeeze) {
+    DISABLE_IF(!CheckMyriadX());
+    InitBody();
+}
+
+static std::vector<InferenceEngine::SizeVector> s_squeezeTensorsTC1 = {
+    {{1, 3, 1}, {1, 1, 1}}
+};
+
+static std::vector<IndicesVector> s_squeezeIndicesTC1 = {
+    {{0, 2}, {0}, {2}, {-3}, {-1}, {-3, -1}}
+};
+
+static std::vector<InferenceEngine::SizeVector> s_squeezeTensorsTC2 = {
+    {{3, 1, 2}}
+};
+
+static std::vector<IndicesVector> s_squeezeIndicesTC2 = {
+    {{1, -2}, {-2, 1}}
+};
+
+static std::vector<InferenceEngine::SizeVector> s_squeezeTensorsTC3 = {
+        {{3, 1, 2, 3}}
+};
+
+static std::vector<IndicesVector> s_squeezeIndicesTC3 = {
+        {{1, -3}, {-3, 1}}
+};
+
+static std::vector<InferenceEngine::SizeVector> s_squeezeTensorsTC4 = {
+        {{3, 1, 2, 1}}
+};
+
+static std::vector<IndicesVector> s_squeezeIndicesTC4 = {
+        {{1}, {3}, {1, 3}, {3, 1}, {-1}, {-3}, {-3, -1}}
+};
+
+static std::vector<InferenceEngine::SizeVector> s_squeezeTensorsTC5 = {
+        {{1, 13, 1, 1, 33}},
+};
+
+static std::vector<IndicesVector> s_squeezeIndicesTC5 = {
+        {0}, {3}, {0, 2}, {0, 3}, {2, 3}, {-5, -2, -3},
+};
+
+static std::vector<int32_t> s_squeezeKeepAtLeast1D = {
+    0, 1
+};
+
+static std::vector<layoutPreference> s_squeezeLayoutPreference = {
+        vpu::LayoutPreference::ChannelMajor,
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_strided_slice_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_strided_slice_test.cpp
new file mode 100644 (file)
index 0000000..249c589
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_strided_slice_test.h"
+
+INSTANTIATE_TEST_CASE_P(
+    accuracy, myriadLayersTestsStridedSlice_nightly,
+    ::testing::ValuesIn(s_stridedSliceParams));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_strided_slice_test.h b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_strided_slice_test.h
new file mode 100644 (file)
index 0000000..e62dfbf
--- /dev/null
@@ -0,0 +1,300 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include "myriad_layers_reference_functions.hpp"
+#include <cmath>
+
+#define BOUND (5.0f)
+
+using namespace InferenceEngine;
+
+struct strided_slice_test_param {
+    InferenceEngine::SizeVector in_shape;
+    size_t dim_size;
+    std::vector<int32_t> begin;
+    std::vector<int32_t> end;
+    std::vector<int32_t> strides;
+
+    InferenceEngine::SizeVector begin_mask;
+    InferenceEngine::SizeVector end_mask;
+    InferenceEngine::SizeVector ellipsis_mask;
+    InferenceEngine::SizeVector new_axis_mask;
+    InferenceEngine::SizeVector shrink_axis_mask;
+    InferenceEngine::SizeVector out_shape;
+};
+
+class myriadLayersTestsStridedSlice_nightly: public myriadLayersTests_nightly,
+                                             public testing::WithParamInterface<strided_slice_test_param> {
+public:
+    std::string model_t = R"V0G0N(
+<net Name="StridedSlice_net" version="2" precision="FP16" batch="1">
+    <layers>
+        <layer name="input" type="Input" precision="FP16" id="1">
+            <output>
+                <port id="1">
+                    _IN_
+                </port>
+            </output>
+        </layer>
+        <layer name="begin" type="Const" precision="I32" id="2">
+            <output>
+                <port id="2">
+                    <dim>_DIM_SIZE_</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="_DIM_BYTE_OFFSET_0_" size="_DIM_BYTE_SIZE_0_"/>
+            </blobs>
+        </layer>
+        <layer name="end" type="Const" precision="I32" id="3">
+            <output>
+                <port id="3">
+                    <dim>_DIM_SIZE_</dim>
+                </port>
+            </output>
+            <blobs>
+                <custom offset="_DIM_BYTE_OFFSET_1_" size="_DIM_BYTE_SIZE_1_"/>
+            </blobs>
+        </layer>
+        _STRIDES_IN_LAYER_
+        <layer name="strided_slice" id="5" type="StridedSlice" precision="FP16">
+            <data _BEGIN_ _END_ _ELLIPSIS_ _NEW_AXIS_ _SHRINK_/>
+            <input>
+                <port id="1">
+                    _IN_
+                </port>
+                <port id="2">
+                    <dim>_DIM_SIZE_</dim>
+                </port>
+                <port id="3">
+                    <dim>_DIM_SIZE_</dim>
+                </port>
+                _STRIDES_IN_PORT_
+            </input>
+            <output>
+                <port id="5">
+                    _OUT_
+                </port>
+            </output>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="1" from-port="1" to-layer="5" to-port="1"/>
+        <edge from-layer="2" from-port="2" to-layer="5" to-port="2"/>
+        <edge from-layer="3" from-port="3" to-layer="5" to-port="3"/>
+        _STRIDES_IN_EDGE_
+    </edges>
+</net>
+)V0G0N";
+
+    std::string stridesLayer = R"V0G0N(
+<layer name="strides" type="Const" precision="I32" id="4">
+    <output>
+        <port id="4">
+            <dim>_DIM_SIZE_</dim>
+        </port>
+    </output>
+    <blobs>
+        <custom offset="_DIM_BYTE_OFFSET_2_" size="_DIM_BYTE_SIZE_2_"/>
+    </blobs>
+</layer>
+)V0G0N";
+
+    std::string stridesInPort = R"V0G0N(
+<port id="4">
+    <dim>_DIM_SIZE_</dim>
+</port>
+)V0G0N";
+
+    std::string stridesEdge = R"V0G0N(
+<edge from-layer="4" from-port="4" to-layer="5" to-port="4"/>
+)V0G0N";
+
+    std::string getModel(const strided_slice_test_param& p) {
+        std::string model = model_t;
+        std::string in_shape;
+        std::string out_shape;
+        std::string begin;
+        std::string end;
+        std::string ellipsis;
+        std::string new_axis;
+        std::string shrink_axis;
+
+        for (const auto& i : p.in_shape) {
+            in_shape += "<dim>";
+            in_shape += std::to_string(i) + "</dim>\n";
+        }
+        in_shape.pop_back();
+
+        if (!p.strides.empty()) {
+            REPLACE_WITH_NUM(stridesLayer, "_DIM_BYTE_SIZE_2_", p.strides.size() * sizeof(uint32_t));
+            REPLACE_WITH_NUM(stridesLayer, "_DIM_BYTE_OFFSET_2_", (p.begin.size() + p.end.size()) * sizeof(uint32_t));
+            REPLACE_WITH_STR(model, "_STRIDES_IN_LAYER_", stridesLayer);
+            REPLACE_WITH_STR(model, "_STRIDES_IN_PORT_", stridesInPort);
+            REPLACE_WITH_STR(model, "_STRIDES_IN_EDGE_", stridesEdge);
+        } else {
+            REPLACE_WITH_STR(model, "_STRIDES_IN_LAYER_", std::string());
+            REPLACE_WITH_STR(model, "_STRIDES_IN_PORT_", std::string());
+            REPLACE_WITH_STR(model, "_STRIDES_IN_EDGE_", std::string());
+        }
+        REPLACE_WITH_STR(model, "_IN_", in_shape);
+        REPLACE_WITH_NUM(model, "_DIM_SIZE_", p.dim_size);
+        REPLACE_WITH_NUM(model, "_DIM_BYTE_SIZE_0_", p.begin.size() * sizeof(uint32_t));
+        REPLACE_WITH_NUM(model, "_DIM_BYTE_SIZE_1_", p.end.size() * sizeof(uint32_t));
+        REPLACE_WITH_NUM(model, "_DIM_BYTE_OFFSET_0_", 0);
+        REPLACE_WITH_NUM(model, "_DIM_BYTE_OFFSET_1_", p.begin.size() * sizeof(uint32_t));
+
+        if (!p.begin_mask.empty()) {
+            begin = "begin_mask=\"";
+            for (const auto& pb : p.begin_mask)
+                begin += std::to_string(pb) + ",";
+            begin.pop_back();
+            begin += "\"";
+        }
+        REPLACE_WITH_STR(model, "_BEGIN_", begin);
+
+        if (!p.end_mask.empty()) {
+            end = "end_mask=\"";
+            for (const auto& pb : p.end_mask)
+                end += std::to_string(pb) + ",";
+            end.pop_back();
+            end += "\"";
+        }
+        REPLACE_WITH_STR(model, "_END_", end);
+
+        if (!p.ellipsis_mask.empty()) {
+            ellipsis = "ellipsis_mask=\"";
+            for (const auto& pb : p.ellipsis_mask)
+                ellipsis += std::to_string(pb) + ",";
+            ellipsis.pop_back();
+            ellipsis += "\"";
+        }
+        REPLACE_WITH_STR(model, "_ELLIPSIS_", ellipsis);
+
+        if (!p.new_axis_mask.empty()) {
+            new_axis = "new_axis_mask=\"";
+            for (const auto& pb : p.new_axis_mask)
+                new_axis += std::to_string(pb) + ",";
+            new_axis.pop_back();
+            new_axis += "\"";
+        }
+        REPLACE_WITH_STR(model, "_NEW_AXIS_", new_axis);
+
+        if (!p.shrink_axis_mask.empty()) {
+            shrink_axis = "shrink_axis_mask=\"";
+            for (const auto& pb : p.shrink_axis_mask)
+                shrink_axis += std::to_string(pb) + ",";
+            shrink_axis.pop_back();
+            shrink_axis += "\"";
+        }
+        REPLACE_WITH_STR(model, "_SHRINK_", shrink_axis);
+
+        for (const auto& i : p.out_shape) {
+            out_shape += "<dim>";
+            out_shape += std::to_string(i) + "</dim>\n";
+        }
+        out_shape.pop_back();
+        REPLACE_WITH_STR(model, "_OUT_", out_shape);
+
+        return model;
+    }
+
+    static InferenceEngine::TBlob<uint8_t>::Ptr generateWeights(const std::vector<std::vector<int32_t>> &data) {
+        size_t totalSize = 0;
+        for (const auto & i : data)
+            totalSize += i.size();
+        auto weights = new InferenceEngine::TBlob<uint8_t>(
+            {InferenceEngine::Precision::U8, { totalSize * sizeof(int32_t) }, InferenceEngine::C});
+        weights->allocate();
+        size_t vectorCounter = 0;
+        size_t innerVectorCounter = 0;
+        for (size_t i = 0; i < totalSize; i++) {
+            if (innerVectorCounter >= data[vectorCounter].size()) {
+                ++vectorCounter;
+                innerVectorCounter = 0;
+            }
+            weights->data().as<int32_t*>()[i] = data[vectorCounter][innerVectorCounter];
+            ++innerVectorCounter;
+        }
+        return InferenceEngine::TBlob<uint8_t>::Ptr(weights);
+    }
+};
+
+TEST_P(myriadLayersTestsStridedSlice_nightly, TestsStridedSlice) {
+    auto p = ::testing::WithParamInterface<strided_slice_test_param>::GetParam();
+
+    std::string model = getModel(p);
+
+    TBlob<uint8_t>::Ptr weights(generateWeights({ p.begin, p.end, p.strides }));
+    // Parse model.
+    ASSERT_NO_THROW(readNetwork(model, weights));
+
+    const auto& network = _cnnNetwork;
+
+    auto inputsInfo = network.getInputsInfo();
+    inputsInfo["input"]->setPrecision(Precision::FP16);
+    auto outputsInfo = network.getOutputsInfo();
+    outputsInfo["strided_slice"]->setPrecision(Precision::FP16);
+
+    // Load network.
+    StatusCode st = GENERAL_ERROR;
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(
+        _exeNetwork, network, { {VPU_CONFIG_KEY(PERF_REPORT_MODE), VPU_CONFIG_VALUE(PER_STAGE)} },
+        &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+    // Create InferRequest.
+    InferenceEngine::IInferRequest::Ptr inferRequest;
+    ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    // Input Data.
+    InferenceEngine::Blob::Ptr inputBlob;
+    ASSERT_NO_THROW(st = inferRequest->GetBlob("input", inputBlob, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    GenRandomData(inputBlob);
+
+    // Infer & get output blob.
+    InferenceEngine::Blob::Ptr outputBlob;
+    ASSERT_NO_THROW(st = inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NO_THROW(st = inferRequest->GetBlob("strided_slice", outputBlob, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    // Output Reference.
+    Blob::Ptr refBlob = InferenceEngine::make_shared_blob<ie_fp16>(outputBlob->getTensorDesc());
+    refBlob->allocate();
+
+    // Check results.
+    InferenceEngine::SizeVector out_dims;
+    ref_strided_slice(inputBlob, refBlob, out_dims, p.begin, p.end, p.strides, p.begin_mask, p.end_mask);
+
+    // Check out shapes.
+    ASSERT_EQ(out_dims.size(), p.out_shape.size()) << "Wrong out_shape size!";
+    for (size_t i = 0; i < p.out_shape.size(); i++) {
+        ASSERT_EQ(out_dims[i], p.out_shape[i]) << "Wrong out_shape dimension at index " << i;
+    }
+
+    CompareCommonAbsolute(outputBlob, refBlob, 0);
+}
+
+// Params: in_shape, dim_size, begin, end, stride, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask, out_shape
+static std::vector<strided_slice_test_param> s_stridedSliceParams = {
+    strided_slice_test_param{ { 10 }, 1, { 0 }, { 10 }, { 2 }, {}, {}, {}, {}, {}, { 5 } },
+    strided_slice_test_param{ { 10 }, 1, { 1 }, { 9 }, { 2 }, {}, {}, {}, {}, {}, { 4 } },
+    strided_slice_test_param{ { 10 }, 1, { 1 }, { 9 }, { 2 }, { 0 }, {}, {}, {}, {}, { 5 } },
+    strided_slice_test_param{ { 1000, 4 }, 2, { 0, 0 }, { 1000, 4 }, { 1, 4 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 1000, 1 } },
+    strided_slice_test_param{ { 1000, 4 }, 2, { 200, 1 }, { 500, 3 }, { 1, 2 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 1000, 1 } },
+    strided_slice_test_param{ { 1, 2, 35, 33 }, 4, { 0, 0, 0, 2 }, { 1, 2, 33, 31 }, {1, 1, 1, 2}, {}, {}, {}, {}, {}, { 1, 2, 33, 15 } },
+    strided_slice_test_param{ { 2, 2, 2, 3}, 4, { 0, 0, 0, 1 }, { 2, 2, 2, 3 }, { 1, 2, 2, 2 }, { 1, 1, 0, 1 }, { 1, 1, 0, 1 }, {}, {}, {}, { 2, 1, 1, 1 } },
+    strided_slice_test_param{ { 2, 8, 32, 32}, 4, { 0, 2, 0, 0 }, { 2, 7, 32, 32 }, { 1, 3, 1, 1 }, {}, {}, {}, {}, {}, { 2, 2, 32, 32 } },
+    strided_slice_test_param{ { 2, 8, 32, 32}, 4, { 0, 0, 2, 0 }, { 2, 8, 31, 32 }, { 1, 1, 3, 1 }, {}, {}, {}, {}, {}, { 2, 8, 10, 32 } },
+    strided_slice_test_param{ { 2, 8, 32, 32}, 4, { 0, 0, 0, 2 }, { 2, 8, 32, 32 }, { 1, 1, 1, 3 }, {}, {}, {}, {}, {}, { 2, 8, 32, 10 } },
+    strided_slice_test_param{ { 1, 32, 128, 128 }, 4, {0, 0, 0, 0 }, { 1, 32, 128, 128 }, { 1, 2, 4, 8 }, {}, {}, {}, {}, {}, { 1, 16, 32, 16 } },
+    strided_slice_test_param{ { 1, 32, 128, 128 }, 4, {0, 16, 0, 0 }, { 1, 32, 128, 128 }, {}, {}, {}, {}, {}, {}, { 1, 16, 128, 128 } },
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_tanh_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_tanh_test.cpp
new file mode 100644 (file)
index 0000000..c2129b5
--- /dev/null
@@ -0,0 +1,44 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tanh_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(
+        accuracy, myriadLayersTestsTanh_nightly,
+        ::testing::ValuesIn(s_tanhParams));
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayerConvolutionWithTanH_nightly,
+        ::testing::Combine(
+            ::testing::ValuesIn(g_convolutionTensors)
+          , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 3, 3))
+          , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<param_size>(MAKE_STRUCT(param_size, 1, 1))
+          , ::testing::Values<uint32_t>(16)
+          , ::testing::Values<uint32_t>(1)
+          , ::testing::Values<IRVersion>(IRVersion::v7, IRVersion::v10)
+          )
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsMaxPoolingWithTanh_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(g_poolingInput),
+        ::testing::ValuesIn(g_poolingLayerParamsLite),
+        ::testing::ValuesIn(g_poolingLayout))
+);
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsAvgPoolingWithTanh_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(g_poolingInput),
+        ::testing::ValuesIn(g_poolingLayerParamsLite),
+        ::testing::ValuesIn(g_poolingLayout))
+);
+
+INSTANTIATE_TEST_CASE_P(
+    accuracy, myriadLayerFullyConnectedWithTanH_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(g_fcTestParamsSubset),
+        ::testing::Values(g_dimensionsFC[0]),
+        ::testing::ValuesIn(g_addBiasFC)
+    )
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_tanh_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_tanh_test.hpp
new file mode 100644 (file)
index 0000000..af95678
--- /dev/null
@@ -0,0 +1,132 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include "myriad_layers_reference_functions.hpp"
+
+#define BOUND (10.0f)
+#define ERROR_BOUND (1.2e-3f)
+#define ERROR_BOUND_WITH_TANH (1.0e-3f)
+using namespace InferenceEngine;
+
+class myriadLayersTestsTanh_nightly: public myriadLayersTests_nightly,
+                             public testing::WithParamInterface<SizeVector> {
+};
+
+TEST_P(myriadLayersTestsTanh_nightly, TestsTanh)
+{
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+    auto p = ::testing::WithParamInterface<SizeVector>::GetParam();
+    SetInputTensors({p});
+    SetOutputTensors({p});
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("TanH")));
+    SetFirstInputToRange(-BOUND, BOUND);
+    ASSERT_TRUE(Infer());
+    /* output check */
+    ref_tanh(_inputMap.begin()->second, _refBlob);
+    CompareCommonAbsolute(_outputMap.begin()->second, _refBlob, ERROR_BOUND);
+}
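+
+// (Analogous to the sigmoid suite: ref_tanh is expected to apply tanh
+// elementwise to the FP16 input, with BOUND keeping generated values in
+// [-10, 10].)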
+
+static std::vector<SizeVector> s_tanhParams = {
+    {{4, 1, 16, 16}},
+    {{4, 2, 16, 16}},
+    {{4, 3, 16, 16}},
+    {{4, 4, 1, 53, 16}},
+    {{4, 4, 2, 53, 16}},
+    {{4, 4, 3, 53, 16}},
+    {{4, 4, 1, 224, 224}},
+    {{4, 4, 4, 2, 224, 224}},
+    {{4, 4, 4, 3, 224, 224}},
+    {{4, 4, 4, 1, 224, 235}},
+    {{4, 4, 4, 2, 224, 235}},
+    {{4, 4, 4, 3, 224, 235}},
+    {{1, 1, 277, 230}},
+    {{1, 2, 277, 230}},
+    {{1, 3, 277, 230}}
+};
+
+static std::vector<InferenceEngine::SizeVector> s_convolutionTensors = {
+    {{1, 8, 4, 16}, {16, 8, 16}}  //NCHW
+};
+
+/* subset of tests checking the invocation of two stacked layers */
+/* additional tests for 2D and 3D tensors added                  */
+static std::vector<int32_t> s_dimensionsFC = {
+    4, 3
+};
+
+static std::vector<int32_t> s_addBiasFC = {
+    1, 0
+};
+
+/* reduced set to keep test duration and count down */
+static std::vector<fcon_test_params> s_fcTestParamsSubset = {
+    {{1, 1, 16, 8},     8, 0.02f},
+    {{1, 1, 8, 40},     8, 0.02f},
+    {{1, 4, 8, 16},     4, 0.065f},
+    {{1, 16, 16, 16},  16, 0.36f},
+    {{1, 16, 8, 8},    8, 0.065f}
+};
+
+class myriadLayerConvolutionWithTanH_nightly: public ConvolutionTest<IRVersion>{
+};
+
+TEST_P(myriadLayerConvolutionWithTanH_nightly, Convolution) {
+    auto param = GetParam();
+    _irVersion = std::get<6>(param);
+
+    _testNet.addLayer(LayerInitParams("TanH")
+             .in({_output_tensor})
+             .out({_output_tensor}),
+             ref_tanh_wrap);
+
+    float maxerr = 0;
+    if (group == 1)
+        maxerr = 0.00055 * IC * kernel.x * kernel.y;
+    else // TODO: depthwise convolution (group > 1) is currently slightly less accurate
+        maxerr = 0.00066 * (IC / group) * kernel.x * kernel.y;
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), maxerr);
+}
+
+class myriadLayersTestsMaxPoolingWithTanh_nightly: public PoolingTest<POOLING_MAX>{
+};
+
+class myriadLayersTestsAvgPoolingWithTanh_nightly: public PoolingTest<POOLING_AVG>{
+};
+
+TEST_P(myriadLayersTestsMaxPoolingWithTanh_nightly, TestsMaxPoolingWithTanh)
+{
+    _testNet.addLayer(LayerInitParams("TanH")
+             .in({_output_tensor})
+             .out({_output_tensor}),
+             ref_tanh_wrap);
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), ERROR_BOUND_WITH_TANH);
+}
+
+TEST_P(myriadLayersTestsAvgPoolingWithTanh_nightly, TestsAvgPoolingWithTanh)
+{
+    _testNet.addLayer(LayerInitParams("TanH")
+             .in({_output_tensor})
+             .out({_output_tensor}),
+             ref_tanh_wrap);
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), ERROR_BOUND_WITH_TANH);
+}
+
+class myriadLayerFullyConnectedWithTanH_nightly: public FCTest<>{
+};
+
+TEST_P(myriadLayerFullyConnectedWithTanH_nightly, TestsFullyConnected)
+{
+    _testNet.addLayer(LayerInitParams("TanH")
+             .in({_output_tensor})
+             .out({_output_tensor}),
+             ref_tanh_wrap);
+    ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()));
+    CompareCommonAbsolute(_outputMap.begin()->second, getReferenceOutput(), _par.error_bound);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_tensor_iterator_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_tensor_iterator_test.cpp
new file mode 100644 (file)
index 0000000..e18562d
--- /dev/null
@@ -0,0 +1,66 @@
+// Copyright (C) 2019 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu_layers_tests.hpp"
+#include "vpu_case_params.hpp"
+#include "common/include/vpu/utils/error.hpp"
+
+#include "single_layer_common.hpp"
+
+#include "gtest/gtest.h"
+
+#include <string>
+#include <ngraph_functions/subgraph_builders.hpp>
+#include <common_test_utils/test_common.hpp>
+#include <functional_test_utils/blob_utils.hpp>
+#include <vpu_case_common.hpp>
+
+namespace {
+
+class MyriadLayersTestsTensorIterator : public CommonTestUtils::TestsCommon {
+public:
+    void SetUp() override {
+        fn_ptr = ngraph::builder::subgraph::makeTIwithLSTMcell();
+    }
+protected:
+    std::shared_ptr<ngraph::Function> fn_ptr;
+};
+
+// TODO: Issue: 29485
+TEST_F(MyriadLayersTestsTensorIterator, CompareNativeVersionWithUnrolledLoop) {
+    DISABLE_IF(!CheckMyriadX() && !CheckMA2085());
+    CNNNetwork network(fn_ptr);
+    network.getInputsInfo().begin()->second->setPrecision(Precision::FP16);
+
+
+
+    ExecutableNetwork exeNetworkWithConfig = ie->LoadNetwork(network, CommonTestUtils::DEVICE_MYRIAD,
+                                                             {{VPU_CONFIG_KEY(FORCE_PURE_TENSOR_ITERATOR), CONFIG_VALUE(NO)},
+                                                              {VPU_CONFIG_KEY(ENABLE_TENSOR_ITERATOR_UNROLLING), CONFIG_VALUE(YES)}});
+    InferRequest inferRequestWithConfig = exeNetworkWithConfig.CreateInferRequest();
+    auto blobWithConfig = FuncTestUtils::createAndFillBlob(network.getInputsInfo().begin()->second->getTensorDesc());
+    inferRequestWithConfig.SetBlob(network.getInputsInfo().begin()->first, blobWithConfig);
+    inferRequestWithConfig.Infer();
+    auto* outRawDataWithConfig = inferRequestWithConfig.GetBlob(network.getOutputsInfo().begin()->first)->cbuffer().as<float*>();
+
+    ExecutableNetwork exeNetworkWithoutConfig = ie->LoadNetwork(network, CommonTestUtils::DEVICE_MYRIAD,
+                                                                {{VPU_CONFIG_KEY(FORCE_PURE_TENSOR_ITERATOR), CONFIG_VALUE(YES)},
+                                                                 {VPU_CONFIG_KEY(ENABLE_TENSOR_ITERATOR_UNROLLING), CONFIG_VALUE(NO)}});
+    InferRequest inferRequestWithoutConfig = exeNetworkWithoutConfig.CreateInferRequest();
+    auto blobWithoutConfig = FuncTestUtils::createAndFillBlob(network.getInputsInfo().begin()->second->getTensorDesc());
+    inferRequestWithoutConfig.SetBlob(network.getInputsInfo().begin()->first, blobWithoutConfig);
+    inferRequestWithoutConfig.Infer();
+    auto* outRawDataWithoutConfig = inferRequestWithoutConfig.GetBlob(network.getOutputsInfo().begin()->first)->cbuffer().as<float*>();
+
+    auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP16);
+    size_t outElementsCount = std::accumulate(begin(fn_ptr->get_output_shape(0)), end(fn_ptr->get_output_shape(0)), 1,
+                                              std::multiplies<size_t>());
+
+    FuncTestUtils::compareRawBuffers(outRawDataWithoutConfig, outRawDataWithConfig, outElementsCount,
+                                     outElementsCount,
+                                     thr);
+}
+}  // namespace
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_tile_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_tile_test.cpp
new file mode 100644 (file)
index 0000000..bf23c58
--- /dev/null
@@ -0,0 +1,40 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tile_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracyAdd, myriadLayerTestTile_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<test_params>(
+                                        MAKE_STRUCT(tile_test::nd_tensor_test_params, {4, 5, 6}, 0)
+                                        , MAKE_STRUCT(tile_test::nd_tensor_test_params, {4, 5, 6, 7}, 0)
+                                        , MAKE_STRUCT(tile_test::nd_tensor_test_params, {4, 5, 6, 7}, 1)
+                                        , MAKE_STRUCT(tile_test::nd_tensor_test_params, {4, 5, 6, 27, 13}, 0)
+                                        , MAKE_STRUCT(tile_test::nd_tensor_test_params, {4, 5, 6, 27, 13}, 1)
+                                        , MAKE_STRUCT(tile_test::nd_tensor_test_params, {4, 5, 6, 27, 13}, 2)
+                                        , MAKE_STRUCT(tile_test::nd_tensor_test_params, {4, 5, 6, 27, 13, 18}, 0)
+                                        , MAKE_STRUCT(tile_test::nd_tensor_test_params, {4, 5, 6, 27, 13, 18}, 1)
+                                        , MAKE_STRUCT(tile_test::nd_tensor_test_params, {4, 5, 6, 27, 13, 18}, 2)
+                                        , MAKE_STRUCT(tile_test::nd_tensor_test_params, {4, 5, 6, 27, 13, 18}, 3))
+
+                                , ::testing::Values<tiles>(2, 3, 5)
+                        ));
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayerTestTile_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<test_params>(
+                                        MAKE_STRUCT(tile_test::nd_tensor_test_params, {4, 5, 6}, 1)
+                                        , MAKE_STRUCT(tile_test::nd_tensor_test_params, {4, 5, 6}, 2)
+                                        , MAKE_STRUCT(tile_test::nd_tensor_test_params, {5, 6}, 0)
+                                        , MAKE_STRUCT(tile_test::nd_tensor_test_params, {5, 6}, 1)
+                                        , MAKE_STRUCT(tile_test::nd_tensor_test_params, {6}, 0)
+                                        , MAKE_STRUCT(tile_test::nd_tensor_test_params, {6, 5, 6, 7}, 2)
+                                        , MAKE_STRUCT(tile_test::nd_tensor_test_params, {6, 5, 6, 7}, 3)
+                                        , MAKE_STRUCT(tile_test::nd_tensor_test_params, {4, 5, 6, 27, 13}, 3)
+                                        , MAKE_STRUCT(tile_test::nd_tensor_test_params, {4, 5, 6, 27, 13}, 4)
+                                        , MAKE_STRUCT(tile_test::nd_tensor_test_params, {4, 5, 6, 27, 13, 18}, 4)
+                                        , MAKE_STRUCT(tile_test::nd_tensor_test_params, {4, 5, 6, 27, 13, 18}, 5))
+
+                                , ::testing::Values<tiles>(2, 3, 5)
+                        ));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_tile_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_tile_test.hpp
new file mode 100644 (file)
index 0000000..1d20b1a
--- /dev/null
@@ -0,0 +1,133 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include <algorithm>
+
+using std::tuple;
+using std::get;
+
+using namespace InferenceEngine;
+
+namespace tile_test {           // avoiding ODR violation with other tests
+struct nd_tensor_test_params {
+    SizeVector dims;
+    int axis;
+};
+}
+
+PRETTY_PARAM(test_params, tile_test::nd_tensor_test_params);
+PRETTY_PARAM(tiles, int);
+
+// Helpers are static to avoid ODR violations if this header is ever
+// included from more than one translation unit (see the namespace note above).
+static bool iter(SizeVector& in, SizeVector& out)
+{
+    bool flag = true;
+    for(int i = 0; i < out.size(); i++) {
+        if(in[i] < out[i] - 1) {
+            in[i]++;
+            break;
+        } else {
+            if(i == out.size() - 1) {
+                flag = false;
+                break;
+            }
+            in[i] = 0;
+        }
+    }
+    return flag;
+}
+
+static void calcPos(SizeVector& in, SizeVector& out) {
+    for(int i = 0; i < out.size(); i++) {
+        out[i] %= in[i];
+    }
+}
+
+static int calcOffset(SizeVector& in, SizeVector& out)
+{
+    int offset = in.back();
+    for(int i = in.size() - 2; i >= 0; i--) {
+        int mul = in[i];
+        for(int j = i + 1; j < out.size(); j++)
+            mul *= out[j];
+        offset += mul;
+    }
+    return offset;
+}
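+
+// calcOffset computes a standard row-major (last-dimension-fastest) linear
+// offset, where `in` holds the position and `out` the shape:
+//   offset = in[n-1] + sum_i in[i] * prod(out[j] for j > i)
+// e.g. position {1, 2} in shape {4, 3} -> 2 + 1 * 3 = 5.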
+
+static void ref_tile(const InferenceEngine::Blob::Ptr src,
+                     InferenceEngine::Blob::Ptr dst,
+                     int axis_val,
+                     int tiles_val)
+{
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+
+    SizeVector in_size = src->getTensorDesc().getDims();
+    SizeVector out_size = dst->getTensorDesc().getDims();
+    Layout layout = src->getTensorDesc().getLayout();
+    SizeVector curr_size(in_size.size());
+    const uint16_t *src_data = src->buffer();
+    uint16_t *dst_data = dst->buffer();
+
+    // TODO: investigate this case
+    if (layout == NCHW || layout == NHWC) {
+        size_t N = in_size[0];
+        size_t C = in_size[1];
+        size_t H = in_size[2];
+        size_t W = in_size[3];
+
+        size_t N1 = out_size[0];
+        size_t C1 = out_size[1];
+        size_t H1 = out_size[2];
+        size_t W1 = out_size[3];
+        for (size_t n = 0; n < N1; n++) {
+            for (size_t c = 0; c < C1; c++) {
+                for (size_t h = 0; h < H1; h++) {
+                    for (size_t w = 0; w < W1; w++) {
+                        size_t idx = layout == NCHW ?
+                                     (w % W) + (h % H) * W + (c % C) * W * H + (n % N) * W * H * C :
+                                     (c % C) + (w % W) * C + (h % H) * C * W + (n % N) * W * H * C;
+                        size_t actualIdx = layout == NCHW ?
+                                           w + h * W1 + c * W1 * H1 + n * W1 * H1 * C1 :
+                                           c + w * C1 + h * C1 * W1 + n * W1 * H1 * C1;
+                        dst_data[actualIdx] = src_data[idx];
+                    }
+                }
+            }
+        }
+    } else {
+        do {
+            SizeVector ref = curr_size;
+            calcPos(in_size, ref);
+            dst_data[calcOffset(curr_size, out_size)] = src_data[calcOffset(ref, in_size)];
+        } while(iter(curr_size, out_size));
+    }
+}
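+
+// In both branches the mapping is the same: each output coordinate wraps
+// back into the input modulo the input extent. E.g. tiling a {2, 3} tensor
+// with tiles = 2 along axis 0 yields a {4, 3} output with
+// out[i][j] = in[i % 2][j].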
+
+typedef myriadLayerTestBaseWithParam<tuple<test_params, tiles>> myriadLayerTestTile_nightly;
+
+TEST_P(myriadLayerTestTile_nightly, Tile) {
+    _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+
+    tile_test::nd_tensor_test_params input_dims = get<0>(GetParam());
+    int tiles = get<1>(GetParam());
+    int axis = input_dims.axis;
+    auto dims = input_dims.dims;
+    SetInputTensors({dims});
+    dims[axis] *= tiles;
+    SetOutputTensors({dims});
+    std::map<std::string, std::string> params;
+    params["axis"] = std::to_string(axis);
+    params["tiles"] = std::to_string(tiles);
+
+    ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Tile").params(params)));
+    auto inputBlob = _inputMap.begin()->second;
+    auto outputBlob = _outputMap.begin()->second;
+    SetFirstInputToRange(1.0f, 100.0f);
+    ASSERT_TRUE(Infer());
+    ref_tile(inputBlob, _refBlob, axis, tiles);
+    CompareCommonAbsolute(outputBlob, _refBlob, 0);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_topk_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_topk_test.cpp
new file mode 100644 (file)
index 0000000..e31adc8
--- /dev/null
@@ -0,0 +1,40 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_topk_test.hpp"
+
+static const std::vector<Geometry> s_geometries_list =
+{
+    {{ 1, 4 }, 1, 3 },
+    {{ 3, 4, 7, 5, 6 }, 1, 2 },
+    {{ 5, 6, 3, 4 }, 2, 2 },
+// TODO: 3D geometries excluded due to incorrect CHW/HWC layout processing in IE/GT; uncomment when fixed
+//    {{ 223, 217, 21 }, 0, 13 },
+//    {{ 439, 429, 5 }, 2, 2 },
+    {{ 65, 33 }, 1, 3 },
+    {{ 31680, 1 }, 0, 13 },
+    {{ 495, 1 }, 0, 7 },
+    {{ 80000 }, 0, 117 },
+    {{ 3639 }, 0, 3 },
+};
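+
+// For each geometry the expected output shape equals the input shape with
+// dims[axis] replaced by k, e.g. {{ 5, 6, 3, 4 }, axis = 2, k = 2} ->
+// { 5, 6, 2, 4 }; calcOutputDims in the fixture below is expected to
+// implement exactly this mapping.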
+
+static const std::vector<std::string> s_modes_list =
+{
+    "max",
+    "min",
+};
+
+static const std::vector<std::string> s_sorts_list =
+{
+    "value",
+    "index",
+//    "none", // currently is not supported by firmware
+};
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadTestsTopK_nightly,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_geometries_list),
+        ::testing::ValuesIn(s_modes_list),
+        ::testing::ValuesIn(s_sorts_list))
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_topk_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_topk_test.hpp
new file mode 100644 (file)
index 0000000..53baba2
--- /dev/null
@@ -0,0 +1,351 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_reference_functions.hpp"
+#include "myriad_layers_tests.hpp"
+#include "tests_vpu_common.hpp"
+
+#include <debug.h>
+
+#include <algorithm>
+#include <functional>
+#include <string>
+
+using namespace InferenceEngine;
+
+typedef struct {
+    SizeVector dims;
+    int axis;
+    int k;
+} Geometry;
+
+void PrintTo(const Geometry& p, std::ostream* os) {
+    *os << "{dims:" << details::dumpVec(p.dims) << ", axis:" << p.axis << ", k:" << p.k << "}";
+}
+
+using TopKTestParams = std::tuple<Geometry, std::string, std::string>;
+
+static const Precision dataPrecision = Precision::FP16;
+static const Precision indexPrecision = Precision::I32;
+
+class TopKTest: public myriadLayerTestBaseWithParam<TopKTestParams>
+{
+protected:
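+    // Collects the exec_type of every executed stage from the performance
+    // counters so the test can check which stage implementation actually ran.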
+    std::set<std::string> getExecutedStagesTypes() const {
+        std::set<std::string> result;
+        std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perfMap;
+        _inferRequest->GetPerformanceCounts(perfMap, nullptr);
+
+        for (const auto& perf : perfMap)
+            result.emplace(perf.second.exec_type);
+
+        return result;
+    }
+
+    void testTopK(const IRVersion irVersion, const bool outputValues, const bool outputIndices) {
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+        _config[CONFIG_KEY(PERF_COUNT)] = CONFIG_VALUE(YES);
+        _irVersion = irVersion;
+
+        // Skipping individual outputs is available only for IR v10; v7 always
+        // produces both values and indices.
+        ASSERT_TRUE(irVersion == IRVersion::v10 || (outputValues && outputIndices));
+
+        const auto params = GetParam();
+        const auto geometry = std::get<0>(params);
+        const auto inputDims = geometry.dims;
+        const auto axis = geometry.axis;
+        const auto k = geometry.k;
+        const auto mode = std::get<1>(params);
+        const auto sort = std::get<2>(params);
+
+        const auto outputDims = calcOutputDims(inputDims, axis, k);
+        const auto model = irVersion == IRVersion::v10
+                ? getModelV10(inputDims, outputDims, axis, mode, sort, outputValues, outputIndices)
+                : getModelV7(inputDims, outputDims, axis, mode, sort);
+
+        TBlob<uint8_t>::Ptr weightsBlob;
+        TBlob<int32_t>::Ptr inputKBlob;
+        getKBlob(k, weightsBlob, inputKBlob);
+        ASSERT_NE(weightsBlob, nullptr);
+
+        ASSERT_NO_THROW(readNetwork(model, weightsBlob));
+
+        const auto& network = _cnnNetwork;
+
+        _inputsInfo = network.getInputsInfo();
+        _inputsInfo["topk_input"]->setPrecision(dataPrecision);
+        _inputsInfo["topk_input"]->setLayout(defaultLayout(inputDims.size()));
+
+        _outputsInfo = network.getOutputsInfo();
+        if (outputValues) {
+            _outputsInfo["topk.0"]->setPrecision(dataPrecision);
+            _outputsInfo["topk.0"]->setLayout(defaultLayout(outputDims.size()));
+        }
+        if (outputIndices) {
+            _outputsInfo["topk.1"]->setPrecision(indexPrecision);
+            _outputsInfo["topk.1"]->setLayout(defaultLayout(outputDims.size()));
+        }
+
+        StatusCode st = OK;
+
+        ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, _config, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+        ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        Blob::Ptr inputValuesBlob;
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob("topk_input", inputValuesBlob, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        GenRandomData(inputValuesBlob);
+
+        ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        const auto executedTypes = getExecutedStagesTypes();
+
+        // This logic must be synchronized with TopKStage class.
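+        // Presumably the plugin lowers a single-output "max" TopK to the ArgMax
+        // stage when the requested output matches the sort criterion.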
+        const bool useArgMaxOptimization = (!outputValues || !outputIndices)
+                && mode == "max"
+                && ((sort == "value" && outputValues) || (sort == "index" && outputIndices));
+
+        ASSERT_EQ(executedTypes.count("ArgMax"), useArgMaxOptimization);
+        ASSERT_EQ(executedTypes.count("TopK"), !useArgMaxOptimization);
+
+        Blob::Ptr outputValuesBlob, outputIndicesBlob;
+        if (outputValues) {
+            ASSERT_NO_THROW(st = _inferRequest->GetBlob("topk.0", outputValuesBlob, &_resp));
+            ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        }
+        if (outputIndices) {
+            ASSERT_NO_THROW(st = _inferRequest->GetBlob("topk.1", outputIndicesBlob, &_resp));
+            ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        }
+
+        const InferenceEngine::TensorDesc valuesDesc{dataPrecision, outputDims, defaultLayout(outputDims.size())};
+        const InferenceEngine::TensorDesc indicesDesc{indexPrecision, outputDims, defaultLayout(outputDims.size())};
+
+        Blob::Ptr refValuesBlob = make_shared_blob<ie_fp16>(valuesDesc);
+        refValuesBlob->allocate();
+        Blob::Ptr refIndicesBlob = make_shared_blob<int32_t>(indicesDesc);
+        refIndicesBlob->allocate();
+
+        ref_topk(inputValuesBlob, inputKBlob, refValuesBlob, refIndicesBlob, axis, mode, sort);
+        if (outputValues)
+            CompareCommonAbsolute(outputValuesBlob, /*expected=*/refValuesBlob, 0.0f);
+
+        if (outputIndices)
+            CompareCommonExact(outputIndicesBlob, /*expected=*/refIndicesBlob);
+    }
+
+    static std::string getModelV7(const SizeVector& inputDims,
+                                  const SizeVector& outputDims, int axis,
+                                  const std::string& mode, const std::string& sort) {
+        std::string model = R"V0G0N(
+            <net name="testTopK" version="7">
+                <layers>
+                    <layer id="0" name="topk_input" type="Input">
+                        <output>
+                            <port id="0" precision="__DATA_PRECISION__">__INPUT_DIMS__</port>
+                        </output>
+                    </layer>
+                    <layer id="1" name="topk_k" type="Const">
+                        <output>
+                            <port id="1" precision="__INDEX_PRECISION__">__K_DIMS__</port>
+                        </output>
+                        <blobs>
+                            <custom offset="0" size="__K_SIZE__"/>
+                        </blobs>
+                    </layer>
+                    <layer id="2" name="topk" type="TopK">
+                        <data axis="__AXIS__" mode="__MODE__" sort="__SORT__"/>
+                        <input>
+                            <port id="0">__INPUT_DIMS__</port>
+                            <port id="1">__K_DIMS__</port>
+                        </input>
+                        <output>
+                            <port id="2" precision="__DATA_PRECISION__">__OUTPUT_DIMS__</port>
+                            <port id="3" precision="__INDEX_PRECISION__">__OUTPUT_DIMS__</port>
+                        </output>
+                    </layer>
+                </layers>
+                <edges>
+                    <edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
+                    <edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
+                </edges>
+            </net>
+        )V0G0N";
+
+        const std::string inputDimsStr = dimsToString(inputDims);
+        const std::string kDims = dimsToString({1});
+        const std::string outputDimsStr = dimsToString(outputDims);
+        const size_t kSize = sizeof(int32_t);
+
+        REPLACE_WITH_STR(model, "__DATA_PRECISION__", dataPrecision.name());
+        REPLACE_WITH_STR(model, "__INDEX_PRECISION__", indexPrecision.name());
+        REPLACE_WITH_STR(model, "__INPUT_DIMS__", inputDimsStr);
+        REPLACE_WITH_STR(model, "__K_DIMS__", kDims);
+        REPLACE_WITH_NUM(model, "__K_SIZE__", kSize);
+        REPLACE_WITH_STR(model, "__OUTPUT_DIMS__", outputDimsStr);
+        REPLACE_WITH_NUM(model, "__AXIS__", axis);
+        REPLACE_WITH_STR(model, "__MODE__", mode);
+        REPLACE_WITH_STR(model, "__SORT__", sort);
+
+        return model;
+    }
+
+    static std::string getModelV10(const SizeVector& inputDims,
+                                   const SizeVector& outputDims, int axis,
+                                   const std::string& mode, const std::string& sort,
+                                   const bool outputValues, const bool outputIndices) {
+        std::string model = R"V0G0N(
+            <net name="testTopK" version="10">
+                <layers>
+                    <layer id="0" name="topk_input" type="Parameter" version="opset1">
+                        <data element_type="f16" shape="__INPUT_DIMS_SHAPE__"/>
+                        <output>
+                            <port id="0" precision="__DATA_PRECISION__">__INPUT_DIMS__</port>
+                        </output>
+                    </layer>
+                    <layer id="1" name="topk_k" type="Const" version="opset1">
+                        <data element_type="f16" offset="0" shape="__K_DIMS_SHAPE__" size="__K_SIZE__"/>
+                        <output>
+                            <port id="1" precision="__INDEX_PRECISION__" />
+                        </output>
+                    </layer>
+                    <layer id="2" name="topk" type="TopK" version="opset1">
+                        <data axis="__AXIS__" mode="__MODE__" sort="__SORT__"/>
+                        <input>
+                            <port id="0">__INPUT_DIMS__</port>
+                            <port id="1" />
+                        </input>
+                        <output>
+                            <port id="2" precision="__DATA_PRECISION__">__OUTPUT_DIMS__</port>
+                            <port id="3" precision="__INDEX_PRECISION__">__OUTPUT_DIMS__</port>
+                        </output>
+                    </layer>
+                    __RESULT_LAYERS__
+                </layers>
+                <edges>
+                    <edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
+                    <edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
+                    __RESULT_EDGES__
+                </edges>
+            </net>
+        )V0G0N";
+
+        const std::string inputDimsStr  = dimsToString(inputDims);
+        const std::string outputDimsStr = dimsToString(outputDims);
+
+        const size_t kSize = sizeof(int32_t);
+
+        /// TODO: consider extending IRDumperNetwork to support this with OOP API.
+    /// At the moment layers with multiple outputs are not supported.
+        std::string resultLayers, resultEdges;
+        auto addResultLayer = [&resultLayers, &resultEdges, &outputDimsStr]
+                (const std::string& name, const std::string& id, const std::string& sourcePort){
+
+            std::string result = R"V0G0N(
+               <layer id="__ID__" name="__NAME__" type="Result" version="opset1">
+                   <input>
+                       <port id="0">__OUTPUT_DIMS__</port>
+                   </input>
+               </layer>
+               )V0G0N";
+             REPLACE_WITH_STR(result, "__ID__", id);
+             REPLACE_WITH_STR(result, "__NAME__", name);
+             REPLACE_WITH_STR(result, "__OUTPUT_DIMS__", outputDimsStr);
+             resultLayers += result;
+             resultEdges  += "<edge from-layer=\"2\" from-port=\"" + sourcePort + "\" to-layer=\"" + id + "\" to-port=\"0\"/>";
+        };
+
+        if (outputValues)
+            addResultLayer("topk.0", "3", "2");
+
+        if (outputIndices)
+            addResultLayer("topk.1", "4", "3");
+
+        REPLACE_WITH_STR(model, "__DATA_PRECISION__", dataPrecision.name());
+        REPLACE_WITH_STR(model, "__INDEX_PRECISION__", indexPrecision.name());
+        REPLACE_WITH_STR(model, "__INPUT_DIMS__", inputDimsStr);
+        REPLACE_WITH_NUM_VECTOR(model, "__INPUT_DIMS_SHAPE__", inputDims);
+        REPLACE_WITH_STR(model, "__K_DIMS_SHAPE__", "1");
+        REPLACE_WITH_NUM(model, "__K_SIZE__", kSize);
+        REPLACE_WITH_STR(model, "__OUTPUT_DIMS__", outputDimsStr);
+        REPLACE_WITH_NUM(model, "__AXIS__", axis);
+        REPLACE_WITH_STR(model, "__MODE__", mode);
+        REPLACE_WITH_STR(model, "__SORT__", sort);
+        REPLACE_WITH_STR(model, "__RESULT_LAYERS__", resultLayers);
+        REPLACE_WITH_STR(model, "__RESULT_EDGES__", resultEdges);
+
+        return model;
+    }
+
+    static std::string dimsToString(const SizeVector& dims) {
+        std::string str;
+        for (auto& d : dims)
+            str += "<dim>" + std::to_string(d) + "</dim>";
+        return str;
+    }
+
+    static SizeVector calcOutputDims(const SizeVector& inputDims, int axis, int k) {
+        SizeVector outputDims = inputDims;
+        outputDims[axis] = k;
+        return outputDims;
+    }
+
+    static Layout defaultLayout(int ndims) {
+        switch (ndims) {
+        case 5: return NCDHW;
+        case 4: return NCHW;
+        case 3: return CHW;
+        case 2: return NC;
+        case 1: return C;
+        }
+        return ANY;
+    }
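+
+    // Writes the same k value twice: as raw bytes for the IR Const layer
+    // (weightsBlob) and as a typed I32 blob for the reference implementation.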
+    static void getKBlob(int k, TBlob<uint8_t>::Ptr& weightsBlob, TBlob<int32_t>::Ptr& kBlob) {
+        const size_t k_size = 1;
+        const size_t weights_size = k_size * sizeof(int32_t);
+
+        TBlob<uint8_t>* weights_raw = new TBlob<uint8_t>(TensorDesc(Precision::U8, {weights_size}, C));
+        weights_raw->allocate();
+        int32_t* weightsData = weights_raw->data().as<int32_t*>();
+
+        TBlob<int32_t>* k_raw = new TBlob<int32_t>(TensorDesc(Precision::I32, {k_size}, C));
+        k_raw->allocate();
+        int32_t* kData = k_raw->data().as<int32_t*>();
+
+        weightsData[0] = k;
+        kData[0] = k;
+
+        weightsBlob = TBlob<uint8_t>::Ptr(weights_raw);
+        kBlob = TBlob<int32_t>::Ptr(k_raw);
+    }
+};
+
+class myriadTestsTopK_nightly: public TopKTest
+{
+};
+
+TEST_P(myriadTestsTopK_nightly, TopKv7)
+{
+    testTopK(IRVersion::v7, true, true);
+}
+
+TEST_P(myriadTestsTopK_nightly, TopKv10_All)
+{
+    testTopK(IRVersion::v10, true, true);
+}
+
+TEST_P(myriadTestsTopK_nightly, TopKv10_ArgMaxValues)
+{
+    testTopK(IRVersion::v10, true, false);
+}
+
+TEST_P(myriadTestsTopK_nightly, TopKv10_ArgMaxIndices)
+{
+    testTopK(IRVersion::v10, false, true);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_unsqueeze_test.cpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_unsqueeze_test.cpp
new file mode 100644 (file)
index 0000000..3b254e9
--- /dev/null
@@ -0,0 +1,12 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_unsqueeze_test.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsUnsqueeze,
+    ::testing::Combine(
+        ::testing::ValuesIn(s_squeezeTensors),
+        ::testing::ValuesIn(s_squeezeIndices)
+    )
+);
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_unsqueeze_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_unsqueeze_test.hpp
new file mode 100644 (file)
index 0000000..2b4ae6d
--- /dev/null
@@ -0,0 +1,188 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include "ie_layouts.h"
+#include "myriad_layers_tests.hpp"
+#include <vpu/private_plugin_config.hpp>
+#include "myriad_layers_reference_functions.hpp"
+#include "ie_memcpy.h"
+
+using namespace InferenceEngine;
+
+typedef std::vector<int32_t> IndicesVector;
+typedef myriadLayerTestBaseWithParam<std::tuple<InferenceEngine::SizeVector, IndicesVector>> myriadLayersTestsUnsqueeze;
+
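+// Unsqueeze only inserts dimensions of size 1, so the element order in memory
+// is unchanged and the reference output is a plain element-wise copy.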
+static void ref_unsqueeze(const InferenceEngine::Blob::Ptr src,
+                        InferenceEngine::Blob::Ptr dst) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+    int32_t OW = 1;
+    int32_t OH = 1;
+    int32_t OC = 1;
+    int32_t ON = 1;
+    int32_t IW = 1;
+    int32_t IH = 1;
+    int32_t IC = 1;
+    int32_t I_N = 1;
+
+    get_ndims(src, IW, IH, IC, I_N);
+    get_ndims(dst, OW, OH, OC, ON);
+
+    ASSERT_EQ(IW * IH * IC * I_N, OW * OH * OC * ON);
+
+    const ie_fp16 *src_data = src->buffer();
+    ie_fp16 *dst_data = dst->buffer();
+    size_t src_size = src->size();
+    size_t dst_size = dst->size();
+
+    ASSERT_NE(src_data, nullptr);
+    ASSERT_NE(dst_data, nullptr);
+    ASSERT_EQ(src_size, dst_size);
+
+    ie_memcpy(dst_data, dst_size * sizeof(ie_fp16), src_data, src_size * sizeof(ie_fp16));
+}
+
+TEST_P(myriadLayersTestsUnsqueeze, Unsqueeze) {
+    auto input = std::get<0>(GetParam());
+    auto indices = std::get<1>(GetParam());
+
+    std::string in_dims{};
+    std::string out_dims{};
+
+    InferenceEngine::SizeVector output = input;
+
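+    // Insert in ascending order so that each index already accounts for the
+    // size-1 dims inserted before it.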
+    std::sort(indices.begin(), indices.end());
+
+    for (auto index : indices) {
+        ASSERT_LE(static_cast<size_t>(index), output.size());
+        output.insert(output.begin() + index, 1);
+    }
+
+    for (auto in_dim : input) {
+        in_dims += R"V0G0N(
+                        <dim>
+)V0G0N"
+                            + std::to_string(in_dim) +
+R"V0G0N(
+                        </dim>
+)V0G0N";
+    }
+
+    for (auto out_dim : output) {
+        out_dims += R"V0G0N(
+                        <dim>
+)V0G0N"
+                            + std::to_string(out_dim) +
+R"V0G0N(
+                        </dim>
+)V0G0N";
+    }
+
+    std::string UNSQUEEZE_MODEL = R"V0G0N(
+        <net name="UNSQUEEZE_MODEL" version="2" batch="1">
+            <layers>
+                <layer id="0" name="input" precision="FP16" type="Input">
+                <output>
+                    <port id="0">
+)V0G0N"
+                    + in_dims +
+R"V0G0N(
+                    </port>
+                </output>
+                </layer>
+                <layer id="1" name="indices" precision="FP16" type="Const">
+                    <output>
+                        <port id="1">
+                            <dim>
+)V0G0N"
+                                + std::to_string(indices.size()) +
+R"V0G0N(
+                            </dim>
+                        </port>
+                    </output>
+                    <blobs>
+                        <custom offset="0" size=")V0G0N"
+                                          + std::to_string(indices.size() * sizeof(ie_fp16)) +
+                                          R"V0G0N("/>
+                    </blobs>
+                </layer>
+                <layer id="2" name="unsqueeze" precision="FP16" type="Unsqueeze">
+                    <input>
+                        <port id="0">
+)V0G0N"
+                        + in_dims +
+R"V0G0N(
+                        </port>
+                        <port id="1">
+                            <dim>
+)V0G0N"
+                                + std::to_string(indices.size()) +
+R"V0G0N(
+                            </dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="1">
+)V0G0N"
+                        + out_dims +
+R"V0G0N(
+                        </port>
+                    </output>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
+                <edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
+            </edges>
+        </net>
+)V0G0N";
+
+    InferenceEngine::StatusCode st = InferenceEngine::OK;
+    InferenceEngine::ResponseDesc resp;
+
+    InferenceEngine::TBlob<uint8_t> *weights_raw = new InferenceEngine::TBlob<uint8_t>(
+        {InferenceEngine::Precision::U8,
+         {indices.size() * sizeof(ie_fp16)},
+         InferenceEngine::C});
+    weights_raw->allocate();
+    ie_fp16 *inputBlobRawDataFp16 = weights_raw->data().as<ie_fp16 *>();
+
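+    // The model declares the indices Const layer with FP16 precision, so the
+    // integer indices are converted to half floats before serialization.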
+    for (size_t index = 0; index < indices.size(); ++index) {
+        inputBlobRawDataFp16[index] = InferenceEngine::PrecisionUtils::f32tof16(indices[index]);
+    }
+
+    TBlob<uint8_t>::Ptr weights(weights_raw);
+    ASSERT_NO_THROW(readNetwork(UNSQUEEZE_MODEL, weights));
+    createInferRequest(NetworkInitParams().useHWOpt(true));
+
+    ASSERT_TRUE(Infer());
+
+    ref_unsqueeze(_inputMap.begin()->second, _refBlob);
+    auto outBlob = _outputMap.begin()->second;
+
+    const auto& outDims = outBlob->getTensorDesc().getDims();
+    const auto& refDims = _refBlob->getTensorDesc().getDims();
+    ASSERT_EQ(outDims.size(), refDims.size());
+    for (size_t i = 0; i < outDims.size(); i++) {
+        ASSERT_EQ(outDims[i], refDims[i]);
+    }
+
+    const ie_fp16 *out_data = outBlob->buffer();
+    const ie_fp16 *ref_data = _refBlob->buffer();
+    size_t out_size = outBlob->size();
+    size_t ref_size = _refBlob->size();
+    ASSERT_EQ(out_size, ref_size);
+    for (size_t i = 0; i < out_size; i++) {
+        ASSERT_EQ(out_data[i], ref_data[i]);
+    }
+}
+
+static std::vector<InferenceEngine::SizeVector> s_squeezeTensors = {
+        {{3}, {1}, {1, 3}, {3, 1}}
+};
+
+static std::vector<IndicesVector> s_squeezeIndices = {
+        {{0, 2}, {0}, {1}, {0, 1}, {1, 2}}
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/weights_for_convolution_test.h b/inference-engine/tests_deprecated/functional/vpu/common/layers/weights_for_convolution_test.h
new file mode 100644 (file)
index 0000000..33f338e
--- /dev/null
@@ -0,0 +1,309 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+/* This file contains convolution weights and biases */
+/* used to validate some convolution layers.          */
+
+static const float s_3X3X3YOLO_Weights[] = {
+-0.00250244f, -0.000977516f, 0.00021565f,
+-0.00503159f, -0.000224233f, -0.00143242f,
+-0.00053215f, 0.00124073f, 0.00196457f,
+0.000410557f, 0.000440598f, 7.44462e-05f,
+-0.00197983f, -0.000780582f, -0.00404739f,
+0.0008564f, 0.000631332f, -0.0011692f,
+-0.00301743f, -0.00483322f, 0.000276327f,
+-0.0152969f, -0.0164795f, -0.0149231f,
+-0.00108051f, -0.00537491f, -0.00369072f,
+-0.0140305f, 0.0149231f, 0.00108624f,
+0.0154419f, -0.0142822f, -0.000786304f,
+-0.000128746f, -0.000143886f, -0.000134826f,
+-0.0239563f, 0.0216675f, 0.00124359f,
+0.0219574f, -0.0207672f, -0.00124454f,
+0.000842571f, -0.000302792f, 0.000639915f,
+-0.0185547f, 0.0180511f, -0.0f,
+0.0186615f, -0.0176544f, -0.000886917f,
+0.000407219f, -0.00103474f, 0.000302792f,
+0.00556946f, 0.000728607f, -0.00163841f,
+0.00627899f, -0.00646973f, -0.00296211f,
+0.00223351f, -0.00623322f, 0.000979424f,
+0.0151062f, 0.00264931f, -0.00341415f,
+0.0139008f, -0.0133057f, -0.00712204f,
+0.00261116f, -0.0121002f, 0.00231934f,
+0.0176849f, 0.00189114f, -0.00621414f,
+0.0165558f, -0.0126877f, -0.010498f,
+0.00707245f, -0.0133972f, -0.00208664f,
+0.00362587f, -0.0532532f, -0.0323181f,
+0.0341797f, 0.0622559f, -0.0229034f,
+-0.0169067f, 0.0184784f, 0.0205383f,
+0.00543976f, 0.00843811f, 0.0268097f,
+-0.00681305f, 0.0143661f, 0.00478745f,
+-0.0249939f, -0.0166321f, 0.000432968f,
+-0.00460434f, 0.0307159f, 0.00645065f,
+-0.0142288f, -0.0327759f, 0.00739288f,
+0.03479f, 0.00138283f, -0.00891113f,
+0.00037837f, 0.000591755f, 0.000750542f,
+0.000948906f, 0.00240707f, 0.00185871f,
+0.00186348f, -0.00352287f, -0.0087738f,
+0.000166059f, -0.000899315f, 0.000279665f,
+-0.000185966f, -0.00147343f, -0.000518322f,
+-0.000204086f, -0.00592041f, -0.00937653f,
+0.00116348f, 0.000398636f, 0.000302553f,
+0.0013485f, 0.000700951f, -0.000499725f,
+0.000939369f, -0.00666046f, -0.0114746f,
+0.0185699f, 0.00228119f, -0.0204926f,
+0.0302124f, 0.0116348f, -0.04245f,
+0.0152969f, 0.00748062f, -0.0223389f,
+0.00227165f, 7.68304e-05f, -0.00450516f,
+0.00731277f, 0.0022049f, -0.0103912f,
+0.0065918f, 0.00342178f, -0.00461197f,
+-0.0187225f, -0.00162601f, 0.020462f,
+-0.0337524f, -0.0116196f, 0.0464172f,
+-0.0180054f, -0.0096817f, 0.0248413f,
+0.00208282f, 0.00487518f, 0.00212288f,
+-0.00867462f, 0.000249386f, 0.00205803f,
+-0.00169086f, 0.000137448f, -0.000114262f,
+-0.00602341f, -0.00528336f, -0.00414658f,
+-0.0123596f, -0.00777435f, -0.0038662f,
+-0.00175571f, -0.00171375f, -0.00155544f,
+-0.00522232f, 0.00270844f, 0.00578308f,
+-0.0162811f, -0.00572586f, 0.00565338f,
+0.000440121f, -0.000655174f, 0.00285721f,
+-0.0036087f, -0.0116653f, -0.00170708f,
+-0.00322914f, 0.000715256f, 0.00501633f,
+0.00170994f, 0.00658798f, 0.00248909f,
+-0.00793457f, -0.0169373f, -0.00593185f,
+-0.00495148f, -6.85453e-05f, 0.00714493f,
+0.00675583f, 0.0138779f, 0.00994873f,
+-0.00830841f, -0.0177307f, -0.00447464f,
+-0.00577927f, -0.00128078f, 0.00985718f,
+0.00566101f, 0.0116806f, 0.00926971f,
+-0.00440216f, -0.00878906f, -0.00261497f,
+-0.00499344f, 0.000417233f, 0.00434494f,
+0.00323868f, 0.0071106f, 0.00418472f,
+-0.00681305f, -0.0134125f, -0.00323486f,
+-0.00817108f, -0.000277519f, 0.00852203f,
+0.00442123f, 0.0119781f, 0.00703049f,
+-0.00887299f, -0.0138474f, -0.00442123f,
+-0.00746918f, 0.000657082f, 0.00860596f,
+0.00527191f, 0.0134048f, 0.00967407f,
+-0.00146866f, -0.00243187f, 0.000966549f,
+0.000362873f, -0.00582504f, -0.00205803f,
+0.00298882f, -0.00528717f, 0.00069046f,
+0.00163269f, -0.00101471f, 0.00447464f,
+-0.00421906f, -0.0102463f, -0.00252724f,
+-0.000162959f, -0.0051651f, 0.00409698f,
+-7.79033e-05f, -0.00563812f, -0.00165367f,
+-0.00669098f, -0.0121765f, -0.00798035f,
+0.00087738f, -0.00686264f, -0.00112152f,
+-0.000668049f, -0.000166178f, -0.00290298f,
+0.00358009f, -0.0152588f, 0.0091629f,
+-0.00527954f, 0.0133057f, -0.00918579f,
+7.27773e-05f, 0.000730515f, -0.000287533f,
+0.00518417f, -0.0197296f, 0.012085f,
+-0.00677872f, 0.0166168f, -0.0124207f,
+-0.000530243f, 0.00147724f, -0.000135422f,
+0.00628662f, -0.0188141f, 0.0106354f,
+-0.00524139f, 0.0170441f, -0.0139999f,
+-0.00705719f, -0.0066185f, -0.00503922f,
+0.00406647f, 0.00852966f, 0.00355148f,
+0.00169182f, 0.00509262f, 0.00421524f,
+0.000461578f, 0.00206375f, 0.00218391f,
+0.0045433f, 0.00844574f, 0.00688553f,
+0.00397873f, 0.00659943f, 0.00614548f,
+0.00553513f, -0.000369072f, 0.00137329f,
+-0.00575638f, -0.0119324f, -0.00810242f,
+-0.00743103f, -0.0137939f, -0.00948334f,
+0.000232458f, 0.00021708f, -0.000338078f,
+-0.000994682f, -0.00551605f, -0.00306702f,
+-0.00103283f, -0.00345039f, -0.00148773f,
+0.000950336f, -0.000349998f, -0.000334024f,
+-0.00212097f, -0.0107956f, -0.00759888f,
+-0.00125599f, -0.00772858f, -0.00502014f,
+-0.00124741f, 0.000827312f, 0.00101089f,
+0.00250816f, 0.0153275f, 0.0100403f,
+0.00218964f, 0.0110474f, 0.00647354f,
+-0.00239372f, 0.00211525f, 0.000736713f,
+-0.0108566f, 0.00837708f, 0.0024395f,
+-0.00824738f, 0.00359726f, 0.00242615f,
+-0.00511932f, 0.00606537f, 0.000543118f,
+-0.0197144f, 0.0161133f, 0.00367355f,
+-0.0114746f, 0.00694656f, 0.00306702f,
+-0.00362015f, 0.0058403f, -0.000977516f,
+-0.015564f, 0.0150146f, 0.0017519f,
+-0.0104141f, 0.00753784f, 0.0029583f,
+0.0158691f, 0.021759f, 0.0104141f,
+0.0135117f, 0.0301666f, 0.0187378f,
+-0.0337219f, -0.0530701f, -0.0244141f,
+-0.00928497f, -0.0151749f, -0.0131454f,
+0.0140457f, 0.0230713f, 0.0151215f,
+-0.000950336f, -0.00801849f, -0.00119305f,
+-0.00414658f, -0.00984955f, 0.000359297f,
+-0.0251007f, -0.0421753f, -0.0256653f,
+0.0320129f, 0.0508423f, 0.0198822f,
+-0.05896f, -0.00815582f, 0.0673218f,
+-0.114807f, 0.0655518f, 0.0605469f,
+-0.0349731f, 0.0464783f, 0.00839996f,
+0.0762939f, -0.00314903f, -0.0492249f,
+0.0822144f, 0.00416565f, -0.088501f,
+0.0401917f, -0.00609589f, -0.057251f,
+-0.00206947f, -0.0139694f, 0.00429535f,
+0.0149002f, -0.0487671f, 0.0344543f,
+-0.0106583f, -0.0225525f, 0.0254211f,
+0.00247765f, 0.00720215f, 0.00425339f,
+0.00603104f, 0.00869751f, 0.00601959f,
+0.00428391f, 0.00596619f, 0.00349426f,
+-0.00138378f, 0.000154734f, -0.000167966f,
+-0.000342607f, 0.00111198f, 0.000254869f,
+0.000974655f, 0.000676155f, 0.000329256f,
+-0.00211334f, -0.00527573f, -0.00332642f,
+-0.00692749f, -0.00923157f, -0.00734329f,
+-0.00387764f, -0.00774002f, -0.0054512f,
+-0.00473404f, 0.00121403f, -0.00204468f,
+0.000809193f, 0.0106354f, 0.000278711f,
+-0.00061655f, 0.00346375f, -0.00639725f,
+-0.00735855f, 0.00458145f, -0.00299454f,
+0.00415421f, 0.0200806f, -0.00453949f,
+-0.00154114f, 0.000986099f, -0.0130234f,
+-0.00504303f, 0.00569153f, -0.0041008f,
+0.00690842f, 0.0209961f, -0.00362778f,
+-0.00101471f, 0.00149918f, -0.0155487f,
+-0.0043602f, -0.00137138f, 0.000820637f,
+-0.00326538f, -0.00106907f, -0.000898361f,
+-0.00171566f, -0.000668526f, 0.000166059f,
+0.00845337f, -0.00782013f, -0.00277328f,
+0.0034256f, -0.00942993f, 0.000739574f,
+0.00181103f, -0.00159454f, 0.0015707f,
+0.00512695f, -0.00868225f, -0.000290632f,
+0.000939846f, -0.00976562f, 0.00447083f,
+0.000295639f, -0.00110149f, 0.00518036f,
+-0.00455475f, -0.00510025f, -0.00338745f,
+-0.00477219f, -0.00387192f, -0.00410843f,
+-0.00225639f, -0.00343513f, -0.00458145f,
+0.00243187f, 0.00185585f, 0.00435638f,
+-0.00445938f, 0.000256062f, 0.00473022f,
+-0.0122299f, -0.00579071f, 0.00405121f,
+0.00629044f, 0.00343704f, -0.000114262f,
+-0.000589371f, 0.0f, -0.000875473f,
+-0.00933075f, -0.00598145f, 0.00112152f,
+-7.6592e-05f, -0.000171661f, 0.000160933f,
+-0.000795841f, -0.00102901f, -0.00043726f,
+-0.00104713f, -0.00146484f, -0.000791073f,
+-0.000742912f, -0.000822067f, -0.000703812f,
+-0.000944138f, -0.00154114f, -0.00127888f,
+-0.000912666f, -0.00130081f, -0.00104332f,
+0.000778675f, 9.81092e-05f, -0.000152111f,
+-0.00067997f, -0.0013876f, -0.00133324f,
+-0.000562191f, -0.00136185f, -0.00123596f,
+0.00177193f, 0.00536728f, 0.00238037f,
+-0.00230026f, 0.000165701f, 0.00273705f,
+-0.00271988f, -0.00579071f, -0.00352859f,
+0.00636673f, 0.0117569f, 0.0056572f,
+-0.00415802f, -0.000339031f, 0.00543213f,
+-0.00542831f, -0.0121765f, -0.00727081f,
+0.0071373f, 0.0136871f, 0.00923157f,
+-0.00369072f, 0.000901699f, 0.00754547f,
+-0.00901794f, -0.014061f, -0.00839996f,
+0.000832558f, 0.00191879f, 0.00138092f,
+0.0012846f, 0.00118446f, 0.00156593f,
+0.000792503f, 0.00110531f, 0.00097084f,
+-0.000164151f, 0.00163937f, -0.000556469f,
+-0.000222325f, -0.000549793f, -0.000517368f,
+-0.0f, 0.000468493f, -0.00050211f,
+0.00119591f, 0.00381851f, 0.00208473f,
+0.00115776f, 0.00120258f, 0.00157261f,
+0.00206757f, 0.00296402f, 0.00211143f,
+-0.000437021f, -0.00720596f, -0.0f,
+0.00584793f, -0.0044899f, -0.00452805f,
+0.00506592f, 0.00462723f, 9.08971e-05f,
+-6.52075e-05f, -0.0145035f, -0.000214815f,
+0.0143967f, -0.00895691f, -0.00984955f,
+0.0115967f, 0.00962067f, -0.00214005f,
+0.00106716f, -0.0145035f, -0.00335693f,
+0.0163422f, -0.00801849f, -0.0107956f,
+0.0144196f, 0.0109406f, -0.00285339f,
+0.00917816f, 0.000465393f, -0.00139618f,
+0.00831604f, -0.00971222f, -0.00862122f,
+0.00250244f, -0.00680542f, 0.00382805f,
+0.0172272f, 0.0018549f, -0.00459671f,
+0.0136719f, -0.0121536f, -0.0123825f,
+0.00616074f, -0.00939941f, -0.00112247f,
+0.0207062f, 0.00211525f, -0.00362778f,
+0.0111084f, -0.0150452f, -0.0132751f,
+0.00652695f, -0.00926971f, 0.00213814f,
+0.00197792f, 0.000858784f, -0.00191784f,
+-0.000635624f, -0.0033741f, -0.0021286f,
+-0.00162029f, -0.000936031f, 0.00155544f,
+0.00143528f, 0.00105667f, -0.00231934f,
+0.000121534f, -0.00217438f, -0.00153732f,
+-0.00128174f, -0.000323057f, 0.00168228f,
+0.000259399f, 0.000683308f, -0.00152683f,
+-0.000297308f, -0.00178432f, -0.000678062f,
+-0.000831127f, 0.000231266f, 0.00189877f,
+-0.00137615f, -0.0032959f, -0.00331306f,
+-0.00283813f, -0.00614548f, -0.00535583f,
+-0.00385284f, -0.0068512f, -0.0054245f,
+0.00121498f, 0.00237656f, 0.00153351f,
+0.00341225f, 0.00457001f, 0.00387383f,
+0.00180149f, 0.00314903f, 0.00244713f,
+-0.000752926f, -0.000261545f, -0.000555038f,
+0.00270271f, 0.00346375f, 0.00374985f,
+0.000188947f, 0.00220299f, 0.00207901f,
+0.00305367f, 0.00685883f, 0.0052681f,
+0.00457382f, 0.0085144f, 0.00593185f,
+0.00282288f, 0.00548935f, 0.00369263f,
+-0.00505447f, -0.00761795f, -0.00392151f,
+-0.00559616f, -0.00773621f, -0.00693512f,
+-0.00208664f, -0.00208473f, -0.00257874f,
+0.002388f, 0.00120449f, 0.00200272f,
+-0.000309229f, -0.00354767f, -0.00215149f,
+-0.00104427f, -0.00123882f, -0.00113106f,
+-0.000901699f, -0.00640488f, 0.00945282f,
+-0.00102615f, -0.00719833f, 0.00792694f,
+-0.000609398f, -0.00264359f, 0.00243378f,
+-0.000339985f, -0.0167694f, 0.0177917f,
+0.000838757f, -0.0162048f, 0.0139465f,
+0.000993729f, -0.00366783f, 0.00271606f,
+-0.000293732f, -0.0150757f, 0.0145035f,
+0.00140285f, -0.0150833f, 0.0115967f,
+0.00113392f, -0.00486374f, 0.00269127f,
+-0.0015564f, -0.00456238f, -0.00518799f,
+-0.00181675f, -0.0103149f, -0.00588989f,
+-0.0015049f, -0.00801086f, -0.00352287f,
+0.00175667f, 0.00298882f, -0.00199699f,
+0.00237083f, -0.000623226f, -0.0014534f,
+0.000842571f, -0.00120735f, -0.00094223f,
+0.000255108f, 0.00266457f, -0.00132465f,
+-0.000268459f, -0.000222921f, -0.00132084f,
+-6.49095e-05f, -0.000430107f, -0.0f,
+0.00163937f, 0.00312805f, -6.66976e-05f,
+-0.00598145f, -0.00869751f, -0.00292397f,
+0.00487137f, 0.00662994f, 0.00234795f,
+0.00676727f, 0.0102844f, 0.00386429f,
+-0.0143356f, -0.0211945f, -0.00869751f,
+0.00774384f, 0.0110245f, 0.00372887f,
+0.00640488f, 0.0107346f, 0.00461578f,
+-0.0146637f, -0.0205078f, -0.00789642f,
+0.00447083f, 0.00775909f, 0.00225449f,
+0.00561523f, 0.00815582f, 0.00632858f,
+0.00182724f, 0.004673f, 0.00122547f,
+0.00150108f, -0.000480413f, -0.00105476f,
+-0.00641632f, 0.00205421f, -0.00147152f,
+-0.00133133f, 0.000114024f, 0.00455475f,
+0.00068903f, 0.00408554f, 0.00524521f,
+-0.00598145f, 0.00126171f, -0.0152893f,
+0.00983429f, 0.0134735f, 0.0110092f,
+-0.00581741f, 0.00759888f, -0.00773621f,
+};
+
+static const float s_3X3X3YOLO_Biases[] = {
+-1.56836f, -0.00650787f, 0.999512f, -10.2969f,
+ 2.11133f, -0.0234985f, 2.03125f, -1.38965f,
+ 0.73877f, 1.79688f, 1.08398f, 1.55176f,
+ 1.03027f, 0.335693f, -0.293701f, -4.82812f,
+ 1.10449f, 0.212036f, 1.66016f, 1.86621f,
+ 0.256836f, 0.447266f, -7.18359f, 0.549805f,
+ -1.93652f, -0.0514526f, 0.663574f, 1.09668f,
+ 1.14941f, -1.3877f, 1.93359f, -8.9375f
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/myriad_get_output_tests.cpp b/inference-engine/tests_deprecated/functional/vpu/common/myriad_get_output_tests.cpp
new file mode 100644 (file)
index 0000000..90129eb
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_get_output_tests.hpp"
+
+INSTANTIATE_TEST_CASE_P(Test_params_pool, myriadGetOutput_nightly,
+        testing::Values(
+                std::make_tuple(std::make_tuple(&full_model, &poolModel), "pool1_3x3_s2"),
+                std::make_tuple(std::make_tuple(&full_model, &convModel), "conv1_7x7_s2"),
+                std::make_tuple(std::make_tuple(&full_model, &reluConvModel), "conv1_relu_7x7"),
+                std::make_tuple(std::make_tuple(&full_model, &fcModel), "loss3_classifier"),
+                std::make_tuple(std::make_tuple(&full_model, &reluFcModel), "ReluFC"),
+                std::make_tuple(std::make_tuple(&concatModel, &concatModelConv), "conv1_2")
+        ),
+    getTestCaseName
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/myriad_get_output_tests.hpp b/inference-engine/tests_deprecated/functional/vpu/common/myriad_get_output_tests.hpp
new file mode 100644 (file)
index 0000000..f14a3b9
--- /dev/null
@@ -0,0 +1,104 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <myriad_layers_tests.hpp>
+#include "myriad_xml_tests.hpp"
+
+using namespace InferenceEngine;
+using GetOutputTestsParams = std::tuple<std::tuple<std::string*, std::string*>, std::string>;
+
+class myriadGetOutput_nightly :
+        public myriadLayersTests_nightly,
+        public testing::WithParamInterface<GetOutputTestsParams> {
+public:
+    std::string name_model_full;
+    std::string name_model_crop;
+    std::string name_output;
+};
+
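+// Runs a cropped sub-model first, then the full model with the same layer
+// registered as an extra output via addOutput(), and checks both outputs
+// match exactly.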
+TEST_P(myriadGetOutput_nightly, AddOutput) {
+    StatusCode st = OK;
+
+    name_model_full = (*(std::get<0>(std::get<0>(GetParam()))));
+    name_model_crop = (*(std::get<1>(std::get<0>(GetParam()))));
+    name_output = std::get<1>(GetParam());
+
+    TBlob<uint8_t>::Ptr weights(GenWeights((32786944 + 2000) / sizeof(ie_fp16), 0, 1));
+
+    InferenceEngine::Core ie;
+    auto crop_network = ie.ReadNetwork(name_model_crop, weights);
+
+    InferenceEngine::InputsDataMap networkInputs;
+    ASSERT_NO_THROW(networkInputs = crop_network.getInputsInfo());
+    InferenceEngine::OutputsDataMap networkOutputs;
+    ASSERT_NO_THROW(networkOutputs = crop_network.getOutputsInfo());
+
+    networkInputs.begin()->second->setPrecision(InferenceEngine::Precision::FP16);
+    networkOutputs.begin()->second->setPrecision(InferenceEngine::Precision::FP16);
+
+    InferenceEngine::Blob::Ptr inputBlob;
+
+    InferenceEngine::IExecutableNetwork::Ptr exeNetwork;
+    std::map<std::string, std::string> networkConfig;
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(exeNetwork, crop_network, networkConfig, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(exeNetwork, nullptr) << _resp.msg;
+
+    InferenceEngine::IInferRequest::Ptr inferRequest;
+    ASSERT_NO_THROW(st = exeNetwork->CreateInferRequest(inferRequest, &_resp));
+
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = inferRequest->GetBlob(networkInputs.begin()->first.c_str(), inputBlob, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    GenRandomData(inputBlob);
+
+    InferenceEngine::Blob::Ptr output_crop;
+    ASSERT_NO_THROW(st = inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NO_THROW(st = inferRequest->GetBlob(networkOutputs.begin()->first.c_str(), output_crop, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    /* Full network inference */
+
+    auto full_network = ie.ReadNetwork(name_model_full, weights);
+
+    full_network.addOutput(name_output, 0);
+
+    InferenceEngine::InputsDataMap networkInputsFull;
+    networkInputsFull = full_network.getInputsInfo();
+    InferenceEngine::OutputsDataMap networkOutputsFull;
+    networkOutputsFull = full_network.getOutputsInfo();
+
+    networkInputsFull.begin()->second->setPrecision(InferenceEngine::Precision::FP16);
+    networkOutputsFull.begin()->second->setPrecision(InferenceEngine::Precision::FP16);
+    (++networkOutputsFull.begin())->second->setPrecision(InferenceEngine::Precision::FP16);
+
+    InferenceEngine::IExecutableNetwork::Ptr exeNetworkFull;
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(exeNetworkFull, full_network, networkConfig, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    InferenceEngine::IInferRequest::Ptr inferRequestFull;
+    ASSERT_NO_THROW(st = exeNetworkFull->CreateInferRequest(inferRequestFull, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = inferRequestFull->SetBlob("data", inputBlob, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    InferenceEngine::Blob::Ptr output_full;
+    ASSERT_NO_THROW(st = inferRequestFull->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NO_THROW(st = inferRequestFull->GetBlob(name_output.c_str(), output_full, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    CompareCommonAbsolute(output_full, output_crop, 0.0f);
+}
+
+std::string getTestCaseName(const testing::TestParamInfo<GetOutputTestsParams>& param) {
+    return  "addOutput_" + std::get<1>(param.param);
+}
+
+class myriadCheckOutput_nightly :
+        public myriadLayersTests_nightly {
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/myriad_get_perf_count_tests.cpp b/inference-engine/tests_deprecated/functional/vpu/common/myriad_get_perf_count_tests.cpp
new file mode 100644 (file)
index 0000000..87a02fb
--- /dev/null
@@ -0,0 +1,65 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+#include <limits>
+#include <ngraph_functions/subgraph_builders.hpp>
+
+#include "ngraph_functions/builders.hpp"
+
+using namespace InferenceEngine;
+
+#if defined(_WIN32) || defined(__APPLE__) || defined(ANDROID)
+typedef std::chrono::time_point<std::chrono::steady_clock> time_point;
+#else
+typedef std::chrono::time_point<std::chrono::system_clock> time_point;
+#endif
+typedef std::chrono::high_resolution_clock Time;
+typedef std::chrono::duration<double, std::ratio<1, 1000>> ms;
+typedef std::chrono::duration<float> fsec;
+
+#define TIMEDIFF(start, end) ((std::chrono::duration_cast<ms>((end) - (start))).count())
+
+using myriadGetPerformanceTests_nightly = myriadLayersTests_nightly;
+
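+// Sanity-checks the profiling data: the sum of per-stage device times must be
+// positive and must not exceed the host-measured wall-clock inference time.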
+TEST_F(myriadGetPerformanceTests_nightly, CorrectTimings) {
+    std::shared_ptr<ngraph::Function> fnPtr = ngraph::builder::subgraph::makeSplitConvConcat();
+
+    ASSERT_NO_THROW(_cnnNetwork = CNNNetwork(fnPtr));
+
+    StatusCode st;
+
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, _cnnNetwork,
+    {
+        {
+            CONFIG_KEY(PERF_COUNT),
+            CONFIG_VALUE(YES)
+        },
+        {
+            CONFIG_KEY(LOG_LEVEL),
+            CONFIG_VALUE(LOG_WARNING)
+        }
+    }, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    time_point start = Time::now();
+    ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    time_point end = Time::now();
+    double inferTime_mSec = (std::chrono::duration_cast<ms>(end - start)).count();
+
+    std::map<std::string, InferenceEngineProfileInfo> perfMap;
+    ASSERT_NO_THROW(st = _inferRequest->GetPerformanceCounts(perfMap, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    long long stagesTime_uSec = 0;
+    for (const auto &i : perfMap) {
+        stagesTime_uSec += i.second.realTime_uSec;
+    }
+    double stagesTime_mSec = stagesTime_uSec / 1000.0;
+    ASSERT_TRUE(stagesTime_mSec > std::numeric_limits<double>::epsilon() && stagesTime_mSec < inferTime_mSec);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_conv_tests.hpp b/inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_conv_tests.hpp
new file mode 100644 (file)
index 0000000..9cd50e7
--- /dev/null
@@ -0,0 +1,1214 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "myriad_hw_tests_base.hpp"
+
+using HWConvParams = std::tuple<DimsInput, kernel, stride, pad, out_channels, group, dilation_factor>;
+
+class MyriadX_HW_Convolution_Tests_nightly
+        : public MyriadX_HW_Tests_nightly,
+          public testing::WithParamInterface<HWConvParams> {
+public:
+    tensor_test_params in_dims;
+    param_size kernel;
+    param_size stride;
+    param_size pad;
+    size_t out_c;
+    size_t group;
+    param_size dilation_factor;
+
+    tensor_test_params out_dims;
+
+    IN_OUT_desc in_tensor, out_tensor;
+
+    size_t numWeights;
+    size_t numBiases;
+
+    void SetUp() override {
+        ASSERT_NO_FATAL_FAILURE(MyriadX_HW_Tests_nightly::SetUp());
+
+        in_dims = std::get<0>(GetParam());
+        kernel = std::get<1>(GetParam());
+        stride = std::get<2>(GetParam());
+        pad = std::get<3>(GetParam());
+        out_c = std::get<4>(GetParam());
+        group = std::get<5>(GetParam());
+        dilation_factor = std::get<6>(GetParam());
+
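+        // Standard convolution output size with dilation:
+        // out = (in + 2 * pad - dilation * (kernel - 1) - 1) / stride + 1.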
+        size_t out_w = (in_dims.w + 2 * pad.x - dilation_factor.x * (kernel.x - 1) - 1 + stride.x) / stride.x;
+        size_t out_h = (in_dims.h + 2 * pad.y - dilation_factor.y * (kernel.y - 1) - 1 + stride.y) / stride.y;
+
+        out_dims = {1, out_c, out_h, out_w};
+
+        in_tensor.push_back({in_dims.n, in_dims.c, in_dims.h, in_dims.w});
+        out_tensor.push_back({out_dims.n, out_dims.c, out_dims.h, out_dims.w});
+
+        numWeights = kernel.x * kernel.y * (in_dims.c / group) * out_dims.c;
+        numBiases = out_dims.c;
+
+        _config[VPU_CONFIG_KEY(HW_DILATION)] = CONFIG_VALUE(YES);
+    }
+
+    void AddInitialCopyLayer() {
+        _testNet.addLayer(LayerInitParams("Copy").in({in_tensor}).out({in_tensor}),
+                 ref_copy_wrap);
+    }
+
+    void AddConvolutionLayer() {
+        std::map<std::string, std::string> convParams = {
+                  {"kernel-x", std::to_string(kernel.x)}
+                , {"kernel-y", std::to_string(kernel.y)}
+                , {"stride-x", std::to_string(stride.x)}
+                , {"stride-y", std::to_string(stride.y)}
+                , {"pad-x", std::to_string(pad.x)}
+                , {"pad-y", std::to_string(pad.y)}
+                , {"output", std::to_string(out_c)}
+                , {"group", std::to_string(group)}
+                , {"dilation-x", std::to_string(dilation_factor.x)}
+                , {"dilation-y", std::to_string(dilation_factor.y)}
+        };
+        _testNet.addLayer(LayerInitParams("Convolution")
+                 .params(convParams)
+                 .weights(numWeights).fillWeights(defaultWeightsRange)
+                 .biases(numBiases).fillBiases(defaultWeightsRange)
+                 .in(in_tensor)
+                 .out(out_tensor),
+                 ref_convolution_wrap);
+    }
+
+    void AddReLULayer(float negativeSlope = 0.0) {
+        ParamsStruct reluParams = {
+            {"negative_slope", std::to_string(negativeSlope)}
+        };
+        _testNet.addLayer(LayerInitParams("ReLU")
+                 .params(reluParams)
+                 .in(out_tensor)
+                 .out(out_tensor),
+                 ref_ReLU_wrap);
+    }
+
+    void AddClampLayer(float min = 0.0, float max = 6.0) {
+        ParamsStruct clampParams = {
+                {"max", std::to_string(max)}
+              , {"min", std::to_string(min)}
+        };
+        _testNet.addLayer(LayerInitParams("Clamp")
+                 .params(clampParams)
+                 .in(out_tensor)
+                 .out(out_tensor),
+                 ref_Clamp_wrap);
+    }
+};
+
+TEST_P(MyriadX_HW_Convolution_Tests_nightly, Single) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    AddInitialCopyLayer();
+    AddConvolutionLayer();
+
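+    // The tolerance presumably scales with the number of FP16 multiply-accumulates
+    // per output element: (input channels per group) * kernel area.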
+    float maxerr = 0.002 * (in_dims.c / group) * kernel.x * kernel.y;
+    CompareWithSW(maxerr);
+}
+
+TEST_P(MyriadX_HW_Convolution_Tests_nightly, WithReLU) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    AddInitialCopyLayer();
+    AddConvolutionLayer();
+    AddReLULayer(0.0f);
+
+    float maxerr = 0.002 * (in_dims.c / group) * kernel.x * kernel.y;
+    CompareWithSW(maxerr);
+}
+
+TEST_P(MyriadX_HW_Convolution_Tests_nightly, WithLeakyReLU) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    AddConvolutionLayer();
+    AddReLULayer(0.1f);
+
+    float maxerr = 0.1 * (in_dims.c / group) * kernel.x * kernel.y;
+    CompareWithSW(maxerr);
+}
+
+TEST_P(MyriadX_HW_Convolution_Tests_nightly, WithClamp) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    AddConvolutionLayer();
+    AddClampLayer();
+
+    float maxerr = 0.1 * (in_dims.c / group) * kernel.x * kernel.y;
+    CompareWithSW(maxerr);
+}
+
+TEST_P(MyriadX_HW_Convolution_Tests_nightly, MultipleInfer) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    AddConvolutionLayer();
+
+    CompareWithItself(100);
+}
+
+INSTANTIATE_TEST_CASE_P(MaskRcnn101_DILATION, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 256, 50, 50),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 256, 100, 171))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<out_channels>(256)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 2, 2))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(kernel_7x7_DILATION, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 32, 90, 90))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 7, 7))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                                , ::testing::Values<out_channels>(32)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 4, 4))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(Unequal_hw_pad_dilationfactor_DILATION, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 32, 128, 128))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3),
+                                                            MAKE_STRUCT(param_size, 3, 1))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 2, 0))
+                                , ::testing::Values<out_channels>(64)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 2, 2),
+                                                                     MAKE_STRUCT(param_size, 2, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(Strides_DILATION, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 32, 64, 64))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2),
+                                                            MAKE_STRUCT(param_size, 4, 4))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 4, 4))
+                                , ::testing::Values<out_channels>(32)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 2, 2),
+                                                                     MAKE_STRUCT(param_size, 4, 4))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_1x1s1p0_extra1, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 64, 180, 320))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                                , ::testing::Values<out_channels>(64)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_1x1s1p0_extra2, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 512, 45, 80))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                                , ::testing::Values<out_channels>(18)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_1x1s1p0_extra3, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 256, 45, 80))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                                , ::testing::Values<out_channels>(294)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_1x1s1p0_extra4, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 512, 45, 80))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                                , ::testing::Values<out_channels>(392)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_1x1s2p0_extra1, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 64, 180, 320))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                                , ::testing::Values<out_channels>(128)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_1x1s2p0_extra2, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 128, 90, 160))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                                , ::testing::Values<out_channels>(256)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_1x1s2p0_extra3, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 128, 90, 160))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                                , ::testing::Values<out_channels>(128)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_3x3s2p1_extra1, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 64, 180, 320))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<out_channels>(128)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_3x3s2p1_extra2, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 128, 90, 160))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<out_channels>(256)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_3x3s2p1_extra3, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 256, 45, 80))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<out_channels>(512)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_3x3s2p1_extra4, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 32, 180, 320))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<out_channels>(64)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_1x1s1p0, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 64, 56, 56))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                                , ::testing::Values<out_channels>(64)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_1x1s1p0_resnet50, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 2048, 7, 7))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                                , ::testing::Values<out_channels>(512)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+// This case adds an extra CopyMakeBorder stage: a 1x1 kernel with non-zero padding
+// requires the input border to be filled explicitly.
+INSTANTIATE_TEST_CASE_P(conv_1x1s1p1, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 512, 13, 13))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<out_channels>(1000)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_1x1s2p0, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 256, 56, 56),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 512, 28, 28),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 1024, 14, 14))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                                , ::testing::Values<out_channels>(128, 256, 512, 1024)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_3x3s1p1, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 64, 56, 56))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<out_channels>(192)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_3x3s1p1_yolo_tiny_v1_conv1, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 3, 448, 448))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<out_channels>(16)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_3x3s1p1_yolo_tiny_v1_conv7, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 512, 7, 7))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<out_channels>(1024)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_3x3s1p1_yolo_tiny_v1_conv8, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 1024, 7, 7))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<out_channels>(256)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_3x3s1p1_vgg, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 64, 224, 224))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<out_channels>(64)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_7x7s2p3, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 3, 224, 224))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 7, 7))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<out_channels>(64)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+// The following cases cover asymmetric (non-square) convolution kernels.
+
+INSTANTIATE_TEST_CASE_P(conv_3x1s1_LPR, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 32, 22, 92))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 1))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 0))
+                                , ::testing::Values<out_channels>(32)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_1x3s1_LPR, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 32, 22, 92))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 1))
+                                , ::testing::Values<out_channels>(32)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_1x5s1_LPR, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 256, 5, 88))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 5))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                                , ::testing::Values<out_channels>(128)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_13x1s1_LPR, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 128, 1, 88))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 13, 1))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 6, 0))
+                                , ::testing::Values<out_channels>(71)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_5x1s1_LPR, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 128, 1, 28))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 5, 1))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                                , ::testing::Values<out_channels>(128)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_4x4s2p1, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 3, 256, 416))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 4, 4))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<out_channels>(16)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_5x5s2p1, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 3, 256, 416))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 5, 5))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<out_channels>(16)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_5x5s2p2, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 3, 256, 416))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 5, 5))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<out_channels>(16)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_3x3s1p1_group1, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 32, 150, 150))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<out_channels>(32)
+                                , ::testing::Values<group>(32)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_3x3s2p1_group1, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 64, 150, 150))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<out_channels>(64)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_3x3s2p1_group2, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 128, 75, 75))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<out_channels>(128)
+                                , ::testing::Values<group>(128)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_3x3s2p1_group3, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 256, 38, 38))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<out_channels>(256)
+                                , ::testing::Values<group>(256)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_3x3s1p1_pva_pvd, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 6, 208, 368))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<out_channels>(16)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_1x1s2p0_pva_pvd, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 32, 128, 208))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                                , ::testing::Values<out_channels>(48)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_3x3s1p1_ssd, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 128, 75, 75))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<out_channels>(256)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_unequal_hw_pad, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 32, 128, 128))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 5, 5),
+                                                            MAKE_STRUCT(param_size, 5, 1))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 2, 0))
+                                , ::testing::Values<out_channels>(64)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(conv_3x3s3p1_resnet34, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 512, 75, 75))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<out_channels>(24)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
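+// A fully-connected layer lowered to convolution: 7x7 kernel with stride 7 and no padding.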
+INSTANTIATE_TEST_CASE_P(fc_to_conv_case, MyriadX_HW_Convolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 256, 56, 350))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 7, 7))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 7, 7))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                                , ::testing::Values<out_channels>(1024)
+                                , ::testing::Values<group>(1)
+                                , ::testing::Values<dilation_factor>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
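+// Parameters for fused Convolution + Pooling tests: the first kernel/stride/pad triple
+// describes the convolution, the second one the pooling stage merged with it.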
+using HWConvPoolMerge = std::tuple<DimsInput,
+                                   kernel, stride, pad, out_channels,
+                                   kernel, stride, pad>;
+
+class MyriadX_HW_ConvPoolMerged_Tests_nightly
+        : public MyriadX_HW_Tests_nightly,
+          public testing::WithParamInterface<HWConvPoolMerge> {
+public:
+    tensor_test_params in_dims;
+    param_size conv_kernel;
+    param_size conv_stride;
+    param_size conv_pad;
+    size_t conv_out_c;
+    param_size pool_kernel;
+    param_size pool_stride;
+    param_size pool_pad;
+
+    tensor_test_params conv_out_dims;
+
+    size_t conv_num_weights;
+    size_t conv_num_biases;
+
+    tensor_test_params pool_out_dims;
+
+    IN_OUT_desc in_tensor, conv_out_tensor, pool_out_tensor;
+
+    void SetUp() override {
+        ASSERT_NO_FATAL_FAILURE(MyriadX_HW_Tests_nightly::SetUp());
+
+        in_dims = std::get<0>(GetParam());
+        conv_kernel = std::get<1>(GetParam());
+        conv_stride = std::get<2>(GetParam());
+        conv_pad = std::get<3>(GetParam());
+        conv_out_c = std::get<4>(GetParam());
+        pool_kernel = std::get<5>(GetParam());
+        pool_stride = std::get<6>(GetParam());
+        pool_pad = std::get<7>(GetParam());
+
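+        // Convolution output size: floor((in + 2 * pad - kernel) / stride) + 1,
+        // written below with "+ stride" to fold in the trailing +1.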
+        size_t conv_out_w = (in_dims.w + 2 * conv_pad.x - conv_kernel.x + conv_stride.x) / conv_stride.x;
+        size_t conv_out_h = (in_dims.h + 2 * conv_pad.y - conv_kernel.y + conv_stride.y) / conv_stride.y;
+        conv_out_dims = {1, conv_out_c, conv_out_h, conv_out_w};
+
+        conv_num_weights = conv_kernel.x * conv_kernel.y * in_dims.c * conv_out_dims.c;
+        conv_num_biases = conv_out_dims.c;
+
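+        // Pooling output size uses ceil rounding: ceil((in + 2 * pad - kernel) / stride) + 1.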
+        size_t pool_out_w = std::ceil((conv_out_dims.w + 2.0 * pool_pad.x - pool_kernel.x) / pool_stride.x + 1);
+        size_t pool_out_h = std::ceil((conv_out_dims.h + 2.0 * pool_pad.y - pool_kernel.y) / pool_stride.y + 1);
+        pool_out_dims = {1, conv_out_dims.c, pool_out_h, pool_out_w};
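+        // Example (yolo_conv1): a 1x3x448x448 input with a 3x3/s1/p1 convolution gives
+        // 1x16x448x448, and the 2x2/s2 pool then yields 1x16x224x224.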
+
+        in_tensor.push_back({in_dims.n, in_dims.c, in_dims.h, in_dims.w});
+        conv_out_tensor.push_back({conv_out_dims.n, conv_out_dims.c, conv_out_dims.h, conv_out_dims.w});
+        pool_out_tensor.push_back({pool_out_dims.n, pool_out_dims.c, pool_out_dims.h, pool_out_dims.w});
+    }
+
+    void AddConvLayer() {
+        ParamsStruct conv_params = {
+                  {"kernel-x", std::to_string(conv_kernel.x)}
+                , {"kernel-y", std::to_string(conv_kernel.y)}
+                , {"stride-x", std::to_string(conv_stride.x)}
+                , {"stride-y", std::to_string(conv_stride.y)}
+                , {"pad-x", std::to_string(conv_pad.x)}
+                , {"pad-y", std::to_string(conv_pad.y)}
+                , {"output", std::to_string(conv_out_dims.c)}
+                , {"group", "1"}
+        };
+        _testNet.addLayer(LayerInitParams("Convolution")
+                 .params(conv_params)
+                 .weights(conv_num_weights).fillWeights(defaultWeightsRange)
+                 .biases(conv_num_biases).fillBiases(defaultWeightsRange)
+                 .in(in_tensor)
+                 .out(conv_out_tensor),
+                 ref_convolution_wrap);
+    }
+
+    void AddReLULayer(float negativeSlope) {
+        ParamsStruct relu_params = {
+            {"negative_slope", std::to_string(negativeSlope)}
+        };
+        _testNet.addLayer(LayerInitParams("ReLU")
+                 .params(relu_params)
+                 .in(conv_out_tensor)
+                 .out(conv_out_tensor),
+                 ref_ReLU_wrap);
+    }
+
+    void AddPoolLayer() {
+        ParamsStruct pool_params = {
+                  {"kernel-x", std::to_string(pool_kernel.x)}
+                , {"kernel-y", std::to_string(pool_kernel.y)}
+                , {"stride-x", std::to_string(pool_stride.x)}
+                , {"stride-y", std::to_string(pool_stride.y)}
+                , {"pad-x", std::to_string(pool_pad.x)}
+                , {"pad-y", std::to_string(pool_pad.y)}
+                , {"pool-method", "max"}
+        };
+        _testNet.addLayer(LayerInitParams("Pooling")
+                 .params(pool_params)
+                 .in(conv_out_tensor)
+                 .out(pool_out_tensor),
+                 ref_pooling_wrap);
+    }
+};
+
+TEST_P(MyriadX_HW_ConvPoolMerged_Tests_nightly, WithReLU) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    AddConvLayer();
+    AddReLULayer(0.0f);
+    AddPoolLayer();
+
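+    // The tolerance scales with the number of accumulated MACs per output element
+    // (C_in * Kx * Ky).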
+    auto maxerr = 0.0009 * in_dims.c * conv_kernel.x * conv_kernel.y;
+    CompareWithSW(maxerr);
+}
+
+TEST_P(MyriadX_HW_ConvPoolMerged_Tests_nightly, WithLeakyReLU) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    AddConvLayer();
+    AddReLULayer(0.1f);
+    AddPoolLayer();
+
+    auto maxerr = 0.01 * in_dims.c * conv_kernel.x * conv_kernel.y;
+    CompareWithSW(maxerr);
+}
+
+INSTANTIATE_TEST_CASE_P(yolo_conv1, MyriadX_HW_ConvPoolMerged_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 3, 448, 448)),
+                                ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3)),
+                                ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1)),
+                                ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1)),
+                                ::testing::Values<out_channels>(16),
+                                ::testing::Values<kernel>(MAKE_STRUCT(param_size, 2, 2)),
+                                ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2)),
+                                ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(yolov2_tf_conv, MyriadX_HW_ConvPoolMerged_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 32, 304, 304),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 64, 152, 152)),
+                                ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3)),
+                                ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1)),
+                                ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1)),
+                                ::testing::Values<out_channels>(64, 128),
+                                ::testing::Values<kernel>(MAKE_STRUCT(param_size, 2, 2)),
+                                ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2)),
+                                ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(yolo_conv2, MyriadX_HW_ConvPoolMerged_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 16, 224, 224)),
+                                ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3)),
+                                ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1)),
+                                ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1)),
+                                ::testing::Values<out_channels>(32),
+                                ::testing::Values<kernel>(MAKE_STRUCT(param_size, 2, 2)),
+                                ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2)),
+                                ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(yolo_conv4, MyriadX_HW_ConvPoolMerged_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 64, 56, 56)),
+                                ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3)),
+                                ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1)),
+                                ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1)),
+                                ::testing::Values<out_channels>(128),
+                                ::testing::Values<kernel>(MAKE_STRUCT(param_size, 2, 2)),
+                                ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2)),
+                                ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(ssd_case1, MyriadX_HW_ConvPoolMerged_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 64, 98, 150)),
+                                ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3)),
+                                ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1)),
+                                ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1)),
+                                ::testing::Values<out_channels>(64),
+                                ::testing::Values<kernel>(MAKE_STRUCT(param_size, 2, 2)),
+                                ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2)),
+                                ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(vgg16_case1, MyriadX_HW_ConvPoolMerged_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 512, 28, 28)),
+                                ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3)),
+                                ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1)),
+                                ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1)),
+                                ::testing::Values<out_channels>(512),
+                                ::testing::Values<kernel>(MAKE_STRUCT(param_size, 2, 2)),
+                                ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2)),
+                                ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                        )
+);
+
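+// tfPad (paddings4) carries explicit left/right/top/bottom pads, i.e. the asymmetric
+// padding that TensorFlow-converted models can produce.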
+using ConvTFParams = std::tuple<DimsInput, DimsOutput, kernel, stride, tfPad, group>;
+
+class MyriadX_HW_ConvTF_Tests_nightly :
+        public MyriadX_HW_Tests_nightly,
+        public testing::WithParamInterface<ConvTFParams> {
+public:
+    tensor_test_params inDims;
+    tensor_test_params outDims;
+    param_size kernel;
+    param_size stride;
+    paddings4 pad;
+    int group;
+
+    void SetUp() override {
+        ASSERT_NO_FATAL_FAILURE(MyriadX_HW_Tests_nightly::SetUp());
+
+        inDims = std::get<0>(GetParam());
+        outDims = std::get<1>(GetParam());
+        kernel = std::get<2>(GetParam());
+        stride = std::get<3>(GetParam());
+        pad = std::get<4>(GetParam());
+        group = std::get<5>(GetParam());
+    }
+
+    void AddConvolutionLayer() {
+        std::map<std::string, std::string> convParams = {
+            {"kernel-x", std::to_string(kernel.x)},
+            {"kernel-y", std::to_string(kernel.y)},
+            {"stride-x", std::to_string(stride.x)},
+            {"stride-y", std::to_string(stride.y)},
+            {"pad-x", std::to_string(pad.left)},
+            {"pad-r", std::to_string(pad.right)},
+            {"pad-y", std::to_string(pad.top)},
+            {"pad-b", std::to_string(pad.bottom)},
+            {"dilation-x", "1"},
+            {"dilation-y", "1"},
+            {"group", std::to_string(group)},
+            {"output", std::to_string(outDims.c)}
+        };
+
+        _testNet.addLayer(LayerInitParams("Convolution")
+                 .params(convParams)
+                 .weights(kernel.x * kernel.y * (inDims.c / group) * outDims.c)
+                 .biases(outDims.c)
+                 .fillWeights(defaultWeightsRange)
+                 .fillBiases(defaultWeightsRange)
+                 .in({{inDims.n, inDims.c, inDims.h, inDims.w}})
+                 .out({{outDims.n, outDims.c, outDims.h, outDims.w}}),
+            ref_convolution_wrap);
+    }
+};
+
+TEST_P(MyriadX_HW_ConvTF_Tests_nightly, Single) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    AddConvolutionLayer();
+
+    float maxerr = 0.002 * (inDims.c / group) * kernel.x * kernel.y;
+    CompareWithSW(maxerr);
+}
+
+INSTANTIATE_TEST_CASE_P(tf, MyriadX_HW_ConvTF_Tests_nightly,
+    ::testing::Values(
+        std::make_tuple(
+            MAKE_STRUCT(tensor_test_params, 1, 3, 224, 224),    // input
+            MAKE_STRUCT(tensor_test_params, 1, 24, 112, 112),   // output
+            MAKE_STRUCT(param_size, 7, 7),                      // kernel
+            MAKE_STRUCT(param_size, 2, 2),                      // stride
+            MAKE_STRUCT(paddings4, 2, 2, 3, 3),                 // pad
+            3                                                   // group
+        ),
+        std::make_tuple(
+            MAKE_STRUCT(tensor_test_params, 1, 64, 56, 56),     // input
+            MAKE_STRUCT(tensor_test_params, 1, 192, 56, 56),    // output
+            MAKE_STRUCT(param_size, 3, 3),                      // kernel
+            MAKE_STRUCT(param_size, 1, 1),                      // stride
+            MAKE_STRUCT(paddings4, 1, 1, 1, 1),                 // pad
+            1                                                   // group
+        ),
+        std::make_tuple(
+            MAKE_STRUCT(tensor_test_params, 1, 128, 3, 3),      // input
+            MAKE_STRUCT(tensor_test_params, 1, 128, 2, 2),      // output
+            MAKE_STRUCT(param_size, 3, 3),                      // kernel
+            MAKE_STRUCT(param_size, 2, 2),                      // stride
+            MAKE_STRUCT(paddings4, 1, 1, 1, 1),                 // pad
+            128                                                 // group
+        ),
+        std::make_tuple(
+            MAKE_STRUCT(tensor_test_params, 1, 256, 2, 2),      // input
+            MAKE_STRUCT(tensor_test_params, 1, 24, 2, 2),       // output
+            MAKE_STRUCT(param_size, 3, 3),                      // kernel
+            MAKE_STRUCT(param_size, 1, 1),                      // stride
+            MAKE_STRUCT(paddings4, 1, 1, 1, 1),                 // pad
+            1                                                   // group
+        ),
+        std::make_tuple(
+            MAKE_STRUCT(tensor_test_params, 1, 64, 2, 2),       // input
+            MAKE_STRUCT(tensor_test_params, 1, 64, 1, 1),       // output
+            MAKE_STRUCT(param_size, 3, 3),                      // kernel
+            MAKE_STRUCT(param_size, 2, 2),                      // stride
+            MAKE_STRUCT(paddings4, 0, 0, 1, 1),                 // pad
+            64                                                  // group
+        ),
+        std::make_tuple(
+            MAKE_STRUCT(tensor_test_params, 1, 128, 1, 1),      // input
+            MAKE_STRUCT(tensor_test_params, 1, 24, 1, 1),       // output
+            MAKE_STRUCT(param_size, 3, 3),                      // kernel
+            MAKE_STRUCT(param_size, 1, 1),                      // stride
+            MAKE_STRUCT(paddings4, 1, 1, 1, 1),                 // pad
+            1                                                   // group
+        ),
+        std::make_tuple(
+            MAKE_STRUCT(tensor_test_params, 1, 32, 128, 128),   // input
+            MAKE_STRUCT(tensor_test_params, 1, 64, 128, 128),   // output
+            MAKE_STRUCT(param_size, 2, 2),                      // kernel
+            MAKE_STRUCT(param_size, 1, 1),                      // stride
+            MAKE_STRUCT(paddings4, 0, 0, 1, 1),                 // pad
+            1                                                   // group
+        ),
+        std::make_tuple(
+            MAKE_STRUCT(tensor_test_params, 1, 32, 128, 128),   // input
+            MAKE_STRUCT(tensor_test_params, 1, 64, 128, 128),   // output
+            MAKE_STRUCT(param_size, 2, 2),                      // kernel
+            MAKE_STRUCT(param_size, 1, 1),                      // stride
+            MAKE_STRUCT(paddings4, 1, 1, 0, 0),                 // pad
+            1                                                   // group
+        )
+    )
+);
+
+using HWDeconvParams = std::tuple<DimsInput, kernel, stride, pad, out_channels, group>;
+
+class MyriadX_HW_Deconvolution_Tests_nightly
+        : public MyriadX_HW_Tests_nightly,
+          public testing::WithParamInterface<HWDeconvParams> {
+public:
+    tensor_test_params in_dims;
+    param_size kernel;
+    param_size stride;
+    param_size pad;
+    size_t out_c;
+    size_t group;
+
+    tensor_test_params out_dims;
+
+    IN_OUT_desc in_tensor, out_tensor;
+
+    size_t numWeights;
+    size_t numBiases;
+
+    void SetUp() override {
+        ASSERT_NO_FATAL_FAILURE(MyriadX_HW_Tests_nightly::SetUp());
+
+        in_dims = std::get<0>(GetParam());
+        kernel = std::get<1>(GetParam());
+        stride = std::get<2>(GetParam());
+        pad = std::get<3>(GetParam());
+        out_c = std::get<4>(GetParam());
+        group = std::get<5>(GetParam());
+
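+        // Transposed-convolution output size: stride * (in - 1) + kernel - 2 * pad.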
+        size_t out_w = stride.x * (in_dims.w - 1) + kernel.x - 2 * pad.x;
+        size_t out_h = stride.y * (in_dims.h - 1) + kernel.y - 2 * pad.y;
+        out_dims = {1, out_c, out_h, out_w};
+
+        in_tensor.push_back({in_dims.n, in_dims.c, in_dims.h, in_dims.w});
+        out_tensor.push_back({out_dims.n, out_dims.c, out_dims.h, out_dims.w});
+
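+        // Grouped deconvolution: each group sees in_dims.c / group input channels.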
+        numWeights = kernel.x * kernel.y * (in_dims.c / group) * out_dims.c;
+        numBiases = out_dims.c;
+    }
+
+    void AddInitialCopyLayer() {
+        _testNet.addLayer(LayerInitParams("Copy")
+                 .in(in_tensor)
+                 .out(in_tensor),
+                 ref_copy_wrap);
+    }
+
+    void AddDeconvolutionLayer() {
+        std::map<std::string, std::string> deconvParams = {
+                  {"kernel-x", std::to_string(kernel.x)}
+                , {"kernel-y", std::to_string(kernel.y)}
+                , {"stride-x", std::to_string(stride.x)}
+                , {"stride-y", std::to_string(stride.y)}
+                , {"pad-x", std::to_string(pad.x)}
+                , {"pad-y", std::to_string(pad.y)}
+                , {"output", std::to_string(out_c)}
+                , {"group", std::to_string(group)}
+        };
+        _testNet.addLayer(LayerInitParams("Deconvolution")
+                 .params(deconvParams)
+                 .weights(numWeights)
+                 .biases(numBiases)
+                 .fillWeights(defaultWeightsRange)
+                 .fillBiases(defaultWeightsRange)
+                 .in(in_tensor)
+                 .out(out_tensor),
+                 ref_deconvolution_wrap);
+    }
+
+    void AddDeconvolutionLayerSmallWeights() {
+        std::map<std::string, std::string> deconvParams = {
+                  {"kernel-x", std::to_string(kernel.x)}
+                , {"kernel-y", std::to_string(kernel.y)}
+                , {"stride-x", std::to_string(stride.x)}
+                , {"stride-y", std::to_string(stride.y)}
+                , {"pad-x", std::to_string(pad.x)}
+                , {"pad-y", std::to_string(pad.y)}
+                , {"output", std::to_string(out_c)}
+                , {"group", std::to_string(group)}
+        };
+        _testNet.addLayer(LayerInitParams("Deconvolution")
+                 .params(deconvParams)
+                 .weights(numWeights)
+                 .biases(numBiases)
+                 .fillWeights(smallWeightsRange)
+                 .fillBiases(smallWeightsRange)
+                 .in(in_tensor)
+                 .out(out_tensor),
+                 ref_deconvolution_wrap);
+    }
+};
+
+TEST_P(MyriadX_HW_Deconvolution_Tests_nightly, Single) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    AddInitialCopyLayer();
+    AddDeconvolutionLayer();
+
+    float maxerr = 0.002 * (in_dims.c / group) * kernel.x * kernel.y;
+    CompareWithSW(maxerr);
+}
+
+TEST_P(MyriadX_HW_Deconvolution_Tests_nightly, ScaleTests) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
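+    // Uses small-magnitude weights and a fixed absolute tolerance instead of a MAC-scaled one.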
+    AddInitialCopyLayer();
+    AddDeconvolutionLayerSmallWeights();
+
+    float maxerr = 0.01;
+    CompareWithSW(maxerr);
+}
+
+INSTANTIATE_TEST_CASE_P(deconv_tf_ssd, MyriadX_HW_Deconvolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 2, 3, 3))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                                , ::testing::Values<out_channels>(1)
+                                , ::testing::Values<group>(1)
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(deconv_3x3_str1, MyriadX_HW_Deconvolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 256, 3, 3),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 128, 5, 5),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 64, 7, 7))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                                , ::testing::Values<out_channels>(128, 256)
+                                , ::testing::Values<group>(1)
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(hw_accuracy_deconv_3x3, MyriadX_HW_Deconvolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 256, 5, 5),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 128, 11, 11),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 64, 13, 13),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 32, 8, 8))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0),
+                                                         MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<out_channels>(1, 128)
+                                , ::testing::Values<group>(1)
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(hw_accuracy_deconv, MyriadX_HW_Deconvolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 120, 36, 36),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 73, 40, 54),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 7, 9, 13))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 5, 5))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1),
+                                                            MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0),
+                                                         MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<out_channels>(19, 53)
+                                , ::testing::Values<group>(1)
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(hw_accuracy_scale_deconv, MyriadX_HW_Deconvolution_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 120, 36, 36))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 5, 5))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1),
+                                                            MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0),
+                                                         MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<out_channels>(256)
+                                , ::testing::Values<group>(1)
+                        )
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_extra_tests.hpp b/inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_extra_tests.hpp
new file mode 100644 (file)
index 0000000..adb6ed2
--- /dev/null
@@ -0,0 +1,1444 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "myriad_hw_tests_base.hpp"
+
+TEST_F(MyriadX_HW_Tests_nightly, SeveralLayers) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    tensor_test_params dims1{1, 3, 224, 224};
+    tensor_test_params dims2{1, 64, 112, 112};
+    tensor_test_params dims3{1, 64, 56, 56};
+
+    param_size kernel1{7, 7};
+    param_size stride1{2, 2};
+    param_size pad1{3, 3};
+
+    param_size kernel2{3, 3};
+    param_size stride2{2, 2};
+    param_size pad2{0, 0};
+
+    IN_OUT_desc tensor1, tensor2, tensor3;
+    tensor1.push_back({dims1.n, dims1.c, dims1.h, dims1.w});
+    tensor2.push_back({dims2.n, dims2.c, dims2.h, dims2.w});
+    tensor3.push_back({dims3.n, dims3.c, dims3.h, dims3.w});
+
+    size_t numWeights = kernel1.x * kernel1.y * dims1.c * dims2.c;
+    size_t numBiases = dims2.c;
+
+    ParamsStruct convParams = {
+              {"kernel-x", std::to_string(kernel1.x)}
+            , {"kernel-y", std::to_string(kernel1.y)}
+            , {"stride-x", std::to_string(stride1.x)}
+            , {"stride-y", std::to_string(stride1.y)}
+            , {"pad-x", std::to_string(pad1.x)}
+            , {"pad-y", std::to_string(pad1.y)}
+            , {"output", std::to_string(dims2.c)}
+            , {"group", "1"}
+    };
+    _testNet.addLayer(LayerInitParams("Convolution")
+             .params(convParams)
+             .weights(numWeights).fillWeights(defaultWeightsRange)
+             .biases(numBiases).fillBiases(defaultWeightsRange)
+             .in(tensor1)
+             .out(tensor2),
+             ref_convolution_wrap);
+
+    ParamsStruct reluParams = {
+        {"negative_slope", "0.0"}
+    };
+    _testNet.addLayer(LayerInitParams("ReLU")
+             .params(reluParams)
+             .in(tensor2)
+             .out(tensor2),
+             ref_ReLU_wrap);
+
+    ParamsStruct poolParams = {
+              {"kernel-x", std::to_string(kernel2.x)}
+            , {"kernel-y", std::to_string(kernel2.y)}
+            , {"stride-x", std::to_string(stride2.x)}
+            , {"stride-y", std::to_string(stride2.y)}
+            , {"pad-x", std::to_string(pad2.x)}
+            , {"pad-y", std::to_string(pad2.y)}
+            , {"pool-method", "max"}
+    };
+    _testNet.addLayer(LayerInitParams("Pooling")
+             .params(poolParams)
+             .in(tensor2)
+             .out(tensor3),
+             ref_pooling_wrap);
+
+    CompareWithSW(0.1f);
+}
+
+TEST_F(MyriadX_HW_Tests_nightly, LargePoolWithConv) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    tensor_test_params dims1{1, 16, 448, 448};
+    tensor_test_params dims2{1, 16, 224, 224};
+    tensor_test_params dims3{1, 32, 224, 224};
+
+    param_size kernel1{2, 2};
+    param_size stride1{2, 2};
+    param_size pad1{0, 0};
+
+    param_size kernel2{3, 3};
+    param_size stride2{1, 1};
+    param_size pad2{1, 1};
+
+    IN_OUT_desc tensor1, tensor2, tensor3;
+    tensor1.push_back({dims1.n, dims1.c, dims1.h, dims1.w});
+    tensor2.push_back({dims2.n, dims2.c, dims2.h, dims2.w});
+    tensor3.push_back({dims3.n, dims3.c, dims3.h, dims3.w});
+
+    ParamsStruct poolParams = {
+              {"kernel-x", std::to_string(kernel1.x)}
+            , {"kernel-y", std::to_string(kernel1.y)}
+            , {"stride-x", std::to_string(stride1.x)}
+            , {"stride-y", std::to_string(stride1.y)}
+            , {"pad-x", std::to_string(pad1.x)}
+            , {"pad-y", std::to_string(pad1.y)}
+            , {"pool-method", "max"}
+    };
+    _testNet.addLayer(LayerInitParams("Pooling")
+             .params(poolParams)
+             .in(tensor1)
+             .out(tensor2),
+             ref_pooling_wrap);
+
+    size_t numWeights = kernel2.x * kernel2.y * dims2.c * dims3.c;
+    size_t numBiases = dims3.c;
+
+    ParamsStruct convParams = {
+              {"kernel-x", std::to_string(kernel2.x)}
+            , {"kernel-y", std::to_string(kernel2.y)}
+            , {"stride-x", std::to_string(stride2.x)}
+            , {"stride-y", std::to_string(stride2.y)}
+            , {"pad-x", std::to_string(pad2.x)}
+            , {"pad-y", std::to_string(pad2.y)}
+            , {"output", std::to_string(dims3.c)}
+            , {"group", "1"}
+    };
+    _testNet.addLayer(LayerInitParams("Convolution")
+             .params(convParams)
+             .weights(numWeights).fillWeights(defaultWeightsRange)
+             .biases(numBiases).fillBiases(defaultWeightsRange)
+             .in(tensor2)
+             .out(tensor3),
+             ref_convolution_wrap);
+
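+    // Also compares with the channel-minor (interleaved) input layout preference.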
+    CompareWithSW(0.095f, vpu::LayoutPreference::ChannelMinor);
+}
+
+TEST_F(MyriadX_HW_Tests_nightly, ConvWithPool) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    tensor_test_params dims1{1, 16, 4, 4};
+    tensor_test_params dims2{1, 64, 4, 4};
+    tensor_test_params dims3{1, 64, 2, 2};
+
+    param_size kernel1{3, 3};
+    param_size stride1{1, 1};
+    param_size pad1{1, 1};
+
+    param_size kernel2{2, 2};
+    param_size stride2{2, 2};
+    param_size pad2{0, 0};
+
+    IN_OUT_desc tensor1, tensor2, tensor3;
+    tensor1.push_back({dims1.n, dims1.c, dims1.h, dims1.w});
+    tensor2.push_back({dims2.n, dims2.c, dims2.h, dims2.w});
+    tensor3.push_back({dims3.n, dims3.c, dims3.h, dims3.w});
+
+    size_t numWeights = kernel1.x * kernel1.y * dims1.c * dims2.c;
+    size_t numBiases = dims2.c;
+
+    ParamsStruct convParams = {
+              {"kernel-x", std::to_string(kernel1.x)}
+            , {"kernel-y", std::to_string(kernel1.y)}
+            , {"stride-x", std::to_string(stride1.x)}
+            , {"stride-y", std::to_string(stride1.y)}
+            , {"pad-x", std::to_string(pad1.x)}
+            , {"pad-y", std::to_string(pad1.y)}
+            , {"output", std::to_string(dims2.c)}
+            , {"group", "1"}
+    };
+
+    _testNet.addLayer(LayerInitParams("Convolution")
+             .params(convParams)
+             .weights(numWeights).fillWeights(defaultWeightsRange)
+             .biases(numBiases).fillBiases(defaultWeightsRange)
+             .in(tensor1)
+             .out(tensor2),
+             ref_convolution_wrap);
+
+    ParamsStruct poolParams = {
+              {"kernel-x", std::to_string(kernel2.x)}
+            , {"kernel-y", std::to_string(kernel2.y)}
+            , {"stride-x", std::to_string(stride2.x)}
+            , {"stride-y", std::to_string(stride2.y)}
+            , {"pad-x", std::to_string(pad2.x)}
+            , {"pad-y", std::to_string(pad2.y)}
+            , {"pool-method", "max"}
+    };
+
+    _testNet.addLayer(LayerInitParams("Pooling")
+             .params(poolParams)
+             .in(tensor2)
+             .out(tensor3),
+             ref_pooling_wrap);
+
+    CompareWithSW(0.08f);
+}
+
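+// Three parallel 1x1 convolutions over the same input joined by a channel-wise
+// Concat and a final convolution; exercises HW execution around a Concat stage.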
+TEST_F(MyriadX_HW_Tests_nightly, WithConcat) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    const std::string model = R"V0G0N(
+        <Net name="WithConcat" version="2" batch="1">
+            <layers>
+                <layer name="input" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>16</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="conv1" type="Convolution" precision="FP16" id="2">
+                    <data stride-x="1" stride-y="1" pad-x="0" pad-y="0" kernel-x="1" kernel-y="1" output="16" group="1"/>
+                    <input>
+                        <port id="2">
+                            <dim>1</dim>
+                            <dim>16</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="3">
+                            <dim>1</dim>
+                            <dim>16</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                    </output>
+                    <weights offset="0" size="512"/>
+                    <biases offset="512" size="32"/>
+                </layer>
+                <layer name="conv2" type="Convolution" precision="FP16" id="3">
+                    <data stride-x="1" stride-y="1" pad-x="0" pad-y="0" kernel-x="1" kernel-y="1" output="16" group="1"/>
+                    <input>
+                        <port id="4">
+                            <dim>1</dim>
+                            <dim>16</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="5">
+                            <dim>1</dim>
+                            <dim>16</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                    </output>
+                    <weights offset="0" size="512"/>
+                    <biases offset="512" size="32"/>
+                </layer>
+                <layer name="conv3" type="Convolution" precision="FP16" id="4">
+                    <data stride-x="1" stride-y="1" pad-x="0" pad-y="0" kernel-x="1" kernel-y="1" output="16" group="1"/>
+                    <input>
+                        <port id="6">
+                            <dim>1</dim>
+                            <dim>16</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="7">
+                            <dim>1</dim>
+                            <dim>16</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                    </output>
+                    <weights offset="0" size="512"/>
+                    <biases offset="512" size="32"/>
+                </layer>
+                <layer name="concat" type="Concat" precision="FP16" id="5">
+                    <data axis="1"/>
+                    <input>
+                        <port id="8">
+                            <dim>1</dim>
+                            <dim>16</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                        <port id="9">
+                            <dim>1</dim>
+                            <dim>16</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                        <port id="10">
+                            <dim>1</dim>
+                            <dim>16</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="11">
+                            <dim>1</dim>
+                            <dim>48</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="last" type="Convolution" precision="FP16" id="6">
+                    <data stride-x="1" stride-y="1" pad-x="0" pad-y="0" kernel-x="1" kernel-y="1" output="48" group="1"/>
+                    <input>
+                        <port id="12">
+                            <dim>1</dim>
+                            <dim>48</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="13">
+                            <dim>1</dim>
+                            <dim>48</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                    </output>
+                    <weights offset="544" size="4608"/>
+                    <biases offset="5152" size="96"/>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="1" to-layer="2" to-port="2"/>
+                <edge from-layer="1" from-port="1" to-layer="3" to-port="4"/>
+                <edge from-layer="1" from-port="1" to-layer="4" to-port="6"/>
+                <edge from-layer="2" from-port="3" to-layer="5" to-port="8"/>
+                <edge from-layer="3" from-port="5" to-layer="5" to-port="9"/>
+                <edge from-layer="4" from-port="7" to-layer="5" to-port="10"/>
+                <edge from-layer="5" from-port="11" to-layer="6" to-port="12"/>
+            </edges>
+        </Net>
+    )V0G0N";
+
+    TBlob<uint8_t>::Ptr weights(GenWeights(5248 / sizeof(ie_fp16)));
+
+    ASSERT_NO_THROW(readNetwork(model, weights));
+
+    const auto& network = _cnnNetwork;
+
+    _inputsInfo = network.getInputsInfo();
+    auto inputInfo = _inputsInfo["input"];
+    inputInfo->setPrecision(Precision::FP16);
+
+    _outputsInfo = network.getOutputsInfo();
+    auto outputInfo = _outputsInfo["last"];
+    outputInfo->setPrecision(Precision::FP16);
+
+    auto tensorDesc = InferenceEngine::TensorDesc(Precision::FP16, inputInfo->getTensorDesc().getDims(), Layout::NCHW);
+    Blob::Ptr input = make_shared_blob<ie_fp16>(tensorDesc);
+    input->allocate();
+    GenRandomData(input);
+
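+    // Run the same network twice, with HW stages disabled and then enabled,
+    // and compare the two outputs.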
+    Blob::Ptr swOutput, hwOutput;
+    {
+        SCOPED_TRACE("SW");
+
+        RunInfo runInfo;
+        runInfo.hwMode = false;
+
+        ASSERT_NO_FATAL_FAILURE(RunNetwork(network, input, swOutput, "input", "last", runInfo));
+    }
+
+    {
+        SCOPED_TRACE("HW");
+
+        RunInfo runInfo;
+        runInfo.hwMode = true;
+
+        ASSERT_NO_FATAL_FAILURE(RunNetwork(network, input, hwOutput, "input", "last", runInfo));
+
+        ASSERT_NO_FATAL_FAILURE(CheckHWRun());
+    }
+
+    CompareCommonAbsolute(hwOutput, swOutput, 0.2f);
+}
+
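+// The same Concat pattern with two 35-channel branches: the odd channel count
+// makes the concatenated sections misaligned, which is the case under test.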
+TEST_F(MyriadX_HW_Tests_nightly, WithConcatMisaligned) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    const std::string model = R"V0G0N(
+        <Net name="WithConcatMisaligned" version="2" batch="1">
+            <layers>
+                <layer name="input" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>35</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="conv1" type="Convolution" precision="FP16" id="2">
+                    <data stride-x="1" stride-y="1" pad-x="0" pad-y="0" kernel-x="1" kernel-y="1" output="35" group="1"/>
+                    <input>
+                        <port id="2">
+                            <dim>1</dim>
+                            <dim>35</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="3">
+                            <dim>1</dim>
+                            <dim>35</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                    </output>
+                    <weights offset="0" size="2450"/>
+                    <biases offset="2450" size="70"/>
+                </layer>
+                <layer name="conv2" type="Convolution" precision="FP16" id="3">
+                    <data stride-x="1" stride-y="1" pad-x="0" pad-y="0" kernel-x="1" kernel-y="1" output="35" group="1"/>
+                    <input>
+                        <port id="4">
+                            <dim>1</dim>
+                            <dim>35</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="5">
+                            <dim>1</dim>
+                            <dim>35</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                    </output>
+                    <weights offset="0" size="2450"/>
+                    <biases offset="2450" size="70"/>
+                </layer>
+                <layer name="concat" type="Concat" precision="FP16" id="4">
+                    <data axis="1"/>
+                    <input>
+                        <port id="6">
+                            <dim>1</dim>
+                            <dim>35</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                        <port id="7">
+                            <dim>1</dim>
+                            <dim>35</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="8">
+                            <dim>1</dim>
+                            <dim>70</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="last" type="Power" precision="FP16" id="5">
+                    <data power="1.0" scale="1.0" shift="0.0"/>
+                    <input>
+                        <port id="9">
+                            <dim>1</dim>
+                            <dim>70</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="10">
+                            <dim>1</dim>
+                            <dim>70</dim>
+                            <dim>28</dim>
+                            <dim>28</dim>
+                        </port>
+                    </output>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="1" to-layer="2" to-port="2"/>
+                <edge from-layer="1" from-port="1" to-layer="3" to-port="4"/>
+                <edge from-layer="2" from-port="3" to-layer="4" to-port="6"/>
+                <edge from-layer="3" from-port="5" to-layer="4" to-port="7"/>
+                <edge from-layer="4" from-port="8" to-layer="5" to-port="9"/>
+            </edges>
+        </Net>
+    )V0G0N";
+
+    TBlob<uint8_t>::Ptr weights(GenWeights(2520 / sizeof(ie_fp16)));
+
+    ASSERT_NO_THROW(readNetwork(model, weights));
+
+    const auto& network = _cnnNetwork;
+
+    _inputsInfo = network.getInputsInfo();
+    auto inputInfo = _inputsInfo["input"];
+    inputInfo->setPrecision(Precision::FP16);
+
+    _outputsInfo = network.getOutputsInfo();
+    auto outputInfo = _outputsInfo["last"];
+    outputInfo->setPrecision(Precision::FP16);
+
+    Blob::Ptr input = make_shared_blob<ie_fp16>({Precision::FP16, inputInfo->getTensorDesc().getDims(), Layout::NCHW});
+    input->allocate();
+    GenRandomData(input);
+
+    Blob::Ptr swOutput, hwOutput;
+    {
+        SCOPED_TRACE("SW");
+
+        RunInfo runInfo;
+        runInfo.hwMode = false;
+
+        ASSERT_NO_FATAL_FAILURE(RunNetwork(network, input, swOutput, "input", "last", runInfo));
+    }
+
+    {
+        SCOPED_TRACE("HW");
+
+        RunInfo runInfo;
+        runInfo.hwMode = true;
+
+        ASSERT_NO_FATAL_FAILURE(RunNetwork(network, input, hwOutput, "input", "last", runInfo));
+
+        ASSERT_NO_FATAL_FAILURE(CheckHWRun());
+    }
+
+    CompareCommonAbsolute(hwOutput, swOutput, 0.03f);
+}
+
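+// Three FullyConnected layers that share the same input and read the same
+// weights blob, so all three outputs must agree to within ASSERT_NEAR bounds.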
+TEST_F(MyriadX_HW_Tests_nightly, With_3_FC_Layers) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    const std::string model = R"V0G0N(
+        <Net name="With_3_FC_Layers" version="2" batch="1">
+            <layers>
+                <layer name="input" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>128</dim>
+                            <dim>2</dim>
+                            <dim>2</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer id="2" name="angle_y" precision="FP16" type="FullyConnected">
+                    <data out-size="1"/>
+                    <input>
+                        <port id="0">
+                            <dim>1</dim>
+                            <dim>128</dim>
+                            <dim>2</dim>
+                            <dim>2</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="3">
+                            <dim>1</dim>
+                            <dim>1</dim>
+                        </port>
+                    </output>
+                    <blobs>
+                        <weights offset="0" size="1024"/>
+                        <biases offset="1024" size="2"/>
+                    </blobs>
+                </layer>
+                <layer id="3" name="angle_p" precision="FP16" type="FullyConnected">
+                    <data out-size="1"/>
+                    <input>
+                        <port id="0">
+                            <dim>1</dim>
+                            <dim>128</dim>
+                            <dim>2</dim>
+                            <dim>2</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="3">
+                            <dim>1</dim>
+                            <dim>1</dim>
+                        </port>
+                    </output>
+                    <blobs>
+                        <weights offset="0" size="1024"/>
+                        <biases offset="1024" size="2"/>
+                    </blobs>
+                </layer>
+                <layer id="4" name="angle_q" precision="FP16" type="FullyConnected">
+                    <data out-size="1"/>
+                    <input>
+                        <port id="0">
+                            <dim>1</dim>
+                            <dim>128</dim>
+                            <dim>2</dim>
+                            <dim>2</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="3">
+                            <dim>1</dim>
+                            <dim>1</dim>
+                        </port>
+                    </output>
+                    <blobs>
+                        <weights offset="0" size="1024"/>
+                        <biases offset="1024" size="2"/>
+                    </blobs>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="1" to-layer="2" to-port="0"/>
+                <edge from-layer="1" from-port="1" to-layer="3" to-port="0"/>
+                <edge from-layer="1" from-port="1" to-layer="4" to-port="0"/>
+            </edges>
+        </Net>
+    )V0G0N";
+
+    TBlob<uint8_t>::Ptr weights(GenWeights((1024 + 2) / sizeof(ie_fp16)));
+
+    ASSERT_NO_THROW(readNetwork(model, weights));
+
+    const auto& network = _cnnNetwork;
+
+    _inputsInfo = network.getInputsInfo();
+    auto inputInfo = _inputsInfo["input"];
+    inputInfo->setPrecision(Precision::FP16);
+
+    _outputsInfo = network.getOutputsInfo();
+    const std::string names[] = { "angle_p", "angle_q", "angle_y" };
+    for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); ++i) {
+        auto outputInfo = _outputsInfo[names[i]];
+        ASSERT_NE(outputInfo, nullptr);
+        outputInfo->setPrecision(Precision::FP32);
+    }
+
+    Blob::Ptr input = make_shared_blob<ie_fp16>({Precision::FP16, inputInfo->getTensorDesc().getDims(), Layout::NCHW});
+    input->allocate();
+    GenRandomData(input);
+
+    Blob::Ptr swOutput, hwOutput;
+    _inferRequest.reset();
+    _exeNetwork.reset();
+
+    StatusCode st;
+
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network,
+                                                      {
+                                                          {
+                                                              VPU_CONFIG_KEY(PERF_REPORT_MODE),
+                                                              VPU_CONFIG_VALUE(PER_STAGE)
+                                                          },
+                                                          {
+                                                              VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION),
+                                                              CONFIG_VALUE(YES)
+                                                          },
+                                                      },
+                                                      &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _inferRequest->SetBlob("input", input, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    std::vector<float> results(sizeof(names) / sizeof(names[0]));
+    for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); ++i) {
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob(names[i].c_str(), hwOutput, &_resp));
+        ASSERT_NE(hwOutput, nullptr);
+        BufferWrapper res_ptr(hwOutput);
+        results[i] = res_ptr[0];
+    }
+    for (size_t i = 1; i < results.size(); ++i) {
+        ASSERT_NEAR(results[0], results[i], 0.0001f);
+    }
+}
+
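+// ResNet-style residual block: a 1x1 shortcut branch and a 3x3-3x3 branch
+// merged by an Eltwise sum, followed by a final 1x1 convolution.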
+TEST_F(MyriadX_HW_Tests_nightly, WithEltwise) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    const std::string model = R"V0G0N(
+        <Net name="WithEltwise" version="2" batch="1">
+            <layers>
+                <layer name="input" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="branch1" type="Convolution" precision="FP16" id="2">
+                    <convolution_data
+                        stride-x="1" stride-y="1"
+                        pad-x="0" pad-y="0"
+                        kernel-x="1" kernel-y="1"
+                        output="64"
+                        group="1"/>
+                    <input>
+                        <port id="2">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="3">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                    <weights offset="0" size="8192"/>
+                    <biases offset="8192" size="128"/>
+                </layer>
+                <layer name="branch2a" type="Convolution" precision="FP16" id="3">
+                    <convolution_data
+                        stride-x="1" stride-y="1"
+                        pad-x="1" pad-y="1"
+                        kernel-x="3" kernel-y="3"
+                        output="64"
+                        group="1"/>
+                    <input>
+                        <port id="4">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="5">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                    <weights offset="8320" size="73728"/>
+                    <biases offset="82048" size="128"/>
+                </layer>
+                <layer name="branch2a_relu" type="ReLU" precision="FP16" id="4">
+                    <input>
+                        <port id="6">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="7">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="branch2b" type="Convolution" precision="FP16" id="5">
+                    <convolution_data
+                        stride-x="1" stride-y="1"
+                        pad-x="1" pad-y="1"
+                        kernel-x="3" kernel-y="3"
+                        output="64"
+                        group="1"/>
+                    <input>
+                        <port id="8">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="9">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                    <weights offset="82176" size="73728"/>
+                    <biases offset="155904" size="128"/>
+                </layer>
+                <layer name="sum" type="Eltwise" precision="FP16" id="6">
+                    <elementwise_data operation="sum"/>
+                    <input>
+                        <port id="10">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                        <port id="11">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="12">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="last" type="Convolution" precision="FP16" id="7">
+                    <convolution_data
+                        stride-x="1" stride-y="1"
+                        pad-x="0" pad-y="0"
+                        kernel-x="1" kernel-y="1"
+                        output="64"
+                        group="1"/>
+                    <input>
+                        <port id="13">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="14">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                    <weights offset="156032" size="8192"/>
+                    <biases offset="164224" size="128"/>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="1" to-layer="2" to-port="2"/>
+                <edge from-layer="1" from-port="1" to-layer="3" to-port="4"/>
+                <edge from-layer="3" from-port="5" to-layer="4" to-port="6"/>
+                <edge from-layer="4" from-port="7" to-layer="5" to-port="8"/>
+                <edge from-layer="2" from-port="3" to-layer="6" to-port="10"/>
+                <edge from-layer="5" from-port="9" to-layer="6" to-port="11"/>
+                <edge from-layer="6" from-port="12" to-layer="7" to-port="13"/>
+            </edges>
+        </Net>
+    )V0G0N";
+
+    TBlob<uint8_t>::Ptr weights(GenWeights(164352 / sizeof(ie_fp16)));
+
+    ASSERT_NO_THROW(readNetwork(model, weights));
+
+    const auto& network = _cnnNetwork;
+
+    _inputsInfo = network.getInputsInfo();
+    auto inputInfo = _inputsInfo["input"];
+    inputInfo->setPrecision(Precision::FP16);
+
+    _outputsInfo = network.getOutputsInfo();
+    auto outputInfo = _outputsInfo["last"];
+    outputInfo->setPrecision(Precision::FP16);
+
+    Blob::Ptr input = make_shared_blob<ie_fp16>({Precision::FP16, inputInfo->getTensorDesc().getDims(), Layout::NCHW});
+    input->allocate();
+    GenRandomData(input);
+
+    Blob::Ptr swOutput;
+    {
+        SCOPED_TRACE("SW");
+
+        RunInfo runInfo;
+        runInfo.hwMode = false;
+
+        ASSERT_NO_FATAL_FAILURE(RunNetwork(network, input, swOutput, "input", "last", runInfo));
+    }
+
+    Blob::Ptr hwOutput;
+    {
+        SCOPED_TRACE("HW");
+
+        RunInfo runInfo;
+        runInfo.hwMode = true;
+
+        ASSERT_NO_FATAL_FAILURE(RunNetwork(network, input, hwOutput, "input", "last", runInfo));
+        ASSERT_NO_FATAL_FAILURE(CheckHWRun());
+    }
+
+    CompareCommonAbsolute(hwOutput, swOutput, 30.0f);
+}
+
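+// The same residual block as WithEltwise, with a ReLU applied to the Eltwise
+// sum before the final convolution.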
+TEST_F(MyriadX_HW_Tests_nightly, WithEltwiseReLU) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    const std::string model = R"V0G0N(
+        <Net name="WithEltwiseReLU" version="2" batch="1">
+            <layers>
+                <layer name="input" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="branch1" type="Convolution" precision="FP16" id="2">
+                    <convolution_data
+                        stride-x="1" stride-y="1"
+                        pad-x="0" pad-y="0"
+                        kernel-x="1" kernel-y="1"
+                        output="64"
+                        group="1"/>
+                    <input>
+                        <port id="2">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="3">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                    <weights offset="0" size="8192"/>
+                    <biases offset="8192" size="128"/>
+                </layer>
+                <layer name="branch2a" type="Convolution" precision="FP16" id="3">
+                    <convolution_data
+                        stride-x="1" stride-y="1"
+                        pad-x="1" pad-y="1"
+                        kernel-x="3" kernel-y="3"
+                        output="64"
+                        group="1"/>
+                    <input>
+                        <port id="4">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="5">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                    <weights offset="8320" size="73728"/>
+                    <biases offset="82048" size="128"/>
+                </layer>
+                <layer name="branch2a_relu" type="ReLU" precision="FP16" id="4">
+                    <input>
+                        <port id="6">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="7">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="branch2b" type="Convolution" precision="FP16" id="5">
+                    <convolution_data
+                        stride-x="1" stride-y="1"
+                        pad-x="1" pad-y="1"
+                        kernel-x="3" kernel-y="3"
+                        output="64"
+                        group="1"/>
+                    <input>
+                        <port id="8">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="9">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                    <weights offset="82176" size="73728"/>
+                    <biases offset="155904" size="128"/>
+                </layer>
+                <layer name="sum" type="Eltwise" precision="FP16" id="6">
+                    <elementwise_data operation="sum"/>
+                    <input>
+                        <port id="10">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                        <port id="11">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="12">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="sum_relu" type="ReLU" precision="FP16" id="7">
+                    <input>
+                        <port id="13">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="14">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="last" type="Convolution" precision="FP16" id="8">
+                    <convolution_data
+                        stride-x="1" stride-y="1"
+                        pad-x="0" pad-y="0"
+                        kernel-x="1" kernel-y="1"
+                        output="64"
+                        group="1"/>
+                    <input>
+                        <port id="15">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="16">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                    <weights offset="156032" size="8192"/>
+                    <biases offset="164224" size="128"/>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="1" to-layer="2" to-port="2"/>
+                <edge from-layer="1" from-port="1" to-layer="3" to-port="4"/>
+                <edge from-layer="3" from-port="5" to-layer="4" to-port="6"/>
+                <edge from-layer="4" from-port="7" to-layer="5" to-port="8"/>
+                <edge from-layer="2" from-port="3" to-layer="6" to-port="10"/>
+                <edge from-layer="5" from-port="9" to-layer="6" to-port="11"/>
+                <edge from-layer="6" from-port="12" to-layer="7" to-port="13"/>
+                <edge from-layer="7" from-port="14" to-layer="8" to-port="15"/>
+            </edges>
+        </Net>
+    )V0G0N";
+
+    TBlob<uint8_t>::Ptr weights(GenWeights(164352 / sizeof(ie_fp16)));
+
+    ASSERT_NO_THROW(readNetwork(model, weights));
+
+    const auto& network = _cnnNetwork;
+
+    _inputsInfo = network.getInputsInfo();
+    auto inputInfo = _inputsInfo["input"];
+    inputInfo->setPrecision(Precision::FP16);
+
+    _outputsInfo = network.getOutputsInfo();
+    auto outputInfo = _outputsInfo["last"];
+    outputInfo->setPrecision(Precision::FP16);
+
+    Blob::Ptr input = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, inputInfo->getTensorDesc().getDims(), Layout::NCHW));
+    input->allocate();
+    GenRandomData(input);
+
+    Blob::Ptr swOutput;
+    {
+        SCOPED_TRACE("SW");
+
+        RunInfo runInfo;
+        runInfo.hwMode = false;
+
+        ASSERT_NO_FATAL_FAILURE(RunNetwork(network, input, swOutput, "input", "last", runInfo));
+    }
+
+    Blob::Ptr hwOutput;
+    {
+        SCOPED_TRACE("HW");
+
+        RunInfo runInfo;
+        runInfo.hwMode = true;
+
+        ASSERT_NO_FATAL_FAILURE(RunNetwork(network, input, hwOutput, "input", "last", runInfo));
+        ASSERT_NO_FATAL_FAILURE(CheckHWRun());
+    }
+
+    CompareCommonAbsolute(hwOutput, swOutput, 18.f);
+}
+
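+// SSD-style detection head: two Convolution -> Permute -> Flatten chains whose
+// flattened results are concatenated along axis 1.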
+TEST_F(MyriadX_HW_Tests_nightly, PermuteFlattenConcat) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    const std::string model = R"V0G0N(
+        <Net name="WithPermuteFlattenConcat" version="2" batch="1">
+            <layers>
+                <layer name="input" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>256</dim>
+                            <dim>23</dim>
+                            <dim>23</dim>
+                        </port>
+                    </output>
+                </layer>
+
+                <layer name="conv1" type="Convolution" precision="FP16" id="2">
+                    <convolution_data
+                         stride-x="1"
+                         stride-y="1"
+                         pad-x="1"
+                         pad-y="1"
+                         kernel-x="3"
+                         kernel-y="3"
+                         output="54"
+                         group="1" />
+                    <input>
+                        <port id="2">
+                            <dim>1</dim>
+                            <dim>256</dim>
+                            <dim>23</dim>
+                            <dim>23</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="3">
+                            <dim>1</dim>
+                            <dim>54</dim>
+                            <dim>23</dim>
+                            <dim>23</dim>
+                        </port>
+                    </output>
+                    <weights offset="0" size="248832"/>
+                    <biases offset="248832" size="108"/>
+                </layer>
+                <layer name="perm1" type="Permute" precision="FP16" id="3">
+                    <data order="0,2,3,1"/>
+                    <input>
+                        <port id="4">
+                            <dim>1</dim>
+                            <dim>54</dim>
+                            <dim>23</dim>
+                            <dim>23</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="5">
+                            <dim>1</dim>
+                            <dim>23</dim>
+                            <dim>23</dim>
+                            <dim>54</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="flat1" type="Flatten" precision="FP16" id="4">
+                    <data axis="1" end_axis="-1"/>
+                    <input>
+                        <port id="6">
+                            <dim>1</dim>
+                            <dim>23</dim>
+                            <dim>23</dim>
+                            <dim>54</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="7">
+                            <dim>1</dim>
+                            <dim>28566</dim>
+                        </port>
+                    </output>
+                </layer>
+
+                <layer name="conv2" type="Convolution" precision="FP16" id="5">
+                    <convolution_data
+                         stride-x="1"
+                         stride-y="1"
+                         pad-x="1"
+                         pad-y="1"
+                         kernel-x="3"
+                         kernel-y="3"
+                         output="54"
+                         group="1" />
+                    <input>
+                        <port id="8">
+                            <dim>1</dim>
+                            <dim>256</dim>
+                            <dim>23</dim>
+                            <dim>23</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="9">
+                            <dim>1</dim>
+                            <dim>54</dim>
+                            <dim>23</dim>
+                            <dim>23</dim>
+                        </port>
+                    </output>
+                    <weights offset="0" size="248832"/>
+                    <biases offset="248832" size="108"/>
+                </layer>
+                <layer name="perm2" type="Permute" precision="FP16" id="6">
+                    <data order="0,2,3,1"/>
+                    <input>
+                        <port id="10">
+                            <dim>1</dim>
+                            <dim>54</dim>
+                            <dim>23</dim>
+                            <dim>23</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="11">
+                            <dim>1</dim>
+                            <dim>23</dim>
+                            <dim>23</dim>
+                            <dim>54</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="flat2" type="Flatten" precision="FP16" id="7">
+                    <data axis="1" end_axis="-1"/>
+                    <input>
+                        <port id="12">
+                            <dim>1</dim>
+                            <dim>23</dim>
+                            <dim>23</dim>
+                            <dim>54</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="13">
+                            <dim>1</dim>
+                            <dim>28566</dim>
+                        </port>
+                    </output>
+                </layer>
+
+                <layer name="result" type="Concat" precision="FP16" id="8">
+                    <concat_data axis="1"/>
+                    <input>
+                        <port id="14">
+                            <dim>1</dim>
+                            <dim>28566</dim>
+                        </port>
+                        <port id="15">
+                            <dim>1</dim>
+                            <dim>28566</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="16">
+                            <dim>1</dim>
+                            <dim>57132</dim>
+                        </port>
+                    </output>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="1" to-layer="2" to-port="2"/>
+                <edge from-layer="2" from-port="3" to-layer="3" to-port="4"/>
+                <edge from-layer="3" from-port="5" to-layer="4" to-port="6"/>
+                <edge from-layer="4" from-port="7" to-layer="8" to-port="14"/>
+                <edge from-layer="1" from-port="1" to-layer="5" to-port="8"/>
+                <edge from-layer="5" from-port="9" to-layer="6" to-port="10"/>
+                <edge from-layer="6" from-port="11" to-layer="7" to-port="12"/>
+                <edge from-layer="7" from-port="13" to-layer="8" to-port="15"/>
+            </edges>
+        </Net>
+    )V0G0N";
+
+    TBlob<uint8_t>::Ptr weights(GenWeights(248940 / sizeof(ie_fp16)));
+
+    ASSERT_NO_THROW(readNetwork(model, weights));
+
+    const auto& network = _cnnNetwork;
+
+    _inputsInfo = network.getInputsInfo();
+    auto inputInfo = _inputsInfo["input"];
+    inputInfo->setPrecision(Precision::FP16);
+
+    _outputsInfo = network.getOutputsInfo();
+    auto outputInfo = _outputsInfo["result"];
+    outputInfo->setPrecision(Precision::FP16);
+
+    Blob::Ptr input = make_shared_blob<ie_fp16>({Precision::FP16, inputInfo->getTensorDesc().getDims(), Layout::NCHW});
+    input->allocate();
+    GenRandomData(input);
+
+    Blob::Ptr swOutput;
+    {
+        SCOPED_TRACE("SW");
+
+        RunInfo runInfo;
+        runInfo.hwMode = false;
+
+        ASSERT_NO_FATAL_FAILURE(RunNetwork(network, input, swOutput, "input", "result", runInfo));
+    }
+
+    Blob::Ptr hwOutput;
+    {
+        SCOPED_TRACE("HW");
+
+        RunInfo runInfo;
+        runInfo.hwMode = true;
+
+        ASSERT_NO_FATAL_FAILURE(RunNetwork(network, input, hwOutput, "input", "result", runInfo));
+        ASSERT_NO_FATAL_FAILURE(CheckHWRun());
+    }
+
+    CompareCommonAbsolute(hwOutput, swOutput, 1.3f);
+}
+
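+// The first two 3x3 Convolution + ReLU stages of a VGG-like network at full
+// 224x224 input resolution.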
+TEST_F(MyriadX_HW_Tests_nightly, VGG_FirstTwoConvs) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    IN_OUT_desc in_tensor, out_tensor;
+    in_tensor.push_back({1, 3, 224, 224});
+    out_tensor.push_back({1, 64, 224, 224});
+
+    ParamsStruct conv1_params = {
+        {"kernel-x", "3"},
+        {"kernel-y", "3"},
+        {"stride-x", "1"},
+        {"stride-y", "1"},
+        {"pad-x", "1"},
+        {"pad-y", "1"},
+        {"output", "64"},
+        {"group", "1"}
+    };
+    _testNet.addLayer(LayerInitParams("Convolution")
+             .params(conv1_params)
+             .weights(1728).fillWeights(defaultWeightsRange)
+             .biases(64).fillBiases(defaultWeightsRange)
+             .in(in_tensor)
+             .out(out_tensor),
+             ref_convolution_wrap);
+
+    _testNet.addLayer(LayerInitParams("ReLU")
+             .in(out_tensor)
+             .out(out_tensor),
+             ref_ReLU_wrap);
+
+    ParamsStruct conv2_params = {
+        {"kernel-x", "3"},
+        {"kernel-y", "3"},
+        {"stride-x", "1"},
+        {"stride-y", "1"},
+        {"pad-x", "1"},
+        {"pad-y", "1"},
+        {"output", "64"},
+        {"group", "1"}
+    };
+    _testNet.addLayer(LayerInitParams("Convolution")
+             .params(conv2_params)
+             .weights(36864).fillWeights(defaultWeightsRange)
+             .biases(64).fillBiases(defaultWeightsRange)
+             .in(out_tensor)
+             .out(out_tensor),
+             ref_convolution_wrap);
+
+    _testNet.addLayer(LayerInitParams("ReLU")
+             .in(out_tensor)
+             .out(out_tensor),
+             ref_ReLU_wrap);
+
+    CompareWithSW(0.85f);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_fc_tests.hpp b/inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_fc_tests.hpp
new file mode 100644 (file)
index 0000000..1a32e18
--- /dev/null
@@ -0,0 +1,144 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "myriad_hw_tests_base.hpp"
+
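+// Parameterized fixture for the HW FullyConnected tests: each fcon_test_params
+// entry defines the input shape, the output size, and the allowed error bound.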
+class MyriadX_HW_FullyConnected_Tests_nightly
+        : public MyriadX_HW_Tests_nightly,
+          public testing::WithParamInterface<fcon_test_params> {
+public:
+    fcon_test_params p;
+
+    size_t numWeights;
+    size_t numBias;
+
+    IN_OUT_desc in_tensor, out_tensor;
+
+    void SetUp() override {
+        ASSERT_NO_FATAL_FAILURE(MyriadX_HW_Tests_nightly::SetUp());
+
+        p = GetParam();
+
+        numWeights = p.in.c * p.in.h * p.in.w * p.out_c;
+        numBias = p.out_c;
+
+        in_tensor.push_back({p.in.n, p.in.c, p.in.h, p.in.w});
+        out_tensor.push_back({p.in.n, p.out_c});
+
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+    }
+
+    void AddFCLayer() {
+        std::map<std::string, std::string> fcParams;
+        fcParams["out-size"] = std::to_string(p.out_c);
+        _testNet.addLayer(LayerInitParams("FullyConnected")
+                 .params(fcParams)
+                 .weights(numWeights).fillWeights(defaultWeightsRange)
+                 .biases(numBias).fillBiases(defaultWeightsRange)
+                 .in(in_tensor)
+                 .out(out_tensor),
+                 ref_innerproduct_wrap);
+    }
+
+    void AddReLULayer(float negativeSlope = 0.0f) {
+        ParamsStruct reluParams = {
+            {"negative_slope", std::to_string(negativeSlope)}
+        };
+        _testNet.addLayer(LayerInitParams("ReLU")
+                 .params(reluParams)
+                 .in(out_tensor)
+                 .out(out_tensor),
+                 ref_ReLU_wrap);
+    }
+};
+
+TEST_P(MyriadX_HW_FullyConnected_Tests_nightly, Single) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    AddFCLayer();
+
+    CompareWithSW(p.error_bound);
+}
+
+TEST_P(MyriadX_HW_FullyConnected_Tests_nightly, Single_NC) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    if (p.in.h != 1 || p.in.w != 1) {
+        SKIP() << "Non NC case";
+    }
+
+    in_tensor.clear();
+    in_tensor.push_back({p.in.n, p.in.c});
+
+    AddFCLayer();
+
+    CompareWithSW(p.error_bound);
+}
+
+TEST_P(MyriadX_HW_FullyConnected_Tests_nightly, WithReLU) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    AddFCLayer();
+    AddReLULayer(0.0f);
+
+    CompareWithSW(p.error_bound);
+}
+
+TEST_P(MyriadX_HW_FullyConnected_Tests_nightly, MultipleInfer) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    AddFCLayer();
+
+    CompareWithItself(100);
+}
+
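+// Each instantiation below covers a single FC configuration with its own
+// absolute error bound.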
+INSTANTIATE_TEST_CASE_P(fc_1024to1000, MyriadX_HW_FullyConnected_Tests_nightly,
+                        ::testing::Values(MAKE_STRUCT(fcon_test_params, {1, 1024, 1, 1}, 1000, 0.25f))
+);
+
+INSTANTIATE_TEST_CASE_P(fc_4096to1000, MyriadX_HW_FullyConnected_Tests_nightly,
+                        ::testing::Values(MAKE_STRUCT(fcon_test_params, {1, 4096, 1, 1}, 1000, 0.82f))
+);
+
+INSTANTIATE_TEST_CASE_P(fc_4096to4096, MyriadX_HW_FullyConnected_Tests_nightly,
+                        ::testing::Values(MAKE_STRUCT(fcon_test_params, {1, 4096, 1, 1}, 4096, 0.9f))
+);
+
+INSTANTIATE_TEST_CASE_P(fc_16x16x16to16, MyriadX_HW_FullyConnected_Tests_nightly,
+                        ::testing::Values(MAKE_STRUCT(fcon_test_params, {1, 16, 16, 16}, 16, 0.71f))
+);
+
+INSTANTIATE_TEST_CASE_P(fc_512x7x7to4096, MyriadX_HW_FullyConnected_Tests_nightly,
+                        ::testing::Values(MAKE_STRUCT(fcon_test_params, {1, 512, 7, 7}, 4096, 4.38f))
+);
+
+INSTANTIATE_TEST_CASE_P(fc_256x7x7to1470, MyriadX_HW_FullyConnected_Tests_nightly,
+                        ::testing::Values(MAKE_STRUCT(fcon_test_params, {1, 256, 7, 7}, 1470, 2.375f))
+);
+
+INSTANTIATE_TEST_CASE_P(fc_576to128, MyriadX_HW_FullyConnected_Tests_nightly,
+                        ::testing::Values(MAKE_STRUCT(fcon_test_params, {1, 576, 1, 1}, 128, 0.76f))
+);
+
+INSTANTIATE_TEST_CASE_P(fc_1152to128, MyriadX_HW_FullyConnected_Tests_nightly,
+                        ::testing::Values(MAKE_STRUCT(fcon_test_params, {1, 1152, 1, 1}, 128, 0.76f))
+);
+
+INSTANTIATE_TEST_CASE_P(fc_batch, MyriadX_HW_FullyConnected_Tests_nightly,
+                        ::testing::Values(MAKE_STRUCT(fcon_test_params, {100, 256, 1, 1}, 1024, 0.1f))
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_network_tests.hpp b/inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_network_tests.hpp
new file mode 100644 (file)
index 0000000..98374d0
--- /dev/null
@@ -0,0 +1,169 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <ngraph_functions/subgraph_builders.hpp>
+#include <functional_test_utils/blob_utils.hpp>
+#include "myriad_hw_tests_base.hpp"
+
+using HwNetworkParams = std::tuple<Precision, Precision>;
+
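+// Whole-network HW tests, parameterized by the input and output precisions of
+// the network under test.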
+class MyriadX_HW_Networks_Tests_nightly :
+        public MyriadX_HW_Tests_nightly,
+        public testing::WithParamInterface<HwNetworkParams> {
+public:
+    Precision inputPrecision;
+    Precision outputPrecision;
+
+    Blob::Ptr _input;
+
+    void SetUp() override {
+        ASSERT_NO_FATAL_FAILURE(MyriadX_HW_Tests_nightly::SetUp());
+
+        inputPrecision = std::get<0>(GetParam());
+        outputPrecision = std::get<1>(GetParam());
+    }
+
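+    // Precision helpers: return the blob unchanged if it already has the target
+    // precision, otherwise convert FP16 <-> FP32 element-wise.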
+    Blob::Ptr getFp32Blob(const Blob::Ptr& in) {
+        if (in->getTensorDesc().getPrecision() == Precision::FP32)
+            return in;
+
+        auto out = make_shared_blob<float>({Precision::FP32, in->getTensorDesc().getDims(), in->getTensorDesc().getLayout()});
+        out->allocate();
+
+        if (in->getTensorDesc().getPrecision() == Precision::FP16) {
+            PrecisionUtils::f16tof32Arrays(out->buffer().as<float *>(), in->cbuffer().as<ie_fp16 *>(), in->size());
+        } else {
+            ADD_FAILURE() << "Unsupported precision " << in->getTensorDesc().getPrecision();
+        }
+
+        return out;
+    }
+
+    Blob::Ptr getFp16Blob(const Blob::Ptr& in) {
+        if (in->getTensorDesc().getPrecision() == Precision::FP16)
+            return in;
+
+        auto out = make_shared_blob<ie_fp16>({Precision::FP16, in->getTensorDesc().getDims(), in->getTensorDesc().getLayout()});
+        out->allocate();
+
+        if (in->getTensorDesc().getPrecision() == Precision::FP32) {
+            PrecisionUtils::f32tof16Arrays(out->buffer().as<ie_fp16 *>(), in->cbuffer().as<float *>(), in->size());
+        } else {
+            ADD_FAILURE() << "Unsupported precision " << in->getTensorDesc().getPrecision();
+        }
+
+        return out;
+    }
+
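+    // Runs four infer requests asynchronously over the same input for numIters
+    // iterations; every output of every request must match exactly (0.0f bound).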
+    void RunAsyncTest(int numIters = 20) {
+        if (!CheckMyriadX()) {
+            SKIP() << "Non-MyriadX device";
+        }
+
+        auto fnPtr = ngraph::builder::subgraph::makeSplitMultiConvConcat();
+        ASSERT_NO_THROW(_cnnNetwork = CNNNetwork(fnPtr));
+
+        _cnnNetwork.getInputsInfo().begin()->second->setPrecision(inputPrecision);
+        _cnnNetwork.getOutputsInfo().begin()->second->setPrecision(outputPrecision);
+
+        _input = FuncTestUtils::createAndFillBlob(_cnnNetwork.getInputsInfo().begin()->second->getTensorDesc());
+
+        auto runTest = [&]() {
+            const int NUM_REQUESTS = 4;
+
+            std::map<std::string, std::string> config = {
+                { VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES) },
+                { CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES) },
+                { VPU_CONFIG_KEY(PERF_REPORT_MODE), VPU_CONFIG_VALUE(PER_STAGE) }
+            };
+
+            StatusCode st;
+
+            ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, _cnnNetwork, config, &_resp));
+            ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+            IInferRequest::Ptr inferRequests[NUM_REQUESTS];
+            Blob::Ptr outputs[NUM_REQUESTS];
+
+            for (int i = 0; i < NUM_REQUESTS; ++i) {
+                ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(inferRequests[i], &_resp));
+                ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+                ASSERT_NO_THROW(st = inferRequests[i]->SetBlob(_cnnNetwork.getInputsInfo().begin()->first.c_str(), _input, &_resp));
+                ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+                ASSERT_NO_THROW(st = inferRequests[i]->GetBlob(_cnnNetwork.getOutputsInfo().begin()->first.c_str(), outputs[i], &_resp));
+                ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+            }
+
+            std::vector<Blob::Ptr> allOutputs[NUM_REQUESTS];
+            for (int i = 0; i < NUM_REQUESTS; ++i) {
+                allOutputs[i].resize(numIters);
+            }
+
+            for (int iterInd = 0; iterInd < numIters; ++iterInd) {
+                for (int inferInd = 0; inferInd < NUM_REQUESTS; ++inferInd) {
+                    ASSERT_NO_THROW(st = inferRequests[inferInd]->StartAsync(&_resp));
+                    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+                }
+
+                for (int inferInd = 0; inferInd < NUM_REQUESTS; ++inferInd) {
+                    ASSERT_NO_THROW(st = inferRequests[inferInd]->Wait(IInferRequest::RESULT_READY, &_resp));
+                    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+                }
+
+                for (int inferInd = 0; inferInd < NUM_REQUESTS; ++inferInd) {
+                    auto tensorDesc = outputs[inferInd]->getTensorDesc();
+                    tensorDesc.setPrecision(Precision::FP16);
+
+                    allOutputs[inferInd][iterInd] = make_blob_with_precision(Precision::FP16, tensorDesc);
+                    allOutputs[inferInd][iterInd]->allocate();
+
+                    auto outputFP16 = getFp16Blob(outputs[inferInd]);
+
+                    ie_memcpy(allOutputs[inferInd][iterInd]->buffer(), allOutputs[inferInd][iterInd]->byteSize(),
+                              outputFP16->cbuffer(), outputFP16->byteSize());
+                }
+            }
+
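+            // Since every request consumed the same input, each (request,
+            // iteration) pair must yield a bit-exact result; compare each
+            // output against all subsequent ones with zero tolerance.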
+            for (int iterInd1 = 0; iterInd1 < numIters; ++iterInd1) {
+                for (int iterInd2 = iterInd1; iterInd2 < numIters; ++iterInd2) {
+                    for (int inferInd1 = 0; inferInd1 < NUM_REQUESTS; ++inferInd1) {
+                        for (int inferInd2 = inferInd1; inferInd2 < NUM_REQUESTS; ++inferInd2) {
+                            ASSERT_NO_FATAL_FAILURE(CompareCommonAbsolute(allOutputs[inferInd1][iterInd1], allOutputs[inferInd2][iterInd2], 0.0f))
+                                    << "inferInd1=" << inferInd1 << " "
+                                    << "iterInd1=" << iterInd1 << " "
+                                    << "inferInd2=" << inferInd2 << " "
+                                    << "iterInd2=" << iterInd2;
+                        }
+                    }
+                }
+            }
+        };
+
+        runTest();
+    }
+};
+
+TEST_P(MyriadX_HW_Networks_Tests_nightly, SimpleNetAsync) {
+    RunAsyncTest(100);
+}
+
+inline std::string getTestCaseName(const testing::TestParamInfo<HwNetworkParams>& param) {
+    return std::string((std::get<0>(param.param)).name()) + "_" +
+           std::string((std::get<1>(param.param)).name());
+}
+
+INSTANTIATE_TEST_CASE_P(Input_Output_ExecMode, MyriadX_HW_Networks_Tests_nightly,
+    testing::Values(
+          std::make_tuple(Precision::FP16, Precision::FP16)
+    ),
+    getTestCaseName
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_opt_tests.cpp b/inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_opt_tests.cpp
new file mode 100644 (file)
index 0000000..954f4c9
--- /dev/null
@@ -0,0 +1,5 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_hw_opt_tests.hpp"
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_opt_tests.hpp b/inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_opt_tests.hpp
new file mode 100644 (file)
index 0000000..05d056b
--- /dev/null
@@ -0,0 +1,12 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "myriad_hw_tests_base.hpp"
+#include "myriad_hw_conv_tests.hpp"
+#include "myriad_hw_pool_tests.hpp"
+#include "myriad_hw_fc_tests.hpp"
+#include "myriad_hw_extra_tests.hpp"
+#include "myriad_hw_network_tests.hpp"
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_pool_tests.hpp b/inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_pool_tests.hpp
new file mode 100644 (file)
index 0000000..695cd8e
--- /dev/null
@@ -0,0 +1,434 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "myriad_hw_tests_base.hpp"
+
+using HWPoolingParams = std::tuple<DimsInput, kernel, stride, pad>;
+
+class MyriadX_HW_Pooling_Tests_nightly
+        : public MyriadX_HW_Tests_nightly,
+          public testing::WithParamInterface<HWPoolingParams> {
+public:
+    tensor_test_params in_dims;
+    param_size kernel;
+    param_size stride;
+    param_size pad;
+
+    tensor_test_params out_dims;
+
+    IN_OUT_desc in_tensor, out_tensor;
+
+    void SetUp() override {
+        ASSERT_NO_FATAL_FAILURE(MyriadX_HW_Tests_nightly::SetUp());
+
+        in_dims = std::get<0>(GetParam());
+        kernel = std::get<1>(GetParam());
+        stride = std::get<2>(GetParam());
+        pad = std::get<3>(GetParam());
+
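+        // Ceil-rounded pooling output size:
+        //     out = ceil((in + 2 * pad - kernel) / stride) + 1
+        // e.g. a 224x224 input with a 2x2 kernel, stride 2, pad 0 gives 112x112.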
+        size_t out_w = std::ceil((in_dims.w + 2.0 * pad.x - kernel.x) / stride.x + 1);
+        size_t out_h = std::ceil((in_dims.h + 2.0 * pad.y - kernel.y) / stride.y + 1);
+
+        out_dims = {in_dims.n, in_dims.c, out_h, out_w};
+
+        in_tensor.push_back({in_dims.n, in_dims.c, in_dims.h, in_dims.w});
+        out_tensor.push_back({out_dims.n, out_dims.c, out_dims.h, out_dims.w});
+
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+    }
+
+    void AddPoolingLayer(const std::string& poolMethod) {
+        std::map<std::string, std::string> poolParams = {
+                  {"kernel-x", std::to_string(kernel.x)}
+                , {"kernel-y", std::to_string(kernel.y)}
+                , {"stride-x", std::to_string(stride.x)}
+                , {"stride-y", std::to_string(stride.y)}
+                , {"pad-x", std::to_string(pad.x)}
+                , {"pad-y", std::to_string(pad.y)}
+                , {"pool-method", poolMethod}
+        };
+        _testNet.addLayer(LayerInitParams("Pooling")
+                 .params(poolParams)
+                 .in(in_tensor)
+                 .out(out_tensor),
+                 ref_pooling_wrap);
+    }
+
+    void AddReLULayer(float negativeSlope = 0.0) {
+        ParamsStruct reluParams = {
+            {"negative_slope", std::to_string(negativeSlope)}
+        };
+        _testNet.addLayer(LayerInitParams("ReLU")
+                 .params(reluParams)
+                 .in(out_tensor)
+                 .out(out_tensor),
+                 ref_ReLU_wrap);
+    }
+
+    void RunSingleTest(const std::string& poolMethod, float tolerance) {
+        if (!CheckMyriadX()) {
+            SKIP() << "Non-MyriadX device";
+        }
+
+        AddPoolingLayer(poolMethod);
+
+        CompareWithSW(tolerance);
+    }
+
+    void RunWithReLUTest(const std::string& poolMethod, float tolerance) {
+        if (!CheckMyriadX()) {
+            SKIP() << "Non-MyriadX device";
+        }
+
+        AddPoolingLayer(poolMethod);
+        AddReLULayer(0.0f);
+
+        CompareWithSW(tolerance);
+    }
+
+    void RunMultipleInferTest(const std::string& poolMethod) {
+        if (!CheckMyriadX()) {
+            SKIP() << "Non-MyriadX device";
+        }
+
+        AddPoolingLayer(poolMethod);
+
+        CompareWithItself(100);
+    }
+};
+
+TEST_P(MyriadX_HW_Pooling_Tests_nightly, Max_Single) {
+    RunSingleTest("max", 0.0f);
+}
+
+TEST_P(MyriadX_HW_Pooling_Tests_nightly, Avg_Single) {
+    // this case is not supported by HW
+    if (kernel.x == 3 && kernel.y == 3 &&
+        stride.x == 2 && stride.y == 2) {
+        SKIP() << "Unsupported case";
+    }
+    if ((kernel.x % 2 == 0 || kernel.y % 2 == 0) &&
+        (in_dims.w % 2 == 1 || in_dims.h % 2 == 1)) {
+        SKIP() << "Unsupported case";
+    }
+
+    RunSingleTest("avg", 0.0015f);
+}
+
+TEST_P(MyriadX_HW_Pooling_Tests_nightly, Max_WithReLU) {
+    RunWithReLUTest("max", 0.0f);
+}
+
+TEST_P(MyriadX_HW_Pooling_Tests_nightly, Avg_WithReLU) {
+    // this case is not supported by HW
+    if (kernel.x == 3 && kernel.y == 3 &&
+        stride.x == 2 && stride.y == 2) {
+        SKIP() << "Unsupported case";
+    }
+    if ((kernel.x % 2 == 0 || kernel.y % 2 == 0) &&
+        (in_dims.w % 2 == 1 || in_dims.h % 2 == 1)) {
+        SKIP() << "Unsupported case";
+    }
+
+    RunWithReLUTest("avg", 0.0015f);
+}
+
+TEST_P(MyriadX_HW_Pooling_Tests_nightly, Max_MultipleInfer) {
+    RunMultipleInferTest("max");
+}
+
+TEST_P(MyriadX_HW_Pooling_Tests_nightly, Avg_MultipleInfer) {
+    // this case is not supported by HW
+    if (kernel.x == 3 && kernel.y == 3 &&
+        stride.x == 2 && stride.y == 2) {
+        SKIP() << "Unsupported case";
+    }
+    if ((kernel.x % 2 == 0 || kernel.y % 2 == 0) &&
+        (in_dims.w % 2 == 1 || in_dims.h % 2 == 1)) {
+        SKIP() << "Unsupported case";
+    }
+
+    RunMultipleInferTest("avg");
+}
+
+INSTANTIATE_TEST_CASE_P(pool_2x2s1p0, MyriadX_HW_Pooling_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 64, 112, 112))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(pool_2x2s2p0, MyriadX_HW_Pooling_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 64, 224, 224),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 128, 112, 112),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 256, 56, 56),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 512, 28, 28),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 512, 14, 14))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(pool_2x2s2p0_yolo_tiny_v1, MyriadX_HW_Pooling_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 16, 448, 448))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(pool_3x3s1p0, MyriadX_HW_Pooling_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 192, 28, 28),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 100, 28, 28))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(pool_3x3s1p1, MyriadX_HW_Pooling_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 192, 28, 28))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+// TODO: 3x3s2p0 HW seems to work only for Max Pooling
+INSTANTIATE_TEST_CASE_P(pool_3x3s2p0, MyriadX_HW_Pooling_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 64, 112, 112))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(pool_3x3s2p1, MyriadX_HW_Pooling_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 576, 7, 7),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 16, 35, 35),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 16, 75, 75),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 16, 35, 2045))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(pool_7x7s1p0, MyriadX_HW_Pooling_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 1024, 7, 7))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 7, 7))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(pool_14x14s1p0, MyriadX_HW_Pooling_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 1024, 14, 14),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 1000, 14, 14))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 14, 14))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(pool_15x15s1p0, MyriadX_HW_Pooling_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 1024, 15, 15),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 1000, 15, 15))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 15, 15))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(pool_2x2s1p1_odd, MyriadX_HW_Pooling_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 512, 13, 13))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 1, 1))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(pool_2x2s2p0_odd, MyriadX_HW_Pooling_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 256, 75, 75),
+                                                             MAKE_STRUCT(tensor_test_params, 2, 64, 75, 75),
+                                                             MAKE_STRUCT(tensor_test_params, 2, 64, 76, 75),
+                                                             MAKE_STRUCT(tensor_test_params, 2, 64, 75, 76))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(pool_3x3s1p0_odd, MyriadX_HW_Pooling_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 192, 37, 37),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 832, 9, 9),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 512, 19, 19))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(pool_3x3s2p0_odd, MyriadX_HW_Pooling_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 96, 93, 93),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 512, 23, 23),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 192, 75, 75),
+                                                             MAKE_STRUCT(tensor_test_params, 1, 480, 37, 37))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(pool_3x3s2p0_extra, MyriadX_HW_Pooling_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 96, 32, 52))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 3, 3))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 2, 2))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                        )
+);
+
+INSTANTIATE_TEST_CASE_P(pool_7x7s7p0_rfcn_batch, MyriadX_HW_Pooling_Tests_nightly,
+                        ::testing::Combine(
+                                ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 300, 5, 7, 7))
+                                , ::testing::Values<kernel>(MAKE_STRUCT(param_size, 7, 7))
+                                , ::testing::Values<stride>(MAKE_STRUCT(param_size, 7, 7))
+                                , ::testing::Values<pad>(MAKE_STRUCT(param_size, 0, 0))
+                        )
+);
+
+using PoolTFParams = std::tuple<DimsInput, kernel, stride, tfPad>;
+
+class MyriadX_HW_PoolTF_Tests_nightly :
+        public MyriadX_HW_Tests_nightly,
+        public testing::WithParamInterface<PoolTFParams> {
+public:
+    tensor_test_params inDims;
+    tensor_test_params outDims;
+    param_size kernel;
+    param_size stride;
+    paddings4 pad;
+
+    void SetUp() override {
+        ASSERT_NO_FATAL_FAILURE(MyriadX_HW_Tests_nightly::SetUp());
+
+        inDims = std::get<0>(GetParam());
+        kernel = std::get<1>(GetParam());
+        stride = std::get<2>(GetParam());
+        pad = std::get<3>(GetParam());
+
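+        // TF-style pooling with explicit, possibly asymmetric paddings. The
+        // layer below uses rounding-type "floor", which the integer division
+        // here already implements:
+        //     out = (in + pad_begin + pad_end - kernel) / stride + 1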
+        size_t out_w = std::ceil((inDims.w + pad.left + pad.right - kernel.x) / stride.x + 1);
+        size_t out_h = std::ceil((inDims.h + pad.top + pad.bottom - kernel.y) / stride.y + 1);
+
+        outDims = {inDims.n, inDims.c, out_h, out_w};
+    }
+
+    void AddPoolingLayer() {
+        std::map<std::string, std::string> poolParams = {
+            {"pool-method", "max"},
+            {"kernel-x", std::to_string(kernel.x)},
+            {"kernel-y", std::to_string(kernel.y)},
+            {"stride-x", std::to_string(stride.x)},
+            {"stride-y", std::to_string(stride.y)},
+            {"exclude-pad", "true"},
+            {"rounding-type", "floor"},
+            {"pad-x", std::to_string(pad.left)},
+            {"pad-r", std::to_string(pad.right)},
+            {"pad-y", std::to_string(pad.top)},
+            {"pad-b", std::to_string(pad.bottom)}
+        };
+
+        _testNet.addLayer(LayerInitParams("Pooling")
+                 .params(poolParams)
+                 .in({{inDims.n, inDims.c, inDims.h, inDims.w}})
+                 .out({{outDims.n, outDims.c, outDims.h, outDims.w}}),
+                 ref_pooling_wrap);
+    }
+};
+
+TEST_P(MyriadX_HW_PoolTF_Tests_nightly, Single) {
+    if (!CheckMyriadX()) {
+        SKIP() << "Non-MyriadX device";
+    }
+
+    AddPoolingLayer();
+
+    CompareWithSW(0.0f);
+}
+
+INSTANTIATE_TEST_CASE_P(pool_2x2_3x3, MyriadX_HW_PoolTF_Tests_nightly,
+    ::testing::Combine(
+        ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 32, 128, 128)),
+        ::testing::Values<kernel>(MAKE_STRUCT(param_size, 2, 2),
+                                  MAKE_STRUCT(param_size, 3, 3)),
+        ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1),
+                                  MAKE_STRUCT(param_size, 2, 2)),
+        ::testing::Values<tfPad>(MAKE_STRUCT(paddings4, 0, 0, 0, 0),
+                                 MAKE_STRUCT(paddings4, 0, 0, 1, 1),
+                                 MAKE_STRUCT(paddings4, 1, 0, 0, 0),
+                                 MAKE_STRUCT(paddings4, 1, 1, 0, 0),
+                                 MAKE_STRUCT(paddings4, 1, 1, 1, 1))
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(pool4x4, MyriadX_HW_PoolTF_Tests_nightly,
+    ::testing::Combine(
+        ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 32, 128, 128)),
+        ::testing::Values<kernel>(MAKE_STRUCT(param_size, 4, 4)),
+        ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1),
+                                  MAKE_STRUCT(param_size, 2, 2)),
+        ::testing::Values<tfPad>(MAKE_STRUCT(paddings4, 0, 0, 0, 0),
+                                 MAKE_STRUCT(paddings4, 0, 0, 2, 2),
+                                 MAKE_STRUCT(paddings4, 1, 1, 2, 2),
+                                 MAKE_STRUCT(paddings4, 2, 0, 0, 0),
+                                 MAKE_STRUCT(paddings4, 2, 0, 0, 1),
+                                 MAKE_STRUCT(paddings4, 2, 0, 1, 0),
+                                 MAKE_STRUCT(paddings4, 2, 2, 0, 0),
+                                 MAKE_STRUCT(paddings4, 2, 2, 1, 1),
+                                 MAKE_STRUCT(paddings4, 2, 2, 2, 2))
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(pool_with_large_width, MyriadX_HW_PoolTF_Tests_nightly,
+    ::testing::Combine(
+        ::testing::Values<DimsInput>(MAKE_STRUCT(tensor_test_params, 1, 8, 640, 960),
+                                     MAKE_STRUCT(tensor_test_params, 1, 64, 6, 1000)),
+        ::testing::Values<kernel>(MAKE_STRUCT(param_size, 2, 2),
+                                  MAKE_STRUCT(param_size, 3, 3),
+                                  MAKE_STRUCT(param_size, 4, 4)),
+        ::testing::Values<stride>(MAKE_STRUCT(param_size, 1, 1),
+                                  MAKE_STRUCT(param_size, 2, 2)),
+        ::testing::Values<tfPad>(MAKE_STRUCT(paddings4, 0, 0, 0, 0))
+    )
+);
+
+INSTANTIATE_TEST_CASE_P(tf, MyriadX_HW_PoolTF_Tests_nightly,
+    ::testing::Values(
+        std::make_tuple(
+            MAKE_STRUCT(tensor_test_params, 1, 64, 112, 112),   // input
+            MAKE_STRUCT(param_size, 3, 3),                      // kernel
+            MAKE_STRUCT(param_size, 2, 2),                      // stride
+            MAKE_STRUCT(paddings4, 0, 0, 1, 1)                  // pad
+        )
+    )
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_tests_base.hpp b/inference-engine/tests_deprecated/functional/vpu/common/myriad_hw_tests_base.hpp
new file mode 100644 (file)
index 0000000..7ec8a7f
--- /dev/null
@@ -0,0 +1,202 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <cstring>
+#include <thread>
+#include <chrono>
+
+#include <blob_factory.hpp>
+#include <ie_memcpy.h>
+#include <format_reader_ptr.h>
+
+#include <myriad_layers_tests.hpp>
+#include <myriad_layers_reference_functions.hpp>
+
+using namespace InferenceEngine;
+
+PRETTY_PARAM(kernel, param_size)
+PRETTY_PARAM(stride, param_size)
+PRETTY_PARAM(pad, param_size)
+PRETTY_PARAM(out_channels, int)
+PRETTY_PARAM(group, int)
+PRETTY_PARAM(dilation_factor, param_size)
+PRETTY_PARAM(tfPad, paddings4)
+
+struct RunInfo {
+    bool hwMode = true;
+};
+
+class MyriadX_HW_Tests_nightly : public myriadLayersTests_nightly {
+public:
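+    // Verifies that the last inference actually used HW stages: prints a
+    // per-stage profile sorted by execution index and expects at least one
+    // executed stage whose exec_type contains "MyriadXHw".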
+    void CheckHWRun() {
+        StatusCode st;
+
+        std::map<std::string, InferenceEngineProfileInfo> perfMap;
+        ASSERT_NO_THROW(st = _inferRequest->GetPerformanceCounts(perfMap, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        std::vector<std::pair<std::string, InferenceEngineProfileInfo>> perfVec(perfMap.begin(), perfMap.end());
+        std::sort(perfVec.begin(), perfVec.end(),
+            [=](const std::pair<std::string, InferenceEngineProfileInfo> &pair1,
+                const std::pair<std::string, InferenceEngineProfileInfo> &pair2) {
+                return pair1.second.execution_index < pair2.second.execution_index;
+            });
+
+        size_t maxLayerName = 0u, maxExecType = 0u;
+        for (auto it = perfVec.begin(); it != perfVec.end(); ++it) {
+            maxLayerName = std::max(maxLayerName, it->first.length());
+            maxExecType = std::max(maxExecType, std::strlen(it->second.exec_type));
+        }
+
+        size_t indexWidth = 7, nameWidth = maxLayerName + 5, typeWidth = maxExecType + 5, timeWidth = 10;
+        size_t totalWidth = indexWidth + nameWidth + typeWidth + timeWidth;
+
+        std::cout << std::endl;
+        std::cout << "Detailed Per Stage Profile" << std::endl;
+
+        for (size_t i = 0; i < totalWidth; i++) {
+            std::cout << "=";
+        }
+
+        std::cout << std::endl;
+        std::cout << std::setw(indexWidth) << std::left << "Index"
+                  << std::setw(nameWidth) << std::left << "Name"
+                  << std::setw(typeWidth) << std::left << "Type"
+                  << std::setw(timeWidth) << std::right << "Time (ms)"
+                  << std::endl;
+
+        for (size_t i = 0; i < totalWidth; i++) {
+            std::cout << "-";
+        }
+        std::cout << std::endl;
+
+        bool hasHWStage = false;
+        long long totalTime = 0;
+
+        for (const auto& p : perfVec) {
+            const auto& stageName = p.first;
+            const auto& info = p.second;
+
+            if (info.status == InferenceEngineProfileInfo::EXECUTED) {
+                std::string stageType(info.exec_type);
+                if (stageType.find("MyriadXHw") != std::string::npos) {
+                    hasHWStage = true;
+                }
+
+                std::cout << std::setw(indexWidth) << std::left << info.execution_index
+                          << std::setw(nameWidth) << std::left << stageName
+                          << std::setw(typeWidth) << std::left << info.exec_type
+                          << std::setw(timeWidth) << std::right << info.realTime_uSec / 1000.0
+                          << std::endl;
+
+                totalTime += info.realTime_uSec;
+            }
+        }
+
+        for (size_t i = 0; i < totalWidth; i++) {
+            std::cout << "-";
+        }
+        std::cout << std::endl;
+
+        std::cout << std::setw(totalWidth / 2) << std::right << "Total inference time:"
+                  << std::setw(totalWidth / 2 + 1) << std::right << totalTime / 1000.0
+                  << std::endl;
+
+        for (size_t i = 0; i < totalWidth; i++) {
+            std::cout << "-";
+        }
+        std::cout << std::endl;
+
+        EXPECT_TRUE(hasHWStage);
+    }
+
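+    // Loads the network on the VPU plugin with the requested HW mode and
+    // per-stage performance counters enabled, then runs a single synchronous
+    // inference on the given input/output blobs.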
+    void RunNetwork(const CNNNetwork& network,
+                    const Blob::Ptr& input,
+                    Blob::Ptr& output,
+                    const char* inputName,
+                    const char* outputName,
+                    const RunInfo& runInfo,
+                    const std::string& logLevel = CONFIG_VALUE(LOG_NONE)) {
+        _inferRequest.reset();
+        _exeNetwork.reset();
+
+        StatusCode st;
+
+        std::map<std::string, std::string> config = {
+            { VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), runInfo.hwMode ? CONFIG_VALUE(YES) : CONFIG_VALUE(NO) },
+
+            { CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES) },
+            { VPU_CONFIG_KEY(PERF_REPORT_MODE), VPU_CONFIG_VALUE(PER_STAGE) },
+
+            { CONFIG_KEY(LOG_LEVEL), logLevel }
+        };
+
+        ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, config, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        ASSERT_NO_THROW(st = _inferRequest->SetBlob(inputName, input, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        ASSERT_NO_THROW(st = _inferRequest->GetBlob(outputName, output, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    }
+
+    void CompareWithSW(float errorThreshold, vpu::LayoutPreference layoutPreference = vpu::LayoutPreference::ChannelMajor) {
+        Blob::Ptr swOutput;
+        {
+            SCOPED_TRACE("SW");
+
+            ResetGeneratedNet();
+            ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()
+                                            .useHWOpt(false)
+                                            .runRefGraph(false)
+                                            .layoutPreference(layoutPreference)));
+
+            auto outBlob = _outputMap.begin()->second;
+            swOutput = make_shared_blob<ie_fp16>(outBlob->getTensorDesc());
+            swOutput->allocate();
+            std::copy_n(outBlob->cbuffer().as<const uint8_t*>(), outBlob->byteSize(), swOutput->buffer().as<uint8_t*>());
+        }
+
+        {
+            SCOPED_TRACE("HW");            
+
+            ResetGeneratedNet();
+            ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()
+                                            .useHWOpt(true)
+                                            .runRefGraph(false)
+                                            .layoutPreference(layoutPreference)));
+            ASSERT_NO_FATAL_FAILURE(CheckHWRun());
+
+            auto outBlob = _outputMap.begin()->second;
+            CompareCommonAbsolute(outBlob, swOutput, errorThreshold);
+        }
+    }
+
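+    // Determinism check: infer once, snapshot the first output, then re-run
+    // the same request numIters times expecting bit-exact results.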
+    void CompareWithItself(int numIters) {
+        ResetGeneratedNet();
+        ASSERT_TRUE(generateNetAndInfer(NetworkInitParams()
+                                        .useHWOpt(true)
+                                        .runRefGraph(false)));
+
+        auto outBlob = _outputMap.begin()->second;
+
+        auto firstOutput = make_shared_blob<ie_fp16>(outBlob->getTensorDesc());
+        firstOutput->allocate();
+        std::copy_n(outBlob->cbuffer().as<const ie_fp16*>(), outBlob->size(), firstOutput->buffer().as<ie_fp16*>());
+
+        for (int i = 0; i < numIters; ++i) {
+            ASSERT_TRUE(Infer());
+            ASSERT_NO_FATAL_FAILURE(CompareCommonAbsolute(outBlob, firstOutput, 0.0f)) << i;
+        }
+    }
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/myriad_infer_tests.cpp b/inference-engine/tests_deprecated/functional/vpu/common/myriad_infer_tests.cpp
new file mode 100644 (file)
index 0000000..cdfdff8
--- /dev/null
@@ -0,0 +1,296 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+
+using namespace InferenceEngine;
+
+using myriadInferTests_nightly = myriadLayersTests_nightly;
+
+TEST_F(myriadInferTests_nightly, NCHW_Input) {
+    std::string model = R"V0G0N(
+        <net name="Power" version="2" batch="1">
+            <layers>
+                <layer name="data" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>600</dim>
+                            <dim>800</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="power" type="Power" precision="FP16" id="2">
+                    <power_data power="1" scale="1" shift="0"/>
+                    <input>
+                        <port id="2">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>600</dim>
+                            <dim>800</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="3">
+                            <dim>1</dim>
+                            <dim>3</dim>
+                            <dim>600</dim>
+                            <dim>800</dim>
+                        </port>
+                    </output>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="1" to-layer="2" to-port="2"/>
+            </edges>
+        </net>
+    )V0G0N";
+
+    StatusCode st;
+
+    ASSERT_NO_THROW(readNetwork(model));
+
+    const auto& network = _cnnNetwork;
+
+    _inputsInfo = network.getInputsInfo();
+    _inputsInfo["data"]->setPrecision(Precision::FP16);
+
+    _outputsInfo = network.getOutputsInfo();
+    _outputsInfo["power"]->setPrecision(Precision::FP16);
+
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, {}, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _exeNetwork->CreateInferRequest(_inferRequest, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    auto dims = _inputsInfo["data"]->getTensorDesc().getDims();
+
+    auto tensorDescNHWC = TensorDesc(Precision::FP16, dims, Layout::NHWC);
+    auto inputNHWC = make_shared_blob<ie_fp16>(tensorDescNHWC);
+    ASSERT_NO_THROW(inputNHWC->allocate());
+
+    auto outputNHWC = make_shared_blob<ie_fp16>(tensorDescNHWC);
+    ASSERT_NO_THROW(outputNHWC->allocate());
+
+    auto tensorDescNCHW = TensorDesc(Precision::FP16, dims, Layout::NCHW);
+    auto inputNCHW = make_shared_blob<ie_fp16>(tensorDescNCHW);
+    ASSERT_NO_THROW(inputNCHW->allocate());
+
+    auto outputNCHW = make_shared_blob<ie_fp16>(tensorDescNCHW);
+    ASSERT_NO_THROW(outputNCHW->allocate());
+
+    ASSERT_NO_THROW(GenRandomData(inputNHWC));
+
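+    // TensorDesc::offset(i) maps the logical element index i to the
+    // layout-specific memory offset, so this copies the NHWC content into
+    // the NCHW blob element by element.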
+    for (size_t i = 0; i < inputNHWC->size(); i++) {
+        inputNCHW->buffer().as<ie_fp16*>()[tensorDescNCHW.offset(i)] = inputNHWC->cbuffer().as<const ie_fp16*>()[tensorDescNHWC.offset(i)];
+    }
+
+    ASSERT_NO_THROW(st = _inferRequest->SetBlob("data", inputNHWC, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _inferRequest->SetBlob("power", outputNHWC, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _inferRequest->SetBlob("data", inputNCHW, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _inferRequest->SetBlob("power", outputNCHW, &_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    ASSERT_NO_THROW(st = _inferRequest->Infer(&_resp));
+    ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+    CompareCommonAbsolute(outputNHWC, outputNCHW, 0.0);
+}
+
+TEST_F(myriadInferTests_nightly, AddOutputToConvWithReLU) {
+    const std::string conv_model = R"V0G0N(
+        <Net name="conv_model" version="2" batch="1">
+            <layers>
+                <layer name="input" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="conv" type="Convolution" precision="FP16" id="2">
+                    <convolution_data
+                        stride-x="1" stride-y="1"
+                        pad-x="0" pad-y="0"
+                        kernel-x="1" kernel-y="1"
+                        output="64"
+                        group="1"/>
+                    <input>
+                        <port id="2">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="3">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                    <weights offset="0" size="8192"/>
+                    <biases offset="8192" size="128"/>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="1" to-layer="2" to-port="2"/>
+            </edges>
+        </Net>
+    )V0G0N";
+
+    const std::string full_model = R"V0G0N(
+        <Net name="full_model" version="2" batch="1">
+            <layers>
+                <layer name="input" type="Input" precision="FP16" id="1">
+                    <output>
+                        <port id="1">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                </layer>
+                <layer name="conv" type="Convolution" precision="FP16" id="2">
+                    <convolution_data
+                        stride-x="1" stride-y="1"
+                        pad-x="0" pad-y="0"
+                        kernel-x="1" kernel-y="1"
+                        output="64"
+                        group="1"/>
+                    <input>
+                        <port id="2">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="3">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                    <weights offset="0" size="8192"/>
+                    <biases offset="8192" size="128"/>
+                </layer>
+                <layer name="relu" type="ReLU" precision="FP16" id="3">
+                    <input>
+                        <port id="4">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </input>
+                    <output>
+                        <port id="5">
+                            <dim>1</dim>
+                            <dim>64</dim>
+                            <dim>56</dim>
+                            <dim>56</dim>
+                        </port>
+                    </output>
+                </layer>
+            </layers>
+            <edges>
+                <edge from-layer="1" from-port="1" to-layer="2" to-port="2"/>
+                <edge from-layer="2" from-port="3" to-layer="3" to-port="4"/>
+            </edges>
+        </Net>
+    )V0G0N";
+
+    StatusCode st;
+
+    TBlob<uint8_t>::Ptr weights(GenWeights(8320 / sizeof(ie_fp16)));
+
+    InferenceEngine::Core ie;
+    auto conv_network = ie.ReadNetwork(conv_model, weights);
+
+    auto conv_inputs_info = conv_network.getInputsInfo();
+    conv_inputs_info["input"]->setPrecision(Precision::FP16);
+
+    auto conv_outputs_info = conv_network.getOutputsInfo();
+    conv_outputs_info["conv"]->setPrecision(Precision::FP16);
+
+    Blob::Ptr input = make_shared_blob<ie_fp16>({Precision::FP16, conv_inputs_info["input"]->getTensorDesc().getDims(), Layout::NCHW});
+    input->allocate();
+    GenRandomData(input);
+
+    Blob::Ptr conv_output;
+    {
+        IExecutableNetwork::Ptr conv_exe;
+        ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(conv_exe, conv_network, {}, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        ASSERT_NE(conv_exe, nullptr) << _resp.msg;
+
+        IInferRequest::Ptr conv_req;
+        ASSERT_NO_THROW(st = conv_exe->CreateInferRequest(conv_req, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        ASSERT_NO_THROW(st = conv_req->SetBlob("input", input, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        ASSERT_NO_THROW(st = conv_req->GetBlob("conv", conv_output, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        ASSERT_NO_THROW(st = conv_req->Infer(&_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    }
+
+    auto full_network = ie.ReadNetwork(full_model, weights);
+
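+    // Expose the intermediate convolution result as an additional network
+    // output; it must bit-exactly match the standalone conv-only network.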
+    full_network.addOutput("conv", 0);
+
+    auto full_inputs_info = full_network.getInputsInfo();
+    full_inputs_info["input"]->setPrecision(Precision::FP16);
+
+    auto full_outputs_info = full_network.getOutputsInfo();
+    full_outputs_info["conv"]->setPrecision(Precision::FP16);
+    full_outputs_info["relu"]->setPrecision(Precision::FP16);
+
+    Blob::Ptr full_output;
+    {
+        IExecutableNetwork::Ptr full_exe;
+        ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(full_exe, full_network, {}, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+        ASSERT_NE(full_exe, nullptr) << _resp.msg;
+
+        IInferRequest::Ptr full_req;
+        ASSERT_NO_THROW(st = full_exe->CreateInferRequest(full_req, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        ASSERT_NO_THROW(st = full_req->SetBlob("input", input, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        ASSERT_NO_THROW(st = full_req->GetBlob("conv", full_output, &_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+
+        ASSERT_NO_THROW(st = full_req->Infer(&_resp));
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    }
+
+    CompareCommonAbsolute(full_output, conv_output, 0.0f);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/myriad_merge_permute_tests.cpp b/inference-engine/tests_deprecated/functional/vpu/common/myriad_merge_permute_tests.cpp
new file mode 100644 (file)
index 0000000..748aad7
--- /dev/null
@@ -0,0 +1,23 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_merge_permute_tests.hpp"
+
+INSTANTIATE_TEST_CASE_P(accuracy_3D, myriadLayersMergePermuteNDTests_nightly,
+        ::testing::Combine(
+            ::testing::ValuesIn(s_inTensors_3D)
+          , ::testing::ValuesIn(s_permuteParams_3D)
+));
+
+INSTANTIATE_TEST_CASE_P(accuracy_4D, myriadLayersMergePermuteNDTests_nightly,
+        ::testing::Combine(
+            ::testing::ValuesIn(s_inTensors_4D)
+          , ::testing::ValuesIn(s_permuteParams_4D)
+));
+
+INSTANTIATE_TEST_CASE_P(accuracy_5D, myriadLayersMergePermuteNDTests_nightly,
+        ::testing::Combine(
+            ::testing::ValuesIn(s_inTensors_5D)
+          , ::testing::ValuesIn(s_permuteParams_5D)
+));
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/myriad_merge_permute_tests.hpp b/inference-engine/tests_deprecated/functional/vpu/common/myriad_merge_permute_tests.hpp
new file mode 100644 (file)
index 0000000..ec0194d
--- /dev/null
@@ -0,0 +1,109 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "myriad_layers_tests.hpp"
+
+#include <debug.h>
+
+using namespace InferenceEngine;
+
+using PermutationsSequence = std::vector<InferenceEngine::SizeVector>;
+
+using MergePermuteNDParams = std::tuple<InferenceEngine::SizeVector,  // input tensor sizes
+                                        PermutationsSequence>;        // permutation vectors sequence
+
+class myriadLayersMergePermuteNDTests_nightly:
+    public myriadLayersTests_nightly,
+    public testing::WithParamInterface<MergePermuteNDParams> {
+
+public:
+    Blob::Ptr InferPermute(InferenceEngine::SizeVector input_tensor_sizes,
+                           const PermutationsSequence& permutation_vectors,
+                           const bool usePermuteMerging,
+                           int64_t& executionMicroseconds)
+    {
+        ResetGeneratedNet();
+        ResetReferenceLayers();
+
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)  ] = CONFIG_VALUE(NO);
+        _config[VPU_CONFIG_KEY(ENABLE_PERMUTE_MERGING)] = usePermuteMerging ? CONFIG_VALUE(YES) : CONFIG_VALUE(NO);
+
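+        // Chain the requested Permute layers. Per the Permute layer
+        // semantics, output dim i takes input dim order[i].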
+        for (const auto& permutation_vector : permutation_vectors) {
+            const auto num_dims = input_tensor_sizes.size();
+            SizeVector output_tensor_sizes(num_dims);
+            for (size_t i = 0; i < num_dims; i++) {
+                output_tensor_sizes[i] = input_tensor_sizes[permutation_vector[i]];
+            }
+
+            const std::map<std::string, std::string> layer_params{{"order", details::joinVec(permutation_vector)}};
+
+            _testNet.addLayer(LayerInitParams("Permute")
+                     .params(layer_params)
+                     .in({input_tensor_sizes})
+                     .out({output_tensor_sizes}),
+                     ref_permute_wrap);
+
+            input_tensor_sizes = output_tensor_sizes;  // update input for next layer
+        }
+
+        IE_ASSERT(generateNetAndInfer(NetworkInitParams().useHWOpt(CheckMyriadX()).runRefGraph(false)));
+
+        std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perfMap;
+        _inferRequest->GetPerformanceCounts(perfMap, nullptr);
+
+        executionMicroseconds = 0;
+        for (const auto& perfPair : perfMap) {
+            const InferenceEngine::InferenceEngineProfileInfo& info = perfPair.second;
+            if (info.status == InferenceEngine::InferenceEngineProfileInfo::EXECUTED) {
+                executionMicroseconds += info.realTime_uSec;
+            }
+        }
+
+        return _outputMap.begin()->second;
+    }
+};
+
+TEST_P(myriadLayersMergePermuteNDTests_nightly, Permute) {
+    const auto& test_params = GetParam();
+    const auto& input_tensor_sizes  = std::get<0>(test_params);
+    const auto& permutation_vectors = std::get<1>(test_params);
+
+    int64_t executionMicroseconds = 0, executionMicrosecondsOptimized = 0;
+
+    const auto output_blob_with_merging    = InferPermute(input_tensor_sizes, permutation_vectors, true  , executionMicrosecondsOptimized);
+    const auto output_blob_without_merging = InferPermute(input_tensor_sizes, permutation_vectors, false , executionMicroseconds);
+
+    CompareCommonAbsolute(output_blob_with_merging, output_blob_without_merging, 0.);
+    std::cout << "Myriad time = non-optimized: " << executionMicroseconds << " us., optimized: " << executionMicrosecondsOptimized << " us.\n";
+}
+
+static const std::vector<InferenceEngine::SizeVector> s_inTensors_3D = {
+    {5, 7, 11},
+};
+
+static const std::vector<PermutationsSequence> s_permuteParams_3D = {
+    {{1, 2, 0}, {1, 2, 0}},
+    {{1, 2, 0}, {1, 2, 0}, {1, 2, 0}}, // trivial one.
+};
+
+static const std::vector<InferenceEngine::SizeVector> s_inTensors_4D = {
+    {3, 5, 7, 11},
+};
+
+static const std::vector<PermutationsSequence> s_permuteParams_4D = {
+    {{1, 2, 3, 0}, {1, 2, 3, 0}, {1, 2, 3, 0}},
+    {{1, 2, 3, 0}, {1, 2, 3, 0}, {1, 2, 3, 0}, {1, 2, 3, 0}}, // trivial one.
+};
+
+static const std::vector<InferenceEngine::SizeVector> s_inTensors_5D = {
+    {2, 3, 5, 7, 11},
+};
+
+static const std::vector<PermutationsSequence> s_permuteParams_5D = {
+    {{0, 4, 1, 2, 3}, {0, 2, 1, 3, 4}},
+    {{0, 3, 4, 1, 2}, {0, 1, 3, 2, 4}},
+    {{1, 2, 3, 4, 0}, {1, 2, 3, 4, 0}},
+    {{4, 0, 1, 2, 3}, {4, 0, 1, 2, 3}},
+    {{0, 1, 2, 3, 4}, {0, 3, 1, 2, 4}, {0, 1, 2, 4, 3}},
+    {{0, 1, 3, 2, 4}, {3, 1, 0, 2, 4}, {0, 1, 2, 4, 3}, {4, 1, 2, 0, 3}},
+    {{1, 2, 3, 4, 0}, {1, 2, 3, 4, 0}, {1, 2, 3, 4, 0}, {1, 2, 3, 4, 0}, {1, 2, 3, 4, 0}}, // trivial one.
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/myriad_xml_tests.hpp b/inference-engine/tests_deprecated/functional/vpu/common/myriad_xml_tests.hpp
new file mode 100644 (file)
index 0000000..9743ab4
--- /dev/null
@@ -0,0 +1,797 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "myriad_layers_tests.hpp"
+
+std::string full_model = R"V0G0N(
+<net batch="1" name="model" version="2">
+       <layers>
+               <layer id="6" name="data" precision="FP16" type="Input">
+                       <output>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>64</dim>
+                                       <dim>64</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="5" name="conv1_7x7_s2" precision="FP16" type="Convolution">
+                       <data dilation-x="1" dilation-y="1" group="1" kernel-x="7" kernel-y="7" output="64" pad-x="3" pad-y="3" stride="1,1,2,2" stride-x="2" stride-y="2"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>64</dim>
+                                       <dim>64</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="3">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+                       <blobs>
+                               <weights offset="0" size="18816"/>
+                               <biases offset="18816" size="128"/>
+                       </blobs>
+               </layer>
+               <layer id="10" name="conv1_relu_7x7" precision="FP16" type="ReLU">
+                       <data engine="caffe.ReLUParameter.DEFAULT" negative_slope="0.0"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="4" name="pool1_3x3_s2" precision="FP16" type="Pooling">
+                       <data exclude-pad="false" kernel-x="3" kernel-y="3" pad-x="0" pad-y="0" pool-method="max" rounding_type="ceil" stride="1,1,2,2" stride-x="2" stride-y="2"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>16</dim>
+                                       <dim>16</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="16" name="loss3_classifier" precision="FP16" type="FullyConnected">
+                       <data out-size="1000"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>16</dim>
+                                       <dim>16</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="3">
+                                       <dim>1</dim>
+                                       <dim>1000</dim>
+                               </port>
+                       </output>
+                       <blobs>
+                               <weights offset="18944" size="32768000"/>
+                               <biases offset="32786944" size="2000"/>
+                       </blobs>
+               </layer>
+               <layer id="3" name="ReluFC" precision="FP16" type="ReLU">
+                       <data engine="caffe.ReLUParameter.DEFAULT" negative_slope="0.0"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>1000</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>1</dim>
+                                       <dim>1000</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="8" name="prob" precision="FP16" type="SoftMax">
+                       <data axis="1"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>1000</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>1</dim>
+                                       <dim>1000</dim>
+                               </port>
+                       </output>
+               </layer>
+       </layers>
+       <edges>
+               <edge from-layer="6" from-port="0" to-layer="5" to-port="0"/>
+               <edge from-layer="5" from-port="3" to-layer="10" to-port="0"/>
+               <edge from-layer="10" from-port="1" to-layer="4" to-port="0"/>
+               <edge from-layer="4" from-port="1" to-layer="16" to-port="0"/>
+               <edge from-layer="16" from-port="3" to-layer="3" to-port="0"/>
+               <edge from-layer="3" from-port="1" to-layer="8" to-port="0"/>
+       </edges>
+</net>
+    )V0G0N";
+
+std::string fcModel = R"V0G0N(
+    <net batch="1" name="model" version="2">
+       <layers>
+               <layer id="10" name="data" precision="FP16" type="Input">
+                       <output>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>64</dim>
+                                       <dim>64</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="2" name="conv1_7x7_s2" precision="FP16" type="Convolution">
+                       <data dilation-x="1" dilation-y="1" group="1" kernel-x="7" kernel-y="7" output="64" pad-x="3" pad-y="3" stride="1,1,2,2" stride-x="2" stride-y="2"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>64</dim>
+                                       <dim>64</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="3">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+                       <blobs>
+                               <weights offset="0" size="18816"/>
+                               <biases offset="18816" size="128"/>
+                       </blobs>
+               </layer>
+               <layer id="3" name="conv1_relu_7x7" precision="FP16" type="ReLU">
+                       <data engine="caffe.ReLUParameter.DEFAULT" negative_slope="0.0"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="4" name="pool1_3x3_s2" precision="FP16" type="Pooling">
+                       <data exclude-pad="false" kernel-x="3" kernel-y="3" pad-x="0" pad-y="0" pool-method="max" rounding_type="ceil" stride="1,1,2,2" stride-x="2" stride-y="2"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>16</dim>
+                                       <dim>16</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="6" name="loss3_classifier" precision="FP16" type="FullyConnected">
+                       <data out-size="1000"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>16</dim>
+                                       <dim>16</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="3">
+                                       <dim>1</dim>
+                                       <dim>1000</dim>
+                               </port>
+                       </output>
+                       <blobs>
+                               <weights offset="18944" size="32768000"/>
+                               <biases offset="32786944" size="2000"/>
+                       </blobs>
+               </layer>
+       </layers>
+       <edges>
+               <edge from-layer="10" from-port="0" to-layer="2" to-port="0"/>
+               <edge from-layer="2" from-port="3" to-layer="3" to-port="0"/>
+               <edge from-layer="3" from-port="1" to-layer="4" to-port="0"/>
+               <edge from-layer="4" from-port="1" to-layer="6" to-port="0"/>
+       </edges>
+</net>
+        )V0G0N";
+
+
+std::string reluFcModel = R"V0G0N(
+    <net batch="1" name="model" version="2">
+       <layers>
+               <layer id="7" name="data" precision="FP16" type="Input">
+                       <output>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>64</dim>
+                                       <dim>64</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="11" name="conv1_7x7_s2" precision="FP16" type="Convolution">
+                       <data dilation-x="1" dilation-y="1" group="1" kernel-x="7" kernel-y="7" output="64" pad-x="3" pad-y="3" stride="1,1,2,2" stride-x="2" stride-y="2"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>64</dim>
+                                       <dim>64</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="3">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+                       <blobs>
+                               <weights offset="0" size="18816"/>
+                               <biases offset="18816" size="128"/>
+                       </blobs>
+               </layer>
+               <layer id="15" name="conv1_relu_7x7" precision="FP16" type="ReLU">
+                       <data engine="caffe.ReLUParameter.DEFAULT" negative_slope="0.0"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="6" name="pool1_3x3_s2" precision="FP16" type="Pooling">
+                       <data exclude-pad="false" kernel-x="3" kernel-y="3" pad-x="0" pad-y="0" pool-method="max" rounding_type="ceil" stride="1,1,2,2" stride-x="2" stride-y="2"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>16</dim>
+                                       <dim>16</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="4" name="loss3_classifier" precision="FP16" type="FullyConnected">
+                       <data out-size="1000"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>16</dim>
+                                       <dim>16</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="3">
+                                       <dim>1</dim>
+                                       <dim>1000</dim>
+                               </port>
+                       </output>
+                       <blobs>
+                               <weights offset="18944" size="32768000"/>
+                               <biases offset="32786944" size="2000"/>
+                       </blobs>
+               </layer>
+               <layer id="2" name="ReluFC" precision="FP16" type="ReLU">
+                       <data engine="caffe.ReLUParameter.DEFAULT" negative_slope="0.0"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>1000</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>1</dim>
+                                       <dim>1000</dim>
+                               </port>
+                       </output>
+               </layer>
+       </layers>
+       <edges>
+               <edge from-layer="7" from-port="0" to-layer="11" to-port="0"/>
+               <edge from-layer="11" from-port="3" to-layer="15" to-port="0"/>
+               <edge from-layer="15" from-port="1" to-layer="6" to-port="0"/>
+               <edge from-layer="6" from-port="1" to-layer="4" to-port="0"/>
+               <edge from-layer="4" from-port="3" to-layer="2" to-port="0"/>
+       </edges>
+</net>
+    )V0G0N";
+
+std::string poolModel = R"V0G0N(
+    <net batch="1" name="model" version="2">
+       <layers>
+               <layer id="5" name="data" precision="FP16" type="Input">
+                       <output>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>64</dim>
+                                       <dim>64</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="1" name="conv1_7x7_s2" precision="FP16" type="Convolution">
+                       <data dilation-x="1" dilation-y="1" group="1" kernel-x="7" kernel-y="7" output="64" pad-x="3" pad-y="3" stride="1,1,2,2" stride-x="2" stride-y="2"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>64</dim>
+                                       <dim>64</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="3">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+                       <blobs>
+                               <weights offset="0" size="18816"/>
+                               <biases offset="18816" size="128"/>
+                       </blobs>
+               </layer>
+               <layer id="0" name="conv1_relu_7x7" precision="FP16" type="ReLU">
+                       <data engine="caffe.ReLUParameter.DEFAULT" negative_slope="0.0"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="7" name="pool1_3x3_s2" precision="FP16" type="Pooling">
+                       <data exclude-pad="false" kernel-x="3" kernel-y="3" pad-x="0" pad-y="0" pool-method="max" rounding_type="ceil" stride="1,1,2,2" stride-x="2" stride-y="2"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>16</dim>
+                                       <dim>16</dim>
+                               </port>
+                       </output>
+               </layer>
+       </layers>
+       <edges>
+               <edge from-layer="5" from-port="0" to-layer="1" to-port="0"/>
+               <edge from-layer="1" from-port="3" to-layer="0" to-port="0"/>
+               <edge from-layer="0" from-port="1" to-layer="7" to-port="0"/>
+       </edges>
+</net>
+        )V0G0N";
+
+std::string reluConvModel = R"V0G0N(
+    <net batch="1" name="model" version="2">
+       <layers>
+               <layer id="6" name="data" precision="FP16" type="Input">
+                       <output>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>64</dim>
+                                       <dim>64</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="7" name="conv1_7x7_s2" precision="FP16" type="Convolution">
+                       <data dilation-x="1" dilation-y="1" group="1" kernel-x="7" kernel-y="7" output="64" pad-x="3" pad-y="3" stride="1,1,2,2" stride-x="2" stride-y="2"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>64</dim>
+                                       <dim>64</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="3">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+                       <blobs>
+                               <weights offset="0" size="18816"/>
+                               <biases offset="18816" size="128"/>
+                       </blobs>
+               </layer>
+               <layer id="2" name="conv1_relu_7x7" precision="FP16" type="ReLU">
+                       <data engine="caffe.ReLUParameter.DEFAULT" negative_slope="0.0"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+               </layer>
+       </layers>
+       <edges>
+               <edge from-layer="6" from-port="0" to-layer="7" to-port="0"/>
+               <edge from-layer="7" from-port="3" to-layer="2" to-port="0"/>
+       </edges>
+</net>
+        )V0G0N";
+
+std::string convModel = R"V0G0N(
+    <net batch="1" name="model" version="2">
+       <layers>
+               <layer id="5" name="data" precision="FP16" type="Input">
+                       <output>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>64</dim>
+                                       <dim>64</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="1" name="conv1_7x7_s2" precision="FP16" type="Convolution">
+                       <data dilation-x="1" dilation-y="1" group="1" kernel-x="7" kernel-y="7" output="64" pad-x="3" pad-y="3" stride="1,1,2,2" stride-x="2" stride-y="2"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>64</dim>
+                                       <dim>64</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="3">
+                                       <dim>1</dim>
+                                       <dim>64</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+                       <blobs>
+                               <weights offset="0" size="18816"/>
+                               <biases offset="18816" size="128"/>
+                       </blobs>
+               </layer>
+       </layers>
+       <edges>
+               <edge from-layer="5" from-port="0" to-layer="1" to-port="0"/>
+       </edges>
+</net>
+        )V0G0N";
+
+
+std::string concatModel = R"V0G0N(
+       <net batch="1" name="model" version="2">
+       <layers>
+               <layer id="8" name="data" precision="FP16" type="Input">
+                       <output>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>64</dim>
+                                       <dim>64</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="6" name="inPower" precision="FP16" type="Power">
+                       <data power="1.0" scale="1.0" shift="0.0"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>64</dim>
+                                       <dim>64</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>64</dim>
+                                       <dim>64</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="9" name="conv1_2" precision="FP16" type="Convolution">
+                       <data dilation-x="1" dilation-y="1" group="1" kernel-x="7" kernel-y="7" output="16" pad-x="3" pad-y="3" stride="1,1,2,2" stride-x="2" stride-y="2"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>64</dim>
+                                       <dim>64</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="3">
+                                       <dim>1</dim>
+                                       <dim>16</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+                       <blobs>
+                               <weights offset="64" size="4704"/>
+                               <biases offset="0" size="32"/>
+                       </blobs>
+               </layer>
+               <layer id="4" name="conv1_2_Relu" precision="FP16" type="ReLU">
+                       <data engine="caffe.ReLUParameter.DEFAULT" negative_slope="0.0"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>16</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>1</dim>
+                                       <dim>16</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="7" name="conv1_1" precision="FP16" type="Convolution">
+                       <data dilation-x="1" dilation-y="1" group="1" kernel-x="7" kernel-y="7" output="16" pad-x="3" pad-y="3" stride="1,1,2,2" stride-x="2" stride-y="2"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>64</dim>
+                                       <dim>64</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="3">
+                                       <dim>1</dim>
+                                       <dim>16</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+                       <blobs>
+                               <weights offset="4768" size="4704"/>
+                               <biases offset="32" size="32"/>
+                       </blobs>
+               </layer>
+               <layer id="17" name="conv1_1_Relu" precision="FP16" type="ReLU">
+                       <data engine="caffe.ReLUParameter.DEFAULT" negative_slope="0.0"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>16</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>1</dim>
+                                       <dim>16</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="13" name="Concat" precision="FP16" type="Concat">
+                       <data axis="1"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>16</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                               <port id="1">
+                                       <dim>1</dim>
+                                       <dim>16</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="2">
+                                       <dim>1</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="14" name="outPower" precision="FP16" type="Power">
+                       <data power="1.0" scale="1.0" shift="0.0"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>1</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+               </layer>
+       </layers>
+       <edges>
+               <edge from-layer="8" from-port="0" to-layer="6" to-port="0"/>
+               <edge from-layer="6" from-port="1" to-layer="9" to-port="0"/>
+               <edge from-layer="9" from-port="3" to-layer="4" to-port="0"/>
+               <edge from-layer="6" from-port="1" to-layer="7" to-port="0"/>
+               <edge from-layer="7" from-port="3" to-layer="17" to-port="0"/>
+               <edge from-layer="17" from-port="1" to-layer="13" to-port="0"/>
+               <edge from-layer="4" from-port="1" to-layer="13" to-port="1"/>
+               <edge from-layer="13" from-port="2" to-layer="14" to-port="0"/>
+       </edges>
+</net>
+               )V0G0N";
+
+
+std::string concatModelConv = R"V0G0N(
+       <net batch="1" name="model" version="2">
+       <layers>
+               <layer id="2" name="data" precision="FP16" type="Input">
+                       <output>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>64</dim>
+                                       <dim>64</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="3" name="inPower" precision="FP16" type="Power">
+                       <data power="1.0" scale="1.0" shift="0.0"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>64</dim>
+                                       <dim>64</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="1">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>64</dim>
+                                       <dim>64</dim>
+                               </port>
+                       </output>
+               </layer>
+               <layer id="5" name="conv1_2" precision="FP16" type="Convolution">
+                       <data dilation-x="1" dilation-y="1" group="1" kernel-x="7" kernel-y="7" output="16" pad-x="3" pad-y="3" stride="1,1,2,2" stride-x="2" stride-y="2"/>
+                       <input>
+                               <port id="0">
+                                       <dim>1</dim>
+                                       <dim>3</dim>
+                                       <dim>64</dim>
+                                       <dim>64</dim>
+                               </port>
+                       </input>
+                       <output>
+                               <port id="3">
+                                       <dim>1</dim>
+                                       <dim>16</dim>
+                                       <dim>32</dim>
+                                       <dim>32</dim>
+                               </port>
+                       </output>
+                       <blobs>
+                               <weights offset="64" size="4704"/>
+                               <biases offset="0" size="32"/>
+                       </blobs>
+               </layer>
+       </layers>
+       <edges>
+               <edge from-layer="2" from-port="0" to-layer="3" to-port="0"/>
+               <edge from-layer="3" from-port="1" to-layer="5" to-port="0"/>
+       </edges>
+</net>
+       )V0G0N";
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/reference_regression.cpp b/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/reference_regression.cpp
new file mode 100644 (file)
index 0000000..b0c6c1b
--- /dev/null
@@ -0,0 +1,15 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "regression_reference.hpp"
+#include "vpu_tests_config.hpp"
+
+namespace Regression {
+    namespace Reference {
+
+        std::map<std::string, std::vector<ClassificationScoringResultsForTests>> values = {
+        };
+
+    }  // namespace Reference
+}  // namespace Regression
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_case_common.cpp b/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_case_common.cpp
new file mode 100644 (file)
index 0000000..7c06d30
--- /dev/null
@@ -0,0 +1,42 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu_case_common.hpp"
+
+bool CheckMyriadX() {
+    if (auto envVar = std::getenv("IE_VPU_MYRIADX")) {
+        return std::stoi(envVar) != 0;
+    }
+    return false;
+}
+
+bool CheckMA2085() {
+    if (auto envVar = std::getenv("IE_VPU_MA2085")) {
+        return std::stoi(envVar) != 0;
+    }
+    return false;
+}
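+
+// Illustrative note: both checks are driven by environment variables, so a run
+// such as `IE_VPU_MYRIADX=1 ./<test_binary>` (binary name hypothetical) makes
+// CheckMyriadX() return true and enables the MyriadX-only cases.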
+
+//------------------------------------------------------------------------------
+// Implementation of methods of class VpuNoRegressionBase
+//------------------------------------------------------------------------------
+
+std::string VpuNoRegressionBase::getTestCaseName(PluginDevicePair plugin_device_names,
+                                                 Precision precision,
+                                                 int batch,
+                                                 bool do_reshape) {
+    return "plugin=" + plugin_device_names.first +
+           "_device=" + plugin_device_names.second +
+           "_InPrecision=" + precision.name() +
+           "_Batch=" + std::to_string(batch) +
+           "_DoReshape=" + std::to_string(do_reshape);
+}
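+
+// For illustration (values hypothetical), getTestCaseName({"myriadPlugin", "MYRIAD"},
+// Precision::FP16, 1, false) yields:
+//   "plugin=myriadPlugin_device=MYRIAD_InPrecision=FP16_Batch=1_DoReshape=0"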
+
+std::string VpuNoRegressionBase::getDeviceName() const {
+    return device_name_;
+}
+
+void VpuNoRegressionBase::InitConfig() {
+    config_[CONFIG_KEY(LOG_LEVEL)] = CONFIG_VALUE(LOG_INFO);
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_case_common.hpp b/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_case_common.hpp
new file mode 100644 (file)
index 0000000..0a80bad
--- /dev/null
@@ -0,0 +1,89 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <thread>
+#include <chrono>
+#include <gtest/gtest.h>
+#include <regression_tests.hpp>
+#include <string>
+#include <precision_utils.h>
+#include <vpu/vpu_plugin_config.hpp>
+#include "vpu_case_params.hpp"
+#include "vpu_param_containers.hpp"
+
+using namespace ::testing;
+using namespace InferenceEngine;
+using namespace Regression::Matchers;
+
+#define DISABLE_IF(expr) \
+    do { \
+        if (expr) { \
+            SKIP() << "Disabled since " << #expr << std::endl; \
+        } \
+    } while (false)
+
+
+#if defined(_WIN32) || defined(WIN32)
+#   define DISABLE_ON_WINDOWS_IF(expr) DISABLE_IF((expr))
+#else
+#   define DISABLE_ON_WINDOWS_IF(expr)
+#endif
+
+#if defined(__arm__) || defined(_M_ARM) || defined(__aarch64__) || defined(_M_ARM64)
+#   define DISABLE_ON_ARM      SKIP() << "Disabled on ARM" << std::endl;
+#   define VPU_REG_TEST_ARM_PLATFORM
+#else
+#   define DISABLE_ON_ARM
+#endif
+
+#define ENABLE_IF_MA2085 \
+    do { \
+        if (!CheckMA2085()) { \
+            SKIP() << "Disabled since not on MA2085" << std::endl; \
+        } \
+    } while (false)
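+
+// A minimal usage sketch for the skip macros above (test name and condition
+// are hypothetical):
+//
+//     TEST_P(SomeVpuCase, SmokeRun) {
+//         DISABLE_ON_ARM                            // skips outright on ARM builds
+//         DISABLE_ON_WINDOWS_IF(!CheckMyriadX());   // Windows-only condition
+//         ENABLE_IF_MA2085;                         // runs only when IE_VPU_MA2085=1
+//         // ... actual test body ...
+//     }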
+
+extern bool CheckMyriadX();
+extern bool CheckMA2085();
+
+//------------------------------------------------------------------------------
+// Parameters definition
+//------------------------------------------------------------------------------
+
+using Batch = int;
+using DoReshape = bool;
+using Resources = int;
+using IsIgnoreStatistic = bool;
+using PluginDevicePair = std::pair<std::string, std::string>;
+
+//------------------------------------------------------------------------------
+// class VpuNoRegressionBase
+//------------------------------------------------------------------------------
+
+class VpuNoRegressionBase : public Regression::RegressionTests {
+public:
+    //Operations
+    static std::string getTestCaseName(PluginDevicePair,
+                                       Precision,
+                                       Batch,
+                                       DoReshape);
+
+    // Accessors
+    std::string getDeviceName() const override;
+
+protected:
+    // Data section
+    std::string plugin_name_;
+    std::string device_name_;
+    Precision in_precision_;
+    int batch_;
+    bool do_reshape_;
+    std::map<std::string, std::string> config_;
+
+    // Operations
+    void SetUp() override = 0;
+    virtual void InitConfig();
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_case_params.hpp b/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_case_params.hpp
new file mode 100644 (file)
index 0000000..4b7b63c
--- /dev/null
@@ -0,0 +1,186 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <regression_tests.hpp>
+#include <string>
+#include <vector>
+
+using namespace InferenceEngine;
+using namespace Regression::Matchers;
+using String2StringMap = std::map<std::string, std::string>;
+using AdditionCfgParamsFactory = std::function<String2StringMap()>;
+
+//------------------------------------------------------------------------------
+// class SourceParameterBase
+//------------------------------------------------------------------------------
+
+class SourceParameterBase {
+public:
+    // Constructors
+    SourceParameterBase() = default;
+    virtual ~SourceParameterBase() = default;
+    inline SourceParameterBase(
+            std::string model_name,
+            std::string img_name,
+            double reference_delta);
+
+    // Accessors
+    inline std::string modelName() const;
+    inline std::string imageName() const;
+    inline double referenceDelta() const;
+
+    // Operations
+    inline virtual std::string name() const;
+
+protected:
+    //Data section
+    std::string model_name_;
+    std::string img_name_;
+    double reference_delta_;
+};
+
+//------------------------------------------------------------------------------
+// class ClassificationSrcParam
+//------------------------------------------------------------------------------
+
+class ClassificationSrcParam : public SourceParameterBase {
+public:
+    //Constructors
+    ClassificationSrcParam() = default;
+    inline ClassificationSrcParam(
+            std::string model_name,
+            std::string img_name,
+            double reference_delta,
+            Regression::EMean mean = Regression::EMean::eValues,
+            bool with_stat_file = false);
+
+    // Accessors
+    inline Regression::EMean mean() const;
+    inline bool withStatFile() const;
+
+    // Operations
+    inline std::string name() const override;
+
+    friend std::ostream& operator<<(std::ostream& os, const ClassificationSrcParam& param) {
+        return os << param.modelName() << ", " << param.imageName() <<
+        ", " << std::to_string(param.referenceDelta()) << ", " << format_mean(param.mean());
+    }
+
+private:
+    //Data section
+    Regression::EMean mean_;
+    bool with_stat_file_;
+};
+
+//------------------------------------------------------------------------------
+// class CompilationParameter
+//------------------------------------------------------------------------------
+
+class CompilationParameter {
+public:
+    //Constructors
+    CompilationParameter() = default;
+    inline CompilationParameter(std::string name,
+                                std::string path_to_network,
+                                std::string path_to_weights);
+    //Accessors
+    inline std::string name() const;
+    inline std::string pathToNetwork() const;
+    inline std::string pathToWeights() const;
+
+    friend std::ostream& operator<<(std::ostream& os, const CompilationParameter& param) {
+        return os << param.name();
+    }
+
+private:
+    //Data section
+    std::string name_;
+    std::string path_to_network_;
+    std::string path_to_weights_;
+};
+
+//------------------------------------------------------------------------------
+// Implementation of inline methods of class SourceParameterBase
+//------------------------------------------------------------------------------
+
+inline SourceParameterBase::SourceParameterBase(
+        std::string model_name,
+        std::string img_name,
+        double reference_delta):
+        model_name_(std::move(model_name)),
+        img_name_(std::move(img_name)),
+        reference_delta_(reference_delta) {
+}
+
+inline std::string SourceParameterBase::modelName() const {
+    return model_name_;
+}
+
+inline std::string SourceParameterBase::imageName() const {
+    return img_name_;
+}
+
+inline double SourceParameterBase::referenceDelta() const {
+    return reference_delta_;
+}
+
+inline std::string SourceParameterBase::name() const {
+    return "ModelName=" + model_name_ +
+           "_ImageName=" + img_name_;
+}
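+
+// Illustrative: a parameter built from ("googlenet-v1", "cat3.bmp", 0.01)
+// (values hypothetical) reports name() == "ModelName=googlenet-v1_ImageName=cat3.bmp".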
+
+//------------------------------------------------------------------------------
+// Implementation of inline methods of class ClassificationSrcParam
+//------------------------------------------------------------------------------
+
+inline ClassificationSrcParam::ClassificationSrcParam(
+        std::string model_name,
+        std::string img_name,
+        double reference_delta,
+        Regression::EMean mean,
+        bool with_stat_file):
+        SourceParameterBase(model_name, img_name, reference_delta),
+        mean_(mean),
+        with_stat_file_(with_stat_file) {
+}
+
+inline Regression::EMean ClassificationSrcParam::mean() const {
+    return mean_;
+}
+
+inline bool ClassificationSrcParam::withStatFile() const {
+    return with_stat_file_;
+}
+
+inline std::string ClassificationSrcParam::name() const {
+    return SourceParameterBase::name() +
+           "_Mean=" + format_mean(mean_);
+}
+
+//------------------------------------------------------------------------------
+// Implementation of inline methods of class CompilationParameter
+//------------------------------------------------------------------------------
+
+inline CompilationParameter::CompilationParameter(
+        std::string name,
+        std::string path_to_network,
+        std::string path_to_weights):
+        name_(name),
+        path_to_network_(path_to_network),
+        path_to_weights_(path_to_weights) {
+}
+
+inline std::string CompilationParameter::name() const {
+    return name_;
+}
+
+inline std::string CompilationParameter::pathToNetwork() const {
+    return path_to_network_;
+}
+
+inline std::string CompilationParameter::pathToWeights() const {
+    return path_to_weights_;
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_classification_case.cpp b/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_classification_case.cpp
new file mode 100644 (file)
index 0000000..81169ef
--- /dev/null
@@ -0,0 +1,97 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu_classification_case.hpp"
+
+//------------------------------------------------------------------------------
+// Implementation of methods of class VpuNoClassificationRegression
+//------------------------------------------------------------------------------
+
+std::string VpuNoClassificationRegression::getTestCaseName(
+        TestParamInfo<ClassificationTestVpuParam::ParamType> param) {
+    return VpuNoRegressionBase::getTestCaseName(get<0>(param.param),
+                                                get<1>(param.param),
+                                                get<2>(param.param),
+                                                get<3>(param.param)) +
+            "_SHAVES=" + (get<4>(param.param) == -1 ? "AUTO" : std::to_string(get<4>(param.param))) +
+           "_IsIgnoreStatistic=" + std::to_string(get<5>(param.param)) +
+           "_" + get<6>(param.param).name();
+}
+
+void VpuNoClassificationRegression::SetUp() {
+    TestsCommon::SetUp();
+
+    plugin_name_ = get<0>(ClassificationTestVpuParam::GetParam()).first;
+    device_name_ = get<0>(ClassificationTestVpuParam::GetParam()).second;
+    in_precision_ = get<1>(ClassificationTestVpuParam::GetParam());
+    batch_ = get<2>(ClassificationTestVpuParam::GetParam());
+    do_reshape_ = get<3>(ClassificationTestVpuParam::GetParam());
+    resources_ = get<4>(ClassificationTestVpuParam::GetParam());
+    is_ignore_statistic_ = get<5>(ClassificationTestVpuParam::GetParam());
+    source_param_ = get<6>(ClassificationTestVpuParam::GetParam());
+
+    InitConfig();
+}
+
+void VpuNoClassificationRegression::InitConfig() {
+    VpuNoRegressionBase::InitConfig();
+
+    if (resources_ != -1) {
+        config_["VPU_NUMBER_OF_CMX_SLICES"] = std::to_string(resources_);
+        config_["VPU_NUMBER_OF_SHAVES"] = std::to_string(resources_);
+    }
+
+    if (is_ignore_statistic_) {
+        config_["VPU_IGNORE_IR_STATISTIC"] = CONFIG_VALUE(YES);
+    } else {
+        config_["VPU_IGNORE_IR_STATISTIC"] = CONFIG_VALUE(NO);
+    }
+}
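+
+// For example (values hypothetical), resources_ == 4 with
+// is_ignore_statistic_ == true leaves config_ holding:
+//   LOG_LEVEL=LOG_INFO, VPU_NUMBER_OF_CMX_SLICES=4,
+//   VPU_NUMBER_OF_SHAVES=4, VPU_IGNORE_IR_STATISTIC=YES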
+
+//------------------------------------------------------------------------------
+// Implementation of methods of class VpuNoClassificationRegressionSpecific
+//------------------------------------------------------------------------------
+
+std::string VpuNoClassificationRegressionSpecific::getTestCaseName(
+        TestParamInfo<ClassificationSpecificTestVpuParam::ParamType> param) {
+    return VpuNoRegressionBase::getTestCaseName(get<0>(param.param),
+                                                get<1>(param.param),
+                                                get<2>(param.param),
+                                                get<3>(param.param));
+}
+
+void VpuNoClassificationRegressionSpecific::SetUp() {
+    TestsCommon::SetUp();
+
+    plugin_name_ = get<0>(ClassificationSpecificTestVpuParam::GetParam()).first;
+    device_name_ = get<0>(ClassificationSpecificTestVpuParam::GetParam()).second;
+    in_precision_ = get<1>(ClassificationSpecificTestVpuParam::GetParam());
+    batch_ = get<2>(ClassificationSpecificTestVpuParam::GetParam());
+    do_reshape_ = get<3>(ClassificationSpecificTestVpuParam::GetParam());
+
+    InitConfig();
+}
+
+void VpuNoClassificationRegressionSpecific::InitConfig() {
+    VpuNoRegressionBase::InitConfig();
+}
+
+//------------------------------------------------------------------------------
+// Implementation of methods of class VpuNoRegressionWithCompilation
+//------------------------------------------------------------------------------
+
+std::string VpuNoRegressionWithCompilation::getTestCaseName(
+        TestParamInfo<CompilationTestParam::ParamType> param) {
+    return "plugin=" + get<0>(param.param).first +
+           "_device=" + get<0>(param.param).second +
+           "_" + get<1>(param.param).name();
+}
+
+void VpuNoRegressionWithCompilation::SetUp() {
+    plugin_name_ = get<0>(CompilationTestParam::GetParam()).first;
+    device_name_ = get<0>(CompilationTestParam::GetParam()).second;
+    compilation_param_ = get<1>(CompilationTestParam::GetParam());
+
+    PluginCache::get().reset();
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_classification_case.hpp b/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_classification_case.hpp
new file mode 100644 (file)
index 0000000..181d3e5
--- /dev/null
@@ -0,0 +1,93 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "vpu_case_common.hpp"
+
+using CompilationTestParam = WithParamInterface<std::tuple<PluginDevicePair, CompilationParameter>>;
+using ClassificationTestVpuParam = WithParamInterface<std::tuple<
+        PluginDevicePair,
+        Precision,
+        Batch,
+        DoReshape,
+        Resources,
+        IsIgnoreStatistic,
+        ClassificationSrcParam>>;
+
+using ClassificationSpecificTestVpuParam = WithParamInterface<std::tuple<
+        PluginDevicePair,
+        Precision,
+        Batch,
+        DoReshape>>;
+
+//------------------------------------------------------------------------------
+// class VpuNoClassificationRegression
+//------------------------------------------------------------------------------
+
+class VpuNoClassificationRegression : public VpuNoRegressionBase,
+                                      public ClassificationTestVpuParam {
+public:
+    //Operations
+    static std::string getTestCaseName(
+            TestParamInfo<ClassificationTestVpuParam::ParamType> param);
+
+protected:
+    // Data section
+    int resources_;
+    bool is_ignore_statistic_;
+    ClassificationSrcParam source_param_;
+
+    //Operations
+    void SetUp() override;
+    void InitConfig() override;
+};
+
+//------------------------------------------------------------------------------
+// class VpuNoClassificationRegressionSpecific
+//------------------------------------------------------------------------------
+
+class VpuNoClassificationRegressionSpecific : public VpuNoRegressionBase,
+                                              public ClassificationSpecificTestVpuParam {
+public:
+    //Operations
+    static std::string getTestCaseName(
+            TestParamInfo<ClassificationSpecificTestVpuParam::ParamType> param);
+
+protected:
+    //Operations
+    void SetUp() override;
+    void InitConfig() override;
+};
+
+//------------------------------------------------------------------------------
+// class VpuNoRegressionWithCompilation
+//------------------------------------------------------------------------------
+
+class VpuNoRegressionWithCompilation : public Regression::RegressionTests,
+                                       public CompilationTestParam {
+public:
+    // Operations
+    static std::string getTestCaseName(TestParamInfo<CompilationTestParam::ParamType> param);
+
+    // Accessors
+    std::string getDeviceName() const override;
+
+protected:
+    // Data section
+    std::string plugin_name_;
+    std::string device_name_;
+    CompilationParameter compilation_param_;
+
+    //Operations
+    void SetUp() override;
+};
+
+//------------------------------------------------------------------------------
+// Implementation of methods of class VpuNoRegressionWithCompilation
+//------------------------------------------------------------------------------
+
+inline std::string VpuNoRegressionWithCompilation::getDeviceName() const {
+    return plugin_name_;
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_param_containers.cpp b/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_param_containers.cpp
new file mode 100644 (file)
index 0000000..80271c6
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu_param_containers.hpp"
+
+//------------------------------------------------------------------------------
+// Initialization of members of class VpuTestParamsContainer
+//------------------------------------------------------------------------------
+
+PluginNamesVector VpuTestParamsContainer::testing_plugin_ = {
+        { ::vpu::tests::pluginName(), ::vpu::tests::deviceName() }
+};
+
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_param_containers.hpp b/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_param_containers.hpp
new file mode 100644 (file)
index 0000000..5d81d4a
--- /dev/null
@@ -0,0 +1,34 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "vpu_case_params.hpp"
+#include "vpu_tests_config.hpp"
+using PluginNamesVector = std::vector<std::pair<std::string, std::string>>;
+
+//------------------------------------------------------------------------------
+// class VpuTestParamsContainer
+//------------------------------------------------------------------------------
+
+class VpuTestParamsContainer {
+public:
+//    inline static const std::vector<CompilationParameter>& compilationParameters();
+//    inline static const std::vector<DetectionSrcParam>& detectionSrcParams();
+//    inline static const std::vector<DetectionSrcParam>& detectionSrcParamsSmoke();
+    inline static const PluginNamesVector& testingPlugin() {
+        return testing_plugin_;
+    }
+private:
+//    static std::vector<CompilationParameter> compilation_parameters_;
+    static PluginNamesVector testing_plugin_;
+//    static std::vector<DetectionSrcParam> detection_src_params_;
+//    static std::vector<DetectionSrcParam> detection_src_params_smoke_;
+};
+
+//------------------------------------------------------------------------------
+// Implementation of inline methods of class VpuTestParamsContainer
+//------------------------------------------------------------------------------
+
+
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_raw_results_case.cpp b/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_raw_results_case.cpp
new file mode 100644 (file)
index 0000000..2caaec1
--- /dev/null
@@ -0,0 +1,231 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu_raw_results_case.hpp"
+
+std::vector<float> operator+(std::vector<float>&& l, const std::vector<float>& r) {
+    l.insert(l.end(), r.begin(), r.end());
+
+    return std::move(l);
+}
+
+//------------------------------------------------------------------------------
+// Implementation of public methods of class VpuNoRawResultsRegression
+//------------------------------------------------------------------------------
+
+std::string VpuNoRawResultsRegression::getTestCaseName(
+        TestParamInfo<RawResultsTestVpuParam::ParamType> param) {
+    return VpuNoRegressionBase::getTestCaseName(get<0>(param.param),
+                                                get<1>(param.param),
+                                                get<2>(param.param),
+                                                get<3>(param.param));
+}
+
+
+std::vector<float> VpuNoRawResultsRegression::fromBinaryFile(std::string inputTensorBinary) {
+    std::vector<float> result;
+    std::ifstream in(inputTensorBinary, std::ios_base::binary | std::ios_base::ate);
+    if (!in.good()) {
+        THROW_IE_EXCEPTION << "Can't open file " << inputTensorBinary;
+    }
+
+    // The stream was opened at the end, so tellg() yields the file size.
+    const std::streamoff sizeFile = in.tellg();
+    in.seekg(0, std::ios_base::beg);
+    size_t count = sizeFile / sizeof(float);
+    result.reserve(count);
+
+    for (size_t i = 0; i < count; i++) {
+        float tmp;
+        in.read(reinterpret_cast<char *>(&tmp), sizeof(float));
+        result.push_back(tmp);
+    }
+
+    return result;
+}
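+
+// Illustrative: the rvalue overload of operator+ above lets per-output raw
+// results be merged without extra copies, e.g.
+//   auto merged = fromBinaryFile("out0.bin") + fromBinaryFile("out1.bin");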
+
+
+//------------------------------------------------------------------------------
+// Implementation of private methods of class VpuNoRawResultsRegression
+//------------------------------------------------------------------------------
+
+void VpuNoRawResultsRegression::SetUp() {
+    TestsCommon::SetUp();
+
+    plugin_name_ = get<0>(RawResultsTestVpuParam::GetParam()).first;
+    device_name_ = get<0>(RawResultsTestVpuParam::GetParam()).second;
+    in_precision_ = get<1>(RawResultsTestVpuParam::GetParam());
+    batch_ = get<2>(RawResultsTestVpuParam::GetParam());
+    do_reshape_ = get<3>(RawResultsTestVpuParam::GetParam());
+
+    InitConfig();
+}
+
+void VpuNoRawResultsRegression::InitConfig() {
+    VpuNoRegressionBase::InitConfig();
+}
+
+bool VpuNoRawResultsRegression::loadImage(const std::string &imageFilename, const InferenceEngine::Blob::Ptr &blob,
+    bool bgr, InferenceEngine::Layout layout) {
+
+    auto precision = blob->getTensorDesc().getPrecision();
+    if (precision != InferenceEngine::Precision::FP16
+        && precision != InferenceEngine::Precision::FP32
+        && precision != InferenceEngine::Precision::U8) {
+        std::cout << "loadImage error: Input must have U8, FP16, FP32 precision" << std::endl;
+        return false;
+    }
+
+    if ((layout != InferenceEngine::Layout::NCHW) && (layout != InferenceEngine::Layout::NHWC)) {
+        std::cout << "Support only two layouts NCHW and NHWC" << std::endl;
+        return false;
+    }
+
+    FormatReader::ReaderPtr reader(imageFilename.c_str());
+    if (reader.get() == nullptr) {
+        std::cout << "loadImage error: image " << imageFilename << " cannot be read!" << std::endl;
+        return false;
+    }
+
+    size_t w = blob->getTensorDesc().getDims()[3];
+    size_t h = blob->getTensorDesc().getDims()[2];
+    if (reader->width() != w || reader->height() != h) {
+        std::cout << "loadImage error: Input sizes mismatch, got " << reader->width() << "x" << reader->height()
+                  << " expecting " << w << "x" << h << std::endl;
+        return false;
+    }
+
+    auto numBlobChannels = blob->getTensorDesc().getDims()[1];
+    size_t numImageChannels = reader->size() / (reader->width() * reader->height());
+    if (numBlobChannels != numImageChannels && numBlobChannels != 1) {
+        std::cout << "loadImage error: Input channels mismatch: image channels " << numImageChannels << ", "
+                  << "network channels " << numBlobChannels << ", expecting count of image channels are equal "
+                  << "to count if network channels or count of network channels are equal to 1" << std::endl;
+        return false;
+    }
+
+    auto nPixels = w * h;
+    uint8_t *BGR8 = reader->getData().get();
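+    // 'bgr' keeps the image's native BGR channel order; otherwise the channel index is mirrored to read RGB.
+    // NCHW writes each channel as a contiguous plane (offset j * nPixels); NHWC interleaves channels per pixel.
+    // Note: the RGB branch strides by numBlobChannels, so it only indexes the image correctly when
+    // the blob and image channel counts match.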
+    for (unsigned int i = 0; i < nPixels; i++) {
+        for (unsigned int j = 0; j < numBlobChannels; j++) {
+            uint8_t val = bgr ? BGR8[i * numImageChannels + j] : BGR8[i * numBlobChannels + (numBlobChannels - j - 1)];
+            size_t index = (layout == InferenceEngine::Layout::NCHW) ? (i + j * nPixels) : (i * numBlobChannels + j);
+
+            switch (precision) {
+                case Precision::U8:
+                {
+                    uint8_t *inputDataPtr = std::dynamic_pointer_cast<InferenceEngine::TBlob<uint8_t>>(blob)->data();
+                    inputDataPtr[index] = val;
+                    break;
+                }
+                case Precision::FP16:
+                {
+                    // FP16 values are stored as unsigned short; see PrecisionTrait<Precision::FP16>::value_type and the clDNN plugin for details.
+                    auto buf = blob->buffer();
+                    ie_fp16 *inputDataPtr = buf.as<ie_fp16*>();
+                    inputDataPtr[index] = PrecisionUtils::f32tof16(static_cast<float>(val));
+                    break;
+                }
+                case Precision::FP32:
+                {
+                    float *inputDataPtr = std::dynamic_pointer_cast<InferenceEngine::TBlob<float>>(blob)->data();
+                    inputDataPtr[index] = static_cast<float>(val);
+                    break;
+                }
+                default:
+                    THROW_IE_EXCEPTION << "Unsupported precision!";
+            }
+        }
+    }
+    return true;
+}
+
+bool VpuNoRawResultsRegression::generateSeqIndLPR(InferenceEngine::Blob::Ptr &seq_ind) {
+
+    const auto precision = seq_ind->getTensorDesc().getPrecision();
+    if (precision != InferenceEngine::Precision::FP16
+        && precision != InferenceEngine::Precision::FP32
+        && precision != InferenceEngine::Precision::U8) {
+        std::cout << "generateSeqIndLPR error: Input must have U8, FP16, FP32 precision" << std::endl;
+        return false;
+    }
+
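+    // LPRNet expects a sequence-indicator input: the first element is 0 and all remaining elements are 1.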
+    switch (precision) {
+        case Precision::U8:
+        {
+            uint8_t *input_data = seq_ind->buffer().as<uint8_t*>();
+            input_data[0] = 0;
+            for (size_t i = 1; i < seq_ind->size(); i++) {
+                input_data[i] = 1;
+            }
+            break;
+        }
+        case Precision::FP16:
+        {
+            ie_fp16 *input_data = seq_ind->buffer().as<ie_fp16*>();
+            input_data[0] = PrecisionUtils::f32tof16(0.0);
+            for (size_t i = 1; i < seq_ind->size(); i++) {
+                input_data[i] = PrecisionUtils::f32tof16(1.0);
+            }
+            break;
+        }
+        case Precision::FP32:
+        {
+            float *input_data = seq_ind->buffer().as<float*>();
+            input_data[0] = 0.0;
+            for (size_t i = 1; i < seq_ind->size(); i++) {
+                input_data[i] = 1.0;
+            }
+            break;
+        }
+        default:
+            THROW_IE_EXCEPTION << "Unsupported precision!";
+    }
+
+    return true;
+}
+
+bool VpuNoRawResultsRegression::loadTensorDistance(InferenceEngine::Blob::Ptr blob1, const std::vector<float> &input1) {
+
+    auto blob_precision = blob1->getTensorDesc().getPrecision();
+
+    if (blob_precision != InferenceEngine::Precision::FP16
+        && blob_precision != InferenceEngine::Precision::FP32) {
+        std::cout << "loadTensorDistance error: Input must have FP16, FP32 precision" << std::endl;
+        return false;
+    }
+    // Number of elements in one batch slice of the blob; the input vector must cover at least one slice.
+    const size_t sizeBlob = blob1->size() / blob1->getTensorDesc().getDims()[0];
+
+    if (sizeBlob > input1.size()) {
+        std::cout << "loadTensorDistance error: input vector is smaller than one batch slice of the blob" << std::endl;
+        return false;
+    }
+
+    switch (blob_precision) {
+        case Precision::FP16:
+        {
+            auto buf1 = blob1->buffer();
+            ie_fp16 *inputDataPtr1 = buf1.as<ie_fp16*>();
+            for (size_t i = 0; i < sizeBlob; i++) {
+                inputDataPtr1[i] = PrecisionUtils::f32tof16(input1[i]);
+            }
+
+            break;
+        }
+        case Precision::FP32:
+        {
+            float *inputDataPtr1 = std::dynamic_pointer_cast<InferenceEngine::TBlob<float>>(blob1)->data();
+
+            for (size_t i = 0; i < sizeBlob; i++) {
+                inputDataPtr1[i] = input1[i];
+            }
+
+            break;
+        }
+        default:
+            THROW_IE_EXCEPTION << "Unsupported precision!";
+    }
+
+    return true;
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_raw_results_case.hpp b/inference-engine/tests_deprecated/functional/vpu/common/regression/helpers/vpu_raw_results_case.hpp
new file mode 100644 (file)
index 0000000..68fdbb6
--- /dev/null
@@ -0,0 +1,149 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "vpu_case_common.hpp"
+#include "ie_memcpy.h"
+
+using RawResultsTestVpuParam = WithParamInterface<std::tuple<
+        PluginDevicePair,
+        Precision,
+        Batch,
+        DoReshape>>;
+
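+// Concatenation helper: appends r to the rvalue vector l and returns the combined vector.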
+std::vector <float> operator + (std::vector <float> && l,
+    const std::vector <float> & r);
+
+//------------------------------------------------------------------------------
+// class VpuNoRawResultsRegression
+//------------------------------------------------------------------------------
+
+class VpuNoRawResultsRegression : public VpuNoRegressionBase,
+                                  public RawResultsTestVpuParam {
+public:
+    //Operations
+    static std::string getTestCaseName(
+            TestParamInfo<RawResultsTestVpuParam::ParamType> param);
+
+    template <class Ctx, class Reader>
+    void intoBatch(const Ctx & ctx, int batch, const Reader & rdr);
+
+    template <class Ctx>
+    void readInput(const Ctx & ctx, bool rgb = false);
+
+    template <class Ctx>
+    void readInputDistance(const Ctx & ctx);
+
+    template <class Ctx>
+    InferenceEngine::Blob::Ptr skipNegatives(const Ctx & ctx);
+
+    template <class Ctx>
+    InferenceEngine::Blob::Ptr dumpToBlob(const Ctx & ctx);
+
+    template <class Ctx>
+    void readInputForLPR(const Ctx & ctx);
+
+    std::vector<float> fromBinaryFile(const std::string& inputTensorBinary);
+
+protected:
+    //Operations
+    virtual void SetUp() override;
+    virtual void InitConfig() override;
+
+    template <class T>
+    static T Times(int n, const T & container);
+
+    bool loadImage(const std::string &imageFilename, const InferenceEngine::Blob::Ptr &blob,
+        bool bgr = true, InferenceEngine::Layout layout = InferenceEngine::Layout::NCHW);
+    bool generateSeqIndLPR(InferenceEngine::Blob::Ptr &seq_ind);
+    bool loadTensorDistance(InferenceEngine::Blob::Ptr blob1, const std::vector<float> &input1);
+};
+
+
+//------------------------------------------------------------------------------
+// Implementation of template methods of class VpuNoRawResultsRegression
+//------------------------------------------------------------------------------
+
+template <class Ctx, class Reader>
+void VpuNoRawResultsRegression::intoBatch(const Ctx & ctx, int batch, const Reader & rdr) {
+    const auto & input = ctx.currentInputs();
+    std::this_thread::sleep_for(std::chrono::milliseconds(10000));
+    rdr();
+
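+    // Read the first batch slice via the supplied reader, then replicate it into every remaining slice.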
+    for (int i = 1; i < batch; i++) {
+        auto p = const_cast<uint8_t*>(input->cbuffer().template as<const uint8_t*>());
+        size_t dstSize = input->byteSize() - i * input->byteSize() / batch;
+        ie_memcpy(p + i * input->byteSize() / batch, dstSize, p, input->byteSize() / batch);
+    }
+}
+
+template <class Ctx>
+void VpuNoRawResultsRegression::readInput(const Ctx & ctx, bool rgb) {
+
+    loadImage(ctx.currentInputFile(), ctx.currentInputs(), rgb);
+}
+
+template <class Ctx>
+void VpuNoRawResultsRegression::readInputDistance(const Ctx & ctx) {
+    auto input = ctx.currentInputs();
+    auto fileName = ctx.currentInputFile();
+
+    std::string inputTensorBinary = TestDataHelpers::get_data_path() + "/vpu/InputEmbedding.bin";
+
+    std::vector <float> inTensor = fromBinaryFile(inputTensorBinary);
+
+    loadTensorDistance(std::const_pointer_cast<InferenceEngine::Blob>(input), inTensor);
+}
+
+template <class Ctx>
+InferenceEngine::Blob::Ptr VpuNoRawResultsRegression::skipNegatives(const Ctx & ctx) {
+    std::vector <float> result;
+    for (auto output : ctx.outputs()) {
+        for (auto x : *std::dynamic_pointer_cast<TBlob<float>>(output.second)) {
+            if (x >= 0) {
+                result.push_back(x);
+            }
+        }
+    }
+
+    // Copy into an owning blob: wrapping &result[0] would leave the returned blob
+    // pointing at this function's local vector after it goes out of scope.
+    TensorDesc desc(Precision::FP32, {result.size()}, Layout::C);
+    auto blob = make_shared_blob<float>(desc);
+    blob->allocate();
+    ie_memcpy(blob->buffer().as<float*>(), blob->byteSize(), result.data(), result.size() * sizeof(float));
+    return blob;
+}
+
+template <class Ctx>
+InferenceEngine::Blob::Ptr VpuNoRawResultsRegression::dumpToBlob(const Ctx & ctx) {
+
+    std::vector <float> result;
+    for (auto output : ctx.outputs()) {
+        for (auto x : *std::dynamic_pointer_cast<TBlob<float>>(output.second)) {
+            result.push_back(x);
+        }
+    }
+    // Copy into an owning blob; as in skipNegatives, the local vector's storage dies when this function returns.
+    TensorDesc desc(Precision::FP32, {result.size()}, Layout::C);
+    auto blob = make_shared_blob<float>(desc);
+    blob->allocate();
+    ie_memcpy(blob->buffer().as<float*>(), blob->byteSize(), result.data(), result.size() * sizeof(float));
+    return blob;
+}
+
+template <class Ctx>
+void VpuNoRawResultsRegression::readInputForLPR(const Ctx & ctx) {
+
+    if (ctx.getInputIdx() == 0) {
+        const auto input = ctx.currentInputs();
+        auto fileName = ctx.fileNames()[0];
+        loadImage(fileName, input, true);
+    } else if (ctx.getInputIdx() == 1) {
+        auto seq_ind = ctx.currentInputs();
+        generateSeqIndLPR(seq_ind);
+    } else {
+        THROW_IE_EXCEPTION << "incorrect input index for LPRNET: " << ctx.getInputIdx();
+    }
+}
+
+template <class T>
+inline T VpuNoRawResultsRegression::Times(int n, const T & container) {
+    T out;
+    for (int i = 0; i < n; i++) {
+        out.insert(out.end(), container.begin(), container.end());
+    }
+    return out;
+}
+
diff --git a/inference-engine/tests_deprecated/functional/vpu/graph_transformer/gt_functional_tests.cpp b/inference-engine/tests_deprecated/functional/vpu/graph_transformer/gt_functional_tests.cpp
new file mode 100644 (file)
index 0000000..3fd7ec1
--- /dev/null
@@ -0,0 +1,120 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gt_functional_tests.hpp"
+
+#include <vpu/utils/logger.hpp>
+#include <vpu/compile_env.hpp>
+#include <vpu/graph_transformer_internal.hpp>
+
+using namespace InferenceEngine;
+using namespace vpu;
+
+void graphTransformerFunctionalTests::SetUp() {
+    vpuLayersTests::SetUp();
+
+    _stageBuilder = std::make_shared<StageBuilder>();
+    _frontEnd = std::make_shared<FrontEnd>(_stageBuilder);
+    _backEnd = std::make_shared<BackEnd>();
+    _passManager = std::make_shared<PassManager>(_stageBuilder, _backEnd);
+    _platform = CheckMyriadX() ? Platform::MYRIAD_X : Platform::MYRIAD_2;
+}
+
+void graphTransformerFunctionalTests::CreateModel() {
+    const auto compilerLog = std::make_shared<Logger>("Test", LogLevel::Info, consoleOutput());
+    CompileEnv::init(_platform, _compilationConfig, compilerLog);
+    AutoScope autoDeinit([] {
+        CompileEnv::free();
+    });
+    const auto& env = CompileEnv::get();
+
+    auto unitTest = testing::UnitTest::GetInstance();
+    IE_ASSERT(unitTest != nullptr);
+    auto curTestInfo = unitTest->current_test_info();
+    IE_ASSERT(curTestInfo != nullptr);
+
+    _gtModel = std::make_shared<ModelObj>(
+                formatString("%s/%s", curTestInfo->test_case_name(), curTestInfo->name()));
+    _gtModel->attrs().set<Resources>("resources", env.resources);
+    _gtModel->attrs().set<int>("index", 1);
+}
+
+void graphTransformerFunctionalTests::PrepareGraphCompilation() {
+    SetSeed(DEFAULT_SEED_VALUE);
+    _compilationConfig = CompilationConfig();
+    _inputsInfo.clear();
+    _outputsInfo.clear();
+    _inputMap.clear();
+    _outputMap.clear();
+
+    // An executable network holds its device in a booted & busy state.
+    // For a new network the plugin first tries to find a free device (already booted or not),
+    // and only then reuses busy devices. Releasing the executable network marks its device as free and booted,
+    // so the next network finds that device and uses it without a boot, which is the fastest case.
+    _executableNetwork = ExecutableNetwork();
+    _inferRequest = nullptr;
+
+    CreateModel();
+}
+
+void graphTransformerFunctionalTests::InitializeInputData(const DataDesc& inputDataDesc) {
+    auto input = _gtModel->addInputData("Input", inputDataDesc);
+    _gtModel->attrs().set<int>("numInputs", 1);
+
+    InputInfo::Ptr inputInfoPtr(new InputInfo());
+    inputInfoPtr->setInputData(std::make_shared<InferenceEngine::Data>("Input", inputDataDesc.toTensorDesc()));
+    _inputsInfo["Input"] = inputInfoPtr;
+
+    _dataIntermediate  = input;
+}
+
+vpu::Data graphTransformerFunctionalTests::InitializeOutputData(const DataDesc& outputDataDesc) {
+    vpu::Data output = _gtModel->addOutputData("Output", outputDataDesc);
+    _gtModel->attrs().set<int>("numOutputs", 1);
+
+    _outputsInfo["Output"] = std::make_shared<InferenceEngine::Data>("Output", outputDataDesc.toTensorDesc());
+    return output;
+}
+
+int64_t graphTransformerFunctionalTests::CompileAndInfer(Blob::Ptr& inputBlob, Blob::Ptr& outputBlob, bool lockLayout) {
+    const auto compilerLog = std::make_shared<Logger>(
+                "Test",
+                LogLevel::Info,
+                consoleOutput());
+
+    auto compiledGraph = compileModel(
+                _gtModel,
+                _platform,
+                _compilationConfig,
+                compilerLog);
+
+    std::istringstream instream(std::string(compiledGraph->blob.data(), compiledGraph->blob.size()));
+
+    _executableNetwork = _vpuPluginPtr->ImportNetwork(instream, _config);
+    auto inferRequest = _executableNetwork.CreateInferRequest();
+    _inferRequest = inferRequest;
+
+    genInputBlobs(lockLayout);
+    genOutputBlobs(lockLayout);
+
+    IE_ASSERT(Infer());
+
+    std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perfMap;
+    _inferRequest->GetPerformanceCounts(perfMap, nullptr);
+
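+    // Sum the real device time over all executed stages to get the total execution time.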
+    int64_t executionMicroseconds = 0;
+    for (const auto& perfPair : perfMap) {
+        const InferenceEngine::InferenceEngineProfileInfo& info = perfPair.second;
+        if (info.status == InferenceEngine::InferenceEngineProfileInfo::EXECUTED) {
+            executionMicroseconds += info.realTime_uSec;
+        }
+    }
+    inputBlob = _inputMap.begin()->second;
+    outputBlob = _outputMap.begin()->second;
+    return executionMicroseconds;
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/graph_transformer/gt_functional_tests.hpp b/inference-engine/tests_deprecated/functional/vpu/graph_transformer/gt_functional_tests.hpp
new file mode 100644 (file)
index 0000000..eb3154c
--- /dev/null
@@ -0,0 +1,40 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "vpu_layers_tests.hpp"
+
+#include <vpu/middleend/pass_manager.hpp>
+#include <vpu/frontend/frontend.hpp>
+#include <cpp_interfaces/base/ie_inference_plugin_api.hpp>
+
+class graphTransformerFunctionalTests : public vpuLayersTests {
+protected:
+    void SetUp() override;
+
+    void CreateModel();
+    void PrepareGraphCompilation();
+    void InitializeInputData(const vpu::DataDesc& inputDataDesc);
+
+    vpu::Data InitializeOutputData(const vpu::DataDesc& outputDataDesc);
+
+    /// @returns execution time in microseconds
+    int64_t CompileAndInfer(InferenceEngine::Blob::Ptr& inputBlob,
+                            InferenceEngine::Blob::Ptr& outputBlob,
+                            bool lockLayout = false);
+
+protected:
+   vpu::ModelPtr          _gtModel;
+   vpu::CompilationConfig _compilationConfig;
+   vpu::StageBuilder::Ptr _stageBuilder;
+   vpu::Data              _dataIntermediate;
+
+private:
+   vpu::Platform                      _platform = vpu::Platform::MYRIAD_X;
+   vpu::FrontEnd::Ptr                 _frontEnd;
+   vpu::PassManager::Ptr              _passManager;
+   vpu::BackEnd::Ptr                  _backEnd;
+   InferenceEngine::ExecutableNetwork _executableNetwork;
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/graph_transformer/merge_permute_and_reorder_test.cpp b/inference-engine/tests_deprecated/functional/vpu/graph_transformer/merge_permute_and_reorder_test.cpp
new file mode 100644 (file)
index 0000000..15dd93f
--- /dev/null
@@ -0,0 +1,161 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gt_functional_tests.hpp"
+
+#include <vpu/middleend/pass_manager.hpp>
+#include <vpu/compile_env.hpp>
+#include <vpu/frontend/frontend.hpp>
+#include <vpu/utils/logger.hpp>
+#include <cpp_interfaces/base/ie_inference_plugin_api.hpp>
+
+using namespace InferenceEngine;
+using namespace vpu;
+
+struct PermutationStage {
+    PermutationIndexVector permute;
+    const DimsOrder * reorder = nullptr; // a pointer avoids copying a global static object that may not be initialized yet.
+    PermutationStage() = default;
+    PermutationStage(const PermutationIndexVector& permute) : permute(permute) {}
+    PermutationStage(const DimsOrder& reorder) : reorder(&reorder) {}
+};
+
+static inline void PrintTo(const PermutationStage& param, ::std::ostream* os) {
+    if (!param.permute.empty())
+        *os << ::testing::PrintToString(param.permute);
+    else
+        printTo(*os, *param.reorder);
+}
+
+using PermutationsSequence = std::vector<PermutationStage>;
+
+using MergePermuteNDParams = std::tuple<InferenceEngine::SizeVector,  // input tensor sizes
+                                        PermutationsSequence>;        // permutation vectors sequence
+
+class myriadGTMergePermuteNDTests_nightly:
+    public graphTransformerFunctionalTests,
+    public testing::WithParamInterface<MergePermuteNDParams> {
+
+    using PermuteDims = PermutationDimsMap;
+    static constexpr DataType defaultDataType = DataType::FP16;
+protected:
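+    // Builds DimValues for the given layout by walking its permutation order over the flat size vector.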
+    DimValues MakeStubDimValues(const DimsOrder& layout, const SizeVector& dims) {
+        const auto perm = layout.toPermutation();
+        const int numDims = layout.numDims();
+        DimValues dimValues;
+        for (int ind = 0; ind < numDims; ++ind) {
+            dimValues.set(perm[ind], static_cast<int>(dims[ind]));
+        }
+        return dimValues;
+    }
+
+    InferenceEngine::SizeVector applyPermute(const InferenceEngine::SizeVector& dims, const PermutationIndexVector& permute) {
+        InferenceEngine::SizeVector result(dims.size());
+        for (size_t i = 0; i < dims.size(); ++i) {
+            result[i] = dims[permute[i]];
+        }
+        return result;
+    }
+
+    int64_t InferPermuteSequence(InferenceEngine::SizeVector inputTensorSizes,
+                                 const PermutationsSequence& permutationVectors,
+                                 const bool usePermuteMerging,
+                                 Blob::Ptr& outputBlob) {
+        PrepareGraphCompilation();
+        _compilationConfig.detectBatch = false;
+        _compilationConfig.enablePermuteMerging = usePermuteMerging;
+
+        IE_ASSERT(permutationVectors.size() >= 2);
+
+        DimsOrder layout = *permutationVectors[0].reorder; // the first "reorder" entry is synthetic and only sets the input layout.
+
+        InitializeInputData({defaultDataType, layout, MakeStubDimValues(layout, inputTensorSizes)});
+
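+        // Each subsequent element is either an explicit permutation vector or a target layout;
+        // a layout change is converted into its equivalent permutation so tensor sizes can be tracked uniformly.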
+        for (size_t i = 1; i < permutationVectors.size(); ++i) {
+            const bool lastIteration = i == permutationVectors.size() - 1;
+            const auto permutationStep = permutationVectors[i];
+            PermutationIndexVector permute = permutationStep.permute;
+            if (permutationStep.permute.empty()) {
+                auto oldLayout = layout;
+                layout = *permutationStep.reorder;
+                permute = calculatePermuteForReorder(oldLayout, layout);
+            }
+
+            inputTensorSizes = applyPermute(inputTensorSizes, permute);
+
+            const DataDesc intermediateDataDesc(defaultDataType, layout,  MakeStubDimValues(layout, inputTensorSizes));
+
+            vpu::Data dataInt = lastIteration
+                              ? InitializeOutputData(intermediateDataDesc)
+                              : _gtModel->addNewData("data" + std::to_string(i), intermediateDataDesc);
+            if (!permutationStep.permute.empty()) {
+                _stageBuilder->addPermuteStage(_gtModel,
+                                               "Permute" + std::to_string(i),
+                                               nullptr,
+                                               _dataIntermediate,
+                                               dataInt,
+                                               permuteVectorToMap(permutationStep.permute, layout, layout)
+                                               );
+            } else {
+                _stageBuilder->addReorderStage(_gtModel,
+                                               "Reorder" + std::to_string(i),
+                                               nullptr,
+                                               _dataIntermediate,
+                                               dataInt
+                                               );
+            }
+            _dataIntermediate = dataInt;
+        }
+
+        Blob::Ptr inputBlob;
+        return CompileAndInfer(inputBlob, outputBlob);
+    }
+};
+
+static const std::vector<InferenceEngine::SizeVector> s_inTensors_3D = {
+    {5, 7, 11},
+};
+
+static const std::vector<PermutationsSequence> s_permuteParams_3D = {
+    {DimsOrder::CHW, PermutationIndexVector{0, 1, 2}},
+    {DimsOrder::CHW, PermutationIndexVector{1, 2, 0}, DimsOrder::HWC},
+    {DimsOrder::CHW, PermutationIndexVector{1, 2, 0}, DimsOrder::HWC, PermutationIndexVector{1, 2, 0}, DimsOrder::HCW},
+};
+
+static const std::vector<InferenceEngine::SizeVector> s_inTensors_5D = {
+    {2, 3, 5, 7, 11},
+};
+
+static const std::vector<PermutationsSequence> s_permuteParams_5D = {
+    {DimsOrder::NCDHW, PermutationIndexVector{0, 1, 2, 3, 4}, DimsOrder::NDHWC},
+    {DimsOrder::NDHWC, PermutationIndexVector{1, 2, 3, 4, 0}, DimsOrder::NCDHW, PermutationIndexVector{1, 2, 3, 4, 0}, DimsOrder::NDHWC},
+    {DimsOrder::NCDHW, DimsOrder::NDHWC, DimsOrder::NCDHW},
+};
+
+TEST_P(myriadGTMergePermuteNDTests_nightly, Permute) {
+    const auto& test_params = GetParam();
+    const auto& inputTensorSizes   = std::get<0>(test_params);
+    const auto& permutationVectors = std::get<1>(test_params);
+
+    Blob::Ptr outputBlobWithMerging, outputBlobWithoutMerging;
+
+    const auto executionMicrosecondsOptimized = InferPermuteSequence(inputTensorSizes, permutationVectors, true, outputBlobWithMerging);
+    const auto executionMicroseconds          = InferPermuteSequence(inputTensorSizes, permutationVectors, false, outputBlobWithoutMerging);
+
+    CompareCommonAbsolute(outputBlobWithMerging, outputBlobWithoutMerging, 0.);
+    std::cout << "Myriad time = non-optimized: " << executionMicroseconds << " us., optimized: " << executionMicrosecondsOptimized << " us.\n";
+}
+
+INSTANTIATE_TEST_CASE_P(accuracy_3D, myriadGTMergePermuteNDTests_nightly,
+        ::testing::Combine(
+            ::testing::ValuesIn(s_inTensors_3D)
+          , ::testing::ValuesIn(s_permuteParams_3D)
+));
+
+INSTANTIATE_TEST_CASE_P(accuracy_5D, myriadGTMergePermuteNDTests_nightly,
+        ::testing::Combine(
+            ::testing::ValuesIn(s_inTensors_5D)
+          , ::testing::ValuesIn(s_permuteParams_5D)
+));
diff --git a/inference-engine/tests_deprecated/functional/vpu/myriad_tests/myriad_configs_tests.cpp b/inference-engine/tests_deprecated/functional/vpu/myriad_tests/myriad_configs_tests.cpp
new file mode 100644 (file)
index 0000000..2a8ad3e
--- /dev/null
@@ -0,0 +1,114 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ngraph_functions/subgraph_builders.hpp>
+
+#include "ie_common.h"
+#include "myriad_layers_tests.hpp"
+#include "tests_vpu_common.hpp"
+
+#include "functional_test_utils/plugin_cache.hpp"
+
+using config_t = std::map<std::string, std::string>;
+
+typedef myriadLayerTestBaseWithParam<config_t> myriadCorrectModelsConfigsTests_nightly;
+typedef myriadLayerTestBaseWithParam<config_t> myriadIncorrectModelsConfigsTests_nightly;
+
+//------------------------------------------------------------------------------
+//  myriadCorrectModelsConfigsTests_nightly
+//------------------------------------------------------------------------------
+
+TEST_P(myriadCorrectModelsConfigsTests_nightly, LoadNetworkWithCorrectConfig) {
+    InferenceEngine::ResponseDesc response;
+    const auto &config = GetParam();
+    DISABLE_IF(!hasAppropriateStick(config));
+
+    InferenceEngine::CNNNetwork net(ngraph::builder::subgraph::makeSplitConvConcat());
+    InferenceEngine::IExecutableNetwork::Ptr executable;
+    InferenceEngine::StatusCode sts = _vpuPluginPtr->LoadNetwork(executable, net, config, &response);
+
+    ASSERT_EQ(InferenceEngine::StatusCode::OK, sts) << response.msg;
+}
+
+TEST_P(myriadCorrectModelsConfigsTests_nightly, CreateInferRequestWithAvailableDevice) {
+    InferenceEngine::ResponseDesc response;
+    const auto &config = GetParam();
+    DISABLE_IF(!hasAppropriateStick(config));
+
+    InferenceEngine::CNNNetwork net(ngraph::builder::subgraph::makeSplitConvConcat());
+    InferenceEngine::IExecutableNetwork::Ptr executable;
+    InferenceEngine::StatusCode sts = _vpuPluginPtr->LoadNetwork(executable, net, config, &response);
+    ASSERT_EQ(InferenceEngine::StatusCode::OK, sts) << response.msg;
+
+    InferenceEngine::IInferRequest::Ptr request;
+    sts = executable->CreateInferRequest(request, &response);
+    ASSERT_EQ(InferenceEngine::StatusCode::OK, sts) << response.msg;
+}
+
+TEST_P(myriadCorrectModelsConfigsTests_nightly, CreateInferRequestWithUnavailableDevice) {
+    InferenceEngine::ResponseDesc response;
+    const auto &config = GetParam();
+    DISABLE_IF(hasAppropriateStick(config));
+
+    InferenceEngine::CNNNetwork net(ngraph::builder::subgraph::makeSplitConvConcat());
+    InferenceEngine::IExecutableNetwork::Ptr executable;
+    InferenceEngine::StatusCode sts = _vpuPluginPtr->LoadNetwork(executable, net, config, &response);
+    ASSERT_EQ(InferenceEngine::StatusCode::OK, sts) << response.msg;
+
+    InferenceEngine::IInferRequest::Ptr request;
+    sts = executable->CreateInferRequest(request, &response);
+    ASSERT_EQ(InferenceEngine::StatusCode::GENERAL_ERROR, sts) << response.msg;
+}
+
+//------------------------------------------------------------------------------
+//  myriadIncorrectModelsConfigsTests_nightly
+//------------------------------------------------------------------------------
+
+TEST_P(myriadIncorrectModelsConfigsTests_nightly, LoadNetworkWithIncorrectConfig) {
+    InferenceEngine::ResponseDesc response;
+    const auto &config = GetParam();
+    DISABLE_IF(hasAppropriateStick(config));
+
+    InferenceEngine::CNNNetwork net(ngraph::builder::subgraph::makeSplitConvConcat());
+    InferenceEngine::IExecutableNetwork::Ptr executable;
+    InferenceEngine::StatusCode sts = _vpuPluginPtr->LoadNetwork(executable, net, config, &response);
+
+    ASSERT_EQ(InferenceEngine::StatusCode::GENERAL_ERROR, sts) << response.msg;
+}
+
+//------------------------------------------------------------------------------
+//  Tests initiation
+//------------------------------------------------------------------------------
+
+static const std::vector<config_t> myriadCorrectPlatformConfigValues = {
+        {{VPU_MYRIAD_CONFIG_KEY(PLATFORM), VPU_MYRIAD_CONFIG_VALUE(2450)}},
+        {{VPU_MYRIAD_CONFIG_KEY(PLATFORM), VPU_MYRIAD_CONFIG_VALUE(2480)}},
+        {{VPU_MYRIAD_CONFIG_KEY(PLATFORM), ""}},
+        // Deprecated
+        {{VPU_CONFIG_KEY(PLATFORM), VPU_CONFIG_VALUE(2450)}},
+        {{VPU_CONFIG_KEY(PLATFORM), VPU_CONFIG_VALUE(2480)}},
+        {{VPU_CONFIG_KEY(PLATFORM), ""}}
+};
+
+static const std::vector<config_t> myriadIncorrectPlatformConfigValues = {
+        {{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "-1"}},
+        {{VPU_MYRIAD_CONFIG_KEY(PLATFORM), " 0"}},
+        {{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "MyriadX"}},
+        // Deprecated
+        {{VPU_CONFIG_KEY(PLATFORM), "-1"}},
+        {{VPU_CONFIG_KEY(PLATFORM), " 0"}},
+        {{VPU_CONFIG_KEY(PLATFORM), "MyriadX"}},
+        // Deprecated key & value from current
+        {{VPU_CONFIG_KEY(PLATFORM), VPU_MYRIAD_CONFIG_VALUE(2450)}},
+        {{VPU_CONFIG_KEY(PLATFORM), VPU_MYRIAD_CONFIG_VALUE(2480)}},
+        // Current key & deprecated value
+        {{VPU_MYRIAD_CONFIG_KEY(PLATFORM), VPU_CONFIG_VALUE(2450)}},
+        {{VPU_MYRIAD_CONFIG_KEY(PLATFORM), VPU_CONFIG_VALUE(2480)}},
+
+};
+
+INSTANTIATE_TEST_CASE_P(MyriadConfigs, myriadCorrectModelsConfigsTests_nightly, ::testing::ValuesIn(myriadCorrectPlatformConfigValues));
+
+INSTANTIATE_TEST_CASE_P(MyriadConfigs, myriadIncorrectModelsConfigsTests_nightly, ::testing::ValuesIn(myriadIncorrectPlatformConfigValues));
diff --git a/inference-engine/tests_deprecated/functional/vpu/myriad_tests/myriad_multiple_graph_tests.cpp b/inference-engine/tests_deprecated/functional/vpu/myriad_tests/myriad_multiple_graph_tests.cpp
new file mode 100644 (file)
index 0000000..ad6fadb
--- /dev/null
@@ -0,0 +1,31 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+#include <ngraph_functions/subgraph_builders.hpp>
+
+#include "myriad_layers_tests.hpp"
+#include "tests_vpu_common.hpp"
+
+using namespace InferenceEngine;
+
+PRETTY_PARAM(num_graphs, int)
+typedef myriadLayerTestBaseWithParam<num_graphs> myriadMultipleGraphsTests_nightly;
+
+// Test ability to load many graphs to device
+TEST_P(myriadMultipleGraphsTests_nightly, LoadGraphsOnDevice) {
+    ASSERT_NO_THROW(_cnnNetwork = InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSplitConvConcat()));
+    const int num_graphs = GetParam();
+    StatusCode st;
+    std::vector<InferenceEngine::IExecutableNetwork::Ptr> exeNetwork(num_graphs);
+    std::map<std::string, std::string> networkConfig;
+    for (int i = 0; i < num_graphs; ++i) {
+        st = _vpuPluginPtr->LoadNetwork(exeNetwork[i], _cnnNetwork, networkConfig, &_resp);
+        ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
+    }
+}
+
+INSTANTIATE_TEST_CASE_P(numberOfGraphs, myriadMultipleGraphsTests_nightly,
+    ::testing::Values(2, 4, 10)
+);
diff --git a/inference-engine/tests_deprecated/functional/vpu/myriad_tests/myriad_streams_configuration_tests.cpp b/inference-engine/tests_deprecated/functional/vpu/myriad_tests/myriad_streams_configuration_tests.cpp
new file mode 100644 (file)
index 0000000..0d0b7e1
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu_layers_tests.hpp"
+
+#include <gtest/gtest.h>
+#include <ngraph_functions/subgraph_builders.hpp>
+
+namespace {
+
+class myriadCorrectStreamsConfiguration_nightly : public vpuLayersTests, public testing::WithParamInterface<std::uint32_t> {};
+TEST_P(myriadCorrectStreamsConfiguration_nightly, InfersWithConfiguredStreams) {
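+    // THROUGHPUT_STREAMS sets how many parallel execution streams the plugin creates on the device.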
+    _config[VPU_MYRIAD_CONFIG_KEY(THROUGHPUT_STREAMS)] = std::to_string(GetParam());
+    _irVersion = IRVersion::v10;
+
+    auto fn_ptr = ngraph::builder::subgraph::makeSplitMultiConvConcat();
+    ASSERT_NO_THROW(_cnnNetwork = InferenceEngine::CNNNetwork(fn_ptr));
+    ASSERT_NO_THROW(_inputsInfo = _cnnNetwork.getInputsInfo());
+    ASSERT_NO_THROW(_outputsInfo = _cnnNetwork.getOutputsInfo());
+
+    createInferRequest(NetworkInitParams{}.useHWOpt(true));
+
+    ASSERT_TRUE(Infer());
+}
+
+INSTANTIATE_TEST_CASE_P(StreamsConfiguration, myriadCorrectStreamsConfiguration_nightly, testing::Values(1, 2, 3));
+
+}  // namespace
diff --git a/inference-engine/tests_deprecated/functional/vpu/myriad_tests/vpu_tests_config.cpp b/inference-engine/tests_deprecated/functional/vpu/myriad_tests/vpu_tests_config.cpp
new file mode 100644 (file)
index 0000000..1d070c6
--- /dev/null
@@ -0,0 +1,24 @@
+// Copyright (C) 2018-2019 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu_tests_config.hpp"
+#include "functional_test_utils/skip_tests_config.hpp"
+
+#include <gtest/gtest.h>
+
+namespace vpu {
+namespace tests {
+
+const char* pluginName      () { return "myriadPlugin"; }
+const char* pluginNameShort () { return "myriad"; }
+const char* deviceName      () { return "MYRIAD"; }
+bool        deviceForceReset() { return true; }
+
+}  // namespace tests
+}  // namespace vpu
+
+std::vector<std::string> disabledTestPatterns() {
+    return {
+    };
+}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/functional/vpu/shared_tests_instance/common_single_layer_tests/single_layer_tests.cpp b/inference-engine/tests_deprecated/functional/vpu/shared_tests_instance/common_single_layer_tests/single_layer_tests.cpp
new file mode 100644 (file)
index 0000000..164f6b6
--- /dev/null
@@ -0,0 +1,97 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "single_layer_tests.hpp"
+#include "vpu_tests_config.hpp"
+
+static CommonTestUtils::conv_common_params convParams =
+        {
+                PropertyVector<unsigned>{{2, 2}},  // stride
+                PropertyVector<unsigned>{{3, 3}},  // kernel
+                {},                                // pad_begin
+                {},                                // pad_end
+                PropertyVector<unsigned>{{1, 1}},  // dilation
+                "same_upper",                      // auto_pad
+                1,                                 // group
+                2                                  // out_c
+        };
+
+static CommonTestUtils::pool_common_params poolParams =
+        {
+                PropertyVector<unsigned>{{2, 2}},  // stride
+                PropertyVector<unsigned>{{3, 3}},  // kernel
+                {},                                // pad_begin
+                {},                                // pad_end
+                "same_upper",                      // auto_pad
+                true,                              // avg
+                false                              // exclude_pad
+        };
+
+static std::vector<PluginParams> pluginParams = {
+    PluginDependentParam{vpu::tests::deviceName(), Layout::NHWC, Precision::FP16, 0.01f},
+};
+
+std::string
+getTestCaseName(testing::TestParamInfo<std::tuple<InitialShapes, NewShapes, PluginParams, Helper>> obj) {
+    auto params = obj.param;
+    PluginDependentParam pluginParams = std::get<2>(params);
+    LayerTestHelper::Ptr helper = std::get<3>(params);
+    // Match the filter naming used by the functional tests
+    std::map<std::string, std::string> device2FilterName{
+        {vpu::tests::deviceName(), vpu::tests::pluginNameShort()},
+    };
+    return device2FilterName[pluginParams.deviceName] + helper->getType();
+}
+
+#if (defined INSTANTIATE_TESTS)
+
+INSTANTIATE_TEST_CASE_P(
+        Conv_nightly, CommonSingleLayerTest,
+        ::testing::Combine(
+        ::testing::Values(InitialShapes({
+                                                {{1, 2, 16, 16}},           // input
+                                                {{1, 2, 8,  8}}             // output
+                                        })),
+        ::testing::Values(NewShapes({
+                                            {{1, 2, 15, 15}},               // input
+                                            {{1, 2, 8,  8}}                 // output
+                                    })),
+        ::testing::ValuesIn(pluginParams),
+        ::testing::Values(Helper(std::make_shared<ConvolutionTestHelper>(convParams)))
+), getTestCaseName
+);
+
+INSTANTIATE_TEST_CASE_P(
+        Deconv_nightly, CommonSingleLayerTest,
+        ::testing::Combine(
+        ::testing::Values(InitialShapes({
+                                                {{1, 2, 8,  8}},             // input
+                                                {{1, 2, 16, 16}}              // output
+                                        })),
+        ::testing::Values(NewShapes({
+                                            {{1, 2, 7,  7}},                  // input
+                                            {{1, 2, 14, 14}}                  // output
+                                    })),
+        ::testing::ValuesIn(pluginParams),
+        ::testing::Values(Helper(std::make_shared<DeconvolutionTestHelper>(convParams)))
+), getTestCaseName
+);
+
+INSTANTIATE_TEST_CASE_P(
+        Pool_nightly, CommonSingleLayerTest,
+        ::testing::Combine(
+        ::testing::Values(InitialShapes({
+                                                {{1, 2, 16, 16}},           // input
+                                                {{1, 2, 8,  8}}             // output
+                                        })),
+        ::testing::Values(NewShapes({
+                                            {{1, 2, 15, 15}},               // input
+                                            {{1, 2, 8,  8}}                 // output
+                                    })),
+        ::testing::ValuesIn(pluginParams),
+        ::testing::Values(Helper(std::make_shared<PoolingTestHelper>(poolParams)))
+), getTestCaseName
+);
+
+#endif
diff --git a/inference-engine/tests_deprecated/functional/vpu/shared_tests_instance/ie_class/ie_class.cpp b/inference-engine/tests_deprecated/functional/vpu/shared_tests_instance/ie_class/ie_class.cpp
new file mode 100644 (file)
index 0000000..e44504a
--- /dev/null
@@ -0,0 +1,216 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "ie_class.hpp"
+#include "vpu_tests_config.hpp"
+#include "common_test_utils/file_utils.hpp"
+
+using IEClassExecutableNetworkGetMetricTest_nightly = IEClassExecutableNetworkGetMetricTest;
+using IEClassExecutableNetworkGetConfigTest_nightly = IEClassExecutableNetworkGetConfigTest;
+
+using IEClassGetMetricTest_nightly = IEClassGetMetricTest;
+using IEClassGetConfigTest_nightly = IEClassGetConfigTest;
+
+std::string devices[] = {
+    std::string(vpu::tests::deviceName()),
+};
+
+std::pair<std::string, std::string> plugins[] = {
+    std::make_pair(std::string(vpu::tests::pluginName()), std::string(vpu::tests::deviceName())),
+};
+
+//
+// IE Class Common tests with <pluginName, deviceName params>
+//
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassBasicTestP_smoke, IEClassBasicTestP,
+    ::testing::ValuesIn(plugins));
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassNetworkTestP_smoke, IEClassNetworkTestP,
+    ::testing::ValuesIn(devices));
+
+//
+// IEClassNetworkTestP tests, customized to add SKIP_IF_CURRENT_TEST_IS_DISABLED()
+//
+
+class IEClassNetworkTestP_VPU : public IEClassNetworkTestP {};
+
+TEST_P(IEClassNetworkTestP_VPU, smoke_ImportNetworkNoThrowIfNoDeviceName) {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED();
+    Core ie;
+    std::stringstream strm;
+    ExecutableNetwork executableNetwork;
+    ASSERT_NO_THROW(executableNetwork = ie.LoadNetwork(actualNetwork, deviceName));
+    SKIP_IF_NOT_IMPLEMENTED(executableNetwork.Export(strm));
+    if (!strm.str().empty() && deviceName.find("FPGA") != std::string::npos) {
+        SKIP_IF_NOT_IMPLEMENTED(executableNetwork = ie.ImportNetwork(strm));
+    }
+    if (nullptr != static_cast<IExecutableNetwork::Ptr&>(executableNetwork)) {
+        ASSERT_NO_THROW(executableNetwork.CreateInferRequest());
+    }
+}
+
+TEST_P(IEClassNetworkTestP_VPU, smoke_ImportNetworkNoThrowWithDeviceName) {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED();
+    Core ie;
+    std::stringstream strm;
+    ExecutableNetwork executableNetwork;
+    ASSERT_NO_THROW(executableNetwork = ie.LoadNetwork(actualNetwork, deviceName));
+    SKIP_IF_NOT_IMPLEMENTED(executableNetwork.Export(strm));
+    SKIP_IF_NOT_IMPLEMENTED(executableNetwork = ie.ImportNetwork(strm, deviceName));
+    if (nullptr != static_cast<IExecutableNetwork::Ptr&>(executableNetwork)) {
+        ASSERT_NO_THROW(executableNetwork.CreateInferRequest());
+    }
+}
+
+// #-29320
+TEST_P(IEClassNetworkTestP_VPU, DISABLED_smoke_ExportUsingFileNameImportFromStreamNoThrowWithDeviceName) {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED();
+    Core ie;
+    ExecutableNetwork executableNetwork;
+    std::string fileName{"ExportedNetwork"};
+    {
+        ASSERT_NO_THROW(executableNetwork = ie.LoadNetwork(actualNetwork, deviceName));
+        SKIP_IF_NOT_IMPLEMENTED(executableNetwork.Export(fileName));
+    }
+    if (CommonTestUtils::fileExists(fileName)) {
+        {
+            std::ifstream strm(fileName);
+            SKIP_IF_NOT_IMPLEMENTED(executableNetwork = ie.ImportNetwork(strm, deviceName));
+        }
+        ASSERT_EQ(0, remove(fileName.c_str()));
+    }
+    if (nullptr != static_cast<IExecutableNetwork::Ptr&>(executableNetwork)) {
+        ASSERT_NO_THROW(executableNetwork.CreateInferRequest());
+    }
+}
+
+using IEClassNetworkTestP_VPU_GetMetric = IEClassNetworkTestP_VPU;
+TEST_P(IEClassNetworkTestP_VPU_GetMetric, smoke_OptimizationCapabilitiesReturnsFP16) {
+    Core ie;
+    ASSERT_METRIC_SUPPORTED(METRIC_KEY(OPTIMIZATION_CAPABILITIES))
+
+    Parameter optimizationCapabilitiesParameter;
+    ASSERT_NO_THROW(optimizationCapabilitiesParameter = ie.GetMetric(deviceName, METRIC_KEY(OPTIMIZATION_CAPABILITIES)));
+
+    const auto optimizationCapabilities = optimizationCapabilitiesParameter.as<std::vector<std::string>>();
+    ASSERT_EQ(optimizationCapabilities.size(), 1);
+    ASSERT_EQ(optimizationCapabilities.front(), METRIC_VALUE(FP16));
+}
+
+INSTANTIATE_TEST_CASE_P(
+    smoke_IEClassGetMetricP, IEClassNetworkTestP_VPU_GetMetric,
+    ::testing::ValuesIn(devices));
+
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassImportExportTestP, IEClassNetworkTestP_VPU,
+        ::testing::Values(std::string(vpu::tests::deviceName()), "HETERO:" + std::string(vpu::tests::deviceName())));
+
+#if defined(ENABLE_MKL_DNN) && ENABLE_MKL_DNN
+INSTANTIATE_TEST_CASE_P(
+        smoke_IEClassImportExportTestP_HETERO_CPU, IEClassNetworkTestP_VPU,
+        ::testing::Values("HETERO:" + std::string(vpu::tests::deviceName()) + ",CPU"));
+#endif
+
+//
+// Executable Network GetMetric
+//
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassExecutableNetworkGetMetricTest_nightly,
+    IEClassExecutableNetworkGetMetricTest_ThrowsUnsupported,
+    ::testing::ValuesIn(devices));
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassExecutableNetworkGetMetricTest_nightly,
+    IEClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS,
+    ::testing::ValuesIn(devices));
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassExecutableNetworkGetMetricTest_nightly,
+    IEClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS,
+    ::testing::ValuesIn(devices));
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassExecutableNetworkGetMetricTest_nightly,
+    IEClassExecutableNetworkGetMetricTest_NETWORK_NAME,
+    ::testing::ValuesIn(devices));
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassExecutableNetworkGetMetricTest_nightly,
+    IEClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS,
+    ::testing::ValuesIn(devices));
+
+//
+// Executable Network GetConfig
+//
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassExecutableNetworkGetConfigTest_nightly,
+    IEClassExecutableNetworkGetConfigTest,
+    ::testing::ValuesIn(devices));
+
+//
+// IE Class GetMetric
+//
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassGetMetricTest_nightly,
+    IEClassGetMetricTest_ThrowUnsupported,
+    ::testing::ValuesIn(devices));
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassGetMetricTest_nightly,
+    IEClassGetMetricTest_AVAILABLE_DEVICES,
+    ::testing::ValuesIn(devices));
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassGetMetricTest_nightly,
+    IEClassGetMetricTest_SUPPORTED_METRICS,
+    ::testing::ValuesIn(devices));
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassGetMetricTest_nightly,
+    IEClassGetMetricTest_SUPPORTED_CONFIG_KEYS,
+    ::testing::ValuesIn(devices));
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassGetMetricTest_nightly,
+    IEClassGetMetricTest_OPTIMIZATION_CAPABILITIES,
+    ::testing::ValuesIn(devices));
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassGetMetricTest_nightly,
+    IEClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS,
+    ::testing::ValuesIn(devices));
+
+//
+// IE Class GetConfig
+//
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassGetConfigTest_nightly,
+    IEClassGetConfigTest,
+    ::testing::ValuesIn(devices));
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassGetConfigTest_nightly,
+    IEClassGetConfigTest_ThrowUnsupported,
+    ::testing::ValuesIn(devices));
+
+// IE Class Query network
+
+INSTANTIATE_TEST_CASE_P(
+    DISABLED_IEClassQueryNetworkTest_smoke,
+    IEClassQueryNetworkTest,
+    ::testing::ValuesIn(devices));
+
+// IE Class Load network
+
+INSTANTIATE_TEST_CASE_P(
+    IEClassLoadNetworkTest_smoke,
+    IEClassLoadNetworkTest,
+    ::testing::ValuesIn(devices));
diff --git a/inference-engine/tests_deprecated/functional/vpu/shared_tests_instance/input_tests/parser_tests.cpp b/inference-engine/tests_deprecated/functional/vpu/shared_tests_instance/input_tests/parser_tests.cpp
new file mode 100644 (file)
index 0000000..d03eb76
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "parser_tests.hpp"
+
+ir_test_params ir_test_cases[] = {
+    ir_test_params("MYRIAD", "FP16", negative_conv_kernel_x_case),
+    ir_test_params("MYRIAD", "FP16", negative_conv_kernel_y_case),
+    ir_test_params("MYRIAD", "FP16", negative_conv_stride_x_case),
+    ir_test_params("MYRIAD", "FP16", negative_conv_weights_case),
+    ir_test_params("MYRIAD", "FP16", negative_conv_biases_case),
+
+    ir_test_params("MYRIAD", "FP16", negative_fc_out_size_case),
+    ir_test_params("MYRIAD", "FP16", negative_fc_weights_case),
+    ir_test_params("MYRIAD", "FP16", negative_fc_biases_case),
+
+    ir_test_params("MYRIAD", "FP16", negative_deconv_kernel_x_case),
+    ir_test_params("MYRIAD", "FP16", negative_deconv_kernel_y_case),
+    ir_test_params("MYRIAD", "FP16", negative_deconv_stride_x_case),
+    ir_test_params("MYRIAD", "FP16", negative_deconv_weights_case),
+    ir_test_params("MYRIAD", "FP16", negative_deconv_biases_case),
+
+    ir_test_params("MYRIAD", "FP16", negative_pool_kernel_x_case),
+    ir_test_params("MYRIAD", "FP16", negative_pool_kernel_y_case),
+    ir_test_params("MYRIAD", "FP16", negative_pool_stride_x_case),
+    ir_test_params("MYRIAD", "FP16", incorrect_pool_type_case),
+
+    ir_test_params("MYRIAD", "FP16", negative_norm_local_size_case),
+    ir_test_params("MYRIAD", "FP16", negative_norm_k_case)
+};
+
+INSTANTIATE_TEST_CASE_P(FunctionalTest_nightly, IncorrectIRTests,
+        ::testing::ValuesIn(ir_test_cases),
+        getTestName);
diff --git a/inference-engine/tests_deprecated/functional/vpu/shared_tests_instance/io_blob_tests/cropResize_tests.cpp b/inference-engine/tests_deprecated/functional/vpu/shared_tests_instance/io_blob_tests/cropResize_tests.cpp
new file mode 100644 (file)
index 0000000..02f0d10
--- /dev/null
@@ -0,0 +1,172 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "cropResize_tests.hpp"
+#include "vpu_tests_config.hpp"
+
+#ifdef USE_OPENCV
+
+#define COMBINE_WITH_DEFAULT(_dims, _in_layouts, _color_formats) \
+    Combine(Values(Precision::FP16), \
+            Values(_dims), \
+            Values(std::make_pair(Precision::FP32, 1e-1), std::make_pair(Precision::U8, 1)), \
+            Values(_in_layouts), \
+            Values(ResizeAlgorithm::RESIZE_BILINEAR, ResizeAlgorithm::RESIZE_AREA), \
+            Values(_color_formats), \
+            Values(ROI({0, 40, 50, 220, 220})), \
+            Values(false, true))
+
+// test resize-only for all dims (as before)
+// test resize + color conversion for smaller number of dims (simple upscale/downscale scenarios only)
+namespace smoke {
+static auto params_resize_only = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS(1),
+    MULTI_VALUE(NCHW, NHWC),
+    COLOR_FORMATS_RAW);
+
+static auto params_csc_3ch_and_resize = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS_SMALL(1),
+    MULTI_VALUE(NCHW, NHWC),
+    COLOR_FORMATS_3CH);
+
+static auto params_csc_4ch_and_resize = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS_SMALL(1),
+    NHWC,
+    COLOR_FORMATS_4CH);
+
+// batch preprocessing parameters:
+static auto batch_params_resize_only = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS(2),
+    MULTI_VALUE(NCHW, NHWC),
+    COLOR_FORMATS_RAW);
+
+static auto batch_params_csc_3ch_and_resize = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS_SMALL(2),
+    MULTI_VALUE(NCHW, NHWC),
+    COLOR_FORMATS_3CH);
+
+static auto batch_params_csc_4ch_and_resize = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS_SMALL(2),
+    NHWC,
+    COLOR_FORMATS_4CH);
+}  // namespace smoke
+
+// test everything in nightly (as before)
+namespace nightly {
+static auto params_csc_3ch_and_resize = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS(1),
+    MULTI_VALUE(NCHW, NHWC),
+    MULTI_VALUE(COLOR_FORMATS_RAW, COLOR_FORMATS_3CH));
+
+static auto params_csc_4ch_and_resize = COMBINE_WITH_DEFAULT(
+    TESTED_DIMS(1),
+    NHWC,
+    COLOR_FORMATS_4CH);
+
+// batch preprocessing parameters:
+static auto batch_params_csc_3ch_and_resize = COMBINE_WITH_DEFAULT(
+    MULTI_VALUE(TESTED_DIMS(2), TESTED_DIMS(3)),
+    MULTI_VALUE(NCHW, NHWC),
+    MULTI_VALUE(COLOR_FORMATS_RAW, COLOR_FORMATS_3CH));
+
+static auto batch_params_csc_4ch_and_resize = COMBINE_WITH_DEFAULT(
+    MULTI_VALUE(TESTED_DIMS(2), TESTED_DIMS(3)),
+    NHWC,
+    COLOR_FORMATS_4CH);
+}  // namespace nightly
+
+// reorder preprocessing parameters:
+static auto reorder_params = Combine(
+        Values(Precision::FP16),  // network precision
+        Values(SizeVector({1, 3, 300, 300})),  // sizes of the network
+        Values(std::make_pair(Precision::FP32, 1e-1), std::make_pair(Precision::U8, 1)),  // precision and threshold
+        Values(std::make_pair(NCHW, NHWC), std::make_pair(NHWC, NCHW)),  // Input/network data layout
+        Values(ResizeAlgorithm::NO_RESIZE),
+        Values(ColorFormat::BGR),
+        Values(ROI({0, 0, 0, 300, 300})),  // cropped ROI params (id, x, y, width, height)
+        Values(false, true)  // Infer mode sync/async
+);
+
+// nv12 preprocessing parameters:
+static auto nv12_params = Combine(
+        Values(Precision::FP16),  // network precision
+        Values(cv::Size(300, 300)),  // input image size
+        Values(TESTED_DIMS(1)),  // sizes of the network
+        Values(std::make_pair(Precision::U8, 1)),  // precision and threshold
+        Values(ResizeAlgorithm::RESIZE_BILINEAR, ResizeAlgorithm::RESIZE_AREA),
+        Values(ColorFormat::NV12),
+        Values(ROI({0, 0, 0, 300, 300}), ROI({0, 15, 10, 210, 210})),  // cropped ROI params (id, x, y, width, height)
+        Values(false, true)  // Infer mode sync/async
+);
+
+static auto random_roi_3c = Combine(
+            Values(Precision::FP16),
+            Values(TESTED_DIMS(1)),
+            Values(std::make_pair(Precision::FP32, 1e-2), std::make_pair(Precision::U8, 1)),
+            Values(MULTI_VALUE(NCHW, NHWC)),
+            Values(ResizeAlgorithm::RESIZE_BILINEAR, ResizeAlgorithm::RESIZE_AREA),
+            Values(COLOR_FORMATS_3CH),
+            Values(ROI({0, 0, 0, 0, 0})),
+            Values(false, true)
+);
+
+static auto random_roi_4c = Combine(
+            Values(Precision::FP16),
+            Values(TESTED_DIMS(1)),
+            Values(std::make_pair(Precision::FP32, 1e-2), std::make_pair(Precision::U8, 1)),
+            Values(NHWC),
+            Values(ResizeAlgorithm::RESIZE_BILINEAR, ResizeAlgorithm::RESIZE_AREA),
+            Values(COLOR_FORMATS_4CH),
+            Values(ROI({0, 0, 0, 0, 0})),
+            Values(false, true)
+);
+
+static auto random_roi_nv12 = Combine(
+            Values(Precision::FP16),
+            Values(TESTED_DIMS(1)),
+            Values(std::make_pair(Precision::U8, 1)),
+            Values(NHWC),
+            Values(ResizeAlgorithm::RESIZE_BILINEAR, ResizeAlgorithm::RESIZE_AREA),
+            Values(ColorFormat::NV12),
+            Values(ROI({0, 0, 0, 0, 0})),
+            Values(false, true)
+);
+
+// smoke:
+VPU_PLUGING_CASE_WITH_SUFFIX(_gapi_random_roi_c3_smoke, RandomROITest, random_roi_3c);
+VPU_PLUGING_CASE_WITH_SUFFIX(_gapi_random_roi_c4_smoke, RandomROITest, random_roi_4c);
+VPU_PLUGING_CASE_WITH_SUFFIX(_gapi_random_roi_nv12_smoke, RandomROITest, random_roi_nv12);
+
+VPU_PLUGING_CASE_WITH_SUFFIX(_gapi_resize_only_smoke, CropResizeTest, smoke::params_resize_only);
+VPU_PLUGING_CASE_WITH_SUFFIX(_gapi_csc_3ch_and_resize_smoke, CropResizeTest, smoke::params_csc_3ch_and_resize);
+VPU_PLUGING_CASE_WITH_SUFFIX(_gapi_csc_4ch_and_resize_smoke, CropResizeTest, smoke::params_csc_4ch_and_resize);
+
+VPU_PLUGING_CASE_WITH_SUFFIX(_gapi_resize_only_smoke, BatchResizeTest, smoke::batch_params_resize_only);
+VPU_PLUGING_CASE_WITH_SUFFIX(_gapi_csc_3ch_and_resize_smoke, BatchResizeTest, smoke::batch_params_csc_3ch_and_resize);
+VPU_PLUGING_CASE_WITH_SUFFIX(_gapi_csc_4ch_and_resize_smoke, BatchResizeTest, smoke::batch_params_csc_4ch_and_resize);
+
+VPU_PLUGING_CASE_WITH_SUFFIX(_gapi_reorder_smoke, ReorderTest, reorder_params);
+
+VPU_PLUGING_CASE_WITH_SUFFIX(_gapi_csc_nv12_and_resize_smoke, NV12ColorConvertTest, nv12_params);
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// nightly:
+
+// FIXME: enable these once smoke/nightly concepts are introduced in CI
+DISABLED_VPU_PLUGING_CASE_WITH_SUFFIX(_gapi_random_roi_c3_nightly, RandomROITest, random_roi_3c);
+DISABLED_VPU_PLUGING_CASE_WITH_SUFFIX(_gapi_random_roi_c4_nightly, RandomROITest, random_roi_4c);
+DISABLED_VPU_PLUGING_CASE_WITH_SUFFIX(_gapi_random_roi_nv12_nightly, RandomROITest, random_roi_nv12);
+
+DISABLED_VPU_PLUGING_CASE_WITH_SUFFIX(_gapi_csc_3ch_and_resize_nightly, CropResizeTest, nightly::params_csc_3ch_and_resize);
+DISABLED_VPU_PLUGING_CASE_WITH_SUFFIX(_gapi_csc_4ch_and_resize_nightly, CropResizeTest, nightly::params_csc_4ch_and_resize);
+
+DISABLED_VPU_PLUGING_CASE_WITH_SUFFIX(_gapi_csc_3ch_and_resize_nightly, BatchResizeTest, nightly::batch_params_csc_3ch_and_resize);
+DISABLED_VPU_PLUGING_CASE_WITH_SUFFIX(_gapi_csc_4ch_and_resize_nightly, BatchResizeTest, nightly::batch_params_csc_4ch_and_resize);
+
+DISABLED_VPU_PLUGING_CASE_WITH_SUFFIX(_gapi_reorder_nightly, ReorderTest, reorder_params);
+
+DISABLED_VPU_PLUGING_CASE_WITH_SUFFIX(_gapi_csc_nv12_and_resize_nightly, NV12ColorConvertTest, nv12_params);
+
+#endif  // USE_OPENCV
diff --git a/inference-engine/tests_deprecated/functional/vpu/shared_tests_instance/io_blob_tests/dims_tests.cpp b/inference-engine/tests_deprecated/functional/vpu/shared_tests_instance/io_blob_tests/dims_tests.cpp
new file mode 100644 (file)
index 0000000..46ddf97
--- /dev/null
@@ -0,0 +1,8 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "dims_tests.hpp"
+#include "vpu_tests_config.hpp"
+
+VPU_PLUGING_CASE_WITH_SUFFIX(_nightly, IO_BlobTest, params_myriad);
diff --git a/inference-engine/tests_deprecated/functional/vpu/shared_tests_instance/io_blob_tests/layout_tests.cpp b/inference-engine/tests_deprecated/functional/vpu/shared_tests_instance/io_blob_tests/layout_tests.cpp
new file mode 100644 (file)
index 0000000..acc9e21
--- /dev/null
@@ -0,0 +1,16 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "layout_tests.hpp"
+#include "vpu_tests_config.hpp"
+
+static auto params_myriad = ::testing::Combine(
+        ::testing::Values(conv_p),
+        ::testing::Values(std::make_pair(Precision::FP16, 1e-1)),
+        ::testing::Values(NCHW, NHWC),
+        ::testing::Values(NCHW, NHWC),
+        ::testing::Values(Precision::FP32, Precision::U8)  // TODO: What about U16/I8/FP16?
+);
+
+VPU_PLUGING_CASE_WITH_SUFFIX(_nightly, LayoutTTTest, params_myriad);
diff --git a/inference-engine/tests_deprecated/functional/vpu/test_data/test_model_repo.cpp b/inference-engine/tests_deprecated/functional/vpu/test_data/test_model_repo.cpp
new file mode 100644 (file)
index 0000000..1cb3045
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "test_model_repo.hpp"
+
+std::string get_model_repo() {
+    return "models:";
+}
+
+const char* TestDataHelpers::getModelPathNonFatal() noexcept {
+    return TestDataHelpers::getModelPathNonFatalDefault();
+}
+
+std::string TestDataHelpers::get_data_path() {
+    return TestDataHelpers::get_data_path_default();
+}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/functional/vpu/tester.py b/inference-engine/tests_deprecated/functional/vpu/tester.py
new file mode 100644 (file)
index 0000000..e74cb0a
--- /dev/null
@@ -0,0 +1,243 @@
+import errno
+import os
+import sys
+from pathlib import Path
+from argparse import ArgumentParser
+
+from typing import Union
+
+import xml.etree.cElementTree as et
+import xml.dom.minidom as dom
+
+
+def get_path(entry: Union[str, Path], is_directory=False, check_exists=True, file_or_directory=False):
+    try:
+        path = Path(entry)
+    except TypeError:
+        raise TypeError('"{}" is expected to be a path-like'.format(entry))
+
+    if not check_exists:
+        return path
+
+    # pathlib.Path.exists throws an exception in case of broken symlink
+    if not os.path.exists(str(path)):
+        raise FileNotFoundError('{}: {}'.format(os.strerror(errno.ENOENT), path))
+
+    if not file_or_directory:
+        if is_directory and not path.is_dir():
+            raise NotADirectoryError('{}: {}'.format(os.strerror(errno.ENOTDIR), path))
+
+        # if it exists it is either file (or valid symlink to file) or directory (or valid symlink to directory)
+        if not is_directory and not path.is_file():
+            raise IsADirectoryError('{}: {}'.format(os.strerror(errno.EISDIR), path))
+
+    return path
+
+
+def build_argument_parser():
+    parser = ArgumentParser(
+        description='Tool for adding tests to the OpenVINO project: it crops an IR around a given layer name for per-layer tests.',
+        allow_abbrev=False
+    )
+    parser.add_argument(
+        '-w', '--weights',
+        help='path to the bin file containing weights (OpenVINO IR); '
+             'if not specified, the value specified in "--model" option with "bin" extension is used',
+        type=get_path,
+        required=False
+    )
+    parser.add_argument(
+        '-o', '--output',
+        help='name of output files (.xml and .bin); '
+             'if not specified, the value specified in "--model" option with "_extracted" suffix is used',
+        type=str,
+        required=False
+    )
+
+    required = parser.add_argument_group('required arguments')
+    required.add_argument(
+        '-m', '--model',
+        help='path to the xml file containing the model (OpenVINO IR); only IRv10 or newer is supported at the moment',
+        type=get_path,
+        required=True
+    )
+    required.add_argument(
+        '-n', '--name',
+        help='name of the layer to be tested',
+        type=str,
+        required=True
+    )
+
+    return parser
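+
+
+# Example invocation (paths and layer name here are hypothetical):
+#   python tester.py -m /path/to/model.xml -n relu_1
+# writes model_extracted.xml and model_extracted.bin cropped around layer "relu_1".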
+
+
+def tostring(layer, num_tabs=0):
+    return '\t' * num_tabs + et.tostring(layer, encoding='utf-8').decode()
+
+
+def tags(node):
+    inner_tags = {}
+    for tag in node:
+        inner_tags[tag.tag] = tag
+    return inner_tags
+
+
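+# make_input replaces the producer of the cropped layer with a Parameter
+# operation exposing the same output port (shape and element type), so the
+# extracted IR is self-contained; make_output mirrors this with a Result.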
+def make_input(layer, port_id):
+    output = et.Element('output')
+    output_port = next(port for port in tags(layer)['output'] if int(port.attrib['id']) == int(port_id))
+    output.append(output_port)
+
+    precision_to_element_type = {'FP16': 'f16', 'I32': 'i32', 'FP32': 'f32'}
+    data = et.Element('data', attrib={
+        'element_type': precision_to_element_type[output_port.attrib['precision']],
+        'shape': ','.join([dim.text for dim in output_port])}
+    )
+
+    input_layer = et.Element('layer', attrib={
+        'id': layer.attrib['id'],
+        'name': layer.attrib['name'],
+        'type': 'Parameter',
+        'version': 'opset1'
+    })
+    input_layer.append(data)
+    input_layer.append(output)
+
+    return input_layer
+
+
+def make_output(layer, port_id):
+    input_section = et.Element('input')
+    input_port = next(port for port in tags(layer)['input'] if int(port.attrib['id']) == int(port_id))
+    input_section.append(input_port)
+
+    output_layer = et.Element('layer', attrib={
+        'id': layer.attrib['id'],
+        'name': layer.attrib['name'],
+        'type': 'Result',
+        'version': 'opset1'
+    })
+    output_layer.append(input_section)
+
+    return output_layer
+
+
+def extract_weights(source, layers, destination):
+    def update(section):
+        offset = int(section.attrib['offset'])
+        size = int(section.attrib['size'])
+
+        src.seek(offset)
+        original_segment = (offset, offset + size - 1)
+        if original_segment in output_offsets:
+            section.attrib['offset'] = str(output_offsets[original_segment])
+        else:
+            output_offset = dst.tell()
+            dst.write(src.read(size))
+
+            output_offsets[original_segment] = output_offset
+            section.attrib['offset'] = str(output_offset)
+
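+    # output_offsets maps an original (first byte, last byte) segment to its
+    # offset in the new .bin file, so a segment shared by several layers is
+    # copied only once.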
+    output_offsets = {}
+    with Path(source).open(mode='rb') as src, Path(destination).open(mode='w+b') as dst:
+        for layer in layers:
+            for inner_layer in layer.iter('layer'):
+                if inner_layer.attrib['version'] == 'opset1' and inner_layer.attrib['type'] == 'Const':
+                    # for standard operation set IR v10 keeps weights as Const layers
+                    # metadata such as offset and size is stored in data section
+                    update(inner_layer.find('data'))
+                else:
+                    # for other operation sets previous way is used
+                    # weights metadata is stored in blobs section
+                    # with "custom", "weights" and "biases" subsections
+                    blobs = inner_layer.find('blobs')
+                    for blob in (blobs if blobs is not None else []):
+                        update(blob)
+
+
+def prettify(element, indent=0):
+    header = dom.Document().toxml()
+    string = dom.parseString(tostring(element)).toprettyxml()[len(header) + 1:]
+    return '\n'.join(['\t' * indent + line for line in string.split('\n') if line.strip()])
+
+
+def dump(input_model, elements, output_model):
+    root = et.parse(str(input_model)).getroot()
+    net = et.Element('net', attrib={'name': root.attrib['name'], 'version': root.attrib['version']})
+
+    layers = et.Element('layers')
+    for layer in elements['layers']:
+        layers.append(layer)
+
+    edges = et.Element('edges')
+    for edge in elements['edges']:
+        edges.append(edge)
+
+    net.append(layers)
+    net.append(edges)
+
+    with Path(output_model).open(mode='w+t') as output:
+        print('<?xml version="1.0" ?>', file=output)
+        print(prettify(net), file=output)
+
+
+def main():
+    arguments = build_argument_parser().parse_args()
+    root = et.parse(str(arguments.model)).getroot()
+    ir_version = int(root.attrib['version'])
+    if ir_version < 10:
+        print('Error: only IR version 10 or newer is supported, IRv{} has been given'.format(ir_version))
+        sys.exit(-1)
+
+    layers, edges, _ = root
+    layers_identifiers = {}
+    for layer in layers:
+        layers_identifiers[int(layer.attrib['id'])] = layer
+
+    input_edges, output_edges = {}, {}
+    for edge in edges:
+        from_layer = int(edge.attrib['from-layer'])
+        to_layer = int(edge.attrib['to-layer'])
+
+        if to_layer not in input_edges:
+            input_edges[to_layer] = [edge]
+        else:
+            input_edges[to_layer].append(edge)
+
+        if from_layer not in output_edges:
+            output_edges[from_layer] = [edge]
+        else:
+            output_edges[from_layer].append(edge)
+
+    elements = {'layers': [], 'edges': []}
+    layer = next(operation for operation in layers if operation.attrib['name'] == arguments.name)
+
+    identifier = int(layer.attrib['id'])
+    elements['edges'] = input_edges[identifier] + output_edges[identifier]
+
+    for edge in input_edges[identifier]:
+        input_layer = layers_identifiers[int(edge.attrib['from-layer'])]
+
+        if input_layer.attrib['type'] != 'Const':
+            elements['layers'].append(make_input(input_layer, int(edge.attrib['from-port'])))
+        else:
+            elements['layers'].append(input_layer)
+
+    elements['layers'].append(layer)
+
+    for edge in output_edges[identifier]:
+        output_layer = layers_identifiers[int(edge.attrib['to-layer'])]
+        if output_layer.attrib['type'] != 'Result':
+            elements['layers'].append(make_output(output_layer, int(edge.attrib['to-port'])))
+        else:
+            elements['layers'].append(output_layer)
+
+    weights = arguments.weights or str(arguments.model)[:-3] + 'bin'
+    output = arguments.output or str(arguments.model)[:-4] + '_extracted'
+    output_weights = Path(output + '.bin')
+    extract_weights(weights, elements['layers'], output_weights)
+
+    output_model = Path(output + '.xml')
+    dump(arguments.model, elements, output_model)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/inference-engine/tests_deprecated/functional/vpu/vpu_base/myriad_layers_reference_functions.cpp b/inference-engine/tests_deprecated/functional/vpu/vpu_base/myriad_layers_reference_functions.cpp
new file mode 100644 (file)
index 0000000..27df402
--- /dev/null
@@ -0,0 +1,2994 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include "myriad_layers_reference_functions.hpp"
+
+#include "myriad_layers_tests.hpp"
+#include "conv_ref.hpp"
+#include "deconv_ref.hpp"
+#include "pool_ref.hpp"
+#include "ie_memcpy.h"
+#include <single_layer_common.hpp>
+#include "common_test_utils/common_layers_params.hpp"
+#include "vpu/utils/error.hpp"
+
+#include <math.h>
+
+#ifdef MAX
+#undef MAX
+#endif
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+
+#ifdef MIN
+#undef MIN
+#endif
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+using namespace InferenceEngine;
+
+const std::string relu_param = "negative_slope";
+const std::string inner_product_param = "out-size";
+
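+// Reorders a dimz x dimy x dimx tensor so the fastest- and slowest-varying axes
+// swap: input element (z, y, x) lands at output index z + dimz * (y + dimy * x).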
+static void kchw_to_hwck(const uint16_t* src,
+                         uint16_t* dst,
+                         size_t dimx,
+                         size_t dimy,
+                         size_t dimz) {
+    for (size_t x = 0 ; x < dimx; ++x) {
+        for (size_t y = 0 ; y < dimy; ++y) {
+            for (size_t z = 0 ; z < dimz; ++z) {
+                size_t input = x + dimx * (y + dimy * z);
+                size_t output = z + dimz * (y + dimy * x);
+                dst[output] = src[input];
+            }
+        }
+    }
+}
+
+void ref_convolution_wrap(const InferenceEngine::Blob::Ptr src,
+                          InferenceEngine::Blob::Ptr dst,
+                          const uint16_t* weights,
+                          size_t weights_size,
+                          const uint16_t *biases,
+                          size_t bias_size,
+                          const ParamsStruct& params) {
+    common_ref_convolution_wrap<ie_fp16>({ src }, dst, (const ie_fp16*)weights, weights_size, (const ie_fp16*)biases, bias_size, params);
+}
+
+void ref_convolution(const Blob::Ptr src,
+                     Blob::Ptr dst,
+                     const ie_fp16* weights_data,
+                     const ie_fp16* bias_data,
+                     param_size kernel,
+                     param_size stride,
+                     param_size pad,
+                     size_t group,
+                     param_size dilation) {
+    CommonTestUtils::conv_common_params params;
+    params.kernel.insert(X_AXIS, kernel.x);
+    params.kernel.insert(Y_AXIS, kernel.y);
+    params.stride.insert(X_AXIS, stride.x);
+    params.stride.insert(Y_AXIS, stride.y);
+    params.pads_begin.insert(X_AXIS, pad.x);
+    params.pads_begin.insert(Y_AXIS, pad.y);
+    params.dilation.insert(X_AXIS, dilation.x);
+    params.dilation.insert(Y_AXIS, dilation.y);
+    params.group = group;
+    ref_conv_common<ie_fp16>({ src }, *dst.get(), weights_data, 0, bias_data, 0, params);
+}
+
+void ref_copy_wrap(InferenceEngine::Blob::Ptr src,
+                   InferenceEngine::Blob::Ptr dst,
+                   const ParamsStruct& params) {
+    ASSERT_TRUE(params.empty());
+    ref_copy(src, dst);
+}
+
+void ref_copy(const InferenceEngine::Blob::Ptr src,
+              InferenceEngine::Blob::Ptr dst) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+    ASSERT_EQ(src->getTensorDesc().getDims().size(), dst->getTensorDesc().getDims().size());
+    uint16_t *srcData = src->buffer();
+    uint16_t *dstData = dst->buffer();
+    ASSERT_NE(srcData, nullptr);
+    ASSERT_NE(dstData, nullptr);
+    ie_memcpy(dstData, dst->byteSize(), srcData, src->byteSize());
+}
+
+void ref_ReLU(Blob::Ptr inTensor,
+              Blob::Ptr outTensor,
+              float negative_slope) {
+    ASSERT_NE(inTensor, nullptr);
+    ASSERT_NE(outTensor, nullptr);
+    uint16_t *blobRawDataFp16 = inTensor->buffer();
+    ASSERT_NE(blobRawDataFp16, nullptr);
+    uint16_t *blobOutDataFp16 = outTensor->buffer();
+    ASSERT_NE(blobOutDataFp16, nullptr);
+    size_t count = inTensor->size();
+    ASSERT_EQ(count, outTensor->size());
+    for (size_t indx = 0; indx < count; ++indx) {
+        float inpt = PrecisionUtils::f16tof32(blobRawDataFp16[indx]);
+        float val = std::max(inpt, 0.0f) + negative_slope * std::min(inpt, 0.0f);
+        blobOutDataFp16[indx] = PrecisionUtils::f32tof16(val);
+    }
+}
+
+void ref_ReLU_wrap(InferenceEngine::Blob::Ptr inTensor,
+                   InferenceEngine::Blob::Ptr outTensor,
+                   const ParamsStruct& params) {
+    float negative_slope = 0.0f;
+    if (!params.empty()) {
+        auto iter = params.find(relu_param);
+        if (iter != params.end()) {
+            negative_slope = std::stof(iter->second);
+        }
+    }
+    ref_ReLU(inTensor, outTensor, negative_slope);
+}
+
+void ref_Clamp(Blob::Ptr inTensor,
+               Blob::Ptr outTensor,
+               float min,
+               float max) {
+    ASSERT_NE(inTensor, nullptr);
+    ASSERT_NE(outTensor, nullptr);
+    uint16_t *blobRawDataFp16 = inTensor->buffer();
+    ASSERT_NE(blobRawDataFp16, nullptr);
+    uint16_t *blobOutDataFp16 = outTensor->buffer();
+    ASSERT_NE(blobOutDataFp16, nullptr);
+    size_t count = inTensor->size();
+    ASSERT_EQ(count, outTensor->size());
+    for (size_t indx = 0; indx < count; ++indx) {
+        float inpt = PrecisionUtils::f16tof32(blobRawDataFp16[indx]);
+        float val = MIN(max, MAX(min, inpt));
+        blobOutDataFp16[indx] = PrecisionUtils::f32tof16(val);
+    }
+}
+
+void ref_Clamp_wrap(InferenceEngine::Blob::Ptr inTensor,
+                    InferenceEngine::Blob::Ptr outTensor,
+                    const ParamsStruct& params) {
+    float min = 0.0f;
+    float max = 6.0f;
+    if (!params.empty()) {
+        auto iter = params.find("max");
+        if (iter != params.end()) {
+            max = std::stof(iter->second);
+        }
+        iter = params.find("min");
+        if (iter != params.end()) {
+            min = std::stof(iter->second);
+        }
+    }
+    ref_Clamp(inTensor, outTensor, min, max);
+}
+
+void ref_deconvolution_wrap(const InferenceEngine::Blob::Ptr src,
+                            InferenceEngine::Blob::Ptr dst,
+                            const uint16_t* weights,
+                            size_t weights_size,
+                            const uint16_t *biases,
+                            size_t bias_size,
+                            const ParamsStruct& params) {
+    common_ref_deconvolution_wrap<ie_fp16>({src}, dst, reinterpret_cast<const ie_fp16*>(weights), weights_size, reinterpret_cast<const ie_fp16*>(biases), bias_size, params);
+}
+
+void ref_eltwise(const Blob::Ptr src1,
+                 const Blob::Ptr src2,
+                 const Blob::Ptr src3,
+                 Blob::Ptr dst,
+                 eltwise_kernel fun,
+                 std::vector<float> coeff) {
+    ASSERT_NE(src1, nullptr);
+    ASSERT_NE(src2, nullptr);
+    ASSERT_NE(src3, nullptr);
+    ASSERT_NE(dst, nullptr);
+    uint16_t *dstData = dst->buffer().as<uint16_t*>();
+    uint16_t *src1Data = src1->buffer().as<uint16_t*>();
+    uint16_t *src2Data = src2->buffer().as<uint16_t*>();
+    uint16_t *src3Data = src3->buffer().as<uint16_t*>();
+
+    ASSERT_NE(src1Data, nullptr);
+    ASSERT_NE(src2Data, nullptr);
+    ASSERT_NE(src3Data, nullptr);
+    ASSERT_NE(dstData, nullptr);
+
+    for (int i = 0; i < dst->size(); i++) {
+        float val = fun(PrecisionUtils::f16tof32(src1Data[i])*coeff[0],
+                        PrecisionUtils::f16tof32(src2Data[i])*coeff[1],
+                        PrecisionUtils::f16tof32(src3Data[i])*coeff[2]);
+        dstData[i] = PrecisionUtils::f32tof16(val);
+    }
+}
+
+void ref_gather(const InferenceEngine::Blob::Ptr& srcIdx,
+                const InferenceEngine::Blob::Ptr& srcDct,
+                const InferenceEngine::Blob::Ptr& dst,
+                const                        int  axis) {
+    ASSERT_NE(srcIdx, nullptr);
+    ASSERT_NE(srcDct, nullptr);
+    ASSERT_NE(dst, nullptr);
+
+    const auto& idxDesc = srcIdx->getTensorDesc();
+    const auto& srcDesc = srcDct->getTensorDesc();
+    const auto& dstDesc = dst->getTensorDesc();
+
+    const auto& idxPrecision = idxDesc.getPrecision();
+    const auto& srcPrecision = srcDesc.getPrecision();
+    const auto& dstPrecision = dstDesc.getPrecision();
+
+    IE_ASSERT(idxPrecision == Precision::I32 ||
+              idxPrecision == Precision::FP16);  // TODO: remove FP16 case as obsolete for `index`
+    IE_ASSERT(srcPrecision == Precision::I32 ||
+              srcPrecision == Precision::FP16);
+    IE_ASSERT(srcPrecision == dstPrecision);
+
+    const void *idxData = srcIdx->cbuffer();
+    const void *srcData = srcDct->cbuffer();
+    void *dstData = dst->buffer();
+    ASSERT_NE(idxData, nullptr);
+    ASSERT_NE(srcData, nullptr);
+    ASSERT_NE(dstData, nullptr);
+
+    const size_t srcSize = srcIdx->size();
+
+    std::vector<size_t> dims = srcDct->getTensorDesc().getDims();
+    std::reverse(dims.begin(), dims.end());
+
+    const int axisInv = dims.size() - 1 - axis;
+
+    //  Find number of dictionaries, index range and data length
+    size_t numDictionaries = 1;
+    for (size_t i = axisInv + 1; i < dims.size(); i++)
+        numDictionaries *= dims[i];
+    size_t indexRange = dims[axisInv];
+    size_t dataLength = 1;
+    for (size_t i = 0; i < axisInv; i++)
+        dataLength *= dims[i];
+
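+    //  Example (hypothetical [4, 3] dictionary, axis = 0): numDictionaries = 1,
+    //  indexRange = 4, dataLength = 3, so each index selects one 3-element row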
+    //  The gathering process
+    for (size_t i = 0; i < srcSize; i++) {
+        const int idx = idxPrecision == Precision::FP16 ?
+                                            static_cast<int>(PrecisionUtils::f16tof32(
+                                                reinterpret_cast<const ie_fp16*>(idxData)[i])
+                                            ) :
+                                                reinterpret_cast<const int32_t*>(idxData)[i];
+
+        //  Index clipping
+        if (0 <= idx && idx < indexRange)
+        {
+            //  Copying data to destination from Dictionary
+            for (size_t j = 0; j < numDictionaries; j++) {
+                if (dstPrecision == Precision::FP16) {
+                    std::copy_n(reinterpret_cast<const ie_fp16*>(srcData) + dataLength * (idx + j * indexRange),
+                                dataLength,
+                                reinterpret_cast<ie_fp16*>(dstData) + dataLength * (i + j * srcSize));
+                } else {
+                    std::copy_n(reinterpret_cast<const int32_t*>(srcData) + dataLength * (idx + j * indexRange),
+                                dataLength,
+                                reinterpret_cast<int32_t*>(dstData) + dataLength * (i + j * srcSize));
+                }
+            }
+        } else {
+            for (size_t j = 0; j < numDictionaries; j++) {
+                if (dstPrecision == Precision::FP16) {
+                    std::fill_n(reinterpret_cast<ie_fp16*>(dstData) + dataLength * (i + j * srcSize),
+                                dataLength,
+                                0);
+                } else {
+                    std::fill_n(reinterpret_cast<int32_t*>(dstData) + dataLength * (i + j * srcSize),
+                                dataLength,
+                                0);
+                }
+            }
+        }
+    }
+}
+
+void ref_scatter_elements_update(InferenceEngine::Blob::Ptr& input,
+                                 InferenceEngine::Blob::Ptr& indices,
+                                 InferenceEngine::Blob::Ptr& updates,
+                                                  const int  axis,
+                                 InferenceEngine::Blob::Ptr& output)
+{
+    ASSERT_NE(input, nullptr);
+    ASSERT_NE(indices, nullptr);
+    ASSERT_NE(updates, nullptr);
+    ASSERT_NE(output, nullptr);
+
+    const auto& inputDesc = input->getTensorDesc();
+    const auto& indicesDesc = indices->getTensorDesc();
+    const auto& updatesDesc = updates->getTensorDesc();
+    const auto& outputDesc = output->getTensorDesc();
+
+    const auto& inputPrecision = inputDesc.getPrecision();
+    const auto& indicesPrecision = indicesDesc.getPrecision();
+    const auto& updatesPrecision = updatesDesc.getPrecision();
+    const auto& outputPrecision = outputDesc.getPrecision();
+
+    IE_ASSERT(inputPrecision == Precision::I32 ||
+              inputPrecision == Precision::FP16);
+    IE_ASSERT(indicesPrecision == Precision::I32);
+    IE_ASSERT(updatesPrecision == inputPrecision);
+    IE_ASSERT(outputPrecision == inputPrecision);
+
+    const void *inputData = input->cbuffer();
+    const void *indicesData = indices->cbuffer();
+    const void *updatesData = updates->cbuffer();
+    void *outputData = output->buffer();
+
+    ASSERT_NE(inputData, nullptr);
+    ASSERT_NE(indicesData, nullptr);
+    ASSERT_NE(updatesData, nullptr);
+    ASSERT_NE(outputData, nullptr);
+
+    std::vector<size_t> inputShape = inputDesc.getDims();
+    std::vector<size_t> indicesShape = indicesDesc.getDims();
+    std::vector<size_t> updatesShape = updatesDesc.getDims();
+    std::vector<size_t> outputShape = outputDesc.getDims();
+
+    ASSERT_EQ(indicesShape.size(), inputShape.size());
+    ASSERT_EQ(updatesShape.size(), inputShape.size());
+    ASSERT_EQ(outputShape.size(), inputShape.size());
+
+    const int ndims = inputShape.size();
+
+    for (int i = 0; i < ndims; i++) {
+        ASSERT_EQ(outputShape[i], inputShape[i]);
+        ASSERT_LE(indicesShape[i], inputShape[i]);
+        ASSERT_EQ(indicesShape[i], updatesShape[i]);
+    }
+
+    //
+    // Copy `input` to `output`
+    //
+
+    const int inputSize = input->size();
+
+    const int bpp = inputPrecision == Precision::I32 ? sizeof(int32_t) : sizeof(ie_fp16);
+
+    std::copy_n(reinterpret_cast<const uint8_t*>(inputData),
+                inputSize * bpp,
+                reinterpret_cast<uint8_t*>(outputData));
+
+    //
+    // Copy `updates` to `output`
+    //
+
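+    // `offset` flattens an N-dim coordinate into a row-major linear index;
+    // `increment` advances a coordinate odometer-style, last dimension fastest.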
+    const auto offset = [] (const std::vector<size_t>& coord,
+                            const std::vector<size_t>& shape) {
+                                int offset = 0;
+                                int stride = 1;
+                                int ndims = shape.size();
+                                for (int i = ndims - 1; i >= 0; i--)
+                                {
+                                   offset += coord[i] * stride;
+                                   stride *= shape[i];
+                                }
+                                return offset;
+                            };
+
+    const auto increment = [] (std::vector<size_t>& coord,
+                         const std::vector<size_t>& shape) {
+                             int ndims = shape.size();
+                             for (int i = ndims - 1; i >= 0; i--)
+                             {
+                                 coord[i]++;
+                                 if (coord[i] < shape[i]) {
+                                     break;
+                                 }
+                                 coord[i] = 0;
+                             }
+                         };
+
+    std::vector<size_t> indicesCoord(ndims, 0);
+
+    const int indicesSize = indices->size();
+
+    for (int i = 0; i < indicesSize; i++) {
+        const int indicesOffset = offset(indicesCoord, indicesShape);
+
+        int n = reinterpret_cast<const int32_t*>(indicesData)[indicesOffset];
+
+        ASSERT_GE(n, 0);
+        ASSERT_LT(n, outputShape[axis]);
+
+        std::vector<size_t> outputCoord = indicesCoord;
+        outputCoord[axis] = n;
+
+        const int outputOffset = offset(outputCoord, outputShape);
+
+        if (outputPrecision == Precision::I32) {
+            const int32_t value = reinterpret_cast<const int32_t*>(updatesData)[indicesOffset];
+            reinterpret_cast<int32_t*>(outputData)[outputOffset] = value;
+        } else /* if (outputPrecision == Precision::FP16) */ {
+            const ie_fp16 value = reinterpret_cast<const ie_fp16*>(updatesData)[indicesOffset];
+            reinterpret_cast<ie_fp16*>(outputData)[outputOffset] = value;
+        }
+
+        increment(indicesCoord, indicesShape);
+    }
+}
+
+void ref_innerproduct_wrap(const InferenceEngine::Blob::Ptr src,
+                           InferenceEngine::Blob::Ptr dst,
+                           const uint16_t *weights,
+                           size_t weightsSize,
+                           const uint16_t *biases,
+                           size_t biasSize,
+                           const ParamsStruct& params)
+{
+    uint32_t OC = 1;
+    if (!params.empty()) {
+        auto iter = params.find(inner_product_param);
+        if (iter != params.end()) {
+            OC = std::stol(iter->second);
+        }
+    }
+    ref_innerproduct(src, dst, weights, weightsSize, biases, biasSize, OC);
+}
+
+void ref_innerproduct(const Blob::Ptr src,
+                      Blob::Ptr dst,
+                      const uint16_t *weights,
+                      size_t weightsSize,
+                      const uint16_t *biases,
+                      size_t biasSize,
+                      uint32_t OC) {
+
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+    ASSERT_GT(weightsSize, 0);
+    size_t IW = 1;
+    size_t IH = 1;
+    size_t IC = 1;
+    size_t I_N = 1;
+    auto tensorDesc = src->getTensorDesc();
+    auto dims = tensorDesc.getDims();
+    switch(tensorDesc.getLayout()) {
+        case NCHW:
+        case NHWC:
+            IW = dims[3];
+            IH = dims[2];
+            IC = dims[1];
+            I_N = dims[0];
+            break;
+        case NC:
+            I_N = dims[0];
+            IC  = dims[1];
+            break;
+        case HW:
+            IH = dims[0];
+            IW  = dims[1];
+            break;
+        default:
+            THROW_IE_EXCEPTION << "Unsupported layout: " << tensorDesc.getLayout();
+    }
+    const uint16_t *src_data = static_cast<uint16_t*>(src->buffer());
+    const uint16_t *weights_data = weights;
+    uint16_t *dst_data = dst->buffer();
+
+    uint16_t *weights_hwck = new uint16_t [IW * IH * IC * OC];
+    if (tensorDesc.getLayout() == NCHW ||
+        tensorDesc.getLayout() == NHWC) {
+        ASSERT_NE(weights_hwck, nullptr);
+        kchw_to_hwck(weights_data, weights_hwck, (IW * IH), IC, OC);
+        for (size_t on = 0; on < I_N; on++) {
+            size_t offset = OC * on;
+            for (size_t oc = 0; oc < OC; oc++) {
+                float sum_f = 0.0f;
+                if (biases)
+                    sum_f = PrecisionUtils::f16tof32(biases[oc]);
+
+                for (size_t ic = 0; ic < IC; ic++) {
+                    for (size_t kh = 0; kh < IH; kh++) {
+                        for (size_t  kw = 0; kw < IW; kw++) {
+                            size_t iidx = ic * IH * IW + kh * IW + kw + on * IH * IW * IC;
+                            size_t widx = ic * IH * IW + kh * IW + kw;
+                            float mult = (PrecisionUtils::f16tof32(src_data[iidx]) * PrecisionUtils::f16tof32(weights_hwck[widx * OC + oc]));
+                            sum_f = sum_f + mult;
+                        }
+                    }
+                }
+                dst_data[oc + offset] = PrecisionUtils::f32tof16(sum_f);
+            }
+        }
+    } else if (tensorDesc.getLayout() == HW) {
+        for (size_t kh = 0; kh < IH; kh++) {
+            for (size_t oc = 0; oc < OC; oc++) {
+                float sum_f = 0.0f;
+                if (biases)
+                    sum_f = PrecisionUtils::f16tof32(biases[oc]);
+                for (size_t  kw = 0; kw < IW; kw++) {
+                    size_t iidx = kh * IW + kw;
+                    float mult = (PrecisionUtils::f16tof32(src_data[iidx]) * PrecisionUtils::f16tof32(weights_data[oc * IW + kw]));
+                    sum_f = sum_f + mult;
+                }
+                dst_data[oc + kh * OC] = PrecisionUtils::f32tof16(sum_f);
+            }
+        }
+    }
+    delete[] weights_hwck;
+}
+
+void ref_log_wrap(const InferenceEngine::Blob::Ptr& src,
+                  InferenceEngine::Blob::Ptr& dst,
+                  const ParamsStruct& params) {
+    ASSERT_TRUE(params.empty());
+    ref_log(src, dst);
+}
+
+void ref_log(const InferenceEngine::Blob::Ptr& src,
+             InferenceEngine::Blob::Ptr& dst) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+    ASSERT_EQ(src->getTensorDesc().getDims().size(), dst->getTensorDesc().getDims().size());
+
+    auto srcData = src->buffer().as<ie_fp16*>();
+    auto dstData = dst->buffer().as<ie_fp16*>();
+    ASSERT_NE(srcData, nullptr);
+    ASSERT_NE(dstData, nullptr);
+
+    auto logf16 = [](ie_fp16 value) {
+        return PrecisionUtils::f32tof16(logf(PrecisionUtils::f16tof32(value)));
+    };
+    std::transform(srcData, srcData + src->size(), dstData, logf16);
+}
+
+void ref_exp_wrap(const InferenceEngine::Blob::Ptr& src,
+                  InferenceEngine::Blob::Ptr& dst,
+                  const ParamsStruct& params) {
+    ASSERT_TRUE(params.empty());
+    ref_exp(src, dst);
+}
+
+void ref_exp(const InferenceEngine::Blob::Ptr& src,
+             InferenceEngine::Blob::Ptr& dst) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+    ASSERT_EQ(src->getTensorDesc().getDims().size(), dst->getTensorDesc().getDims().size());
+
+    auto srcData = src->buffer().as<ie_fp16*>();
+    auto dstData = dst->buffer().as<ie_fp16*>();
+    ASSERT_NE(srcData, nullptr);
+    ASSERT_NE(dstData, nullptr);
+
+    auto expf16 = [](ie_fp16 value) {
+        return PrecisionUtils::f32tof16(expf(PrecisionUtils::f16tof32(value)));
+    };
+    std::transform(srcData, srcData + src->size(), dstData, expf16);
+}
+
+template <typename T>
+void ref_Permute(const Blob::Ptr src, Blob::Ptr dst, std::vector<size_t> permutation) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+
+    auto iter = [](SizeVector& ind, const TensorDesc& desc) {
+        const auto& dims = desc.getDims();
+
+        const auto idx = static_cast<int>(dims.size() - 1);
+        for (auto i = idx; i >= 0; --i) {
+            if (++ind[i] < dims[i]) return true;
+            ind[i] = 0;
+        }
+
+        return false;
+    };
+    const auto& srcDims = src->getTensorDesc().getDims();
+    const auto& dstDims = dst->getTensorDesc().getDims();
+
+    ASSERT_EQ(srcDims.size(), dstDims.size());
+    ASSERT_EQ(srcDims.size(), permutation.size());
+
+    const auto num_dims = srcDims.size();
+
+    for (size_t i = 0; i < num_dims; i++) {
+        ASSERT_EQ(srcDims[permutation[i]], dstDims[i]);
+    }
+
+    const auto srcPtr = src->buffer().as<T*>();
+    const auto dstPtr = dst->buffer().as<T*>();
+
+    SizeVector srcIndex(num_dims);  // N-dimensional
+    do {
+        SizeVector dstIndex(num_dims);
+        for (size_t i = 0; i < num_dims; i++) {
+            dstIndex[i] = srcIndex[permutation[i]];
+        }
+        const auto srcOffset = src->getTensorDesc().offset(srcIndex);
+        const auto dstOffset = dst->getTensorDesc().offset(dstIndex);
+        dstPtr[dstOffset] = srcPtr[srcOffset];
+    } while (iter(srcIndex, src->getTensorDesc()));
+}
+
+void ref_permute_wrap(const InferenceEngine::Blob::Ptr src,
+                      InferenceEngine::Blob::Ptr dst,
+                      const ParamsStruct& params) {
+    const auto precision = src->getTensorDesc().getPrecision();
+    SizeVector order;
+    if (!params.empty()) {
+        auto iter = params.find("order");
+        if (iter != params.end()) {
+            std::string param = iter->second;
+            auto pos = std::string::npos;
+            do {
+                pos = param.find_first_of(",");
+                if (pos == std::string::npos) {
+                    if (!param.empty())
+                        order.push_back(std::stoi(param));
+                    break;
+                }
+                std::string val = param.substr(0, pos);
+                order.push_back(std::stoi(val));
+                param = param.substr(pos + 1, param.size() - 1);
+            } while (pos != std::string::npos);
+        }
+    }
+    switch (precision) {
+        case InferenceEngine::Precision::I32:
+            ref_Permute<int>(src, dst, order);
+            break;
+        case InferenceEngine::Precision::FP16:
+            ref_Permute<ie_fp16>(src, dst, order);
+            break;
+        default:
+            THROW_IE_EXCEPTION << "Unsupported precision";
+    }
+}
+
+void ref_pooling_wrap(const InferenceEngine::Blob::Ptr src,
+                      InferenceEngine::Blob::Ptr dst,
+                      const ParamsStruct &params) {
+    common_ref_pool_wrap<ie_fp16>({ src }, dst, params);
+}
+
+void ref_PReLU(const Blob::Ptr src,
+               Blob::Ptr dst,
+               const uint16_t *weights,
+               size_t weightsSize) {
+    ASSERT_EQ(src->getTensorDesc().getDims().size(), dst->getTensorDesc().getDims().size());
+    ie_fp16 *srcData = static_cast<ie_fp16*>(src->buffer());
+    ie_fp16 *dstData = static_cast<ie_fp16*>(dst->buffer());
+    ASSERT_NE(srcData, nullptr);
+    ASSERT_NE(dstData, nullptr);
+    // dst = max(src, 0) + w * min(src, 0)
+    for (size_t indx = 0; indx < src->size(); indx++) {
+        float w = PrecisionUtils::f16tof32(weights[indx % weightsSize]);
+        float src = PrecisionUtils::f16tof32(srcData[indx]);
+        float dst = std::max(src, 0.f) + w * std::min(src, 0.f);
+        dstData[indx] = PrecisionUtils::f32tof16(dst);
+    }
+}
+
+void ref_PReLU_wrap(const InferenceEngine::Blob::Ptr src,
+                    InferenceEngine::Blob::Ptr dst,
+                    const uint16_t *weights,
+                    size_t weightsSize,
+                    const uint16_t *biases,
+                    size_t biasSize,
+                    const ParamsStruct& params) {
+    int channel_shared = 0;
+    if (!params.empty()) {
+        auto iter = params.find(PRELU_PARAM);
+        if (iter != params.end()) {
+            channel_shared = std::stoi(iter->second);
+        }
+    }
+
+    size_t get_weightsSize = 1;
+    if (channel_shared == 0) {
+        if (src->getTensorDesc().getDims().size() == 2) {
+            get_weightsSize = src->getTensorDesc().getDims().back();
+        } else {
+            int32_t OW = 0;
+            int32_t OH = 0;
+            int32_t OC = 0;
+            get_dims(src, OW, OH, OC);
+            get_weightsSize = OC;
+        }
+    }
+    ASSERT_EQ(get_weightsSize, weightsSize);
+    ref_PReLU(src, dst, weights, weightsSize);
+}
+
+void ref_RegionYolo_wrap(InferenceEngine::Blob::Ptr inTensor,
+                         InferenceEngine::Blob::Ptr outTensor,
+                         const ParamsStruct& params) {
+
+    ASSERT_FALSE(params.empty());
+    /* default parameters */
+    int coords    = 4;
+    int classes   = 20;
+    int num       = 5;
+    int maskSize  = 5;
+    int doSoftmax = 1;
+
+    auto iter = params.find("coords");
+    if (iter != params.end()) {
+        coords = std::stoi(iter->second);
+    }
+    iter = params.find("classes");
+    if (iter != params.end()) {
+        classes = std::stoi(iter->second);
+    }
+    iter = params.find("num");
+    if (iter != params.end()) {
+        num = std::stoi(iter->second);
+    }
+    iter = params.find("do_softmax");
+    if (iter != params.end()) {
+        doSoftmax = std::stoi(iter->second);
+    }
+    iter = params.find("mask");
+    if (iter != params.end()) {
+
+        std::vector<int> order;
+        std::string param = iter->second;
+        auto pos = std::string::npos;
+        do {
+            pos = param.find_first_of(",");
+            if (pos == std::string::npos) {
+                if (!param.empty())
+                    order.push_back(std::stoi(param));
+                break;
+            }
+            std::string val = param.substr(0, pos);
+            order.push_back(std::stoi(val));
+            param = param.substr(pos + 1, param.size() - 1);
+        } while (pos != std::string::npos);
+
+        maskSize = order.size();
+    }
+    ref_RegionYolo(inTensor, outTensor, coords, classes, num, maskSize, doSoftmax);
+}
+
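+// Flat offset of spatial cell (location % (w*h)) of anchor (location / (w*h))
+// at channel `entry`, given the planar [num][coords+classes+1][h*w] layout.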
+static int entry_index(int w, int h, int outputs, int coords_classes, int batch, int location, int entry)
+{
+    int n = location / (w * h);
+    int loc = location % (w * h);
+    return batch * outputs + n * w * h * coords_classes + entry * w * h + loc;
+}
+
+static inline uint16_t logistic_activate(float x)
+{
+    float res = 1./(1. + exp(-x));
+    return PrecisionUtils::f32tof16(res);
+}
+
+static void activate_array(uint16_t *x, const int n)
+{
+    int i;
+    for(i = 0; i < n; ++i){
+        x[i] = logistic_activate(PrecisionUtils::f16tof32(x[i]));
+    }
+}
+
+static void softmax_FP16(const uint16_t *input, int n,
+                         float temp, int stride,
+                         uint16_t *output)
+{
+    int i;
+    float sum = 0;
+    float largest = -100.0;
+    std::vector<float> data(n);
+    for(i = 0; i < n; ++i){
+        data[i] = PrecisionUtils::f16tof32(input[i*stride]);
+        if(data[i] > largest)
+            largest = data[i];
+    }
+    for(i = 0; i < n; ++i){
+        float e = exp(data[i]/temp - largest/temp);
+        sum += e;
+        data[i] = e;
+    }
+    for(i = 0; i < n; ++i){
+        float tmp = data[i];
+        tmp /= sum;
+        output[i*stride] = PrecisionUtils::f32tof16(tmp);
+    }
+}
+
+static void softmax_cpu_FP16(const uint16_t *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, uint16_t *output)
+{
+    int g, b;
+    for(b = 0; b < batch; ++b){
+        for(g = 0; g < groups; ++g){
+            softmax_FP16(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset);
+        }
+    }
+}
+
+void ref_RegionYolo(const InferenceEngine::Blob::Ptr src,
+                    InferenceEngine::Blob::Ptr dst,
+                    int coords,
+                    int classes,
+                    int num,
+                    int maskSize,
+                    int doSoftmax) {
+    if (!doSoftmax) {
+        num = maskSize;
+    }
+
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+    uint16_t *srcData = src->buffer();
+    uint16_t *dstData = dst->buffer();
+    ASSERT_NE(srcData, nullptr);
+    ASSERT_NE(dstData, nullptr);
+    auto dims = src->getTensorDesc().getDims();
+    ASSERT_EQ(src->getTensorDesc().getPrecision(), InferenceEngine::Precision::FP16);
+    ASSERT_EQ(dst->getTensorDesc().getPrecision(), InferenceEngine::Precision::FP16);
+    int iw = dims[3];
+    int ih = dims[2];
+    int ic = dims[1];
+    std::vector<uint16_t> ref_data(src->size());
+    uint16_t* inputBlobRawDataFp16 = ref_data.data();
+    switch(src->getTensorDesc().getLayout()) {
+        case InferenceEngine::NCHW:
+            ie_memcpy(ref_data.data(), ref_data.size() * sizeof(uint16_t), srcData, src->size() * sizeof(uint16_t));
+            break;
+        case InferenceEngine::NHWC:
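+            // Repack interleaved NHWC input into planar NCHW so the region
+            // math below can address whole channel planes via entry_index().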
+            for (int h = 0 ; h < ih; ++h) {
+                for (int w = 0 ; w < iw; ++w) {
+                    for (int c = 0 ; c < ic; ++c) {
+                        int dst_i = w + iw * h + iw * ih * c;
+                        int src_i = c + ic * w + iw * ic * h;
+                        inputBlobRawDataFp16[dst_i] = srcData[src_i];
+                    }
+                }
+            }
+            break;
+        default:
+            THROW_IE_EXCEPTION << "Unsupported layout: " << src->getTensorDesc().getLayout();
+
+    }
+    ie_memcpy(dstData, dst->byteSize(), ref_data.data(), src->size() * sizeof(uint16_t));
+
+    int coords_classes = coords + classes + 1;
+    int batch = 1;
+    int outputs = num * ih * iw * coords_classes;
+    int inWidth = iw;
+    int inHeight = ih;
+    for (int b = 0; b < batch; ++b) {
+        for(int n = 0; n < num; ++n) {
+            int index = entry_index(inWidth, inHeight, outputs, coords_classes, b, n * inWidth * inHeight, 0);
+            activate_array(dstData + index, 2 * inWidth * inHeight);
+            index = entry_index(inWidth, inHeight, outputs, coords_classes, b, n * inHeight * inWidth, coords);
+            activate_array(dstData + index, inWidth * inHeight);
+        }
+    }
+
+    if (doSoftmax) {
+        int index = entry_index(inWidth, inHeight, outputs, coords_classes, 0, 0, coords + 1);
+        softmax_cpu_FP16(inputBlobRawDataFp16 + index, classes + 0, batch * num, outputs / num, inHeight * inWidth, 1, inHeight * inWidth, 1, dstData + index);
+    } else {
+        for (int b = 0; b < batch; ++b) {
+            for(int n = 0; n < num; ++n) {
+                for(int k = 0; k < classes; ++k) {
+                    int index = entry_index(inWidth, inHeight, outputs, coords_classes, b, n * inWidth * inHeight, coords + 1 + k);
+                    activate_array(dstData + index, inWidth * inHeight);
+                }
+            }
+        }
+    }
+}
+
+void ref_reshape_wrap(InferenceEngine::Blob::Ptr src,
+                      InferenceEngine::Blob::Ptr dst,
+                      const ParamsStruct& params) {
+    //ASSERT_TRUE(params.empty());
+    ref_reshape(src, dst);
+}
+
+void ref_reshape(const Blob::Ptr src,
+                 Blob::Ptr dst) {
+    ASSERT_EQ(src->byteSize(), dst->byteSize());
+
+    const uint8_t* srcPtr = src->buffer();
+    uint8_t* dstPtr = dst->buffer();
+
+    ASSERT_NE(srcPtr, nullptr);
+    ASSERT_NE(dstPtr, nullptr);
+
+    std::copy_n(srcPtr, src->byteSize(), dstPtr);
+}
+
+void ref_sigmoid_wrap(InferenceEngine::Blob::Ptr src,
+                      InferenceEngine::Blob::Ptr dst,
+                      const ParamsStruct& params) {
+    ASSERT_TRUE(params.empty());
+    ref_sigmoid(src, dst);
+}
+
+void ref_sigmoid(const InferenceEngine::Blob::Ptr src,
+                 InferenceEngine::Blob::Ptr dst) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+    ASSERT_EQ(src->getTensorDesc().getDims().size(), dst->getTensorDesc().getDims().size());
+    uint16_t *srcData = src->buffer();
+    uint16_t *dstData = dst->buffer();
+    ASSERT_NE(srcData, nullptr);
+    ASSERT_NE(dstData, nullptr);
+    for (size_t indx = 0; indx < src->size(); indx++) {
+        dstData[indx] =
+                PrecisionUtils::f32tof16(1.0f /(1.0f + exp(-PrecisionUtils::f16tof32(srcData[indx]))));
+    }
+}
+
+int getOffset(const SizeVector& coordinates, const SizeVector& strides) {
+    int offset = 0;
+    for(int i = 0; i < coordinates.size(); ++i) {
+        offset += coordinates[i] * strides[i];
+    }
+    return offset;
+}
+
+void incrementCoordinates(SizeVector& coordinates, const SizeVector& dims) {
+    for(int d = 0, nAdd = 1; d < coordinates.size() && nAdd == 1 ; ++d)
+    {
+        coordinates[d] = (coordinates[d] == dims[d] - 1) ? 0 : coordinates[d] + 1;
+        nAdd = (coordinates[d] == 0) ? 1 : 0;
+    }
+}
+
+void ref_softMax(const Blob::Ptr& src, Blob::Ptr& dst, int axis) {
+    SizeVector tensorSizes = src->getTensorDesc().getDims();
+    std::reverse(tensorSizes.begin(), tensorSizes.end());
+
+    SizeVector tensorStrides(tensorSizes.size());
+    axis = tensorSizes.size() - 1 - axis;
+    const ie_fp16 *src_data = src->cbuffer().as<const ie_fp16*>();
+    ie_fp16 *dst_data = dst->buffer().as<ie_fp16*>();
+    const ie_fp16 *srcLine;
+    ie_fp16 *dstLine;
+
+    size_t totalElements = 1;
+    size_t totalLines = 1;
+
+    for (int i = 0; i < tensorSizes.size(); ++i) {
+        tensorStrides[i] = totalElements;
+        totalElements *= tensorSizes[i];
+    }
+    size_t axisSize = tensorSizes[axis];
+    size_t axisStride = tensorStrides[axis];
+    tensorSizes.erase(tensorSizes.begin() + axis);
+    tensorStrides.erase(tensorStrides.begin() + axis);
+    totalLines = totalElements / axisSize;
+
+    std::vector<float> temp(axisSize);
+
+    SizeVector tensorCoordinates(tensorSizes.size());
+
+    for (int nLine = 0; nLine < totalLines; ++nLine) {
+        int offset = getOffset(tensorCoordinates, tensorStrides);
+
+        srcLine = src_data + offset;
+        dstLine = dst_data + offset;
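+        // Numerically stable softmax: subtract the per-line maximum before
+        // exponentiation so that exp() cannot overflow.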
+        float largest = std::numeric_limits<float>::lowest();
+        for (int i2 = 0; i2 < axisSize; ++i2) {
+            int ind = i2 * axisStride;
+            float val = PrecisionUtils::f16tof32(srcLine[ind]);
+            largest = std::max(val, largest);
+        }
+        float sum = 0.0f;
+        for (int i2 = 0; i2 < axisSize; ++i2) {
+            int ind = i2 * axisStride;
+            float val = PrecisionUtils::f16tof32(srcLine[ind]);
+            temp[i2] = std::exp(val - largest);
+            sum += temp[i2];
+        }
+        for (int i2 = 0; i2 < axisSize; ++i2) {
+            int ind = i2 * axisStride;
+            dstLine[ind] = PrecisionUtils::f32tof16(temp[i2] / sum);
+        }
+        incrementCoordinates(tensorCoordinates, tensorSizes);
+    }
+}
+
+void ref_tanh_wrap(InferenceEngine::Blob::Ptr src,
+                   InferenceEngine::Blob::Ptr dst,
+                   const ParamsStruct& params) {
+    ASSERT_TRUE(params.empty());
+    ref_tanh(src, dst);
+}
+
+void ref_tanh(const InferenceEngine::Blob::Ptr src,
+              InferenceEngine::Blob::Ptr dst) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+    ASSERT_EQ(src->getTensorDesc().getDims().size(), dst->getTensorDesc().getDims().size());
+    uint16_t *srcData = src->buffer();
+    uint16_t *dstData = dst->buffer();
+    ASSERT_NE(srcData, nullptr);
+    ASSERT_NE(dstData, nullptr);
+    for (size_t indx = 0; indx < src->size(); indx++) {
+        dstData[indx] =
+                PrecisionUtils::f32tof16(tanh(PrecisionUtils::f16tof32(srcData[indx])));
+    }
+}
+
+namespace reduceImpl
+{
+    template<typename DataType>
+    DataType& element(Blob::Ptr b, const SizeVector& indices)
+    {
+        int offset = 0;
+        DataType* data = b->buffer().as<DataType*>();
+
+        const int ndims = indices.size();
+        if (ndims > 0) {
+            const SizeVector& dims = b->getTensorDesc().getDims();
+            // Dims & indices are stored as in IE (in reverse order)
+            // so [0] is the highest(last) and [ndims-1] is the lowest(first) dim.
+            // We changed the only index calculation code instead of reversing all blobs' dims
+            offset = indices[0];
+            for (int i = 1; i < ndims; ++i)
+                offset = offset * dims[i] + indices[i];
+        }
+
+        return data[offset];
+    }
+
+    void increment1stCoord(SizeVector& indices, const SizeVector& ranges)
+    {
+        ASSERT_EQ(indices.size(), ranges.size());
+        int ndims = indices.size();
+        for (int i = 0; i < ndims; ++i)
+        {
+            ++indices[i];
+            if (indices[i] < ranges[i]) break;
+            indices[i] = 0;
+        }
+    }
+
+    template <class Action>
+    void forEach(int ndims, const SizeVector& ranges, Action action)
+    {
+        SizeVector indices(ndims, 0);
+        const int total = std::accumulate(ranges.begin(), ranges.end(), 1, std::multiplies<int>());
+
+        for (int n = 0; n < total; ++n)
+        {
+            action(indices);
+            increment1stCoord(indices, ranges);
+        }
+
+        for (int i = 0; i < ndims; ++i)
+        {
+            ASSERT_EQ(indices[i], 0); // internal iterations count mismatch
+        }
+    }
+
+    uint32_t list2mask(int N, int K, const int32_t L[])
+    {
+        uint32_t mask = 0;
+        for (int j = 0; j < K; ++j)
+        {
+            int i = L[j];
+            if ((i >= 0) && (i < N))
+                mask |= (1 << i);
+        }
+        return mask;
+    }
+
+    void split(int N, uint32_t mask, const SizeVector& D_dims, SizeVector& DR_dims, SizeVector& DI_dims)
+    {
+        int jr = 0, ji = 0;
+        for (int i = 0; i < N; ++i)
+        {
+            if (mask & (1 << i))
+                DR_dims[jr++] = D_dims[i];
+            else
+                DI_dims[ji++] = D_dims[i];
+        }
+    }
+
+    SizeVector merge(int N, uint32_t mask, const SizeVector& DR_dims, const SizeVector& DI_dims)
+    {
+        SizeVector D_dims(N);
+        int jr = 0, ji = 0;
+        for (int i = 0; i < N; ++i)
+        {
+            if (mask & (1 << i))
+                D_dims[i] = DR_dims[jr++];
+            else
+                D_dims[i] = DI_dims[ji++];
+        }
+        return D_dims;
+    }
+
+    template<typename DataType>
+    void copyReduce(const Blob::Ptr& in, Blob::Ptr& out, IReduceKernel<DataType>* op)
+    {
+        const SizeVector& in_dims = in->getTensorDesc().getDims();
+        const int N = in_dims.size();
+
+        forEach(N, in_dims, [&](const SizeVector& i) {
+            element<DataType>(out, i) = op->copy(element<DataType>(in, i));
+        });
+    }
+
+    template<typename DataType>
+    void fullReduce(const Blob::Ptr& in, Blob::Ptr& out, IReduceKernel<DataType>* op)
+    {
+        const SizeVector& in_dims = in->getTensorDesc().getDims();
+        const int N = in_dims.size();
+        DataType* outData = out->buffer().as<DataType*>();
+
+        op->init();
+        forEach(N, in_dims, [&](const SizeVector& i) {
+            op->accumulate(element<DataType>(in, i));
+        });
+        outData[0] = op->result();
+    }
+
+    template<typename DataType>
+    void partReduce(const Blob::Ptr& in, Blob::Ptr& out, int K, const int32_t L[], bool keep_dims, IReduceKernel<DataType>* op)
+    {
+        const SizeVector& in_dims = in->getTensorDesc().getDims();
+        int N = in_dims.size();
+
+        unsigned mask = list2mask(N, K, L);
+
+        SizeVector DR_dims(K), DI_dims(N - K);
+        split(N, mask, in_dims, DR_dims, DI_dims);
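+        // DR holds the K reduced dims and DI the N - K kept dims: for every
+        // coordinate of the kept dims, accumulate across all reduced coordinates.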
+
+        SizeVector ZR_dims(K, 0);
+
+        const int DI_total = std::accumulate(DI_dims.begin(), DI_dims.end(), 1, std::multiplies<int>());
+        const int DR_total = std::accumulate(DR_dims.begin(), DR_dims.end(), 1, std::multiplies<int>());
+
+        SizeVector di_dims(N - K, 0);
+        for (int di_idx = 0; di_idx < DI_total; ++di_idx) {
+            op->init();
+            SizeVector dr_dims(K, 0);
+            for (int dr_idx = 0; dr_idx < DR_total; ++dr_idx) {
+                SizeVector id_dims = merge(N, mask, dr_dims, di_dims);
+                op->accumulate(element<DataType>(in, id_dims));
+                increment1stCoord(dr_dims, DR_dims);
+            }
+            if (keep_dims) {
+                SizeVector od_dims = merge(N, mask, ZR_dims, di_dims);
+                element<DataType>(out, od_dims) = op->result();
+            } else {
+                element<DataType>(out, di_dims) = op->result();
+            }
+            increment1stCoord(di_dims, DI_dims);
+        }
+    }
+
+    template<typename DataType>
+    void refReduce(const Blob::Ptr& in, Blob::Ptr& out, int K, const int32_t L[], bool keep_dims, IReduceKernel<DataType>* op)
+    {
+        const SizeVector& in_dims = in->getTensorDesc().getDims();
+        const int N = in_dims.size();
+
+        if (K <= 0) {
+            copyReduce(in, out, op);
+            return;
+        }
+        if (K >= N) {
+            fullReduce(in, out, op);
+            return;
+        }
+        partReduce(in, out, K, L, keep_dims, op);
+    }
+}
+
+template<>
+void ref_reduce<ie_fp16>(const Blob::Ptr& in,
+                         const Blob::Ptr& axes,
+                         Blob::Ptr& out,
+                         int keep_dims,
+                         IReduceKernel<ie_fp16>* op)
+{
+    ASSERT_NE(in, nullptr);
+    ASSERT_NE(axes, nullptr);
+    ASSERT_NE(out, nullptr);
+
+    const int16_t* inData = in->cbuffer().as<const int16_t*>();
+    int16_t* outData = out->buffer().as<int16_t*>();
+
+    ASSERT_NE(inData, nullptr);
+    ASSERT_NE(outData, nullptr);
+
+    const auto axesDims = axes->getTensorDesc().getDims();
+    ASSERT_EQ(axesDims.size(), 1);
+
+    const auto axesSize = axesDims[0];
+    const int32_t* axesData = axes->cbuffer().as<const int32_t*>();
+    ASSERT_TRUE(!(axesSize > 0) || (axesData != nullptr));
+
+    reduceImpl::refReduce(in, out, axesSize, axesData, keep_dims, op);
+}
+
+template<>
+void ref_reduce<int32_t>(const Blob::Ptr& in,
+                         const Blob::Ptr& axes,
+                         Blob::Ptr& out,
+                         int keep_dims,
+                         IReduceKernel<int32_t>* op)
+{
+    ASSERT_NE(in, nullptr);
+    ASSERT_NE(axes, nullptr);
+    ASSERT_NE(out, nullptr);
+
+    const int32_t* inData = in->cbuffer().as<const int32_t*>();
+    int32_t* outData = out->buffer().as<int32_t*>();
+
+    ASSERT_NE(inData, nullptr);
+    ASSERT_NE(outData, nullptr);
+
+    const auto axesDims = axes->getTensorDesc().getDims();
+    ASSERT_EQ(axesDims.size(), 1);
+
+    const auto axesSize = axesDims[0];
+    const int32_t* axesData = axes->cbuffer().as<const int32_t*>();
+    ASSERT_TRUE(!(axesSize > 0) || (axesData != nullptr));
+
+    reduceImpl::refReduce(in, out, axesSize, axesData, keep_dims, op);
+}
+
+namespace topk_impl {
+
+    // a pair of (value, index) to be sorted
+    typedef std::pair<float, int32_t> Pair;
+
+    // comparison function comp(a,b) should return True if a precedes b
+    typedef std::function<bool(const Pair&, const Pair&)> CompareFunction;
+
+    bool compareIndices(const Pair& a, const Pair& b) {
+        if (a.second < b.second) return true;
+        if (a.second > b.second) return false;
+
+        return true; // shouldn't occur since all indices are different
+    }
+
+    bool compareValuesMax(const Pair& a, const Pair& b) {
+        if (!(a.first <= b.first)) return true;
+        if (!(a.first >= b.first)) return false;
+
+        return compareIndices(a, b);
+    }
+
+    bool compareValuesMin(const Pair& a, const Pair& b) {
+        if (!(a.first >= b.first)) return true;
+        if (!(a.first <= b.first)) return false;
+
+        return compareIndices(a, b);
+    }
+
+    CompareFunction modeComparison(const std::string& modeString) {
+        if (modeString == "max")
+            return compareValuesMax;
+        if (modeString == "min")
+            return compareValuesMin;
+        THROW_IE_EXCEPTION << "Reference TopK can take only 'max' or 'min' for mode, but actually it has: " << modeString;
+    }
+
+    bool isIndicesSort(const std::string& sortString) {
+        if (sortString == "none")
+            return false;
+        if (sortString == "value")
+            return false;
+        if (sortString == "index")
+            return true;
+        THROW_IE_EXCEPTION << "Reference TopK can take only 'value', 'index' or 'none' for sort, but actually it has: " << sortString;
+    }
+
+    template <class Action>
+    void forEach(int ndims, const SizeVector& ranges, Action action) {
+        SizeVector indices(ndims, 0);
+        const auto total = std::accumulate(ranges.begin(), ranges.end(), 1, std::multiplies<int>());
+
+        for (int n = 0; n < total; ++n) {
+            action(indices);
+            for (int i = 0; i < ndims; ++i) {
+                ++indices[i];
+                if (indices[i] < ranges[i]) break;
+                indices[i] = 0;
+            }
+        }
+
+        for (int i = 0; i < ndims; ++i) {
+            ASSERT_EQ(indices[i], 0); // a non-zero counter means the iteration count didn't match 'total'
+        }
+    }
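+
+    // A worked example of the odometer loop above (illustration only): with
+    // ndims = 2 and ranges = {2, 3}, 'action' is called with the index
+    // vectors {0,0}, {1,0}, {0,1}, {1,1}, {0,2}, {1,2} -- the first dimension
+    // steps fastest, and after total = 6 calls every counter has wrapped
+    // back to zero, which the ASSERT_EQ above verifies.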
+
+    void refTopK(const int16_t* inValuesData, int16_t* outValuesData, int32_t* outIndicesData,
+                 const SizeVector& inDims, const SizeVector& outDims, int axis,
+                 CompareFunction compareValues, bool doIndicesSort) {
+        const auto ndims = static_cast<int>(inDims.size());
+        const int n = inDims[axis];
+        const int k = outDims[axis];
+
+        // iterate over all dims except axis
+        auto dims = inDims;
+        dims[axis] = 1;
+
+        // elementwise step to iterate along axis dim
+        const auto axisStep = std::accumulate(dims.begin() + (axis + 1), dims.end(), 1, std::multiplies<int>());
+
+        // data access
+        auto offset = [ndims](const SizeVector& dims, const SizeVector& indices) -> size_t {
+            size_t ofs = indices[0];
+            for (int i = 1; i < ndims; ++i)
+                ofs = ofs * dims[i] + indices[i];
+            return ofs;
+        };
+
+        std::vector<Pair> temp;
+        temp.reserve(n);
+        forEach(ndims, dims, [&](const SizeVector& id)
+            {
+                auto inOfs = offset(inDims, id);
+                auto outOfs = offset(outDims, id);
+
+                temp.clear();
+                for (int i = 0; i < n; ++i)
+                    temp.emplace_back(PrecisionUtils::f16tof32(inValuesData[inOfs + i * axisStep]), i);
+
+                std::partial_sort(temp.begin(), temp.begin() + k, temp.begin() + n, compareValues);
+                if (doIndicesSort)
+                    std::sort(temp.begin(), temp.begin() + k, compareIndices);
+
+                for (int i = 0; i < k; ++i) {
+                    outValuesData[outOfs + i * axisStep] = PrecisionUtils::f32tof16(temp[i].first);
+                    outIndicesData[outOfs + i * axisStep] = temp[i].second;
+                }
+            });
+    }
+
+} // namespace topk_impl
+
+void ref_topk(const InferenceEngine::Blob::Ptr& inValues,
+              const InferenceEngine::Blob::Ptr& inK,
+              InferenceEngine::Blob::Ptr outValues,
+              InferenceEngine::Blob::Ptr outIndices,
+              int axis,
+              const std::string& mode,
+              const std::string& sort) {
+    ASSERT_NE(inValues, nullptr);
+    ASSERT_NE(inK, nullptr);
+    ASSERT_NE(outValues, nullptr);
+    ASSERT_NE(outIndices, nullptr);
+
+    const auto inValuesData = inValues->cbuffer().as<const int16_t*>();
+    const auto inKData = inK->cbuffer().as<const int32_t*>();
+    auto outValuesData = outValues->buffer().as<int16_t*>();
+    auto outIndicesData = outIndices->buffer().as<int32_t*>();
+
+    ASSERT_NE(inValuesData, nullptr);
+    ASSERT_NE(inKData, nullptr);
+    ASSERT_NE(outValuesData, nullptr);
+    ASSERT_NE(outIndicesData, nullptr);
+
+    const auto inKDims = inK->getTensorDesc().getDims();
+    ASSERT_EQ(inKDims.size(), 1);
+    ASSERT_EQ(inKDims[0], 1);
+
+    const int k = inKData[0];
+
+    const auto inValuesDims = inValues->getTensorDesc().getDims();
+    const auto outValuesDims = outValues->getTensorDesc().getDims();
+    const auto outIndicesDims = outIndices->getTensorDesc().getDims();
+
+    const auto ndims = static_cast<int>(inValuesDims.size());
+    ASSERT_EQ(outValuesDims.size(), ndims);
+    ASSERT_EQ(outIndicesDims.size(), ndims);
+    ASSERT_EQ(outValuesDims, outIndicesDims);
+
+    ASSERT_TRUE((axis >= 0) && (axis < ndims));
+    ASSERT_EQ(outValuesDims[axis], k);
+    ASSERT_EQ(outIndicesDims[axis], k);
+
+    const int n = inValuesDims[axis];
+    ASSERT_LE(k, n);
+
+    topk_impl::refTopK(inValuesData, outValuesData, outIndicesData,
+                       inValuesDims, outValuesDims,
+                       axis, topk_impl::modeComparison(mode), topk_impl::isIndicesSort(sort));
+}
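+
+// A minimal usage sketch for ref_topk (illustration only; the blob setup and
+// shapes below are hypothetical, not part of the test suite):
+//
+//   // inValues: FP16 blob {2, 8}; inK: I32 blob {1} holding k = 3;
+//   // outValues: FP16 blob {2, 3}; outIndices: I32 blob {2, 3}
+//   ref_topk(inValues, inK, outValues, outIndices,
+//            /*axis=*/1, /*mode=*/"max", /*sort=*/"value");
+//
+// With sort == "value" the k selected elements stay in comparator order;
+// sort == "index" re-sorts them by their original position along the axis.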
+
+void ref_strided_slice(const InferenceEngine::Blob::Ptr& src,
+                       InferenceEngine::Blob::Ptr& dst,
+                       InferenceEngine::SizeVector &out_dims,
+                       const std::vector<int32_t>& begin,
+                       const std::vector<int32_t>& end,
+                       const std::vector<int32_t>& strides,
+                       const InferenceEngine::SizeVector& begin_mask,
+                       const InferenceEngine::SizeVector& end_mask) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+
+    const auto src_data = src->buffer().as<ie_fp16*>();
+    auto dst_data = dst->buffer().as<ie_fp16*>();
+    ASSERT_NE(src_data, nullptr);
+    ASSERT_NE(dst_data, nullptr);
+
+    const auto src_dims = src->getTensorDesc().getDims();
+    const auto srcStrides = src->getTensorDesc().getBlockingDesc().getStrides();
+    const auto dst_dims = dst->getTensorDesc().getDims();
+    const auto dstStrides = dst->getTensorDesc().getBlockingDesc().getStrides();
+    const auto num_dims = src_dims.size();
+
+    auto work_strides = strides;
+
+    if (work_strides.empty()) {
+        work_strides.resize(num_dims, 1);
+    }
+
+    ASSERT_EQ(begin.size(), num_dims);
+    ASSERT_EQ(end.size(), num_dims);
+    ASSERT_EQ(work_strides.size(), num_dims);
+
+    // Fill optional parameters by default values
+    auto _begin_mask = begin_mask;
+    auto _end_mask = end_mask;
+    _begin_mask.insert(_begin_mask.end(), num_dims - _begin_mask.size(), 1);
+    _end_mask.insert(_end_mask.end(), num_dims - _end_mask.size(), 1);
+
+    auto clip = [](int value, int min, int max) {
+        return std::min(std::max(min, value), max);
+    };
+
+    auto begin_dms = begin;
+    auto end_dms = end;
+
+    for (size_t i = 0; i < num_dims; i++) {
+        IE_ASSERT(_begin_mask[i] == 1 || _begin_mask[i] == 0);
+        IE_ASSERT(_end_mask[i] == 1 || _end_mask[i] == 0);
+
+        begin_dms[i] = _begin_mask[i] ? begin[i] : 0;
+        begin_dms[i] = clip(begin_dms[i], 0, src_dims[i]);
+
+        end_dms[i] = _end_mask[i] ? end[i] : src_dims[i];
+        end_dms[i] = clip(end_dms[i], 0, src_dims[i]);
+
+        IE_ASSERT(begin_dms[i] >= 0 && begin_dms[i] < end_dms[i]);
+        IE_ASSERT(end_dms[i] <= src_dims[i]);
+        IE_ASSERT(work_strides[i] > 0);
+
+        out_dims.push_back(static_cast<int>(std::ceil(
+            static_cast<float>(end_dms[i] - begin_dms[i]) / static_cast<float>(work_strides[i]))));
+    }
+
+    size_t work_amount_dst = dstStrides[0] * dst_dims[0];
+    InferenceEngine::SizeVector counters(num_dims, 0);
+
+    for (size_t iwork = 0, dst_idx = 0; iwork < work_amount_dst; ++iwork) {
+        int src_idx = 0;
+        for (size_t i = 0; i < num_dims; ++i) {
+            src_idx += (begin_dms[i] + counters[i] * work_strides[i]) * srcStrides[i];
+        }
+
+        dst_data[dst_idx++] = src_data[src_idx];
+
+        for (int i = num_dims - 1; i >= 0; i--) {
+            counters[i] = (counters[i] + 1) % out_dims[i];
+            if (counters[i] != 0) break;
+        }
+    }
+}
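+
+// Worked example of the out_dims formula above (illustration only): for a
+// source dimension of size 10 with begin = 1, end = 8 and stride = 3, the
+// slice selects elements 1, 4 and 7, so the output dimension is
+// ceil((8 - 1) / 3) = 3.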
+
+class RefExpDetectionOutput
+{
+    const bool USE_STABLE_SORT = true; // the original CPU implementation uses an unstable sort, which causes test failures
+public:
+    RefExpDetectionOutput(const ie_fp16* srcBoxes,   // [numRois][4]
+                          const ie_fp16* srcDeltas,  // [numRois][numClasses][4]
+                          const ie_fp16* srcScores,  // [numRois][numClasses]
+                          ie_fp16* dstBoxes,         // [maxDetections][4]
+                          int32_t* dstClasses,       // [maxDetections]
+                          ie_fp16* dstScores,        // [maxDetections]
+                          int32_t rois,
+                          int32_t classes,
+                          int32_t detections,
+                          const ExpDetectionOutputParams& params)
+        : inputBoxes(srcBoxes)
+        , inputDeltas(srcDeltas)
+        , inputScores(srcScores)
+        , outputBoxes(dstBoxes)
+        , outputClasses(dstClasses)
+        , outputScores(dstScores)
+        , numRois(rois)
+        , numClasses(classes)
+        , maxDetections(detections)
+        , layerParams(params)
+        { init(); }
+    ~RefExpDetectionOutput() {}
+    void operator()() { execute(); }
+protected:
+    void execute() {
+        // Apply deltas
+
+        refineBoxes(1.0f);
+
+        // Apply NMS class-wise
+
+        int total_detections_num = 0;
+        for (int class_idx = 1; class_idx < numClasses; ++class_idx) {
+            auto d = scoresNMS(&refinedBoxes[class_idx * numRois * 4],
+                               &refinedScores[class_idx * numRois],
+                               &refinedBoxesAreas[class_idx * numRois],
+                               &buffer[0],
+                               &indices[total_detections_num],
+                               -1,
+                               layerParams.post_nms_count);
+            detectionsPerClass[class_idx] = d;
+            total_detections_num += d;
+        }
+
+        // Leave only max_detections_per_image detections:
+        // gather (confidence, class, roi index) records for every kept detection
+
+        int num_detections = 0;
+        int indices_offset = 0;
+        for (int class_idx = 0; class_idx < numClasses; ++class_idx) {
+            const ie_fp16* rscores = &refinedScores[class_idx * numRois];
+
+            int n = detectionsPerClass[class_idx];
+            for (int i = 0; i < n; ++i) {
+                const int roi_idx = indices[indices_offset + i];
+                auto& detection = confIndexClassMap[num_detections++];
+                detection.score     = rscores[roi_idx];
+                detection.class_idx = class_idx;
+                detection.roi_idx   = roi_idx;
+            }
+            indices_offset += n;
+        }
+
+        if (total_detections_num > layerParams.max_detections_per_image) {
+            if (USE_STABLE_SORT) {
+                std::stable_sort(confIndexClassMap.begin(),
+                                 confIndexClassMap.begin() + total_detections_num,
+                                 SortByScoresDescend);
+            } else {
+                std::partial_sort(confIndexClassMap.begin(),
+                                  confIndexClassMap.begin() + layerParams.max_detections_per_image,
+                                  confIndexClassMap.begin() + total_detections_num,
+                                  SortByScoresDescend);
+            }
+            total_detections_num = layerParams.max_detections_per_image;
+        }
+
+        // Fill outputs.
+
+        std::fill_n(outputBoxes, maxDetections * 4, ie_fp16(0.0f));
+        std::fill_n(outputClasses, maxDetections, 0);
+        std::fill_n(outputScores, maxDetections, ie_fp16(0.0f));
+
+        for (int i = 0; i < total_detections_num; ++i) {
+            const auto& detection = confIndexClassMap[i];
+            ie_fp16 score = detection.score;
+            int class_idx = detection.class_idx;
+            int roi_idx   = detection.roi_idx;
+
+            ie_fp16* oboxes = &outputBoxes[i * 4];
+            const ie_fp16* rboxes  = &refinedBoxes[(class_idx * numRois + roi_idx) * 4];
+
+            oboxes[0] = rboxes[0];
+            oboxes[1] = rboxes[1];
+            oboxes[2] = rboxes[2];
+            oboxes[3] = rboxes[3];
+            outputClasses[i] = static_cast<int32_t>( class_idx );
+            outputScores[i] = score;
+        }
+    }
+    void refineBoxes(const float coordinates_offset) {
+        for (int roi_idx = 0; roi_idx < numRois; ++roi_idx) {
+            const ie_fp16* iboxes = &inputBoxes[roi_idx * 4];
+
+            float x0 = PrecisionUtils::f16tof32( iboxes[0] );
+            float y0 = PrecisionUtils::f16tof32( iboxes[1] );
+            float x1 = PrecisionUtils::f16tof32( iboxes[2] );
+            float y1 = PrecisionUtils::f16tof32( iboxes[3] );
+
+            if (x1 - x0 <= 0 || y1 - y0 <= 0) {
+                continue;
+            }
+
+            // width & height of box
+            const float ww = x1 - x0 + coordinates_offset;
+            const float hh = y1 - y0 + coordinates_offset;
+            // center location of box
+            const float ctr_x = x0 + 0.5f * ww;
+            const float ctr_y = y0 + 0.5f * hh;
+
+            for (int class_idx = 1; class_idx < numClasses; ++class_idx) {
+                const ie_fp16* ideltas = &inputDeltas[(roi_idx * numClasses + class_idx) * 4];
+                const ie_fp16* iscores = &inputScores[roi_idx * numClasses + class_idx];
+
+                const float dx      = PrecisionUtils::f16tof32( ideltas[0] ) / layerParams.deltas_weights[0];
+                const float dy      = PrecisionUtils::f16tof32( ideltas[1] ) / layerParams.deltas_weights[1];
+                const float d_log_w = PrecisionUtils::f16tof32( ideltas[2] ) / layerParams.deltas_weights[2];
+                const float d_log_h = PrecisionUtils::f16tof32( ideltas[3] ) / layerParams.deltas_weights[3];
+
+                // new center location according to deltas (dx, dy)
+                const float pred_ctr_x = dx * ww + ctr_x;
+                const float pred_ctr_y = dy * hh + ctr_y;
+                // new width & height according to deltas d(log w), d(log h)
+                const float pred_w = std::exp(std::min<float>(d_log_w, layerParams.max_delta_log_wh)) * ww;
+                const float pred_h = std::exp(std::min<float>(d_log_h, layerParams.max_delta_log_wh)) * hh;
+
+                // update upper-left corner location
+                float x0_new = pred_ctr_x - 0.5f * pred_w;
+                float y0_new = pred_ctr_y - 0.5f * pred_h;
+                // update lower-right corner location
+                float x1_new = pred_ctr_x + 0.5f * pred_w - coordinates_offset;
+                float y1_new = pred_ctr_y + 0.5f * pred_h - coordinates_offset;
+
+                // adjust new corner locations to be within the image region,
+                x0_new = std::max<float>(0.0f, x0_new);
+                y0_new = std::max<float>(0.0f, y0_new);
+                x1_new = std::max<float>(0.0f, x1_new);
+                y1_new = std::max<float>(0.0f, y1_new);
+
+                // recompute new width & height
+                const float box_w = x1_new - x0_new + coordinates_offset;
+                const float box_h = y1_new - y0_new + coordinates_offset;
+
+                ie_fp16* rboxes  = &refinedBoxes[(class_idx * numRois + roi_idx) * 4];
+                ie_fp16* rbareas = &refinedBoxesAreas[class_idx * numRois + roi_idx];
+                ie_fp16* rscores = &refinedScores[class_idx * numRois + roi_idx];
+
+                rboxes[0] = PrecisionUtils::f32tof16( x0_new );
+                rboxes[1] = PrecisionUtils::f32tof16( y0_new );
+                rboxes[2] = PrecisionUtils::f32tof16( x1_new );
+                rboxes[3] = PrecisionUtils::f32tof16( y1_new );
+
+                rbareas[0] = PrecisionUtils::f32tof16( box_w * box_h );
+                rscores[0] = iscores[0];
+            }
+        }
+    }
+    int scoresNMS(const ie_fp16* boxes_data,   // [numRois][4]
+                  const ie_fp16* scores_data,  // [numRois]
+                  const ie_fp16* sizes_data,   // [numRois]
+                  int32_t* buffer,
+                  int32_t* indices,
+                  const int pre_nms_topn,
+                  const int post_nms_topn) {
+        int detections = 0;
+
+        int count = 0;
+        for (int roi_idx = 0; roi_idx < numRois; ++roi_idx) {
+            float score = PrecisionUtils::f16tof32( scores_data[roi_idx] );
+            if (score > layerParams.score_threshold) {
+                indices[count] = roi_idx;
+                ++count;
+            }
+        }
+
+        int num_output_scores = (pre_nms_topn == -1 ? count : MIN(pre_nms_topn, count));
+
+        if (USE_STABLE_SORT) {
+            std::copy_n(indices,
+                        count,
+                        buffer);
+            std::stable_sort(buffer,
+                             buffer + count,
+                             ConfidenceComparator(scores_data));
+        } else {
+            std::partial_sort_copy(indices,
+                                   indices + count,
+                                   buffer,
+                                   buffer + num_output_scores,
+                                   ConfidenceComparator(scores_data));
+        }
+
+        detections = 0;
+        for (int i = 0; i < num_output_scores; ++i) {
+            const int idx = buffer[i];
+
+            bool keep = true;
+            for (int k = 0; k < detections; ++k) {
+                const int kept_idx = indices[k];
+                float overlap = JaccardOverlap(boxes_data,
+                                               sizes_data,
+                                               idx,
+                                               kept_idx);
+                if (overlap > layerParams.nms_threshold) {
+                    keep = false;
+                    break;
+                }
+            }
+            if (keep) {
+                indices[detections] = idx;
+                ++detections;
+            }
+        }
+
+        detections = (post_nms_topn == -1 ? detections : MIN(post_nms_topn, detections));
+
+        return detections;
+    }
+    float JaccardOverlap(const ie_fp16* boxes_data,  // [numRois][4]
+                         const ie_fp16* sizes_data,  // [numRois]
+                         const int idx1,
+                         const int idx2,
+                         const float coordinates_offset = 1.0f) {
+        const ie_fp16* boxes_data1 = &boxes_data[idx1 * 4 + 0];
+        const ie_fp16* boxes_data2 = &boxes_data[idx2 * 4 + 0];
+
+        float xmin1 = PrecisionUtils::f16tof32( boxes_data1[0] );
+        float ymin1 = PrecisionUtils::f16tof32( boxes_data1[1] );
+        float xmax1 = PrecisionUtils::f16tof32( boxes_data1[2] );
+        float ymax1 = PrecisionUtils::f16tof32( boxes_data1[3] );
+        float xmin2 = PrecisionUtils::f16tof32( boxes_data2[0] );
+        float ymin2 = PrecisionUtils::f16tof32( boxes_data2[1] );
+        float xmax2 = PrecisionUtils::f16tof32( boxes_data2[2] );
+        float ymax2 = PrecisionUtils::f16tof32( boxes_data2[3] );
+
+        if (xmin2 > xmax1 || xmax2 < xmin1 || ymin2 > ymax1 || ymax2 < ymin1) {
+            return 0.0f;
+        }
+
+        float intersect_xmin = std::max<float>(xmin1, xmin2);
+        float intersect_ymin = std::max<float>(ymin1, ymin2);
+        float intersect_xmax = std::min<float>(xmax1, xmax2);
+        float intersect_ymax = std::min<float>(ymax1, ymax2);
+
+        float intersect_width  = intersect_xmax - intersect_xmin + coordinates_offset;
+        float intersect_height = intersect_ymax - intersect_ymin + coordinates_offset;
+
+        if (intersect_width <= 0 || intersect_height <= 0) {
+            return 0.0f;
+        }
+
+        float intersect_size = intersect_width * intersect_height;
+        float bbox1_size = PrecisionUtils::f16tof32( sizes_data[idx1] );
+        float bbox2_size = PrecisionUtils::f16tof32( sizes_data[idx2] );
+        float IoU = intersect_size / (bbox1_size + bbox2_size - intersect_size);
+
+        return IoU;
+    }
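+
+    // Worked IoU example (illustration only): boxes (0,0,2,2) and (1,1,3,3)
+    // with coordinates_offset = 1 each have area 3 * 3 = 9 and intersect in a
+    // 2 x 2 region, so JaccardOverlap returns 4 / (9 + 9 - 4) = 4/14 ~ 0.286.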
+private:
+    struct ConfData {
+        ie_fp16 score;
+        int32_t class_idx;
+        int32_t roi_idx;
+    };
+    struct ConfidenceComparator {
+        ConfidenceComparator(const ie_fp16* _scores_data)
+            : scores_data(_scores_data)
+            {}
+        bool operator()(int idx1, int idx2)
+            {
+                const float val1 = PrecisionUtils::f16tof32( scores_data[idx1] );
+                const float val2 = PrecisionUtils::f16tof32( scores_data[idx2] );
+
+                if (val1 > val2) return true;
+                if (val1 < val2) return false;
+                return idx1 < idx2;
+            }
+    private:
+        const ie_fp16* scores_data;
+    };
+    static bool SortByScoresDescend(const ConfData& data1, const ConfData& data2) {
+        const float val1 = PrecisionUtils::f16tof32( data1.score );
+        const float val2 = PrecisionUtils::f16tof32( data2.score );
+
+        return val1 > val2;
+    }
+    void init() {
+        refinedBoxes.resize(numClasses * numRois * 4, 0.0f);
+        refinedScores.resize(numClasses * numRois, 0.0f);
+        refinedBoxesAreas.resize(numClasses * numRois, 0.0f);
+
+        buffer.resize(numRois, 0);
+        indices.resize(numClasses * numRois, 0);
+        detectionsPerClass.resize(numClasses, 0);
+
+        confIndexClassMap.resize(numClasses * numRois, ConfData{0, 0, 0});
+    }
+
+    const ie_fp16* inputBoxes;   // [numRois][4]
+    const ie_fp16* inputDeltas;  // [numRois][numClasses][4]
+    const ie_fp16* inputScores;  // [numRois][numClasses]
+    ie_fp16* outputBoxes;        // [maxDetections][4]
+    int32_t* outputClasses;      // [maxDetections]
+    ie_fp16* outputScores;       // [maxDetections]
+
+    const ExpDetectionOutputParams& layerParams;
+
+    int32_t numRois;
+    int32_t numClasses;
+    int32_t maxDetections;
+
+    std::vector<ie_fp16> refinedBoxes;       // [numClasses][numRois][4]
+    std::vector<ie_fp16> refinedScores;      // [numClasses][numRois]
+    std::vector<ie_fp16> refinedBoxesAreas;  // [numClasses][numRois]
+
+    std::vector<int32_t> buffer;              // [numRois]
+    std::vector<int32_t> indices;             // [numClasses][numRois]
+    std::vector<int32_t> detectionsPerClass;  // [numClasses]
+
+    std::vector<ConfData> confIndexClassMap; // [numClasses * numRois]
+};
+
+void ref_expDetectionOutput(const InferenceEngine::Blob::Ptr srcBoxes,   // [numRois][4]
+                            const InferenceEngine::Blob::Ptr srcDeltas,  // [numRois]([numClasses][4])
+                            const InferenceEngine::Blob::Ptr srcScores,  // [numRois][numClasses]
+                            const InferenceEngine::Blob::Ptr /*srcIMinfo*/,  // [2]
+                            InferenceEngine::Blob::Ptr dstBoxes,         // [maxDetections][4]
+                            InferenceEngine::Blob::Ptr dstClasses,       // [maxDetections]
+                            InferenceEngine::Blob::Ptr dstScores,        // [maxDetections]
+                            const int numRois,
+                            const int numClasses,
+                            const int maxDetections,
+                            const ExpDetectionOutputParams& layerParams) {
+    RefExpDetectionOutput detectionOutput(srcBoxes->cbuffer().as<const ie_fp16*>(),
+                                          srcDeltas->cbuffer().as<const ie_fp16*>(),
+                                          srcScores->cbuffer().as<const ie_fp16*>(),
+                                          dstBoxes->buffer().as<ie_fp16*>(),
+                                          dstClasses->buffer().as<int32_t*>(),
+                                          dstScores->buffer().as<ie_fp16*>(),
+                                          numRois,
+                                          numClasses,
+                                          maxDetections,
+                                          layerParams);
+    detectionOutput();
+}
+
+namespace internal {
+    // implementation taken from Caffe2
+    template <typename T>
+    struct PreCalc {
+      int pos1;
+      int pos2;
+      int pos3;
+      int pos4;
+      T w1;
+      T w2;
+      T w3;
+      T w4;
+    };
+
+    template <typename T>
+    void pre_calc_for_bilinear_interpolate(
+        const int height,
+        const int width,
+        const int pooled_height,
+        const int pooled_width,
+        const int iy_upper,
+        const int ix_upper,
+        T roi_start_h,
+        T roi_start_w,
+        T bin_size_h,
+        T bin_size_w,
+        int roi_bin_grid_h,
+        int roi_bin_grid_w,
+        std::vector<PreCalc<T>>& pre_calc) {
+      int pre_calc_index = 0;
+      for (int ph = 0; ph < pooled_height; ph++) {
+        for (int pw = 0; pw < pooled_width; pw++) {
+          for (int iy = 0; iy < iy_upper; iy++) {
+            const T yy = roi_start_h + ph * bin_size_h +
+                static_cast<T>(iy + .5f) * bin_size_h /
+                    static_cast<T>(roi_bin_grid_h);  // e.g., 0.5, 1.5
+            for (int ix = 0; ix < ix_upper; ix++) {
+              const T xx = roi_start_w + pw * bin_size_w +
+                  static_cast<T>(ix + .5f) * bin_size_w /
+                      static_cast<T>(roi_bin_grid_w);
+
+              T x = xx;
+              T y = yy;
+              // deal with: inverse elements are out of feature map boundary
+              if (y < -1.0 || y > height || x < -1.0 || x > width) {
+                // empty
+                PreCalc<T> pc;
+                pc.pos1 = 0;
+                pc.pos2 = 0;
+                pc.pos3 = 0;
+                pc.pos4 = 0;
+                pc.w1 = 0;
+                pc.w2 = 0;
+                pc.w3 = 0;
+                pc.w4 = 0;
+                pre_calc.at(pre_calc_index) = pc;
+                pre_calc_index += 1;
+                continue;
+              }
+
+              if (y <= 0) {
+                y = 0;
+              }
+              if (x <= 0) {
+                x = 0;
+              }
+
+              int y_low = static_cast<int>(y);
+              int x_low = static_cast<int>(x);
+              int y_high = 0;
+              int x_high = 0;
+
+              if (y_low >= height - 1) {
+                y_high = y_low = height - 1;
+                y = (T)y_low;
+              } else {
+                y_high = y_low + 1;
+              }
+
+              if (x_low >= width - 1) {
+                x_high = x_low = width - 1;
+                x = (T)x_low;
+              } else {
+                x_high = x_low + 1;
+              }
+
+              T ly = y - y_low;
+              T lx = x - x_low;
+              T hy = static_cast<T>(1) - ly, hx = static_cast<T>(1) - lx;
+              T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
+
+              // save weights and indices
+              PreCalc<T> pc;
+              pc.pos1 = y_low * width + x_low;
+              pc.pos2 = y_low * width + x_high;
+              pc.pos3 = y_high * width + x_low;
+              pc.pos4 = y_high * width + x_high;
+              pc.w1 = w1;
+              pc.w2 = w2;
+              pc.w3 = w3;
+              pc.w4 = w4;
+              pre_calc[pre_calc_index] = pc;
+
+              pre_calc_index += 1;
+            }
+          }
+        }
+      }
+    }
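+
+    // Bilinear weight sanity check (illustration only): a sample point at
+    // y = 1.25, x = 2.5 gives y_low = 1, x_low = 2, ly = 0.25, lx = 0.5, so
+    // w1 = 0.75 * 0.5, w2 = 0.75 * 0.5, w3 = 0.25 * 0.5, w4 = 0.25 * 0.5;
+    // the four weights always sum to 1 for in-bounds points.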
+
+    template <typename T>
+    void ROIAlignForward_cpu_kernel(
+        const int nthreads,
+        const T* bottom_data,
+        const T& spatial_scale,
+        const int channels,
+        const int height,
+        const int width,
+        const int pooled_height,
+        const int pooled_width,
+        const int sampling_ratio,
+        const T* bottom_rois,
+        T* top_data) {
+      const int roi_cols = 4;
+
+      int n_rois = nthreads / channels / pooled_width / pooled_height;
+      // (n, c, ph, pw) is an element in the pooled output
+      for (int n = 0; n < n_rois; n++) {
+        int index_n = n * channels * pooled_width * pooled_height;
+
+        // rois could have 4 or 5 columns in the Caffe2 origin; here roi_cols == 4,
+        // so there is no leading batch-index column and roi_batch_ind stays 0
+        const T* offset_bottom_rois = bottom_rois + n * roi_cols;
+        int roi_batch_ind = 0;
+        if (roi_cols == 5) {
+          roi_batch_ind = static_cast<int>(offset_bottom_rois[0]);
+          offset_bottom_rois++;
+        }
+
+        // Do not use rounding; this implementation detail is critical
+        T roi_start_w = offset_bottom_rois[0] * spatial_scale;
+        T roi_start_h = offset_bottom_rois[1] * spatial_scale;
+        T roi_end_w = offset_bottom_rois[2] * spatial_scale;
+        T roi_end_h = offset_bottom_rois[3] * spatial_scale;
+
+        // Force malformed ROIs to be 1x1
+        T roi_width = std::max(roi_end_w - roi_start_w, (T)1.);
+        T roi_height = std::max(roi_end_h - roi_start_h, (T)1.);
+        T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
+        T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
+
+        // We use roi_bin_grid to sample the grid and mimic integral
+        int roi_bin_grid_h = (sampling_ratio > 0)
+            ? sampling_ratio
+            : static_cast<int>(ceil(roi_height / pooled_height));  // e.g., = 2
+        int roi_bin_grid_w =
+            (sampling_ratio > 0) ? sampling_ratio : static_cast<int>(ceil(roi_width / pooled_width));
+
+        // We do average (integral) pooling inside a bin
+        const T count = static_cast<T>(roi_bin_grid_h * roi_bin_grid_w);  // e.g. = 4
+
+        // we want to precalculate indices and weights shared by all channels;
+        // this is the key point of the optimization
+        std::vector<PreCalc<T>> pre_calc(
+            roi_bin_grid_h * roi_bin_grid_w * pooled_width * pooled_height);
+        pre_calc_for_bilinear_interpolate(
+            height,
+            width,
+            pooled_height,
+            pooled_width,
+            roi_bin_grid_h,
+            roi_bin_grid_w,
+            roi_start_h,
+            roi_start_w,
+            bin_size_h,
+            bin_size_w,
+            roi_bin_grid_h,
+            roi_bin_grid_w,
+            pre_calc);
+
+        for (int c = 0; c < channels; c++) {
+          int index_n_c = index_n + c * pooled_width * pooled_height;
+          const T* offset_bottom_data =
+              bottom_data + (roi_batch_ind * channels + c) * height * width;
+
+          int pre_calc_index = 0;
+
+          for (int ph = 0; ph < pooled_height; ph++) {
+            for (int pw = 0; pw < pooled_width; pw++) {
+              int index = index_n_c + ph * pooled_width + pw;
+
+              T output_val = 0.;
+              for (int iy = 0; iy < roi_bin_grid_h; iy++) {
+                for (int ix = 0; ix < roi_bin_grid_w; ix++) {
+                  PreCalc<T> pc = pre_calc[pre_calc_index];
+                  output_val += pc.w1 * offset_bottom_data[pc.pos1] +
+                      pc.w2 * offset_bottom_data[pc.pos2] +
+                      pc.w3 * offset_bottom_data[pc.pos3] +
+                      pc.w4 * offset_bottom_data[pc.pos4];
+
+                  pre_calc_index += 1;
+                }
+              }
+              output_val /= count;
+              top_data[index] = output_val;
+            }  // for pw
+          }  // for ph
+        }  // for c
+      }
+    }
+
+    void redistribute_rois(const float* rois, int* level_ids,
+                           const int num_rois, const int levels_num) {
+        const float canonical_scale = 224.0f;
+        const int canonical_level = 2;
+
+        for (int i = 0; i < num_rois; ++i) {
+            const float x0 = rois[4 * i + 0];
+            const float y0 = rois[4 * i + 1];
+            const float x1 = rois[4 * i + 2];
+            const float y1 = rois[4 * i + 3];
+
+            int target_level = levels_num;
+            float area = (x1 - x0) * (y1 - y0);
+            if (area > 0) {
+                area = std::sqrt(area) / canonical_scale;
+                area = std::log2(area + 1e-6f);
+                target_level = static_cast<int>(std::floor(area + canonical_level));
+                target_level = std::max<int>(0, std::min<int>(levels_num - 1, target_level));
+            }
+
+            level_ids[i] = target_level;
+        }
+    }
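+
+    // Worked example of the level assignment above (illustration only): a
+    // 112x112 ROI has sqrt(area) / 224 = 0.5 and log2(0.5) = -1, so
+    // target_level = floor(-1 + 2) = 1; a 448x448 ROI gives log2(2) = 1 and
+    // lands on floor(1 + 2) = 3 (both before clamping to [0, levels_num - 1]).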
+
+    void reorder(const float* src_data, const int* ranks, const int n, const int step, float* dst_data,
+                 int* dst_mapping) {
+        std::iota(dst_mapping, dst_mapping + n, 0);
+        std::sort(dst_mapping, dst_mapping + n, [&ranks](size_t i1, size_t i2) {return ranks[i1] < ranks[i2];});
+        for (int i = 0; i < n; ++i) {
+            const int j = dst_mapping[i];
+            assert(0 <= j && j < n);
+            std::memcpy(dst_data + i * step, src_data + j * step, sizeof(float) * step);
+        }
+    }
+
+    void split_points(const std::vector<int>& ids, std::vector<int>& rois_per_level, const int levels_num) {
+        rois_per_level.clear();
+        rois_per_level.resize(levels_num, 0);
+        for (size_t i = 0; i < ids.size(); ++i) {
+            assert(0 <= ids[i] && ids[i] < levels_num);
+            rois_per_level[ids[i]]++;
+        }
+        for (int i = 1; i < levels_num; ++i) {
+            rois_per_level[i] += rois_per_level[i - 1];
+        }
+        rois_per_level.insert(rois_per_level.begin(), 0);
+    }
+
+    void reorder_rois(const float *rois, const int* ids, int* mapping, const int rois_num,
+                      float * reordered_rois, std::vector<int>& rois_per_level, const int levels_num) {
+        rois_per_level.clear();
+        rois_per_level.resize(levels_num, 0);
+        for (int i = 0; i < rois_num; ++i) {
+            assert(0 <= ids[i] && ids[i] < levels_num);
+            rois_per_level[ids[i]]++;
+        }
+        for (int i = 1; i < levels_num; ++i) {
+            rois_per_level[i] += rois_per_level[i - 1];
+        }
+        rois_per_level.insert(rois_per_level.begin(), 0);
+
+        std::vector<int> level_counter = rois_per_level;
+
+        for (int i = 0; i < rois_num; ++i) {
+            const int level = ids[i];
+            assert(level < levels_num);
+            const int j = level_counter[level];
+            assert(0 <= j && j < rois_num);
+            reordered_rois[j * 4 + 0] = rois[i * 4 + 0];
+            reordered_rois[j * 4 + 1] = rois[i * 4 + 1];
+            reordered_rois[j * 4 + 2] = rois[i * 4 + 2];
+            reordered_rois[j * 4 + 3] = rois[i * 4 + 3];
+            level_counter[level]++;
+        }
+    }
+
+    const int INPUT_ROIS {0};
+    const int INPUT_FEATURES_START {1};
+
+    const int OUTPUT_ROI_FEATURES {0};
+    const int OUTPUT_ROIS {1};
+
+    void refROIFeatureExtractor(std::vector<InferenceEngine::Blob::Ptr>& inputs, std::vector<InferenceEngine::Blob::Ptr>& outputs,
+                                std::vector<int> pyramid_scales_,
+                                int sampling_ratio_,
+                                int pooled_height_,
+                                int pooled_width_) {
+        const int levels_num = inputs.size() - INPUT_FEATURES_START;
+        const int num_rois = inputs[INPUT_ROIS]->getTensorDesc().getDims()[0];
+        const int channels_num = inputs[INPUT_FEATURES_START]->getTensorDesc().getDims()[1];
+        const int feaxels_per_roi = pooled_height_ * pooled_width_ * channels_num;
+
+        auto *input_rois = inputs[INPUT_ROIS]->buffer().as<const float *>();
+        auto *output_rois_features = outputs[OUTPUT_ROI_FEATURES]->buffer().as<float *>();
+        float *output_rois = nullptr;
+        if (OUTPUT_ROIS < static_cast<int>(outputs.size())) {
+            output_rois = outputs[OUTPUT_ROIS]->buffer().as<float *>();
+        }
+
+        std::vector<int> level_ids(num_rois, 0);
+        redistribute_rois(input_rois, level_ids.data(), num_rois, levels_num);
+
+        std::vector<float> reordered_rois(4 * num_rois, 0);
+        std::vector<int> original_rois_mapping(num_rois, 0);
+        reorder(input_rois, &level_ids[0], num_rois, 4, &reordered_rois[0], &original_rois_mapping[0]);
+
+        std::vector<int> rois_per_level;
+        split_points(level_ids, rois_per_level, levels_num + 1);
+
+        std::vector<float> output_rois_features_temp(feaxels_per_roi * num_rois, 0);
+        for (int i = 0; i < levels_num; ++i) {
+            const int level_rois_offset = rois_per_level[i];
+            const int level_rois_num = rois_per_level[i + 1] - level_rois_offset;
+            if (level_rois_num > 0) {
+                auto *featuremap = inputs[INPUT_FEATURES_START + i]->buffer().as<const float *>();
+
+                const int featuremap_height = inputs[INPUT_FEATURES_START + i]->getTensorDesc().getDims()[2];
+                const int featuremap_width = inputs[INPUT_FEATURES_START + i]->getTensorDesc().getDims()[3];
+                ROIAlignForward_cpu_kernel<float>(feaxels_per_roi * level_rois_num,
+                    featuremap,
+                    1.0f / pyramid_scales_[i],
+                    channels_num,
+                    featuremap_height,
+                    featuremap_width,
+                    pooled_height_,
+                    pooled_width_,
+                    sampling_ratio_,
+                    &reordered_rois[4 * level_rois_offset],
+                    &output_rois_features_temp[feaxels_per_roi * level_rois_offset]);
+
+            }
+        }
+
+        std::vector<int> dummy_mapping(num_rois, 0);
+        reorder(&output_rois_features_temp[0], &original_rois_mapping[0], num_rois, feaxels_per_roi,
+                output_rois_features, &dummy_mapping[0]);
+        if (output_rois != nullptr) {
+            std::memcpy(output_rois, input_rois, 4 * num_rois * sizeof(float));
+        }
+    }
+
+    static void nchw_to_nhwc(const float* src,
+                             float* dst,
+                             int N, int C, int H, int W) {
+        for (int n = 0; n < N; n++) {
+            for (int c = 0; c < C; c++) {
+                for (int h = 0; h < H; h++) {
+                    for (int w = 0; w < W; w++) {
+                        int ind_i = n * W * H * C + (w + h * W + c * H * W);
+                        int ind_o = n * W * H * C + (h * C * W + w * C + c);
+                        dst[ind_o] = src[ind_i];
+                    }
+                }
+            }
+        }
+    }
+
+    static void nhwc_to_nchw(const ie_fp16* src,
+                             float* dst,
+                             int N, int C, int H, int W
+                            ) {
+        for (int n = 0; n < N; n++) {
+            for (int c = 0; c < C; c++) {
+                for (int h = 0; h < H; h++) {
+                    for (int w = 0; w < W; w++) {
+                        int ind_o = n * W * H * C + (w + h * W + c * H * W);
+                        int ind_i = n * W * H * C + (h * C * W + w * C + c);
+                        dst[ind_o] = PrecisionUtils::f16tof32(src[ind_i]);
+                    }
+                }
+            }
+        }
+    }
+
+    typedef enum : uint32_t {
+        roi_align_avg = 0,
+        roi_align_max = 1,
+        roi_align_undefined = 100
+    } ROIAlignMode;
+
+    ROIAlignMode ROIAlignModeConvert(const std::string& modeString) {
+        if (modeString == "max")
+            return roi_align_max;
+        if (modeString == "avg")
+            return roi_align_avg;
+        VPU_THROW_FORMAT("Reference ROIAlign supports only 'max' or 'avg' for mode, but got: {}", modeString);
+        return roi_align_undefined;
+    }
+
+    template <typename T>
+    static void refROIAlign(const int nthreads,
+                            const T* bottom_data,
+                            const T& spatial_scale,
+                            const int channels,
+                            const int height,
+                            const int width,
+                            const int pooled_height,
+                            const int pooled_width,
+                            const int sampling_ratio,
+                            const T* bottom_rois,
+                            T* top_data,
+
+                            const int* roi_batch_indices, int n_batches,
+
+                            ROIAlignMode mode) {
+        const int roi_cols = 4;
+
+        int n_rois = nthreads / channels / pooled_width / pooled_height;
+        // (n, c, ph, pw) is an element in the pooled output
+        for (int n = 0; n < n_rois; n++) {
+            int index_n = n * channels * pooled_width * pooled_height;
+            const T* offset_bottom_rois = bottom_rois + n * roi_cols;
+
+            int roi_batch_ind = (roi_batch_indices != nullptr) ? roi_batch_indices[n] : 0;
+            assert(roi_batch_ind < n_batches);
+
+            // Do not use rounding; this implementation detail is critical
+            const T roi_start_w = offset_bottom_rois[0] * spatial_scale;
+            const T roi_start_h = offset_bottom_rois[1] * spatial_scale;
+            const T roi_end_w = offset_bottom_rois[2] * spatial_scale;
+            const T roi_end_h = offset_bottom_rois[3] * spatial_scale;
+
+            // Force malformed ROIs to be 1x1
+            const T roi_width  = std::max(roi_end_w - roi_start_w, (T)1.);
+            const T roi_height = std::max(roi_end_h - roi_start_h, (T)1.);
+            const T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
+            const T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
+
+            // We use roi_bin_grid to sample the grid and mimic integral
+            const int roi_bin_grid_h = (sampling_ratio > 0)
+                ? sampling_ratio
+                : static_cast<int>(ceil(roi_height / pooled_height));  // e.g., = 2
+            const int roi_bin_grid_w =
+                (sampling_ratio > 0) ? sampling_ratio : static_cast<int>(ceil(roi_width / pooled_width));
+
+            // We do average (integral) pooling inside a bin
+            const T count = static_cast<T>(roi_bin_grid_h * roi_bin_grid_w);  // e.g. = 4
+
+            // we want to precalculate indices and weights shared by all channels;
+            // this is the key point of the optimization
+            std::vector<PreCalc<T>> pre_calc(
+                roi_bin_grid_h * roi_bin_grid_w * pooled_width * pooled_height);
+            pre_calc_for_bilinear_interpolate(height,
+                                              width,
+                                              pooled_height,
+                                              pooled_width,
+                                              roi_bin_grid_h,
+                                              roi_bin_grid_w,
+                                              roi_start_h,
+                                              roi_start_w,
+                                              bin_size_h,
+                                              bin_size_w,
+                                              roi_bin_grid_h,
+                                              roi_bin_grid_w,
+                                              pre_calc);
+
+            for (int c = 0; c < channels; c++) {
+                int index_n_c = index_n + c * pooled_width * pooled_height;
+                const T* offset_bottom_data =
+                bottom_data + (roi_batch_ind * channels + c) * height * width;
+                int pre_calc_index = 0;
+
+                for (int ph = 0; ph < pooled_height; ph++) {
+                    for (int pw = 0; pw < pooled_width; pw++) {
+                        int index = index_n_c + ph * pooled_width + pw;
+
+                        T output_val = 0.;
+                        if (mode == roi_align_avg) {
+                            for (int iy = 0; iy < roi_bin_grid_h; iy++) {
+                                for (int ix = 0; ix < roi_bin_grid_w; ix++) {
+                                    PreCalc<T> pc = pre_calc[pre_calc_index];
+                                    output_val += pc.w1 * offset_bottom_data[pc.pos1] +
+                                                pc.w2 * offset_bottom_data[pc.pos2] +
+                                                pc.w3 * offset_bottom_data[pc.pos3] +
+                                                pc.w4 * offset_bottom_data[pc.pos4];
+
+                                    pre_calc_index += 1;
+                                }
+                            }
+                            output_val /= count;
+                        } else if (mode == roi_align_max) {
+                            bool isInitialized = false;
+
+                            for (int iy = 0; iy < roi_bin_grid_h; iy++) {
+                                for (int ix = 0; ix < roi_bin_grid_w; ix++) {
+                                    PreCalc<T> pc = pre_calc[pre_calc_index];
+                                    T val = pc.w1 * offset_bottom_data[pc.pos1] +
+                                            pc.w2 * offset_bottom_data[pc.pos2] +
+                                            pc.w3 * offset_bottom_data[pc.pos3] +
+                                            pc.w4 * offset_bottom_data[pc.pos4];
+                                    if (!isInitialized)
+                                        output_val = val;
+                                    else
+                                        output_val = std::max<T>(val, output_val);
+                                    isInitialized = true;
+
+                                    pre_calc_index += 1;
+                                }
+                            }
+                        }
+                        top_data[index] = output_val;
+                    }  // for pw
+                }  // for ph
+            }  // for c
+        }  // for n_rois
+    }
+
+} // namespace internal
+
+void ref_ROIAlign(InferenceEngine::Blob::Ptr feature_map,
+                  InferenceEngine::Blob::Ptr rois,
+                  InferenceEngine::Blob::Ptr batch_indices,
+                  InferenceEngine::Blob::Ptr output,
+                  const int sampling_ratio,
+                  const int pooled_h,
+                  const int pooled_w,
+                  const int num_rois,
+                  const float spatial_scale,
+                  const std::string& mode) {
+    VPU_THROW_UNLESS(feature_map != nullptr, "feature_map must not be equal to nullptr");
+    VPU_THROW_UNLESS(rois != nullptr, "rois must not be equal to nullptr");
+    VPU_THROW_UNLESS(batch_indices != nullptr, "batch_indices must not be equal to nullptr");
+    VPU_THROW_UNLESS(output != nullptr, "output must not be equal to nullptr");
+
+    std::vector<InferenceEngine::Blob::Ptr> inputs;
+    inputs.push_back(feature_map);
+    inputs.push_back(rois);
+
+    std::vector<InferenceEngine::Blob::Ptr> InputBlobsF32;
+
+    for (auto blob : inputs) {
+        auto _refInputBlob = make_shared_blob<float>({Precision::FP32,
+                                                      blob->getTensorDesc().getDims(),
+                                                      blob->getTensorDesc().getLayout()
+                                                     });
+        _refInputBlob->allocate();
+
+        ie_fp16* blob_ptr_src = static_cast<ie_fp16*>(blob->buffer());
+        float* blob_ptr_dst = static_cast<float*>(_refInputBlob->buffer());
+
+        const auto& inputTensorDesc = blob->getTensorDesc();
+        const auto& inputDims = inputTensorDesc.getDims();
+
+        int num_elements = 1;
+        for (size_t i = 0; i < inputDims.size(); i++) {
+            num_elements *= inputDims[i];
+        }
+
+        for (int i = 0; i < num_elements; i++) {
+            blob_ptr_dst[i] = PrecisionUtils::f16tof32(blob_ptr_src[i]);
+        }
+
+        InputBlobsF32.push_back(_refInputBlob);
+    }
+
+    const float* feature_ptr = InputBlobsF32[0]->buffer().as<const float *>();
+    const float* rois_ptr = InputBlobsF32[1]->buffer().as<const float *>();
+    const int* batch_ind_ptr = batch_indices->buffer().as<const int *>();
+
+    const auto& inputDims = feature_map->getTensorDesc().getDims();
+    const int num_batches = inputDims[0];
+    const int channels    = inputDims[1];
+    const int height      = inputDims[2];
+    const int width       = inputDims[3];
+
+    const int top_area = pooled_h * pooled_w;
+
+    internal::refROIAlign<float>(num_rois * channels * top_area,
+                                 feature_ptr,
+                                 spatial_scale,
+                                 channels, height, width,
+                                 pooled_h, pooled_w,
+                                 sampling_ratio,
+                                 rois_ptr,
+                                 output->buffer().as<float*>(),
+                                 batch_ind_ptr, num_batches,
+                                 internal::ROIAlignModeConvert(mode));
+}
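+
+// A minimal usage sketch for ref_ROIAlign (illustration only; the blob names
+// and shapes are hypothetical):
+//
+//   // featureMap: FP16 {1, C, H, W}; roisBlob: FP16 {num_rois, 4};
+//   // batchIndices: I32 {num_rois}; output: FP32 {num_rois, C, 7, 7}
+//   ref_ROIAlign(featureMap, roisBlob, batchIndices, output,
+//                /*sampling_ratio=*/2, /*pooled_h=*/7, /*pooled_w=*/7,
+//                num_rois, /*spatial_scale=*/0.25f, /*mode=*/"avg");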
+
+void ref_ROIFeatureExtractor(std::vector<InferenceEngine::Blob::Ptr> inputs,
+                             InferenceEngine::Blob::Ptr output,
+                             InferenceEngine::Blob::Ptr output_rois,
+                             std::vector<int> pyramid_scales,
+                             int sampling_ratio,
+                             int pooled_height,
+                             int pooled_width)
+{
+    ASSERT_GE(inputs.size(), 2);
+    for (auto input : inputs) {
+        ASSERT_NE(input, nullptr);
+    }
+    ASSERT_NE(output, nullptr);
+
+    bool use_output_rois = (output_rois != nullptr);
+
+    auto _refOutputBlob_temp = make_shared_blob<float>(
+        {Precision::FP32, output->getTensorDesc().getDims(), output->getTensorDesc().getLayout()}
+    );
+    _refOutputBlob_temp->allocate();
+
+    std::vector<InferenceEngine::Blob::Ptr> outputs_ref;
+    outputs_ref.push_back(_refOutputBlob_temp);
+    if (use_output_rois)
+        outputs_ref.push_back(output_rois);
+
+    std::vector<InferenceEngine::Blob::Ptr> InputBlobsF32;
+
+    for (auto blob : inputs) {
+        auto _refInputBlob = make_shared_blob<float>(
+                                                     {Precision::FP32,
+                                                      blob->getTensorDesc().getDims(),
+                                                      blob->getTensorDesc().getLayout()
+                                                     });
+        _refInputBlob->allocate();
+
+        ie_fp16* blob_ptr_src = static_cast<ie_fp16*>(blob->buffer());
+        float* blob_ptr_dst = static_cast<float*>(_refInputBlob->buffer());
+
+        const auto& inputTensorDesc = blob->getTensorDesc();
+        const auto& inputDims = inputTensorDesc.getDims();
+
+        if (inputDims.size() == 4) {
+            internal::nhwc_to_nchw(blob->buffer().as<ie_fp16*>(),
+                                   _refInputBlob->buffer().as<float*>(),
+                                   inputDims[0],
+                                   inputDims[1],
+                                   inputDims[2],
+                                   inputDims[3]
+                                   );
+        } else {
+            int num_elements = 1;
+            for (size_t i = 0; i < inputDims.size(); i++) {
+                num_elements *= inputDims[i];
+            }
+
+            for (int i = 0; i < num_elements; i++) {
+                blob_ptr_dst[i] = PrecisionUtils::f16tof32(blob_ptr_src[i]);
+            }
+        }
+
+        InputBlobsF32.push_back(_refInputBlob);
+    }
+
+    internal::refROIFeatureExtractor(InputBlobsF32, outputs_ref,
+                                     pyramid_scales,
+                                     sampling_ratio,
+                                     pooled_width,
+                                     pooled_height);
+
+    const auto& outputTensorDesc = output->getTensorDesc();
+    const auto& outputDims = outputTensorDesc.getDims();
+
+    internal::nchw_to_nhwc(_refOutputBlob_temp->buffer().as<float*>(),
+                           output->buffer().as<float*>(),
+                           outputDims[0],
+                           outputDims[1],
+                           outputDims[2],
+                           outputDims[3]
+                          );
+}
+
+void ref_convert(const InferenceEngine::Blob::Ptr &src,
+                 InferenceEngine::Blob::Ptr &dst) {
+    ASSERT_NE(src, nullptr);
+    ASSERT_NE(dst, nullptr);
+    auto srcPrecision = src->getTensorDesc().getPrecision();
+    auto dstPrecision = dst->getTensorDesc().getPrecision();
+
+    if (srcPrecision == dstPrecision) {
+        std::copy(src->cbuffer().as<uint8_t*>(),
+                  src->cbuffer().as<uint8_t*>() + src->byteSize(),
+                  dst->buffer().as<uint8_t*>());
+        return;
+    }
+
+    for (size_t i = 0; i < dst->size(); i++) {
+        if (srcPrecision == Precision::U8 && dstPrecision == Precision::FP16) {
+            dst->buffer().as<ie_fp16 *>()[i] = PrecisionUtils::f32tof16(
+                static_cast<float >(src->cbuffer().as<uint8_t *>()[i]));
+        } else if (srcPrecision == Precision::FP32 && dstPrecision == Precision::FP16) {
+            dst->buffer().as<ie_fp16 *>()[i] = PrecisionUtils::f32tof16(
+                src->cbuffer().as<float *>()[i]);
+        } else if (srcPrecision == Precision::FP16 && dstPrecision == Precision::FP32) {
+            dst->buffer().as<float *>()[i] = PrecisionUtils::f16tof32(
+                src->cbuffer().as<ie_fp16 *>()[i]);
+        } else if (srcPrecision == Precision::FP16 && dstPrecision == Precision::I32) {
+            dst->buffer().as<int32_t *>()[i] = static_cast<int32_t >(PrecisionUtils::f16tof32(
+                src->cbuffer().as<ie_fp16 *>()[i]));
+        } else if (srcPrecision == Precision::I32 && dstPrecision == Precision::FP16) {
+            dst->buffer().as<ie_fp16 *>()[i] = PrecisionUtils::f32tof16(
+                static_cast<float >(src->cbuffer().as<int32_t *>()[i]));
+        } else {
+            THROW_IE_EXCEPTION << "Unsupported input or output precision";
+        }
+    }
+}
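+
+// A minimal usage sketch for ref_convert (illustration only; the blob names
+// are hypothetical):
+//
+//   // src: FP32 blob, dst: FP16 blob with the same dims -- each element goes
+//   // through PrecisionUtils::f32tof16; same-precision pairs fall back to a
+//   // plain byte copy.
+//   ref_convert(srcFp32, dstFp16);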
+
+void ref_convert_wrap(const InferenceEngine::Blob::Ptr src,
+                      InferenceEngine::Blob::Ptr dst,
+                      const ParamsStruct& params) {
+    ref_convert(src, dst);
+}
+
+void ref_Split(const InferenceEngine::Blob::Ptr src,
+               const InferenceEngine::BlobMap& dst,
+               const int axis)
+{
+    const ie_fp16* srcPtr = src->buffer().as<const ie_fp16*>();
+    const SizeVector inputDims = src->getTensorDesc().getDims();
+
+    const size_t prefixSize = std::accumulate(inputDims.cbegin() + axis + 1, inputDims.cend(), 1, std::multiplies<size_t>());
+    const size_t suffixSize = std::accumulate(inputDims.cbegin(), inputDims.cbegin() + axis, 1, std::multiplies<size_t>());
+    const size_t inputAxisDimSize = inputDims[axis];
+
+    size_t axisElemNum = 0;
+    for (const auto& item : dst) {
+        ie_fp16* dstPtr = item.second->buffer().as<ie_fp16*>();
+        const SizeVector outputDims = item.second->getTensorDesc().getDims();
+        const size_t axisDimSize = outputDims[axis];
+
+        for (size_t suffixIdx = 0; suffixIdx < suffixSize; ++suffixIdx) {
+            const size_t srcPlaneOffset = suffixIdx * inputAxisDimSize * prefixSize;
+            const size_t dstPlaneOffset = suffixIdx * axisDimSize * prefixSize;
+            for (size_t axisIdx = 0; axisIdx < axisDimSize; ++axisIdx) {
+                const size_t srcVecOffset = (axisIdx + axisElemNum) * prefixSize;
+                const size_t dstVecOffset = axisIdx * prefixSize;
+                for (size_t prefixIdx = 0; prefixIdx < prefixSize; ++prefixIdx) {
+                    dstPtr[dstPlaneOffset + dstVecOffset + prefixIdx] = srcPtr[srcPlaneOffset + srcVecOffset + prefixIdx];
+                }
+            }
+        }
+        axisElemNum += axisDimSize;
+    }
+}
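+
+// Worked example of the offset arithmetic above (illustration only): splitting
+// a {2, 6, 4} input along axis = 1 into two {2, 3, 4} outputs gives
+// suffixSize = 2 and prefixSize = 4; the second output copies axis slices
+// 3..5 of the input, since axisElemNum = 3 after the first output is filled.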
+
+void ref_ExpPriorGridGenerator(std::vector<InferenceEngine::Blob::Ptr> inputs,
+                               std::vector<InferenceEngine::Blob::Ptr> output,
+                               int grid_w,
+                               int grid_h,
+                               float stride_w,
+                               float stride_h) {
+
+    const int INPUT_PRIORS = 0;
+    const int INPUT_FEATUREMAP = 1;
+    const int INPUT_IMAGE = 2;
+
+    const int num_priors_ = inputs[INPUT_PRIORS]->getTensorDesc().getDims()[0];
+    assert(inputs[INPUT_PRIORS]->getTensorDesc().getDims()[1] == 4);
+
+    const int layer_width = grid_w ? grid_w : inputs[INPUT_FEATUREMAP]->getTensorDesc().getDims()[3];
+    const int layer_height = grid_h ? grid_h : inputs[INPUT_FEATUREMAP]->getTensorDesc().getDims()[2];
+    const float step_w = stride_w ? stride_w : static_cast<float>(inputs[INPUT_IMAGE]->getTensorDesc().getDims()[3]) / layer_width;
+    const float step_h = stride_h ? stride_h : static_cast<float>(inputs[INPUT_IMAGE]->getTensorDesc().getDims()[2]) / layer_height;
+
+    const auto *bottom_data_0 = inputs[INPUT_PRIORS]->buffer().as<const ie_fp16 *>();
+    auto *top_data_0 = output[0]->buffer().as<ie_fp16 *>();
+
+    using namespace PrecisionUtils;
+
+    for (int h = 0; h < layer_height; ++h) {
+        for (int w = 0; w < layer_width; ++w) {
+            for (int s = 0; s < num_priors_; ++s) {
+                top_data_0[0] = f32tof16(f16tof32(bottom_data_0[4 * s + 0]) + step_w * (w + 0.5f));
+                top_data_0[1] = f32tof16(f16tof32(bottom_data_0[4 * s + 1]) + step_h * (h + 0.5f));
+                top_data_0[2] = f32tof16(f16tof32(bottom_data_0[4 * s + 2]) + step_w * (w + 0.5f));
+                top_data_0[3] = f32tof16(f16tof32(bottom_data_0[4 * s + 3]) + step_h * (h + 0.5f));
+                top_data_0 += 4;
+            }
+        }
+    }
+}
+
+namespace gen_proposals_impl {
+    struct Indexer {
+      const std::vector<int> dims_;
+      int total_{1};
+
+      explicit Indexer(const std::vector<int>& dims) : dims_(dims) {
+          total_ = 1;
+          for (size_t i = 0; i < dims_.size(); ++i) {
+              total_ *= dims_[i];
+          }
+      }
+
+      int operator()(const std::vector<int>& idx) const {
+          int flat_idx = 0;
+          assert(idx.size() == dims_.size());
+          for (size_t i = 0; i < dims_.size(); ++i) {
+              assert(0 <= idx[i] && idx[i] < dims_[i]);
+              flat_idx = flat_idx * dims_[i] + idx[i];
+          }
+          assert(flat_idx < total_);
+          return flat_idx;
+      }
+    };
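
A short usage sketch for the Indexer above (assuming this translation unit); the asserts spell out the row-major linearization it performs:

    void indexer_usage_example() {
        Indexer idx({2, 3, 4});                        // a 2x3x4 tensor, total_ == 24
        assert(idx({0, 0, 0}) == 0);                   // first element
        assert(idx({0, 0, 3}) == 3);                   // innermost dimension is contiguous
        assert(idx({1, 2, 3}) == 1 * 12 + 2 * 4 + 3);  // == 23 == total_ - 1
    }
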
+
+
+    void refine_anchors(const ie_fp16* deltas, const ie_fp16* scores, const ie_fp16* anchors,
+                        float* proposals, const int anchors_num, const int bottom_H,
+                        const int bottom_W, const float img_H, const float img_W,
+                        const float min_box_H, const float min_box_W,
+                        const float max_delta_log_wh,
+                        float coordinates_offset) {
+        Indexer delta_idx({anchors_num, 4, bottom_H, bottom_W});
+        Indexer score_idx({anchors_num, 1, bottom_H, bottom_W});
+        Indexer proposal_idx({bottom_H, bottom_W, anchors_num, 5});
+        Indexer anchor_idx({bottom_H, bottom_W, anchors_num, 4});
+
+        for (int h = 0; h < bottom_H; ++h) {
+            for (int w = 0; w < bottom_W; ++w) {
+                for (int anchor = 0; anchor < anchors_num; ++anchor) {
+                    float x0 = PrecisionUtils::f16tof32(anchors[anchor_idx({h, w, anchor, 0})]);
+                    float y0 = PrecisionUtils::f16tof32(anchors[anchor_idx({h, w, anchor, 1})]);
+                    float x1 = PrecisionUtils::f16tof32(anchors[anchor_idx({h, w, anchor, 2})]);
+                    float y1 = PrecisionUtils::f16tof32(anchors[anchor_idx({h, w, anchor, 3})]);
+
+                    const float dx = PrecisionUtils::f16tof32(deltas[delta_idx({anchor, 0, h, w})]);
+                    const float dy = PrecisionUtils::f16tof32(deltas[delta_idx({anchor, 1, h, w})]);
+                    const float d_log_w = PrecisionUtils::f16tof32(deltas[delta_idx({anchor, 2, h, w})]);
+                    const float d_log_h = PrecisionUtils::f16tof32(deltas[delta_idx({anchor, 3, h, w})]);
+
+                    const float score = PrecisionUtils::f16tof32(scores[score_idx({anchor, 0, h, w})]);
+
+                    // width & height of box
+                    const float ww = x1 - x0 + coordinates_offset;
+                    const float hh = y1 - y0 + coordinates_offset;
+                    // center location of box
+                    const float ctr_x = x0 + 0.5f * ww;
+                    const float ctr_y = y0 + 0.5f * hh;
+
+                    // new center location according to deltas (dx, dy)
+                    const float pred_ctr_x = dx * ww + ctr_x;
+                    const float pred_ctr_y = dy * hh + ctr_y;
+                    // new width & height according to deltas d(log w), d(log h)
+                    const float pred_w = std::exp(std::min(d_log_w, max_delta_log_wh)) * ww;
+                    const float pred_h = std::exp(std::min(d_log_h, max_delta_log_wh)) * hh;
+
+                    // update upper-left corner location
+                    x0 = pred_ctr_x - 0.5f * pred_w;
+                    y0 = pred_ctr_y - 0.5f * pred_h;
+                    // update lower-right corner location
+                    x1 = pred_ctr_x + 0.5f * pred_w - coordinates_offset;
+                    y1 = pred_ctr_y + 0.5f * pred_h - coordinates_offset;
+
+                    // adjust new corner locations to be within the image region,
+                    x0 = std::max<float>(0.0f, std::min<float>(x0, img_W - coordinates_offset));
+                    y0 = std::max<float>(0.0f, std::min<float>(y0, img_H - coordinates_offset));
+                    x1 = std::max<float>(0.0f, std::min<float>(x1, img_W - coordinates_offset));
+                    y1 = std::max<float>(0.0f, std::min<float>(y1, img_H - coordinates_offset));
+
+                    // recompute new width & height
+                    const float box_w = x1 - x0 + coordinates_offset;
+                    const float box_h = y1 - y0 + coordinates_offset;
+
+                    proposals[proposal_idx({h, w, anchor, 0})] = x0;
+                    proposals[proposal_idx({h, w, anchor, 1})] = y0;
+                    proposals[proposal_idx({h, w, anchor, 2})] = x1;
+                    proposals[proposal_idx({h, w, anchor, 3})] = y1;
+                    proposals[proposal_idx({h, w, anchor, 4})] = (min_box_W <= box_w) * (min_box_H <= box_h) * score;
+                }
+            }
+        }
+    }
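
The loop above applies the standard box-regression decoding. A hand-checkable float-only sketch with hypothetical numbers (fp16 conversion and image clipping omitted):

    #include <cmath>
    #include <cstdio>

    int main() {
        const float x0 = 0.f, y0 = 0.f, x1 = 10.f, y1 = 10.f;        // anchor corners
        const float dx = 0.1f, dy = -0.1f;                           // center deltas
        const float d_log_w = 0.f, d_log_h = std::log(2.f);          // size deltas
        const float offset = 1.0f;                                   // coordinates_offset
        const float ww = x1 - x0 + offset, hh = y1 - y0 + offset;    // 11 x 11
        const float ctr_x = x0 + 0.5f * ww, ctr_y = y0 + 0.5f * hh;  // (5.5, 5.5)
        const float pred_w = std::exp(d_log_w) * ww;                 // 11
        const float pred_h = std::exp(d_log_h) * hh;                 // 22
        const float pred_ctr_x = dx * ww + ctr_x;                    // 6.6
        const float pred_ctr_y = dy * hh + ctr_y;                    // 4.4
        std::printf("box: %.1f %.1f %.1f %.1f\n",
                    pred_ctr_x - 0.5f * pred_w,                      // 1.1
                    pred_ctr_y - 0.5f * pred_h,                      // -6.6, clipped to 0 above
                    pred_ctr_x + 0.5f * pred_w - offset,             // 11.1
                    pred_ctr_y + 0.5f * pred_h - offset);            // 14.4
        return 0;
    }
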
+
+    void unpack_boxes(const float* p_proposals, ie_fp16* unpacked_boxes, int pre_nms_topn) {
+        for (int i = 0; i < pre_nms_topn; ++i) {
+            unpacked_boxes[0*pre_nms_topn + i] = PrecisionUtils::f32tof16(p_proposals[5*i + 0]);
+            unpacked_boxes[1*pre_nms_topn + i] = PrecisionUtils::f32tof16(p_proposals[5*i + 1]);
+            unpacked_boxes[2*pre_nms_topn + i] = PrecisionUtils::f32tof16(p_proposals[5*i + 2]);
+            unpacked_boxes[3*pre_nms_topn + i] = PrecisionUtils::f32tof16(p_proposals[5*i + 3]);
+            unpacked_boxes[4*pre_nms_topn + i] = PrecisionUtils::f32tof16(p_proposals[5*i + 4]);
+        }
+    }
+
+    void nms_cpu(const int num_boxes, int is_dead[], const ie_fp16* boxes,
+                 int index_out[], int* const num_out, const int base_index,
+                 const float nms_thresh, const int max_num_out,
+                 float coordinates_offset) {
+        const int num_proposals = num_boxes;
+        int count = 0;
+
+        const ie_fp16* x0 = boxes + 0 * num_proposals;
+        const ie_fp16* y0 = boxes + 1 * num_proposals;
+        const ie_fp16* x1 = boxes + 2 * num_proposals;
+        const ie_fp16* y1 = boxes + 3 * num_proposals;
+
+        std::fill_n(is_dead, num_boxes, 0);
+
+        for (int box = 0; box < num_boxes; ++box) {
+            if (is_dead[box])
+                continue;
+
+            index_out[count++] = base_index + box;
+            if (count == max_num_out)
+                break;
+
+            int tail = box + 1;
+
+            for (; tail < num_boxes; ++tail) {
+                float res = 0.0f;
+
+                const float x0i = PrecisionUtils::f16tof32(x0[box]);
+                const float y0i = PrecisionUtils::f16tof32(y0[box]);
+                const float x1i = PrecisionUtils::f16tof32(x1[box]);
+                const float y1i = PrecisionUtils::f16tof32(y1[box]);
+
+                const float x0j = PrecisionUtils::f16tof32(x0[tail]);
+                const float y0j = PrecisionUtils::f16tof32(y0[tail]);
+                const float x1j = PrecisionUtils::f16tof32(x1[tail]);
+                const float y1j = PrecisionUtils::f16tof32(y1[tail]);
+
+                if (x0i <= x1j && y0i <= y1j && x0j <= x1i && y0j <= y1i) {
+                    // overlapped region (= box)
+                    const float x0 = std::max<float>(x0i, x0j);
+                    const float y0 = std::max<float>(y0i, y0j);
+                    const float x1 = std::min<float>(x1i, x1j);
+                    const float y1 = std::min<float>(y1i, y1j);
+
+                    // intersection area
+                    const float width  = std::max<float>(0.0f,  x1 - x0 + coordinates_offset);
+                    const float height = std::max<float>(0.0f,  y1 - y0 + coordinates_offset);
+                    const float area   = width * height;
+
+                    // area of A, B
+                    const float A_area = (x1i - x0i + coordinates_offset) * (y1i - y0i + coordinates_offset);
+                    const float B_area = (x1j - x0j + coordinates_offset) * (y1j - y0j + coordinates_offset);
+
+                    // IoU
+                    res = area / (A_area + B_area - area);
+                }
+
+                if (nms_thresh < res)
+                    is_dead[tail] = 1;
+            }
+        }
+
+        *num_out = count;
+    }
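
The rejection test above is a plain IoU threshold. A self-contained float sketch of the same intersection-over-union computation, assuming coordinates_offset == 0:

    #include <algorithm>
    #include <cassert>

    static float iou(float ax0, float ay0, float ax1, float ay1,
                     float bx0, float by0, float bx1, float by1) {
        const float x0 = std::max(ax0, bx0), y0 = std::max(ay0, by0);
        const float x1 = std::min(ax1, bx1), y1 = std::min(ay1, by1);
        const float inter = std::max(0.0f, x1 - x0) * std::max(0.0f, y1 - y0);
        const float areaA = (ax1 - ax0) * (ay1 - ay0);
        const float areaB = (bx1 - bx0) * (by1 - by0);
        return inter / (areaA + areaB - inter);
    }

    int main() {
        // Two 4x4 boxes overlapping in a 2x2 region: IoU = 4 / (16 + 16 - 4) = 1/7.
        const float r = iou(0, 0, 4, 4, 2, 2, 6, 6);
        assert(r > 0.142f && r < 0.143f);
        return 0;
    }
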
+
+
+    void fill_output_blobs(const ie_fp16* proposals, const int* roi_indices,
+                        ie_fp16* rois, ie_fp16* scores, const int num_proposals,
+                        const int num_rois, const int post_nms_topn) {
+        const ie_fp16 *src_x0 = proposals + 0 * num_proposals;
+        const ie_fp16 *src_y0 = proposals + 1 * num_proposals;
+        const ie_fp16 *src_x1 = proposals + 2 * num_proposals;
+        const ie_fp16 *src_y1 = proposals + 3 * num_proposals;
+        const ie_fp16 *src_score = proposals + 4 * num_proposals;
+
+        for (int i = 0; i < num_rois; ++i) {
+            int index = roi_indices[i];
+            rois[i * 4 + 0] = src_x0[index];
+            rois[i * 4 + 1] = src_y0[index];
+            rois[i * 4 + 2] = src_x1[index];
+            rois[i * 4 + 3] = src_y1[index];
+            scores[i] = src_score[index];
+        }
+
+        if (num_rois < post_nms_topn) {
+            for (int i = 4 * num_rois; i < 4 * post_nms_topn; i++) {
+                rois[i] = 0;
+            }
+            for (int i = num_rois; i < post_nms_topn; i++) {
+                scores[i] = 0;
+            }
+        }
+    }
+} // namespace gen_proposals_impl
+
+void ref_ExpGenerateProposals(std::vector<InferenceEngine::Blob::Ptr> inputs,
+                              std::vector<InferenceEngine::Blob::Ptr> outputs,
+                              float min_size_,
+                              float nms_threshold_,
+                              int post_nms_topn_,
+                              int pre_nms_topn_) {
+
+    const int INPUT_IM_INFO = 0;
+    const int INPUT_ANCHORS = 1;
+    const int INPUT_DELTAS = 2;
+    const int INPUT_SCORES = 3;
+
+    const int OUTPUT_ROIS = 0;
+    const int OUTPUT_SCORES = 1;
+
+    const auto* p_deltas_item = inputs[INPUT_DELTAS]->buffer().as<const ie_fp16*>();
+    const auto* p_scores_item = inputs[INPUT_SCORES]->buffer().as<const ie_fp16*>();
+    const auto* p_anchors_item = inputs[INPUT_ANCHORS]->buffer().as<const ie_fp16*>();
+    const auto* p_img_info_cpu = inputs[INPUT_IM_INFO]->buffer().as<const ie_fp16*>();
+
+    auto* p_roi_item = outputs[OUTPUT_ROIS]->buffer().as<ie_fp16*>();
+    auto* p_roi_score_item = outputs[OUTPUT_SCORES]->buffer().as<ie_fp16*>();
+
+    size_t img_info_size = 1;
+    for (size_t i = 0; i < inputs[INPUT_IM_INFO]->getTensorDesc().getDims().size(); i++) {
+        img_info_size *= inputs[INPUT_IM_INFO]->getTensorDesc().getDims()[i];
+    }
+
+    const int anchors_num = inputs[INPUT_SCORES]->getTensorDesc().getDims()[0];
+
+    // bottom shape: (num_anchors) x H x W
+    const int bottom_H = inputs[INPUT_DELTAS]->getTensorDesc().getDims()[1];
+    const int bottom_W = inputs[INPUT_DELTAS]->getTensorDesc().getDims()[2];
+
+    // input image height & width
+    const float img_H = PrecisionUtils::f16tof32(p_img_info_cpu[0]);
+    const float img_W = PrecisionUtils::f16tof32(p_img_info_cpu[1]);
+
+    // minimum box width & height
+    const float min_box_H = min_size_;
+    const float min_box_W = min_size_;
+
+    // number of all proposals = num_anchors * H * W
+    const int num_proposals = anchors_num * bottom_H * bottom_W;
+
+    // number of top-n proposals before NMS
+    const int pre_nms_topn = std::min<int>(num_proposals, pre_nms_topn_);
+
+    // number of final RoIs
+    int num_rois = 0;
+
+    // enumerate all proposals
+    //   num_proposals = num_anchors * H * W
+    //   (x1, y1, x2, y2, score) for each proposal
+    // NOTE: for bottom, only foreground scores are passed
+    struct ProposalBox {
+        float x0;
+        float y0;
+        float x1;
+        float y1;
+        float score;
+    };
+    std::vector<ProposalBox> proposals_(num_proposals);
+    std::vector<ie_fp16> unpacked_boxes(5 * pre_nms_topn);
+    std::vector<int> is_dead(pre_nms_topn);
+    std::vector<int> roi_indices_(post_nms_topn_);
+
+    // Execute
+    int batch_size = 1;  // inputs[INPUT_DELTAS]->getTensorDesc().getDims()[0];
+    for (int n = 0; n < batch_size; ++n) {
+        gen_proposals_impl::refine_anchors(p_deltas_item, p_scores_item, p_anchors_item,
+                       reinterpret_cast<float *>(&proposals_[0]), anchors_num, bottom_H,
+                       bottom_W, img_H, img_W,
+                       min_box_H, min_box_W,
+                       static_cast<float>(std::log(1000. / 16.)),
+                       1.0f);
+        std::stable_sort(proposals_.begin(), proposals_.end(),
+                         [](const ProposalBox& struct1, const ProposalBox& struct2) {
+                             return (struct1.score > struct2.score);
+                         });
+
+        gen_proposals_impl::unpack_boxes(reinterpret_cast<float *>(&proposals_[0]),
+                                         &unpacked_boxes[0], pre_nms_topn);
+        gen_proposals_impl::nms_cpu(pre_nms_topn, &is_dead[0], &unpacked_boxes[0],
+                                    &roi_indices_[0], &num_rois, 0,
+                                    nms_threshold_, post_nms_topn_, 0.0f);
+        gen_proposals_impl::fill_output_blobs(&unpacked_boxes[0], &roi_indices_[0],
+                                              p_roi_item, p_roi_score_item,
+                                              pre_nms_topn, num_rois, post_nms_topn_);
+    }
+}
+
+void ref_ExpTopKROIs(std::vector<InferenceEngine::Blob::Ptr> inputs,
+                     std::vector<InferenceEngine::Blob::Ptr> outputs,
+                     int max_rois) {
+    const int INPUT_ROIS   = 0;
+    const int INPUT_PROBS = 1;
+
+    const int OUTPUT_ROIS = 0;
+
+    const auto* p_iroi_item  = InferenceEngine::as<MemoryBlob>(inputs[INPUT_ROIS])->rmap().as<const ie_fp16*>();
+    const auto* p_probs_item = InferenceEngine::as<MemoryBlob>(inputs[INPUT_PROBS])->rmap().as<const ie_fp16*>();
+
+    auto* p_oroi_item = InferenceEngine::as<MemoryBlob>(outputs[OUTPUT_ROIS])->rwmap().as<ie_fp16*>();
+
+    const int input_rois_num = inputs[INPUT_ROIS]->getTensorDesc().getDims()[0];
+    const int top_rois_num = std::min<int>(max_rois, input_rois_num);
+
+    std::vector<size_t> idx(input_rois_num);
+    std::iota(idx.begin(), idx.end(), 0);
+
+    std::stable_sort(idx.begin(), idx.end(),
+                     [&p_probs_item](size_t i1, size_t i2) {
+                         return PrecisionUtils::f16tof32(p_probs_item[i1]) > PrecisionUtils::f16tof32(p_probs_item[i2]);
+                     });
+
+    for (int i = 0; i < top_rois_num; ++i) {
+        std::copy_n(p_iroi_item + 4 * idx[i], 4, p_oroi_item + 4 * i);
+    }
+
+    if (top_rois_num < max_rois) {
+        std::fill_n(p_oroi_item + top_rois_num * 4, 4 * (max_rois - top_rois_num), 0);
+    }
+}
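
The iota/stable_sort pair above is the usual argsort idiom: indices are sorted by score so the ROI payload itself never moves. A standalone sketch with made-up scores:

    #include <algorithm>
    #include <cassert>
    #include <numeric>
    #include <vector>

    int main() {
        const std::vector<float> probs = {0.2f, 0.9f, 0.4f};  // hypothetical scores
        std::vector<size_t> idx(probs.size());
        std::iota(idx.begin(), idx.end(), 0);                 // 0, 1, 2
        std::stable_sort(idx.begin(), idx.end(),
                         [&probs](size_t a, size_t b) { return probs[a] > probs[b]; });
        assert(idx[0] == 1 && idx[1] == 2 && idx[2] == 0);    // descending by score
        return 0;
    }
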
+
+void ref_nonZero(const InferenceEngine::Blob::Ptr& src,
+                 InferenceEngine::Blob::Ptr& outIndices,
+                 InferenceEngine::Blob::Ptr& outDims) {
+    auto outIndicesPtr = InferenceEngine::as<MemoryBlob>(outIndices)->rwmap().as<int32_t*>();
+    auto outDimsPtr = InferenceEngine::as<MemoryBlob>(outDims)->rwmap().as<int32_t*>();
+
+    const auto srcTotalDimSize = src->size();
+
+    const auto getCoord = [&src](int offset){
+        std::vector<size_t> coord;
+        for (const size_t& stride : src->getTensorDesc().getBlockingDesc().getStrides()) {
+            coord.insert(coord.begin(), offset / stride);
+            offset %= stride;
+        }
+        return coord;
+    };
+
+    const auto addCoordToIndices = [&outIndicesPtr, &srcTotalDimSize](const std::vector<size_t> &coord,
+                                                                      const size_t numNonZeros) {
+        for (size_t j = 0; j < coord.size(); ++j) {
+            outIndicesPtr[j * srcTotalDimSize + numNonZeros] = coord[j];
+        }
+    };
+
+    const auto isNonZero = [&src](const size_t i) {
+        if (src->getTensorDesc().getPrecision() == InferenceEngine::Precision::I32) {
+            const auto srcPtr = InferenceEngine::as<MemoryBlob>(src)->rmap().as<const int32_t*>();
+            return srcPtr[i] != 0;
+        } else if (src->getTensorDesc().getPrecision() == InferenceEngine::Precision::U8) {
+            const auto srcPtr = InferenceEngine::as<MemoryBlob>(src)->rmap().as<const uint8_t*>();
+            return srcPtr[i] != 0;
+        } else {  // FP16
+            const auto srcPtr = InferenceEngine::as<MemoryBlob>(src)->rmap().as<const ie_fp16*>();
+            const auto zero = PrecisionUtils::f32tof16(0.f);
+            return srcPtr[i] != zero;
+        }
+    };
+
+    size_t numNonZeros = 0;
+    for (size_t i = 0; i < srcTotalDimSize; ++i) {
+        if (isNonZero(i)) {
+            addCoordToIndices(getCoord(i), numNonZeros++);
+        }
+    }
+
+    outDimsPtr[0] = numNonZeros;
+    outDimsPtr[1] = src->getTensorDesc().getDims().size();
+}
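
getCoord above recovers per-dimension indices from a flat offset by repeated division by the tensor strides. A standalone sketch with dense row-major strides (the coordinate ordering in the reference depends on the order getStrides() returns):

    #include <cassert>
    #include <vector>

    int main() {
        const std::vector<size_t> strides = {12, 4, 1};  // dense strides of shape {2, 3, 4}
        size_t offset = 23;                              // flat index of element {1, 2, 3}
        std::vector<size_t> coord;
        for (const size_t stride : strides) {
            coord.push_back(offset / stride);            // index along this dimension
            offset %= stride;                            // remainder for the next one
        }
        assert((coord == std::vector<size_t>{1, 2, 3}));
        return 0;
    }
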
diff --git a/inference-engine/tests_deprecated/functional/vpu/vpu_base/myriad_layers_reference_functions.hpp b/inference-engine/tests_deprecated/functional/vpu/vpu_base/myriad_layers_reference_functions.hpp
new file mode 100644 (file)
index 0000000..edbbe17
--- /dev/null
@@ -0,0 +1,366 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <precision_utils.h>
+#include <ie_blob.h>
+
+#include <iomanip> // std::setw
+
+typedef std::map<std::string, std::string> ParamsStruct;
+
+typedef float (*eltwise_kernel)(float a, float b, float c);
+
+struct param_size {
+    size_t x;
+    size_t y;
+};
+
+struct param_size_3d {
+    size_t x;
+    size_t y;
+    size_t z;
+};
+
+struct paddings4 {
+    size_t left;
+    size_t top;
+    size_t right;
+    size_t bottom;
+};
+
+struct tensor_test_params {
+    size_t n;
+    size_t c;
+    size_t h;
+    size_t w;
+    friend std::ostream& operator<<(std::ostream& os, tensor_test_params const& tst)
+    {
+        return os << "tensor (" << tst.n
+                  << ", " << tst.c
+                  << ", " << tst.h
+                  << ", " << tst.w
+                  << ")";
+    }
+    InferenceEngine::SizeVector asVector() const { return {n,c,h,w};}
+};
+
+struct tensor_test_params_3d {
+    size_t n;
+    size_t c;
+    size_t d;
+    size_t h;
+    size_t w;
+    friend std::ostream& operator<<(std::ostream& os, tensor_test_params_3d const& tst)
+    {
+        return os << "tensor (" << tst.n
+                  << ", " << tst.c
+                  << ", " << tst.d
+                  << ", " << tst.h
+                  << ", " << tst.w
+                  << ")";
+    }
+    InferenceEngine::SizeVector asVector() const { return {n,c,d,h,w};}
+};
+
+/* Wrappers to generate subnets:
+   a reference function signature should have the following structure:
+    input blob,
+    output blob,
+    pointer to weights (if they are required),
+    number of weights (if the weights pointer is set),
+    number of biases (if the weights pointer is set),
+    other parameters
+*/
+static inline void PrintTo(const param_size& sz, std::ostream* os) {
+    *os << "{" << std::setw(2) << sz.x << ", " << std::setw(2) << sz.y << "}";
+}
+
+template <typename DataType>
+class IReduceKernel
+{
+public:
+    virtual void init() = 0;
+    virtual void accumulate(const DataType& val) = 0;
+    virtual DataType result() const = 0;
+    virtual DataType copy(const DataType& val) const = 0;
+};
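
A minimal concrete kernel for this interface, as a hypothetical float sum-reduction sketch (not part of the patch); the interface suggests ref_reduce below calls init/accumulate/result once per output element:

    class SumReduceKernel : public IReduceKernel<float> {
    public:
        void init() override { _acc = 0.0f; }                        // reset per output element
        void accumulate(const float& val) override { _acc += val; }  // fold one input element
        float result() const override { return _acc; }
        float copy(const float& val) const override { return val; }  // pass-through case
    private:
        float _acc = 0.0f;
    };
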
+
+void ref_innerproduct_wrap(const InferenceEngine::Blob::Ptr src,
+                      InferenceEngine::Blob::Ptr dst,
+                      const uint16_t *weights,
+                      size_t weightsSize,
+                      const uint16_t *biases,
+                      size_t biasSize,
+                      const ParamsStruct& params);
+
+void ref_ReLU_wrap(const InferenceEngine::Blob::Ptr inTensor,
+                   InferenceEngine::Blob::Ptr outTensor,
+                   const ParamsStruct& params);
+
+void ref_Clamp_wrap(const InferenceEngine::Blob::Ptr inTensor,
+                   InferenceEngine::Blob::Ptr outTensor,
+                   const ParamsStruct& params);
+
+void ref_pooling_wrap(const InferenceEngine::Blob::Ptr src,
+                    InferenceEngine::Blob::Ptr dst,
+                    const ParamsStruct& params);
+
+void ref_copy_wrap(const InferenceEngine::Blob::Ptr src,
+                   InferenceEngine::Blob::Ptr dst,
+                   const ParamsStruct& params);
+
+void ref_convolution_wrap(const InferenceEngine::Blob::Ptr src,
+                          InferenceEngine::Blob::Ptr dst,
+                          const uint16_t *weights,
+                          size_t weightsSize,
+                          const uint16_t *biases,
+                          size_t biasSize,
+                          const ParamsStruct& params);
+
+void ref_deconvolution_wrap(const InferenceEngine::Blob::Ptr src,
+                          InferenceEngine::Blob::Ptr dst,
+                          const uint16_t *weights,
+                          size_t weightsSize,
+                          const uint16_t *biases,
+                          size_t biasSize,
+                          const ParamsStruct& params);
+
+void ref_tanh_wrap(const InferenceEngine::Blob::Ptr inTensor,
+                   InferenceEngine::Blob::Ptr outTensor,
+                   const ParamsStruct& params);
+
+void ref_sigmoid_wrap(const InferenceEngine::Blob::Ptr src,
+                      InferenceEngine::Blob::Ptr dst,
+                      const ParamsStruct& params);
+
+void ref_PReLU_wrap(const InferenceEngine::Blob::Ptr src,
+                    InferenceEngine::Blob::Ptr dst,
+                    const uint16_t *weights,
+                    size_t weightsSize,
+                    const uint16_t *biases,
+                    size_t biasSize,
+                    const ParamsStruct& params);
+
+void ref_RegionYolo_wrap(InferenceEngine::Blob::Ptr inTensor,
+              InferenceEngine::Blob::Ptr outTensor,
+              const ParamsStruct& params);
+
+void ref_reshape_wrap(const InferenceEngine::Blob::Ptr src,
+                      InferenceEngine::Blob::Ptr dst,
+                      const ParamsStruct& params);
+
+void ref_permute_wrap(const InferenceEngine::Blob::Ptr src,
+                 InferenceEngine::Blob::Ptr dst,
+                 const ParamsStruct& params);
+
+void ref_log_wrap(const InferenceEngine::Blob::Ptr& src,
+                  InferenceEngine::Blob::Ptr& dst,
+                  const ParamsStruct& params);
+
+void ref_exp_wrap(const InferenceEngine::Blob::Ptr& src,
+                  InferenceEngine::Blob::Ptr& dst,
+                  const ParamsStruct& params);
+
+void ref_convert_wrap(const InferenceEngine::Blob::Ptr src,
+                      InferenceEngine::Blob::Ptr dst,
+                      const ParamsStruct& params);
+
+/* Original functions*/
+
+void ref_innerproduct(const InferenceEngine::Blob::Ptr src,
+                      InferenceEngine::Blob::Ptr dst,
+                      const uint16_t *weights,
+                      size_t weightsSize,
+                      const uint16_t *biases,
+                      size_t biasSize,
+                      uint32_t OC);
+
+void ref_convolution(const InferenceEngine::Blob::Ptr src,
+                     InferenceEngine::Blob::Ptr dst,
+                     const InferenceEngine::ie_fp16* weights_data,
+                     const InferenceEngine::ie_fp16* bias_data,
+                     param_size kernel,
+                     param_size stride,
+                     param_size pad,
+                     size_t group,
+                     param_size dilation = {1, 1});
+
+void ref_maxPooling(const InferenceEngine::Blob::Ptr src,
+                    InferenceEngine::Blob::Ptr dst,
+                    param_size kernel,
+                    param_size stride,
+                    param_size pad,
+                    bool exclude_pad = false);
+
+void ref_avgPooling(const InferenceEngine::Blob::Ptr src,
+                    InferenceEngine::Blob::Ptr dst,
+                    param_size kernel,
+                    param_size stride,
+                    param_size pad,
+                    bool exclude_pad = false);
+
+void ref_ReLU(const InferenceEngine::Blob::Ptr inTensor,
+              InferenceEngine::Blob::Ptr outTensor,
+              float negative_slope);
+
+void ref_copy(const InferenceEngine::Blob::Ptr src,
+              InferenceEngine::Blob::Ptr dst);
+
+void ref_tanh(const InferenceEngine::Blob::Ptr src,
+              InferenceEngine::Blob::Ptr dst);
+
+void ref_sigmoid(const InferenceEngine::Blob::Ptr src,
+                 InferenceEngine::Blob::Ptr dst);
+
+void ref_PReLU(const InferenceEngine::Blob::Ptr src,
+               InferenceEngine::Blob::Ptr dst,
+               const uint16_t *weights,
+               size_t weightsSize);
+
+void ref_eltwise(const InferenceEngine::Blob::Ptr src1,
+                const InferenceEngine::Blob::Ptr src2,
+                const InferenceEngine::Blob::Ptr src3,
+                InferenceEngine::Blob::Ptr dst,
+                eltwise_kernel fun, std::vector<float> coeff);
+
+void ref_RegionYolo(const InferenceEngine::Blob::Ptr src,
+                    InferenceEngine::Blob::Ptr dst,
+                    int coords,
+                    int classes,
+                    int num,
+                    int maskSize,
+                    int doSoftMax);
+
+template <typename T>
+void ref_Permute(const InferenceEngine::Blob::Ptr src,
+                 InferenceEngine::Blob::Ptr dst,
+                 std::vector<size_t> order);
+
+void ref_softMax(const InferenceEngine::Blob::Ptr& src,
+                  InferenceEngine::Blob::Ptr& dst,
+                  int axis);
+void ref_reshape(const InferenceEngine::Blob::Ptr src,
+                 InferenceEngine::Blob::Ptr dst);
+
+void ref_Clamp(const InferenceEngine::Blob::Ptr inTensor,
+              InferenceEngine::Blob::Ptr outTensor,
+              float min,
+              float max);
+
+void ref_log(const InferenceEngine::Blob::Ptr& src,
+             InferenceEngine::Blob::Ptr& dst);
+
+void ref_exp(const InferenceEngine::Blob::Ptr& src,
+             InferenceEngine::Blob::Ptr& dst);
+
+void ref_gather(const InferenceEngine::Blob::Ptr& srcIdx,
+                const InferenceEngine::Blob::Ptr& srcDct,
+                const InferenceEngine::Blob::Ptr& dst,
+                const                        int  axis);
+
+void ref_scatter_elements_update(InferenceEngine::Blob::Ptr& input,
+                                 InferenceEngine::Blob::Ptr& indices,
+                                 InferenceEngine::Blob::Ptr& updates,
+                                                  const int  axis,
+                                 InferenceEngine::Blob::Ptr& output);
+
+template<typename DataType>
+void ref_reduce(const InferenceEngine::Blob::Ptr& src,
+                const InferenceEngine::Blob::Ptr& axes,
+                InferenceEngine::Blob::Ptr& dst,
+                int keep_dims,
+                IReduceKernel<DataType>* op);
+
+void ref_topk(const InferenceEngine::Blob::Ptr& srcValues,
+              const InferenceEngine::Blob::Ptr& srcK,
+              InferenceEngine::Blob::Ptr dstValues,
+              InferenceEngine::Blob::Ptr dstIndices,
+              int axis,
+              const std::string& mode,
+              const std::string& sort);
+
+void ref_strided_slice(const InferenceEngine::Blob::Ptr& src,
+                       InferenceEngine::Blob::Ptr& dst,
+                       InferenceEngine::SizeVector &out_dims,
+                       const std::vector<int32_t>& begin,
+                       const std::vector<int32_t>& end,
+                       const std::vector<int32_t>& stride,
+                       const InferenceEngine::SizeVector& begin_mask,
+                       const InferenceEngine::SizeVector& end_mask);
+
+struct ExpDetectionOutputParams {
+    float   deltas_weights[4];
+    float   max_delta_log_wh;
+    float   nms_threshold;
+    float   score_threshold;
+    int32_t max_detections_per_image;       // int
+    int32_t num_classes;                    // int
+    int32_t post_nms_count;                 // int
+    int32_t class_agnostic_box_regression;  // bool
+};
+
+void ref_expDetectionOutput(const InferenceEngine::Blob::Ptr srcBoxes,   // [numRois][4]
+                            const InferenceEngine::Blob::Ptr srcDeltas,  // [numRois]([numClasses][4])
+                            const InferenceEngine::Blob::Ptr srcScores,  // [numRois][numClasses]
+                            const InferenceEngine::Blob::Ptr srcIMinfo,  // [2]
+                            InferenceEngine::Blob::Ptr dstBoxes,         // [maxDetections][4]
+                            InferenceEngine::Blob::Ptr dstClasses,       // [maxDetections]
+                            InferenceEngine::Blob::Ptr dstScores,        // [maxDetections]
+                            const int numRois,
+                            const int numClasses,
+                            const int maxDetections,
+                            const ExpDetectionOutputParams& layerParams);
+
+void ref_ROIFeatureExtractor(std::vector<InferenceEngine::Blob::Ptr> inputs,
+                             InferenceEngine::Blob::Ptr output,
+                             InferenceEngine::Blob::Ptr output_rois,
+                             std::vector<int> pyramid_scales,
+                             int sampling_ratio,
+                             int pooled_height,
+                             int pooled_width);
+
+void ref_ROIAlign(InferenceEngine::Blob::Ptr feature_map,
+                  InferenceEngine::Blob::Ptr rois,
+                  InferenceEngine::Blob::Ptr batch_indices,
+                  InferenceEngine::Blob::Ptr output,
+                  const int sampling_ratio,
+                  const int pooled_h,
+                  const int pooled_w,
+                  const int num_rois,
+                  const float spatial_scale,
+                  const std::string mode);
+
+void ref_convert(const InferenceEngine::Blob::Ptr &src,
+                 InferenceEngine::Blob::Ptr &dst);
+
+void ref_Split(const InferenceEngine::Blob::Ptr src,
+               const InferenceEngine::BlobMap& dst,
+               const int axis);
+
+void ref_ExpPriorGridGenerator(std::vector<InferenceEngine::Blob::Ptr> inputs,
+                               std::vector<InferenceEngine::Blob::Ptr> output,
+                               int grid_w,
+                               int grid_h,
+                               float stride_w,
+                               float stride_h);
+
+void ref_ExpGenerateProposals(std::vector<InferenceEngine::Blob::Ptr> inputs,
+                              std::vector<InferenceEngine::Blob::Ptr> output,
+                              float min_size_,
+                              float nms_threshold_,
+                              int post_nms_topn_,
+                              int pre_nms_topn_);
+
+void ref_ExpTopKROIs(std::vector<InferenceEngine::Blob::Ptr> inputs,
+                     std::vector<InferenceEngine::Blob::Ptr> output,
+                     int max_rois);
+
+void ref_nonZero(const InferenceEngine::Blob::Ptr& src,
+                 InferenceEngine::Blob::Ptr& outIndices,
+                 InferenceEngine::Blob::Ptr& outDims);
+
+static constexpr char const PRELU_PARAM[] = "channel_shared";
diff --git a/inference-engine/tests_deprecated/functional/vpu/vpu_base/myriad_layers_tests.cpp b/inference-engine/tests_deprecated/functional/vpu/vpu_base/myriad_layers_tests.cpp
new file mode 100644 (file)
index 0000000..9a3512e
--- /dev/null
@@ -0,0 +1,101 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_layers_tests.hpp"
+
+#include <thread>
+#include <chrono>
+#include <iostream>
+
+#include "ie_ir_reader.hpp"
+#include "functional_test_utils/plugin_cache.hpp"
+
+using namespace InferenceEngine;
+
+void myriadLayersTests_nightly::makeSingleLayerNetwork(const LayerParams& layerParams,
+                                                       const NetworkParams& networkParams,
+                                                       const WeightsBlob::Ptr& weights) {
+    // Disable reorder in per-layer tests to make sure intended layout is used
+    _config[VPU_CONFIG_KEY(DISABLE_REORDER)] = CONFIG_VALUE(YES);
+
+    // White list of per-layer tests that allowed to reorder
+    if (layerParams._layerType == "Flatten") {
+        _config[VPU_CONFIG_KEY(DISABLE_REORDER)] = CONFIG_VALUE(NO);
+    }
+
+    ASSERT_NO_FATAL_FAILURE(
+        makeSingleLayerNetworkImpl(layerParams, networkParams, weights);
+    );
+}
+
+const std::vector<InferenceEngine::SizeVector> g_poolingInput = {
+    {{1,  1,  16,  16},
+     {1,  8, 228, 128},
+     {1, 16,  32,  64}}
+};
+
+const std::vector<InferenceEngine::SizeVector> g_poolingInput_postOp = {
+    {{1, 32,  86, 100}, // postOp issue MX
+     {1, 32,  62, 104}} // postOp issue M2
+};
+
+const std::vector<pooling_layer_params> g_poolingLayerParamsFull = {
+    /* kernel stride  pad */
+    {{2, 2}, {2, 2}, {0, 0}},
+    {{2, 2}, {2, 2}, {1, 1}},
+    {{2, 2}, {2, 2}, {2, 2}},
+    {{2, 2}, {1, 1}, {0, 0}},
+    {{2, 2}, {1, 1}, {1, 1}},
+    {{2, 2}, {1, 1}, {2, 2}},
+    {{4, 2}, {2, 2}, {0, 0}},
+    {{4, 2}, {2, 2}, {1, 1}},
+    {{4, 2}, {2, 2}, {2, 2}},
+    {{4, 2}, {1, 1}, {0, 0}},
+    {{4, 2}, {1, 1}, {1, 1}},
+    {{4, 2}, {1, 1}, {2, 2}},
+    {{2, 4}, {2, 2}, {0, 0}},
+    {{2, 4}, {2, 2}, {1, 1}},
+    {{2, 4}, {2, 2}, {2, 2}},
+    {{2, 4}, {1, 1}, {0, 0}},
+    {{2, 4}, {1, 1}, {1, 1}},
+    {{2, 4}, {1, 1}, {2, 2}},
+};
+
+const std::vector<pooling_layer_params> g_poolingLayerParamsLite = {
+    /* kernel stride  pad */
+    {{2, 2}, {1, 1}, {0, 0}},
+    {{4, 2}, {2, 2}, {1, 1}},
+    {{2, 4}, {1, 1}, {2, 2}},
+};
+
+const std::vector<vpu::LayoutPreference> g_poolingLayout = {
+    vpu::LayoutPreference::ChannelMajor,
+    vpu::LayoutPreference::ChannelMinor
+};
+
+const std::vector<InferenceEngine::SizeVector> g_convolutionTensors = {
+    {1, 8, 4, 16},
+
+    // FIXME: the test is written for [N]HWC layout, but InferenceEngine doesn't have 3D HWC layout.
+//    {16, 8, 16},
+};
+
+const std::vector<InferenceEngine::SizeVector> g_convolutionTensors_postOp = {
+    {{1, 32, 112, 96}}  /* postOp issue */
+};
+
+const std::vector<fcon_test_params> g_fcTestParamsSubset = {
+    {{1, 4, 8, 16}, 4, 0.065f},
+    {{1, 16, 8, 8}, 8, 0.065f}
+};
+
+/* Subset of tests to check the invocation of two-layer operations. */
+/* Additional tests for 2D and 3D tensors are included.             */
+const std::vector<int32_t> g_dimensionsFC = {
+    4, 3
+};
+
+const std::vector<int32_t> g_addBiasFC = {
+    1, 0
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/vpu_base/myriad_layers_tests.hpp b/inference-engine/tests_deprecated/functional/vpu/vpu_base/myriad_layers_tests.hpp
new file mode 100644 (file)
index 0000000..a01238d
--- /dev/null
@@ -0,0 +1,329 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <gtest/gtest.h>
+#include <ie_version.hpp>
+#include <algorithm>
+#include <cstddef>
+#include <precision_utils.h>
+#include <tuple>
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include <vpu/vpu_plugin_config.hpp>
+#include <vpu/private_plugin_config.hpp>
+#include "myriad_layers_reference_functions.hpp"
+#include "vpu_layers_tests.hpp"
+#include <file_utils.h>
+
+/* Function to calculate CHW dimensions for the blob generated by the Myriad plugin. */
+
+class myriadLayersTests_nightly : public vpuLayersTests {
+protected:
+    void makeSingleLayerNetwork(const LayerParams& layerParams,
+                                const NetworkParams& networkParams = {},
+                                const WeightsBlob::Ptr& weights = nullptr);
+};
+
+template<class T>
+class myriadLayerTestBaseWithParam: public myriadLayersTests_nightly,
+                           public testing::WithParamInterface<T> {
+};
+
+/* common classes for different basic tests */
+extern const char POOLING_MAX[];
+extern const char POOLING_AVG[];
+
+struct pooling_layer_params {
+    param_size kernel;
+    param_size stride;
+    param_size pad;
+};
+
+struct nd_tensor_test_params {
+    static constexpr int MAX_DIMS = 8;
+
+    size_t dims[MAX_DIMS];
+};
+
+template <const char* poolType, typename... Types>
+class PoolingTest : public myriadLayersTests_nightly,
+                    public testing::WithParamInterface<std::tuple<InferenceEngine::SizeVector, 
+                                                       pooling_layer_params, vpu::LayoutPreference, Types...>>
+{
+public:
+    virtual void SetUp() {
+        myriadLayersTests_nightly::SetUp();
+        auto p = ::testing::WithParamInterface<std::tuple<InferenceEngine::SizeVector, pooling_layer_params, vpu::LayoutPreference, Types...>>::GetParam();
+        _input_tensor       = std::get<0>(p);
+        _kernel_val         = std::get<1>(p).kernel;
+        _stride_val         = std::get<1>(p).stride;
+        _pad_val            = std::get<1>(p).pad;
+        _layout_preference  = std::get<2>(p);
+
+        if (_pad_val.x >= _kernel_val.x) {
+            _pad_val.x = _kernel_val.x - 1;
+        }
+        if (_pad_val.y >= _kernel_val.y) {
+            _pad_val.y = _kernel_val.y - 1;
+        }
+
+        _params["kernel-x"] = std::to_string(_kernel_val.x);
+        _params["kernel-y"] = std::to_string(_kernel_val.y);
+        _params["stride-x"] = std::to_string(_stride_val.x);
+        _params["stride-y"] = std::to_string(_stride_val.y);
+        _params["pad-x"] = std::to_string(_pad_val.x);
+        _params["pad-y"] = std::to_string(_pad_val.y);
+        _params["kernel"]   = std::to_string(_kernel_val.y) + "," + std::to_string(_kernel_val.x);
+        _params["strides"]  = std::to_string(_stride_val.y) + "," + std::to_string(_stride_val.x);
+        _params["pads_begin"] = std::to_string(_pad_val.y) + "," + std::to_string(_pad_val.x);
+        _params["pads_end"] = std::to_string(_pad_val.y) + "," + std::to_string(_pad_val.x);
+        _params["pool-method"] = poolType;
+        const bool isMaxPool = poolType == std::string("max");
+        if (!isMaxPool)
+            _params["exclude-pad"] = "true";
+        _output_tensor.resize(4);
+        _output_tensor[3] = std::ceil((_input_tensor[3] + 2. * _pad_val.x - _kernel_val.x) / _stride_val.x + 1);
+        _output_tensor[2] = std::ceil((_input_tensor[2] + 2. * _pad_val.y - _kernel_val.y) / _stride_val.y + 1);
+        _output_tensor[1] = _input_tensor[1];
+        _output_tensor[0] = 1;
+        ASSERT_EQ(_input_tensor.size(), 4);
+        _testNet.addLayer(LayerInitParams(_irVersion == IRVersion::v10 ? (isMaxPool ? "MaxPool" : "AvgPool") : "Pooling")
+                 .params(_params)
+                 .in({_input_tensor})
+                 .out({_output_tensor}),
+                 ref_pooling_wrap);
+    }
+
+    InferenceEngine::SizeVector _input_tensor;
+    InferenceEngine::SizeVector _output_tensor;
+    param_size _kernel_val;
+    param_size _stride_val;
+    param_size _pad_val;
+    vpu::LayoutPreference _layout_preference;
+    std::map<std::string, std::string> _params;
+};
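
The _output_tensor computation above is the usual pooling shape formula, out = ceil((in + 2*pad - kernel) / stride + 1). A hand-checkable instance:

    #include <cassert>
    #include <cmath>

    int main() {
        const double in = 16, kernel = 2, stride = 2, pad = 0;
        const int out = static_cast<int>(std::ceil((in + 2. * pad - kernel) / stride + 1));
        assert(out == 8);  // a 16x16 input with 2x2 kernel, stride 2, no pad yields 8x8
        return 0;
    }
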
+
+using KernelSizeParam = param_size;
+using PadsParam = param_size;
+using StridesParam = param_size;
+using GlobalPoolingTestParam = std::tuple<InferenceEngine::SizeVector, KernelSizeParam, PadsParam, StridesParam>;
+
+template <const char* poolType/*, typename... Types*/>
+class GlobalPoolingTest : public myriadLayersTests_nightly,
+                    public testing::WithParamInterface<GlobalPoolingTestParam>
+{
+public:
+    virtual void SetUp() {
+        myriadLayersTests_nightly::SetUp();
+        auto params = ::testing::WithParamInterface<GlobalPoolingTestParam>::GetParam();
+        _input_tensor = std::get<0>(params);
+        _kernel_val   = std::get<1>(params);
+        _pad_val      = std::get<2>(params);
+        _stride_val   = std::get<3>(params);
+
+#if 0 // 4DGP
+        // TODO: make it the test argument
+        _config[VPU_CONFIG_KEY(COMPUTE_LAYOUT)] = VPU_CONFIG_VALUE(NCHW);
+//        _config[VPU_CONFIG_KEY(COMPUTE_LAYOUT)] = VPU_CONFIG_VALUE(NHWC);
+#endif
+
+        _params["kernel-x"] = std::to_string(_kernel_val.x);
+        _params["kernel-y"] = std::to_string(_kernel_val.y);
+        _params["stride-x"] = std::to_string(_stride_val.x);
+        _params["stride-y"] = std::to_string(_stride_val.y);
+        _params["pad-x"] = std::to_string(_pad_val.x);
+        _params["pad-y"] = std::to_string(_pad_val.y);
+        _params["pool-method"] = poolType;
+        _output_tensor.resize(4);
+        _output_tensor[3] = std::floor((_input_tensor[3] + 2. * _pad_val.x - _kernel_val.x) / _stride_val.x) + 1;
+        _output_tensor[2] = std::floor((_input_tensor[2] + 2. * _pad_val.y - _kernel_val.y) / _stride_val.y) + 1;
+        _output_tensor[1] = _input_tensor[1];
+        _output_tensor[0] = _input_tensor[0];
+        _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+        ASSERT_EQ(_input_tensor.size(), 4);
+        _testNet.addLayer(LayerInitParams("Pooling")
+                 .params(_params)
+                 .in({_input_tensor})
+                 .out({_output_tensor}),
+                 ref_pooling_wrap);
+    }
+
+    InferenceEngine::SizeVector _input_tensor;
+    InferenceEngine::SizeVector _output_tensor;
+    param_size _kernel_val;
+    param_size _stride_val;
+    param_size _pad_val;
+    std::map<std::string, std::string> _params;
+};
+
+template <const char* poolType, const bool excludePad = false, typename... Types>
+class PoolingTestPad4 : public myriadLayersTests_nightly,
+                    public testing::WithParamInterface<std::tuple<InferenceEngine::SizeVector, param_size, param_size, paddings4, vpu::LayoutPreference, Types...>>
+{
+public:
+    virtual void SetUp() {
+        myriadLayersTests_nightly::SetUp();
+        auto p = ::testing::WithParamInterface<std::tuple<InferenceEngine::SizeVector, param_size, param_size, paddings4, vpu::LayoutPreference, Types...>>::GetParam();
+        _input_tensor       = std::get<0>(p);
+        _kernel_val         = std::get<1>(p);
+        _stride_val         = std::get<2>(p);
+        _pad_val            = std::get<3>(p);
+        _layout_preference  = std::get<4>(p);
+
+        if (_pad_val.left >= _kernel_val.x) {
+            _pad_val.left = _kernel_val.x - 1;
+        }
+        if (_pad_val.right >= _kernel_val.x) {
+            _pad_val.right = _kernel_val.x - 1;
+        }
+        if (_pad_val.top >= _kernel_val.y) {
+            _pad_val.top = _kernel_val.y - 1;
+        }
+        if (_pad_val.bottom >= _kernel_val.y) {
+            _pad_val.bottom = _kernel_val.y - 1;
+        }
+        auto bool2str = [](bool value) {
+            return value ? "true" : "false";
+        };
+        _params["kernel-x"] = std::to_string(_kernel_val.x);
+        _params["kernel-y"] = std::to_string(_kernel_val.y);
+        _params["stride-x"] = std::to_string(_stride_val.x);
+        _params["stride-y"] = std::to_string(_stride_val.y);
+        _params["pad-x"] = std::to_string(_pad_val.left);
+        _params["pad-y"] = std::to_string(_pad_val.top);
+        _params["exclude-pad"] = bool2str(excludePad);
+        _params["pool-method"] = poolType;
+        _output_tensor.resize(4);
+        _output_tensor[3] = std::ceil((_input_tensor[3] + _pad_val.left + _pad_val.right  - _kernel_val.x) / _stride_val.x + 1);
+        _output_tensor[2] = std::ceil((_input_tensor[2] + _pad_val.top  + _pad_val.bottom - _kernel_val.y) / _stride_val.y + 1);
+        _output_tensor[1] = _input_tensor[1];
+        _output_tensor[0] = 1;
+        ASSERT_EQ(_input_tensor.size(), 4);
+        _testNet.addLayer(LayerInitParams("Pooling")
+                 .params(_params)
+                 .in({_input_tensor})
+                 .out({_output_tensor}),
+                 ref_pooling_wrap);
+    }
+
+    InferenceEngine::SizeVector _input_tensor;
+    InferenceEngine::SizeVector _output_tensor;
+    param_size _kernel_val;
+    param_size _stride_val;
+    paddings4 _pad_val;
+    vpu::LayoutPreference _layout_preference;
+    std::map<std::string, std::string> _params;
+};
+
+template <typename... Types>
+class ConvolutionTest : public myriadLayersTests_nightly,
+                        public testing::WithParamInterface<std::tuple<InferenceEngine::SizeVector, param_size, param_size, param_size, uint32_t, uint32_t, Types...>>
+{
+public:
+    virtual void SetUp() {
+        myriadLayersTests_nightly::SetUp();
+        auto p = ::testing::WithParamInterface<std::tuple<InferenceEngine::SizeVector, param_size, param_size, param_size, uint32_t, uint32_t, Types...>>::GetParam();
+        _input_tensor = std::get<0>(p);
+        kernel = std::get<1>(p);
+        param_size stride = std::get<2>(p);
+        param_size pad = std::get<3>(p);
+        size_t out_channels = std::get<4>(p);
+        group = std::get<5>(p);
+        get_dims(_input_tensor, IW, IH,IC);
+        size_t out_w = (IW + 2 * pad.x - kernel.x + stride.x) / stride.x;
+        size_t out_h = (IH + 2 * pad.y - kernel.y + stride.y) / stride.y;
+
+        gen_dims(_output_tensor, _input_tensor.size(), out_w, out_h, out_channels);
+
+        size_t num_weights = kernel.x * kernel.y * (IC / group) * out_channels;
+        size_t num_bias    = out_channels;
+
+        std::map<std::string, std::string> layer_params = {
+                  {"kernel-x", std::to_string(kernel.x)}
+                , {"kernel-y", std::to_string(kernel.y)}
+                , {"stride-x", std::to_string(stride.x)}
+                , {"stride-y", std::to_string(stride.y)}
+                , {"pad-x", std::to_string(pad.x)}
+                , {"pad-y", std::to_string(pad.y)}
+                , {"dilations", "1,1"}
+                , {"strides", std::to_string(stride.y) + "," + std::to_string(stride.x)}
+                , {"pads_begin", std::to_string(pad.y) + "," + std::to_string(pad.x)}
+                , {"pads_end", std::to_string(pad.y) + "," + std::to_string(pad.x)}
+                , {"output", std::to_string(out_channels)}
+                , {"group", std::to_string(group)}
+        };
+        _testNet.addLayer(LayerInitParams("Convolution")
+                 .params(layer_params)
+                 .in({_input_tensor})
+                 .out({_output_tensor})
+                 .weights(num_weights).fillWeights(defaultWeightsRange)
+                 .biases(num_bias).fillBiases(defaultWeightsRange)
+                 .weightsDim({{out_channels, (IC / group), kernel.y, kernel.x}})
+                 .biasesDim({{1, out_channels, 1, 1}}),
+                 ref_convolution_wrap);
+    }
+    InferenceEngine::SizeVector _input_tensor;
+    InferenceEngine::SizeVector _output_tensor;
+    int32_t IW = 0;
+    int32_t IH = 0;
+    int32_t IC = 0;
+    size_t  group = 0;
+    param_size kernel;
+};
+
+template <typename... Types>
+class FCTest : public myriadLayersTests_nightly,
+               public testing::WithParamInterface<std::tuple<fcon_test_params, int32_t, int32_t, Types...>>
+{
+public:
+    virtual void SetUp() {
+        myriadLayersTests_nightly::SetUp();
+        auto p = ::testing::WithParamInterface<std::tuple<fcon_test_params, int32_t, int32_t, Types...>>::GetParam();
+        _par = std::get<0>(p);
+        int32_t input_dim = std::get<1>(p);
+        int32_t add_bias = std::get<2>(p);
+        std::map<std::string, std::string> params;
+        params["out-size"] = std::to_string(_par.out_c);
+        int32_t IW = _par.in.w;
+        int32_t IH = _par.in.h;
+        int32_t IC = _par.in.c;
+        gen_dims(_input_tensor, input_dim, IW, IH, IC);
+
+        _output_tensor.push_back(1);
+        _output_tensor.push_back(_par.out_c);
+
+        size_t sz_weights = IC * IH * IW * _par.out_c;
+        size_t sz_bias = 0;
+        if (add_bias) {
+            sz_bias = _par.out_c;
+        }
+        size_t sz = sz_weights + sz_bias;
+        // @todo: FullyConnected is not present in IRv10. Need to move to MatMul somehow; MatMul needs different initialization here.
+        _testNet.addLayer(LayerInitParams(_irVersion == IRVersion::v10 ? "MatMul" : "FullyConnected")
+                  .params(params)
+                  .in({_input_tensor})
+                  .out({_output_tensor})
+                 .weights(sz_weights).fillWeights(defaultWeightsRange).weightsDim({{1U, 0U + IC * _par.out_c, 0U + IH, 0U + IW}})
+                 .biases(sz_bias).fillBiases(defaultWeightsRange).biasesDim({{sz_bias}}),
+                 ref_innerproduct_wrap);
+    }
+    InferenceEngine::SizeVector _input_tensor;
+    InferenceEngine::SizeVector _output_tensor;
+    fcon_test_params _par;
+};
+
+/* Parameter definitions for the tests with several layers within the net */
+extern const std::vector<InferenceEngine::SizeVector> g_poolingInput;
+extern const std::vector<InferenceEngine::SizeVector> g_poolingInput_postOp;
+extern const std::vector<pooling_layer_params> g_poolingLayerParamsFull;
+extern const std::vector<pooling_layer_params> g_poolingLayerParamsLite;
+extern const std::vector<vpu::LayoutPreference> g_poolingLayout;
+extern const std::vector<InferenceEngine::SizeVector> g_convolutionTensors;
+extern const std::vector<InferenceEngine::SizeVector> g_convolutionTensors_postOp;
+extern const std::vector<fcon_test_params> g_fcTestParamsSubset;
+extern const std::vector<int32_t> g_dimensionsFC;
+extern const std::vector<int32_t> g_addBiasFC;
diff --git a/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_ir_dumper.cpp b/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_ir_dumper.cpp
new file mode 100644 (file)
index 0000000..fbb7069
--- /dev/null
@@ -0,0 +1,322 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu_ir_dumper.hpp"
+#include "single_layer_common.hpp"
+#include "debug.h"
+
+namespace {
+    std::string parameterPrecisionToString(InferenceEngine::Precision precision) {
+        switch (precision) {
+        case InferenceEngine::Precision::FP16: return "f16";
+        case InferenceEngine::Precision::FP32: return "f32";
+        default:
+            break;
+        }
+        IE_ASSERT(false) << "Unsupported input precision type: " << precision;
+        return "";
+    }
+}
+
+class IRDumperEdge {
+public:
+    static constexpr int startingOutputPort = 10;
+
+public:
+    IRDumperEdge() = delete;
+    IRDumperEdge(const IRDumperLayer * from, const IRDumperLayer * to, const size_t portFrom, const size_t portTo)
+        : _from(from), _to(to), _portFrom(portFrom), _portTo(portTo) {}
+
+public:
+    const IRDumperLayer * _from = nullptr;
+    const IRDumperLayer * _to   = nullptr;
+    size_t                _portFrom = startingOutputPort;
+    size_t                _portTo = 0;
+
+public:
+    IRXmlNode dump() const;
+};
+
+// -------------------------  IRWeights -------------------------------------------------
+
+size_t IRWeightsDescription::size() const { return _data.size(); }
+
+bool IRWeightsDescription::empty() const { return _data.empty(); }
+
+InferenceEngine::SizeVector IRWeightsDescription::desc() const {
+    // _desc.empty() means "autogenerate the tensor description for common cases".
+    // If the weights are explicitly set to be scalar, return an empty description.
+    // That is mostly done to simplify test code which does not support IRv10.
+    return _isScalar ? InferenceEngine::SizeVector{}
+                     : (_desc.empty() ? InferenceEngine::SizeVector{_data.size()}
+                                      : _desc);
+}
+
+size_t IRWeightsDescription::fill(uint8_t* destination, size_t offset) {
+    IE_ASSERT(_data.size());
+    memcpy(destination, _data.data(), _data.size());
+    _dataOffset = offset;
+    return _data.size();
+}
+
+// -------------------------  IRDumperNetwork -------------------------------------------------
+
+IRDumperNetwork::IRDumperNetwork(IRVersion version) : _version(version) {}
+
+IRDumperNetwork::~IRDumperNetwork() = default;
+
+IRXmlNode IRDumperNetwork::dump() const {
+    IRXmlNode net {"net", {
+            {"batch", "1"},
+            {"name" , "model.ckpt"},
+            {"precision"  , "FP16"},
+            {"version"   , (_version == IRVersion::v7 ? "7" : "10")},
+        }, {}, {}};
+    IRXmlNode layers {"layers", {}, {}, {}};
+    for (const auto& layer : _layers) {
+        layers.children.push_back(layer.dump());
+    }
+    IRXmlNode edges {"edges", {}, {}, {}};
+    for (const auto& edge : _edges) {
+        edges.children.push_back(edge.dump());
+    }
+    net.children.push_back(std::move(layers));
+    net.children.push_back(std::move(edges));
+    return net;
+}
+
+IRDumperLayer& IRDumperNetwork::addLayer(const std::string& name, const std::string& type, const IN_OUT_desc& in, const IN_OUT_desc& out) {
+    IRDumperLayer l;
+    l._version = _version;
+    l._name = name;
+    l._type = type;
+    l._inDesc = in;
+    l._outDesc = out;
+    _layers.push_back(l);
+    return *_layers.rbegin();
+}
+
+void IRDumperNetwork::addInput(const std::string& name, const IN_OUT_desc& out) {
+    IE_ASSERT(out.size() >= 1);
+    _inputLayersCount = out.size();
+    if (_inputLayersCount == 1) {
+        auto & l = addLayer(name, _version == IRVersion::v7 ? "Input" : "Parameter", {}, out);
+        if (_version == IRVersion::v10)
+            l._parameterPrecision = InferenceEngine::Precision::FP16;
+    } else {
+        for (size_t i = 0; i < _inputLayersCount; ++i) {
+            auto & l = addLayer(name + std::to_string(i), _version == IRVersion::v7 ? "Input" : "Parameter", {}, {out[i]});
+            if (_version == IRVersion::v10)
+                l._parameterPrecision = InferenceEngine::Precision::FP16;
+        }
+    }
+}
+
+void IRDumperNetwork::addOutput(const std::string& name, const IN_OUT_desc& in) {
+    if (_version == IRVersion::v10)
+        addLayer(name,  "Result", in, {});
+}
+
+void IRDumperNetwork::finalize() {
+    makeEdges();
+    populateWeights();
+    makeLayerSequence();
+}
+
+void IRDumperNetwork::makeEdges() {
+    for (size_t i = 0; i < _inputLayersCount; ++i) {
+        createEdge(_layers[i], _layers[_inputLayersCount], IRDumperEdge::startingOutputPort, i);
+    }
+    for (size_t i = _inputLayersCount; i < _layers.size() - 1; ++i) {
+        createEdge(_layers[i], _layers[i + 1], IRDumperEdge::startingOutputPort, 0);
+    }
+}
+
+void IRDumperNetwork::populateWeights() {
+    size_t totalSize = 0;
+    for (const auto& layer : _layers) {
+        totalSize += layer._weights.size();
+        totalSize += layer._biases.size();
+        for (const auto& param : layer._paramWeights)
+            totalSize += param.size();
+    }
+    if (!totalSize)
+        return;
+
+    uint8_t* dataPtr;
+    {
+        auto* w = new WeightsBlob({InferenceEngine::Precision::U8, {(totalSize)}, InferenceEngine::C});
+        w->allocate(); // private
+        _weights.reset(w);
+        auto d = w->data();
+        dataPtr = d.as<uint8_t*>();
+    }
+
+    std::vector<IRDumperLayer*> ptrs;
+    for (auto& layer : _layers) {
+        ptrs.push_back(&layer);
+    }
+    if (_version == IRVersion::v10) {
+        for (auto* layerPtr : ptrs) {
+            IRDumperLayer & layer = *layerPtr;
+            if (!layer._weights.empty()) {
+                layer._paramWeights.emplace_back(std::move(layer._weights));
+            }
+            for (auto&& weightsDesc : layer._paramWeights) {
+                const size_t oldInSize = layer._inDesc.size();
+
+                auto& dataLayer = addLayer(layer._name + "/" + weightsDesc._description, "Const", {}, {weightsDesc.desc()});
+                layer._inDesc.push_back(weightsDesc.desc());
+
+                dataLayer._weights = std::move(weightsDesc);
+                dataLayer._outputPrecision = dataLayer._weights._precision;
+
+                createEdge(dataLayer         , layer, IRDumperEdge::startingOutputPort, oldInSize);
+            }
+            layer._paramWeights.clear();
+
+            if (!layer._biases.empty()) {
+                auto& constDataLayer = addLayer(layer._name + "/biasData", "Const", {}, {layer._biases.desc()});
+                auto& additionLayer  = addLayer(layer._name + "/add", "Add", {layer._outDesc[0], layer._biases.desc()}, layer._outDesc);
+                constDataLayer._weights = std::move(layer._biases);
+
+                IRDumperEdge* currentConvOutEdge = nullptr;
+                for (auto& edge : _edges) {
+                    if (edge._from == &layer) {
+                        currentConvOutEdge = &edge;
+                        break;
+                    }
+                }
+                IE_ASSERT(currentConvOutEdge);
+                currentConvOutEdge->_from = &additionLayer;
+
+                createEdge(layer         , additionLayer, IRDumperEdge::startingOutputPort, 0);
+                createEdge(constDataLayer, additionLayer, IRDumperEdge::startingOutputPort, 1);
+            }
+        }
+    }
+    size_t offset = 0;
+    for (auto& layer : _layers) {
+        if (!layer._weights.empty()) {
+            offset += layer._weights.fill(dataPtr + offset, offset);
+        }
+        if (!layer._biases.empty()) {
+            offset += layer._biases.fill(dataPtr + offset, offset);
+        }
+    }
+}
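+
+// Illustrative effect of the v10 branch above (hypothetical layer name "conv"):
+// a layer with biases is rewritten so that the bias blob becomes a Const layer
+// feeding an explicit Add node, and the original output edge is re-pointed at
+// the Add:
+//
+//   conv ------------> conv/add (Add) ----> <original consumer>
+//   conv/biasData ---^
+//       (Const)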
+
+void IRDumperNetwork::makeLayerSequence() {
+    for (size_t i = 0; i < _layers.size(); ++i) {
+        _layers[i]._id = i;
+    }
+}
+
+void IRDumperNetwork::createEdge(const IRDumperLayer& from, const IRDumperLayer& to, size_t portFrom, size_t portTo) {
+    _edges.emplace_back(&from, &to, portFrom, portTo);
+}
+
+// -------------------------  IRDumperLayer -------------------------------------------------
+
+IRXmlNode IRDumperLayer::dump() const {
+    IRXmlNode layer {"layer", {{"id", std::to_string(_id)}, {"name", _name}, {"type", _type}}, {}, {}};
+
+    if (_version == IRVersion::v10) {
+        layer.attributes["version"] = "opset1";
+        if (!_weights.empty()) {
+            IRXmlNode dataNode {"data", {
+                    {"offset", std::to_string(_weights._dataOffset)},
+                    {"size", std::to_string(_weights.size())}}, {}, {}};
+            layer.children.push_back(std::move(dataNode));
+        }
+        else if (_parameterPrecision != InferenceEngine::Precision::UNSPECIFIED) {
+            IRXmlNode dataNode {"data", {
+                    {"element_type", paramterPresitionToString(_parameterPrecision)},
+                    {"shape", InferenceEngine::details::joinVec(_outDesc[0])}}, {}, {}};
+            layer.children.push_back(std::move(dataNode));
+        }
+        else if (!_dataParams.empty()) {
+            IRXmlNode dataNode {"data", _dataParams, {}, {}};
+            layer.children.push_back(std::move(dataNode));
+        }
+    } else {
+        if (!_dataParams.empty()) {
+            IRXmlNode dataNode {"data", _dataParams, {}, {}};
+            layer.children.push_back(std::move(dataNode));
+        }
+        if (!_weights.empty()) {
+            IRXmlNode weights {"weights", {
+                    {"offset", std::to_string(_weights._dataOffset)},
+                    {"size", std::to_string(_weights.size())}}, {}, {}};
+            layer.children.push_back(std::move(weights));
+        }
+        if (!_biases.empty()) {
+            IRXmlNode biases {"biases", {
+                    {"offset", std::to_string(_biases._dataOffset)},
+                    {"size", std::to_string(_biases.size())}}, {}, {}};
+            layer.children.push_back(std::move(biases));
+        }
+    }
+    if (!_inDesc.empty())
+        layer.children.push_back(dumpDesc(_inDesc, "input", 0, InferenceEngine::Precision::UNSPECIFIED));
+    if (!_outDesc.empty())
+        layer.children.push_back(dumpDesc(_outDesc, "output", IRDumperEdge::startingOutputPort, _outputPrecision));
+
+    return layer;
+}
+
+IRXmlNode IRDumperLayer::dumpDesc(const IN_OUT_desc& desc, const std::string& portsTag, int portIndexStart, const InferenceEngine::Precision& precision) const {
+    IRXmlNode ports {portsTag, {}, {}, {}};
+
+    int portIndex = portIndexStart;
+    for (const auto& portDims : desc) {
+        IRXmlNode port {"port", {{"id", std::to_string(portIndex++)}}, {}, {}};
+        if (precision != InferenceEngine::Precision::UNSPECIFIED) {
+            port.attributes["precision"] = precision.name();
+        }
+        for (const auto& dim : portDims) {
+            IRXmlNode dimNode {"dim", {}, std::to_string(dim), {}};
+            port.children.push_back(std::move(dimNode));
+        }
+        ports.children.push_back(std::move(port));
+    }
+    return ports;
+}
+
+// -------------------------  IRDumperEdge -------------------------------------------------
+
+IRXmlNode IRDumperEdge::dump() const {
+    IRXmlNode egde {"edge", {
+            {"from-layer", std::to_string(_from->id())},
+            {"from-port" , std::to_string(_portFrom)},
+            {"to-layer"  , std::to_string(_to->id())},
+            {"to-port"   , std::to_string(_portTo)},
+        }, {}, {}};
+    return egde;
+}
+
+// -------------------------  common utils -------------------------------------------------
+
+std::string formatXmlNode(const IRXmlNode& node, int indent) {
+    std::ostringstream os;
+    os << std::string(indent, '\t') << "<" << node.name;
+    for (const auto& pair : node.attributes)
+        os << " " << pair.first + "=\"" + pair.second + "\"";
+    if (node.rawText.empty() && node.children.empty()) {
+        os << "/>\n";
+        return os.str();
+    }
+    os << ">";
+
+    if (!node.rawText.empty()) {
+        os << node.rawText << "</" << node.name << ">\n";
+        return os.str();
+    }
+    os << "\n";
+    for (const auto& child : node.children)
+        os << formatXmlNode(child, indent + 1);
+
+    os << std::string(indent, '\t') << "</" << node.name << ">\n";
+    return os.str();
+}
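+
+// Illustrative usage (hypothetical node values): empty nodes self-close,
+// rawText is inlined, and children are indented with tabs.
+//
+//   IRXmlNode dim  {"dim", {}, "224", {}};
+//   IRXmlNode port {"port", {{"id", "0"}}, {}, {dim}};
+//   std::cout << formatXmlNode(port);
+//   // prints:
+//   // <port id="0">
+//   //     <dim>224</dim>
+//   // </port>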
diff --git a/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_ir_dumper.hpp b/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_ir_dumper.hpp
new file mode 100644 (file)
index 0000000..a4e6366
--- /dev/null
@@ -0,0 +1,99 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <deque>
+
+#include "vpu_test_common_definitions.hpp"
+
+class IRDumperEdge;
+
+class IRWeightsDescription {
+public:
+    std::vector<uint8_t>        _data;
+    InferenceEngine::Precision  _precision = InferenceEngine::Precision::FP16;
+    InferenceEngine::SizeVector _desc;
+    size_t                      _dataOffset = 0;
+    std::string                 _description = "data";
+    bool                        _isScalar = false;
+
+public:
+    size_t size() const;
+    bool empty() const;
+    InferenceEngine::SizeVector desc() const;
+    size_t fill(uint8_t* destination, size_t offset);
+};
+
+struct IRXmlNode {
+    std::string name;
+    std::map<std::string, std::string> attributes;
+    std::string rawText;
+    std::vector<IRXmlNode> children;
+};
+
+class IRDumperLayer {
+public:
+    std::string _name;
+    std::string _type;
+    IN_OUT_desc _inDesc;
+    IN_OUT_desc _outDesc;
+
+    InferenceEngine::Precision _parameterPrecision = InferenceEngine::Precision::UNSPECIFIED;
+    InferenceEngine::Precision _outputPrecision = InferenceEngine::Precision::FP16;
+
+    IRWeightsDescription _weights;
+    IRWeightsDescription _biases;
+    std::vector<IRWeightsDescription> _paramWeights;
+
+    std::map<std::string, std::string> _dataParams;
+
+public:
+    IRXmlNode dump() const;
+    IRXmlNode dumpDesc(const IN_OUT_desc& desc, const std::string& portsTag, int portIndexStart, const InferenceEngine::Precision& precision) const;
+    size_t id() const { return _id; }
+
+private:
+    size_t _id = 0;
+    IRVersion _version = IRVersion::v7;
+
+private:
+    friend class IRDumperNetwork;
+};
+
+class IRDumperNetwork {
+public:
+    IRDumperNetwork(IRVersion version);
+    ~IRDumperNetwork();
+
+    IRXmlNode dump() const;
+
+    IRDumperLayer& addLayer(const std::string& name,
+                  const std::string& type,
+                  const IN_OUT_desc& in,
+                  const IN_OUT_desc& out);
+    void addInput(const std::string& name,
+                  const IN_OUT_desc& out);
+    void addOutput(const std::string& name,
+                   const IN_OUT_desc& in);
+    void finalize();
+
+    WeightsBlob::Ptr getWeights() const {return _weights;}
+
+private:
+    void makeEdges();
+    void populateWeights();
+    void makeLayerSequence();
+
+    void createEdge(const IRDumperLayer& from, const IRDumperLayer& to, size_t portFrom, size_t portTo = 0);
+
+private:
+    IRVersion                             _version;
+    size_t                                _inputLayersCount = 0;
+    std::deque<IRDumperLayer>             _layers;   //!< deque used for stable pointers in edges.
+    std::vector<IRDumperEdge>             _edges;
+    WeightsBlob::Ptr  _weights;
+};
+
+std::string formatXmlNode(const IRXmlNode& node, int indent = 0);
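+
+// Illustrative usage sketch (assumed shapes, not part of the build): a network
+// is assembled input -> layer -> output, finalized to create edges and pack
+// weights, then serialized through formatXmlNode().
+//
+//   IRDumperNetwork dumper(IRVersion::v10);
+//   dumper.addInput("input", {{1, 3, 224, 224}});
+//   dumper.addLayer("relu", "ReLU", {{1, 3, 224, 224}}, {{1, 3, 224, 224}});
+//   dumper.addOutput("output", {{1, 3, 224, 224}});
+//   dumper.finalize();
+//   const std::string xml = formatXmlNode(dumper.dump());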
diff --git a/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_layer_tests_utils.cpp b/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_layer_tests_utils.cpp
new file mode 100644 (file)
index 0000000..8618db8
--- /dev/null
@@ -0,0 +1,280 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu_layer_tests_utils.hpp"
+
+#include "common_test_utils/common_layers_params.hpp"
+
+using namespace InferenceEngine;
+
+
+void PrintTo(const IRVersion& version, std::ostream* os)
+{
+    IE_ASSERT(version == IRVersion::v7 || version == IRVersion::v10);
+    *os << (version == IRVersion::v7 ? "[IR v7]" : "[IR v10]");
+}
+
+void PrintTo(const tensor_test_params& sz, std::ostream* os)
+{
+    *os << "{" << std::setw(2) << sz.n << ", " << std::setw(3) << sz.c << ", "
+            << std::setw(3) << sz.h << ", " << std::setw(3) << sz.w << "}";
+}
+
+void print_buffer_HWC_fp16(ie_fp16 *src_data, int32_t IW, int32_t IH, int32_t IC, const char * tname, int32_t iw0, int32_t iw1, int32_t ih0, int32_t ih1, int32_t ic0, int32_t ic1)
+{
+    iw1 = (iw1 == -1) ? IW-1 : iw1;
+    ih1 = (ih1 == -1) ? IH-1 : ih1;
+    ic1 = (ic1 == -1) ? IC-1 : ic1;
+
+    printf("%s: H=%i, W=%i, C=%i\n", tname, IH, IW, IC);
+    for (int ih = ih0; ih <= ih1; ih++)
+    {
+        printf("h %i: ", ih);
+        for (int iw = iw0; iw <= iw1 ; iw++)
+        {
+            printf("(");
+            for (int ic = ic0; ic <= ic1; ic++)
+            {
+                printf("%8.4f ", PrecisionUtils::f16tof32(src_data[ic + iw * IC + ih * IC * IW]));
+            }
+            printf("), ");
+        }
+        printf("\n");
+    }
+}
+
+void print_tensor_HWC_fp16(const Blob::Ptr src, const char * tname, int32_t iw0, int32_t iw1, int32_t ih0, int32_t ih1, int32_t ic0, int32_t ic1)
+{
+    ie_fp16 *src_data = static_cast<ie_fp16*>(src->buffer());
+
+    int32_t IW = 0;
+    int32_t IH = 0;
+    int32_t IC = 0;
+    get_dims(src, IW, IH, IC);
+
+    print_buffer_HWC_fp16(src_data, IW, IH, IC, tname, iw0, iw1, ih0, ih1, ic0, ic1);
+}
+
+void get_ndims(const InferenceEngine::Blob::Ptr blob,
+               int32_t &dimx,
+               int32_t &dimy,
+               int32_t &dimz,
+               int32_t &dimn) {
+    ASSERT_NE(blob, nullptr);
+    auto dims = blob->getTensorDesc().getDims();
+
+    if (dims.size() == 1) {
+        dimn = 1;
+        dimz = dims[0];
+        dimy = 1;
+        dimx = 1;
+    }
+    else if (dims.size() == 2) {
+        dimn = 1;
+        dimz = 1;
+        dimy = dims[0];
+        dimx = dims[1];
+    } else if (dims.size() == 3) {
+        dimx = dims[2];
+        dimy = dims[1];
+        dimz = dims[0];
+        dimn = 1;
+    } else if (dims.size() == 4) {
+        dimx = dims[3];
+        dimy = dims[2];
+        dimz = dims[1];
+        dimn = dims[0];
+    }
+}
+
+void get_dims(const InferenceEngine::Blob::Ptr blob,
+                    int32_t &dimx,
+                    int32_t &dimy,
+                    int32_t &dimz) {
+    ASSERT_NE(blob, nullptr);
+    CommonTestUtils::get_common_dims(*blob.get(), dimx, dimy, dimz);
+}
+
+void get_dims(const InferenceEngine::SizeVector& input_dims,
+                    int32_t &IW,
+                    int32_t &IH,
+                    int32_t &IC) {
+    IW = 0;
+    IH = 0;
+    IC = 0;
+    int32_t stub = 0;
+
+    get_dims(input_dims, IW, IH, IC, stub);
+}
+
+void get_dims(const InferenceEngine::SizeVector& input_dims,
+                    int32_t &IW,
+                    int32_t &IH,
+                    int32_t &IC,
+                    int32_t &I_N) {
+    IW = 0;
+    IH = 0;
+    IC = 0;
+    I_N = 1;
+    switch (input_dims.size()) {
+        case 2:
+            /* Fully connected tests */
+            IW = 1;
+            IH = 1;
+            IC = input_dims[1];
+            break;
+        case 3:
+            IW = input_dims[2];
+            IH = input_dims[1];
+            IC = input_dims[0];
+            break;
+        case 4:
+            IW = input_dims[3];
+            IH = input_dims[2];
+            IC = input_dims[1];
+            I_N = input_dims[0];
+            break;
+        default:
+            FAIL() << "Unsupported input dimension.";
+            break;
+    }
+}
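+
+// Illustrative mapping (assumed NCHW order): for input_dims = {1, 3, 224, 224}
+// this yields IW = 224, IH = 224, IC = 3, I_N = 1; for the 2D fully-connected
+// case {N, C} it yields IW = 1, IH = 1, IC = C (I_N is left at 1).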
+
+void gen_dims(InferenceEngine::SizeVector& out_dims,
+              int32_t dimension,
+              int32_t IW,
+              int32_t IH,
+              int32_t IC) {
+    if (dimension < 2 ||
+        dimension > 4)
+        FAIL() << "Unsupported input dimension:" << dimension;
+    out_dims.reserve(dimension);
+    switch (dimension) {
+        case 4:
+            out_dims.push_back(1);
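+            // intentional fall-through: emit batch = 1, then the CHW dims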
+        case 3:
+            out_dims.push_back(IC);
+            out_dims.push_back(IH);
+            out_dims.push_back(IW);
+            break;
+        default:
+            break;
+    }
+}
+
+void gen_dims(InferenceEngine::SizeVector& out_dims,
+              int32_t dimension,
+              int32_t IW,
+              int32_t IH,
+              int32_t IC,
+              int32_t I_N) {
+    if (dimension < 2 ||
+        dimension > 4)
+        FAIL() << "Unsupported input dimension:" << dimension;
+    out_dims.reserve(dimension);
+    switch (dimension) {
+        case 4:
+            out_dims.push_back(I_N);
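+            // intentional fall-through: emit I_N, then the CHW dims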
+        case 3:
+            out_dims.push_back(IC);
+            out_dims.push_back(IH);
+            out_dims.push_back(IW);
+            break;
+        default:
+            break;
+    }
+}
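+
+// Illustrative results (assumed): gen_dims(dims, 4, 224, 224, 3, 1) fills dims
+// with {1, 3, 224, 224}, while gen_dims(dims, 3, 224, 224, 3) fills {3, 224, 224}.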
+
+void zeroWeightsRange(uint16_t* ptr, size_t weightsSize) {
+    ASSERT_NE(ptr, nullptr);
+    for (size_t count = 0 ; count < weightsSize; ++count) {
+        ptr[count] = PrecisionUtils::f32tof16(0.);
+    }
+}
+
+void defaultWeightsRange(uint16_t* ptr, size_t weightsSize) {
+    ASSERT_NE(ptr, nullptr);
+    float scale  = 2.0f / RAND_MAX;
+    for (size_t count = 0 ; count < weightsSize; ++count) {
+        float val = rand();
+        val = val * scale - 1.0f;
+        ptr[count] = PrecisionUtils::f32tof16(val);
+    }
+}
+
+void smallWeightsRange(uint16_t* ptr, size_t weightsSize) {
+    ASSERT_NE(ptr, nullptr);
+    float scale  = 2.0f / RAND_MAX;
+    for (size_t count = 0 ; count < weightsSize; ++count) {
+        float val = rand();
+        val = (val * scale - 1.0f) / 512;
+        ptr[count] = PrecisionUtils::f32tof16(val);
+    }
+}
+
+std::string gen_param(const param_size& in_param) {
+    std::string res = std::to_string(in_param.x) + ",";
+    res += std::to_string(in_param.y);
+    return res;
+}
+
+void GenRandomData(InferenceEngine::Blob::Ptr blob)
+{
+    GenRandomDataCommon(blob);
+}
+
+bool fromBinaryFile(std::string input_binary, InferenceEngine::Blob::Ptr blob) {
+    std::ifstream in(input_binary, std::ios_base::binary | std::ios_base::ate);
+
+    size_t sizeFile = in.tellg();
+    in.seekg(0, std::ios_base::beg);
+    size_t count = blob->size();
+    bool status = false;
+    if (in.good()) {
+        if (blob->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP16) {
+            ie_fp16 *blobRawDataFP16 = blob->buffer().as<ie_fp16 *>();
+            if (sizeFile == count * sizeof(float)) {
+                for (size_t i = 0; i < count; i++) {
+                    float tmp;
+                    in.read(reinterpret_cast<char *>(&tmp), sizeof(float));
+                    blobRawDataFP16[i] = PrecisionUtils::f32tof16(tmp);
+                }
+                status = true;
+            } else if (sizeFile == count * sizeof(ie_fp16)) {
+                for (size_t i = 0; i < count; i++) {
+                    ie_fp16 tmp;
+                    in.read(reinterpret_cast<char *>(&tmp), sizeof(ie_fp16));
+                    blobRawDataFP16[i] = tmp;
+                }
+                status = true;
+            }
+        } else if (blob->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP32) {
+            float *blobRawData = blob->buffer();
+            if (sizeFile == count * sizeof(float)) {
+                in.read(reinterpret_cast<char *>(blobRawData), count * sizeof(float));
+                status = true;
+            }
+        }
+    }
+    return status;
+}
+
+
+WeightsBlob* GenWeights(size_t sz, float min_val, float max_val) {
+    // TODO: pass seed as parameter
+
+    float scale  = (max_val - min_val) / RAND_MAX;
+    WeightsBlob *weights = new WeightsBlob({InferenceEngine::Precision::U8, {(sz) * sizeof(uint16_t)}, InferenceEngine::C});
+    weights->allocate();
+    uint16_t *inputBlobRawDataFp16 = weights->data().as<uint16_t *>();
+    size_t indx = 0;
+
+    for (; indx < sz; ++indx) {
+        float val = rand();
+        val = val * scale + min_val;
+        inputBlobRawDataFp16[indx] = PrecisionUtils::f32tof16(val);
+    }
+    return weights;
+}
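+
+// Note on sizing (assumed semantics): GenWeights(sz) allocates sz FP16 values in
+// a U8 blob, hence the {sz * sizeof(uint16_t)} byte dimension; e.g. GenWeights(8)
+// yields a 16-byte blob of FP16 values drawn from [min_val, max_val] via rand().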
diff --git a/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_layer_tests_utils.hpp b/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_layer_tests_utils.hpp
new file mode 100644 (file)
index 0000000..95d4150
--- /dev/null
@@ -0,0 +1,121 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "vpu_test_common_definitions.hpp"
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include "myriad_layers_reference_functions.hpp"
+
+/* Functions to calculate CHW dimensions for the blobs generated by */
+/* the Myriad/HDDL plugin.                                          */
+void get_dims(const InferenceEngine::Blob::Ptr blob,
+                    int32_t &dimx,
+                    int32_t &dimy,
+                    int32_t &dimz);
+
+void get_ndims(const InferenceEngine::Blob::Ptr blob,
+              int32_t &dimx,
+              int32_t &dimy,
+              int32_t &dimz,
+              int32_t &dimn);
+
+void get_dims(const InferenceEngine::SizeVector& input_dims,
+                    int32_t &IW,
+                    int32_t &IH,
+                    int32_t &IC);
+
+void get_dims(const InferenceEngine::SizeVector& input_dims,
+                    int32_t &IW,
+                    int32_t &IH,
+                    int32_t &IC,
+                    int32_t &I_N);
+
+void gen_dims(InferenceEngine::SizeVector& out_dims,
+              int32_t dimension,
+              int32_t IW,
+              int32_t IH,
+              int32_t IC);
+
+void gen_dims(InferenceEngine::SizeVector& out_dims,
+              int32_t dimension,
+              int32_t IW,
+              int32_t IH,
+              int32_t IC,
+              int32_t I_N);
+
+void zeroWeightsRange(uint16_t* ptr,
+                      size_t weightsSize);
+
+void defaultWeightsRange(uint16_t* ptr,
+                         size_t weightsSize);
+
+void smallWeightsRange(uint16_t* ptr,
+                       size_t weightsSize);
+
+std::string gen_param(const param_size& in_param);
+
+#define DEFAULT_SEED_VALUE (43)
+
+#if defined(_MSC_VER)
+#  define MAKE_STRUCT(name, ...) [=]()->name{ name make_struct_tmp = {__VA_ARGS__}; return make_struct_tmp; }()
+#else
+#  define MAKE_STRUCT(name, ...) ((name){__VA_ARGS__})
+#endif
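+
+// Illustrative usage (param_size assumed to be an {x, y} aggregate):
+//   MAKE_STRUCT(param_size, 3, 3)
+// yields param_size{3, 3} on every compiler: MSVC goes through the lambda
+// workaround (it lacks compound literals), others use a compound literal.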
+
+
+struct fcon_test_params {
+    tensor_test_params in;
+    friend std::ostream& operator<<(std::ostream& os, fcon_test_params const& tst)
+    {
+        return os << tst.in
+                  << ", out_c = " << tst.out_c
+                  << ", error_bound = " << tst.error_bound;
+    };
+    uint32_t out_c;
+    float    error_bound;
+};
+
+struct interp_test_params {
+    size_t iw;
+    size_t ih;
+    size_t ow;
+    size_t oh;
+    size_t c;
+
+    friend std::ostream& operator<<(std::ostream& os, interp_test_params const& tst)
+    {
+        return os << "iw = " << tst.iw << ", "
+                  << "ih = " << tst.ih << ", "
+                  << "ow = " << tst.ow << ", "
+                  << "oh = " << tst.oh << ", "
+                  << "channels = " << tst.c;
+    };
+};
+
+void PrintTo(const IRVersion& version, std::ostream* os);
+
+void PrintTo(const tensor_test_params& sz, std::ostream* os);
+
+void print_buffer_HWC_fp16(InferenceEngine::ie_fp16 *src_data, int32_t IW, int32_t IH, int32_t IC, const char * tname,
+        int32_t iw0=0, int32_t iw1=-1, int32_t ih0=0, int32_t ih1=-1, int32_t ic0=0, int32_t ic1=-1);
+void print_tensor_HWC_fp16(const InferenceEngine::Blob::Ptr src, const char * tname,
+        int32_t iw0=0, int32_t iw1=-1, int32_t ih0=0, int32_t ih1=-1, int32_t ic0=0, int32_t ic1=-1);
+
+PRETTY_PARAM(Dims, tensor_test_params);
+PRETTY_PARAM(DimsInput, tensor_test_params);
+PRETTY_PARAM(DimsInput3D, tensor_test_params_3d);
+PRETTY_PARAM(DimsOutput, tensor_test_params);
+PRETTY_PARAM(DimsOutput3D, tensor_test_params_3d);
+
+/* This helper class is defined to add a post-op operation without */
+/* significant modification of the existing codebase.              */
+
+void GenRandomData(InferenceEngine::Blob::Ptr blob);
+bool fromBinaryFile(std::string input_binary, InferenceEngine::Blob::Ptr blob);
+
+WeightsBlob* GenWeights(size_t sz,
+                                            float min_val = -1.0f,
+                                            float max_val = 1.0f);
diff --git a/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_layers_tests.cpp b/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_layers_tests.cpp
new file mode 100644 (file)
index 0000000..d589097
--- /dev/null
@@ -0,0 +1,360 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu_layers_tests.hpp"
+
+#include <thread>
+#include <chrono>
+#include <iostream>
+
+#include "functional_test_utils/plugin_cache.hpp"
+#include "ie_memcpy.h"
+#include "common_test_utils/common_layers_params.hpp"
+
+#include <common/include/vpu/utils/error.hpp>
+
+#include "blob_factory.hpp"
+#include "ie_ir_reader.hpp"
+#include "debug.h"
+#include "vpu_tests_config.hpp"
+
+using namespace InferenceEngine;
+
+void vpuLayersTests::SetUp() {
+    _vpuPluginPtr = std::make_shared<IECoreAdapter>(PluginCache::get().ie(), vpu::tests::deviceName());
+
+    _genDataCallback = GenRandomData;
+    TestsCommon::SetUp();
+    SetSeed(DEFAULT_SEED_VALUE);
+}
+
+void vpuLayersTests::TearDown() {
+    if (auto test_info = testing::UnitTest::GetInstance()->current_test_info()) {
+        if (auto type_param = test_info->type_param()) {
+            std::cout << "[ TYPE     ] \t" << type_param << std::endl;
+        }
+        if (auto value_param = test_info->value_param()) {
+            std::cout << "[ VALUE    ] \t" << value_param << std::endl;
+        }
+
+        if (auto dumpModelsPath = std::getenv("IE_VPU_DUMP_LAYER_TESTS_MODELS_DIRECTORY")) {
+            std::string testName = test_info->name();
+            std::replace(testName.begin(), testName.end(), '/', '_');
+
+            auto filename = dumpModelsPath + std::string("/") + testName;
+
+            std::string xmlName = filename + ".xml";
+            std::string weightsName = filename + ".bin";
+            _cnnNetwork.serialize(xmlName, weightsName);
+
+            std::string blobName = filename + ".blob";
+            _exeNetwork->Export(blobName, nullptr);
+        }
+    }
+
+    _vpuPluginPtr = {};
+}
+
+bool vpuLayersTests::CheckMyriadX() {
+    if (auto envVar = std::getenv("IE_VPU_MYRIADX")) {
+        return std::stoi(envVar) != 0;
+    }
+    return false;
+}
+
+void vpuLayersTests::SetSeed(uint32_t seed) {
+    /*just to be able to repeat results */
+    std::srand(seed);
+}
+
+Blob::Ptr vpuLayersTests::getReferenceOutput() {
+    return _testNet.getLastOutput();
+}
+
+void vpuLayersTests::dumpPerformance() {
+    std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perfMap;
+    _inferRequest->GetPerformanceCounts(perfMap, nullptr);
+    std::vector <std::pair<std::string, InferenceEngine::InferenceEngineProfileInfo>> perfVec(perfMap.begin(), perfMap.end());
+    std::sort(perfVec.begin(), perfVec.end(),
+              [=](const std::pair<std::string, InferenceEngine::InferenceEngineProfileInfo> &pair1,
+                  const std::pair<std::string, InferenceEngine::InferenceEngineProfileInfo> &pair2) -> bool {
+                  return pair1.second.execution_index < pair2.second.execution_index;
+              });
+
+    unsigned currentIndex = 0;
+    for (auto it = perfVec.begin(); it != perfVec.end(); ++it) {
+        std::string layerName = it->first;
+        InferenceEngine::InferenceEngineProfileInfo info = it->second;
+        if (info.status == InferenceEngine::InferenceEngineProfileInfo::EXECUTED) {
+            printf("\x1B[32m[----------]\x1B[0m Myriad time = '%s' layer with '%s' type is %f ms.\n", layerName.c_str(), info.exec_type, info.realTime_uSec / 1000.f);
+        }
+    }
+}
+
+namespace {
+
+template<class TensorDescriptor>
+Blob::Ptr allocateBlob(const TensorDescriptor& source, bool lockLayout) {
+    const auto& descriptor = source->getTensorDesc();
+
+    // reference functions work only with NHWC layout
+    const auto& outputLayout = descriptor.getLayout();
+    const auto& layout = lockLayout ? outputLayout : (outputLayout == NHWC || outputLayout == NCHW) ? NHWC : outputLayout;
+
+    // it is required to create new TensorDesc object: #-26746
+    auto blob = make_blob_with_precision(TensorDesc{descriptor.getPrecision(), descriptor.getDims(), layout});
+
+    blob->allocate();
+    return blob;
+}
+
+template<class Blob>
+void configure(const Blob& blob, const InferenceEngine::Precision& precision, const vpu::LayoutPreference& layoutPreference) {
+    if (precision != InferenceEngine::Precision::UNSPECIFIED) {
+        // The default behavior is to set FP16 precision, to avoid a "Convert" layer from FP32 to FP16.
+        // For a network with precision other than just FP16 or FP32 (e.g. "I32" or mixed), the user
+        // changes the precision to "UNSPECIFIED", so the precision defined in the IR is used.
+        blob->setPrecision(precision);
+    }
+
+    blob->setLayout(vpu::deviceLayout(blob->getLayout(), layoutPreference));
+}
+
+}
+
+void vpuLayersTests::genInputBlobs(bool lockLayout) {
+    auto genDataCallback = (_genDataCallback0 != nullptr) ? _genDataCallback0 : _genDataCallback;
+    for (const auto& input : _inputsInfo) {
+        auto inputBlob = allocateBlob(input.second, lockLayout);
+
+        ASSERT_NE(genDataCallback, nullptr);
+        genDataCallback(inputBlob);
+
+        ASSERT_EQ(InferenceEngine::StatusCode::OK, _inferRequest->SetBlob(input.first.c_str(), inputBlob, &_resp)) << _resp.msg;
+
+        _inputMap[input.first] = inputBlob;
+        genDataCallback = _genDataCallback;
+    }
+}
+
+void vpuLayersTests::genRefBlob(bool lockLayout) {
+    _refBlob = allocateBlob(_outputsInfo.begin()->second, lockLayout);
+}
+
+void vpuLayersTests::genOutputBlobs(bool lockLayout) {
+    for (const auto& output : _outputsInfo) {
+        auto outputBlob = allocateBlob(output.second, lockLayout);
+
+        ASSERT_EQ(InferenceEngine::StatusCode::OK, _inferRequest->SetBlob(output.first.c_str(), outputBlob, &_resp)) << _resp.msg;
+
+        _outputMap[output.first] = outputBlob;
+    }
+}
+
+void vpuLayersTests::createInferRequest(const NetworkParams& params) {
+    for (auto& input : _inputsInfo) {
+        configure(input.second, params._inputPrecision, params._layoutPreference);
+    }
+
+    for (auto& output : _outputsInfo) {
+        configure(output.second, params._outputPrecision, params._layoutPreference);
+    }
+
+    std::map<std::string, std::string> config(_config);
+    if (params._useHWOpt) {
+        config[VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION)] = CONFIG_VALUE(YES);
+    } else {
+        config[VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION)] = CONFIG_VALUE(NO);
+    }
+#if 0
+    config[CONFIG_KEY(LOG_LEVEL)] = CONFIG_VALUE(LOG_INFO);
+#endif
+    config[CONFIG_KEY(PERF_COUNT)] = CONFIG_VALUE(YES);
+    config[VPU_CONFIG_KEY(PERF_REPORT_MODE)] = VPU_CONFIG_VALUE(PER_STAGE);
+    config[VPU_CONFIG_KEY(FORCE_DEPRECATED_CNN_CONVERSION)] = CONFIG_VALUE(NO); // Make VPU plugin be able to use NGraph network.
+
+    InferenceEngine::StatusCode st = InferenceEngine::StatusCode::GENERAL_ERROR;
+    ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, _cnnNetwork, config, &_resp));
+    ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
+    ASSERT_NO_THROW(_exeNetwork->CreateInferRequest(_inferRequest, &_resp)) << _resp.msg;
+    ASSERT_EQ((int) InferenceEngine::StatusCode::OK, st) << _resp.msg;
+    ASSERT_NE(_inferRequest, nullptr) << _resp.msg;
+
+    genInputBlobs(params._lockLayout);
+    genOutputBlobs(params._lockLayout);
+    genRefBlob(params._lockLayout);
+}
+
+void vpuLayersTests::makeSingleLayerNetworkImpl(const LayerParams& layerParams,
+                                                const NetworkParams& networkParams,
+                                                const WeightsBlob::Ptr& weights) {
+    IE_ASSERT(!layerParams._layerType.empty());
+
+    if (_doReshape) {
+        auto reshapedInput = _inputTensors;
+        reshapedInput[0].insert(reshapedInput[0].begin(), 4 - _inputTensors[0].size(), 1);
+        _testNet.addLayer(VpuTestNet::LayerInitParams("Reshape")
+                 .params({})
+                 .in({reshapedInput})
+                 .out({_inputTensors}));
+    }
+    VpuTestNet::CalcWeights weightsCallback, biasesCallback;
+    if (weights) {
+        auto* weightsPtr = weights->data().as<uint16_t*>();
+        auto* biasesPtr  = weightsPtr + layerParams._weightsSize;
+        weightsCallback = [weightsPtr](uint16_t* ptr, size_t weightsSize){ memcpy(ptr, weightsPtr, weightsSize * sizeof (uint16_t)); };
+        biasesCallback  = [biasesPtr ](uint16_t* ptr, size_t weightsSize){ memcpy(ptr, biasesPtr , weightsSize * sizeof (uint16_t)); };
+    }
+   _testNet.addLayer(VpuTestNet::LayerInitParams(layerParams._layerType)
+             .params(layerParams._params)
+             .in(_inputTensors)
+             .out(_outputTensors)
+             .weights(layerParams._weightsSize).fillWeights(std::move(weightsCallback))
+             .biases(layerParams._biasesSize).fillBiases(std::move(biasesCallback)));
+
+    genNetwork();
+
+    if (networkParams._createInference)
+        createInferRequest(networkParams);
+}
+
+void vpuLayersTests::readNetwork(const std::string& model, const WeightsBlob::Ptr& modelWeights) {
+    _cnnNetwork = PluginCache::get().ie()->ReadNetwork(model, modelWeights);
+
+    ASSERT_NO_THROW(_inputsInfo = _cnnNetwork.getInputsInfo());
+    ASSERT_NO_THROW(_outputsInfo = _cnnNetwork.getOutputsInfo());
+}
+
+void vpuLayersTests::readNetwork(const std::string& modelFilename, const std::string& weightsFilename) {
+    _cnnNetwork = PluginCache::get().ie()->ReadNetwork(modelFilename, weightsFilename);
+
+    ASSERT_NO_THROW(_inputsInfo = _cnnNetwork.getInputsInfo());
+    ASSERT_NO_THROW(_outputsInfo = _cnnNetwork.getOutputsInfo());
+}
+
+bool vpuLayersTests::Infer() {
+    if (_inferRequest == nullptr ||
+        _inputMap.empty() ||
+        _outputMap.empty())
+        return false;
+    const auto st = _inferRequest->Infer(&_resp);
+    EXPECT_EQ(InferenceEngine::StatusCode::OK, st) << _resp.msg;
+    //dumpPerformance();
+    return true;
+}
+
+bool vpuLayersTests::generateNetAndInfer(const NetworkParams& params) {
+    genNetwork();
+    createInferRequest(params);
+    if (params._runRefGraph) {
+        ReferenceGraph();
+    }
+    return Infer();
+}
+
+void vpuLayersTests::ResetGeneratedNet() {
+    SetSeed(DEFAULT_SEED_VALUE);
+    _exeNetwork.reset();
+    _inferRequest.reset();
+}
+
+void vpuLayersTests::ResetReferenceLayers() {
+    _testNet.clear();
+}
+
+void vpuLayersTests::SetInputReshape() {
+    _doReshape = true;
+}
+
+void vpuLayersTests::SetInputTensor(const tensor_test_params & tensor) {
+    _inputTensors = {tensor.asVector()};
+}
+
+void vpuLayersTests::SetInputTensor(const tensor_test_params_3d& tensor) {
+    _inputTensors = {tensor.asVector()};
+}
+
+void vpuLayersTests::SetInputTensors(const IN_OUT_desc& in_tensors) {
+    _inputTensors = in_tensors;
+}
+
+void vpuLayersTests::SetOutputTensor(const tensor_test_params& tensor) {
+    _outputTensors = {tensor.asVector()};
+}
+
+void vpuLayersTests::SetOutputTensor(const tensor_test_params_3d& tensor) {
+    _outputTensors = {tensor.asVector()};
+}
+
+void vpuLayersTests::SetOutputTensors(const IN_OUT_desc& out_tensors) {
+    _outputTensors = out_tensors;
+}
+
+void vpuLayersTests::SetFirstInputToRange(float start, float finish) {
+    ASSERT_NE(_inputMap.size(), 0);
+    ASSERT_LT(start, finish);
+    float range = finish - start;
+    /* input data preparation */
+    auto inputBlob = _inputMap[_inputsInfo.begin()->first];
+    uint16_t *inputBlobRawDataFp16 = inputBlob->buffer().as<uint16_t*>();
+    ASSERT_NE(inputBlobRawDataFp16, nullptr);
+    /* values generation in the range (start, finish) to check difference with float output */
+    size_t count = inputBlob->size();
+    float shift = range / count;
+    float i = start;
+    for (size_t indx = 0; indx < count; i += shift, indx++) {
+        inputBlobRawDataFp16[indx] = PrecisionUtils::f32tof16(i);
+    }
+}
+
+void vpuLayersTests::SetInputInOrder() {
+    ASSERT_NE(_inputsInfo.size(), 0);
+    auto inputBlob = _inputMap[_inputsInfo.begin()->first];
+    ASSERT_NE(inputBlob, nullptr);
+    uint16_t *inputBlobRawDataFp16 = inputBlob->buffer().as<uint16_t*>();
+    ASSERT_NE(inputBlobRawDataFp16, nullptr);
+    /* values generation in the range (-BOUND, BOUND) to check difference with float output */
+    int  count = inputBlob->size();
+
+    for (int indx = 0; indx < count; indx++) {
+        inputBlobRawDataFp16[indx] = PrecisionUtils::f32tof16((float)indx);
+    }
+}
+
+void vpuLayersTests::SetInputInOrderReverse() {
+    ASSERT_NE(_inputsInfo.size(), 0);
+    auto inputBlob = _inputMap[_inputsInfo.begin()->first];
+    ASSERT_NE(inputBlob, nullptr);
+    uint16_t *dstPtr = inputBlob->buffer().as<uint16_t*>();
+    ASSERT_NE(dstPtr, nullptr);
+    size_t count = inputBlob->size();
+    for (size_t indx = 0; indx < count; indx++) {
+        dstPtr[indx] = PrecisionUtils::f32tof16((float)(count - 1 - indx));
+    }
+}
+
+void vpuLayersTests::genNetwork() {
+    const auto& networkData = _testNet.genNetwork(_irVersion);
+    readNetwork(networkData.model, networkData.weights);
+    ASSERT_NE(_cnnNetwork.layerCount(), 0);
+    ASSERT_GE(_cnnNetwork.getInputsInfo().size(), 1);
+    ASSERT_GE(_cnnNetwork.getOutputsInfo().size(), 1);
+}
+
+void vpuLayersTests::ReferenceGraph() {
+    /* data preparation */
+    ASSERT_EQ(_inputsInfo.size(), 1);
+    ASSERT_TRUE(!_testNet.empty());
+    auto referenceInput = _testNet.getFirstInput();
+    auto realInput = _inputMap[_inputsInfo.begin()->first];
+    ASSERT_NE(referenceInput, nullptr);
+    ASSERT_NE(realInput, nullptr);
+    const size_t count = referenceInput->size();
+    ASSERT_EQ(count, realInput->size());
+    const uint16_t* inputBlobRawDataFp16 = realInput->buffer();
+    uint16_t* refBlobRawDataFp16 = referenceInput->buffer();
+    ASSERT_NE(inputBlobRawDataFp16, nullptr);
+    ie_memcpy(refBlobRawDataFp16, realInput->byteSize(), inputBlobRawDataFp16, count * sizeof(uint16_t));
+    _testNet.run();
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_layers_tests.hpp b/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_layers_tests.hpp
new file mode 100644 (file)
index 0000000..e67d3ae
--- /dev/null
@@ -0,0 +1,138 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <gtest/gtest.h>
+
+#include <algorithm>
+#include <tuple>
+
+#include <ie_version.hpp>
+#include <precision_utils.h>
+
+#include <vpu/vpu_plugin_config.hpp>
+#include <vpu/private_plugin_config.hpp>
+#include <vpu/utils/enums.hpp>
+#include <vpu/utils/ie_helpers.hpp>
+
+#include "ie_core_adapter.hpp"
+#include "tests_common.hpp"
+#include "single_layer_common.hpp"
+#include "myriad_layers_reference_functions.hpp"
+#include "vpu_layer_tests_utils.hpp"
+#include "vpu_test_net.hpp"
+
+class vpuLayersTests : public TestsCommon {
+protected:
+    class NetworkParams {
+    public:
+        vpu::LayoutPreference      _layoutPreference = vpu::LayoutPreference::ChannelMajor;
+        InferenceEngine::Precision _outputPrecision  = InferenceEngine::Precision::FP16;
+        InferenceEngine::Precision _inputPrecision   = InferenceEngine::Precision::FP16;
+
+        bool _useHWOpt = false;
+        // For historical reasons, the createInferRequest() function 'hacks' the blob
+        // layout: it replaces NCHW with NHWC even if you explicitly set the layout
+        // preference to channel-major. To disable this hack, set _lockLayout = true.
+        bool _lockLayout = false;
+
+        bool _runRefGraph = true;
+        bool _createInference = true;
+    };
+
+    class NetworkInitParams : public NetworkParams {
+    public:
+        NetworkInitParams& layoutPreference(const vpu::LayoutPreference layoutPreference)
+            { _layoutPreference = layoutPreference; return *this;}
+        NetworkInitParams& outputPrecision(InferenceEngine::Precision outputPrecision)
+            { _outputPrecision = outputPrecision; return *this;}
+        NetworkInitParams& inputPrecision(InferenceEngine::Precision inputPrecision)
+            { _inputPrecision = inputPrecision; return *this;}
+
+        NetworkInitParams& useHWOpt(const bool useHWOpt)
+            { _useHWOpt = useHWOpt; return *this;}
+        NetworkInitParams& lockLayout(const bool lockLayout)
+            { _lockLayout = lockLayout; return *this;}
+        NetworkInitParams& runRefGraph(const bool runRefGraph)
+            { _runRefGraph = runRefGraph; return *this;}
+        NetworkInitParams& createInference(const bool createInference)
+            { _createInference = createInference; return *this;}
+    };
+    using DataGenerator   = void (*)(InferenceEngine::Blob::Ptr blob);
+    using LayerParams     = VpuTestNet::LayerParams;
+    using LayerInitParams = VpuTestNet::LayerInitParams;
+
+protected:
+    void SetUp() override;
+    void TearDown() override;
+    bool CheckMyriadX();
+    void dumpPerformance();
+
+    // For historical reasons, the gen-blob functions 'hack' the blob layout:
+    // they replace NCHW with NHWC even if you explicitly set the layout
+    // preference to channel-major. To disable this hack, set lockLayout = true.
+    void genInputBlobs(bool lockLayout = false);
+    void genOutputBlobs(bool lockLayout = false);
+    void genRefBlob(bool lockLayout = false);
+
+    void ReferenceGraph();
+    bool Infer();
+    bool generateNetAndInfer(const NetworkParams& params);
+    void ResetGeneratedNet();
+    void ResetReferenceLayers();
+
+    void SetInputReshape();
+    void SetInputTensor(const tensor_test_params& tensor);
+    void SetInputTensor(const tensor_test_params_3d& tensor);
+    void SetOutputTensor(const tensor_test_params& tensor);
+    void SetOutputTensor(const tensor_test_params_3d& tensor);
+    void SetInputTensors(const IN_OUT_desc& in_tensors);
+    void SetOutputTensors(const IN_OUT_desc& out_tensors);
+
+    void SetFirstInputToRange(float start,
+                              float finish);
+
+    void SetInputInOrder();
+    void SetInputInOrderReverse();
+    void SetSeed(uint32_t seed);
+
+    InferenceEngine::Blob::Ptr getReferenceOutput();
+
+    void genNetwork();
+    void makeSingleLayerNetworkImpl(const LayerParams& layerParams,
+                       const NetworkParams& networkParams,
+                       const WeightsBlob::Ptr& weights = nullptr);
+
+    void readNetwork(const std::string& model, const WeightsBlob::Ptr& modelWeights = nullptr);
+    void readNetwork(const std::string& modelFilename, const std::string& weightsFilename);
+    void createInferRequest(const NetworkParams& params);
+
+protected:
+    IECoreAdapter::Ptr                        _vpuPluginPtr;
+
+    std::map<std::string, std::string>             _config;
+
+    IRVersion                                      _irVersion = IRVersion::v7;
+
+    InferenceEngine::CNNNetwork                    _cnnNetwork;
+    InferenceEngine::ResponseDesc                  _resp;
+    InferenceEngine::InputsDataMap                 _inputsInfo;
+    InferenceEngine::BlobMap                       _inputMap;
+    InferenceEngine::BlobMap                       _outputMap;
+    InferenceEngine::OutputsDataMap                _outputsInfo;
+    InferenceEngine::IExecutableNetwork::Ptr       _exeNetwork;
+    InferenceEngine::IInferRequest::Ptr            _inferRequest;
+
+    InferenceEngine::Blob::Ptr                     _refBlob;
+    VpuTestNet                                     _testNet;
+
+    DataGenerator                                  _genDataCallback0 = nullptr;
+    DataGenerator                                  _genDataCallback = GenRandomData;
+
+private:
+    IN_OUT_desc                                    _inputTensors;
+    IN_OUT_desc                                    _outputTensors;
+    bool                                           _doReshape = false;  // reshape 4D input to layer input Tensor
+};
diff --git a/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_test_common_definitions.hpp b/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_test_common_definitions.hpp
new file mode 100644 (file)
index 0000000..c0a9fbd
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <ie_common.h>
+#include <ie_blob.h>
+
+enum class IRVersion { v7, v10 };
+
+using IN_OUT_desc = std::vector<InferenceEngine::SizeVector>;
+
+using WeightsBlob = InferenceEngine::TBlob<uint8_t>;
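+
+// Illustrative note: IN_OUT_desc holds one SizeVector per tensor, e.g.
+// IN_OUT_desc{{1, 3, 224, 224}} describes a single 4D input or output;
+// WeightsBlob is the U8 TBlob that carries packed FP16 weights in these tests.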
diff --git a/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_test_net.cpp b/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_test_net.cpp
new file mode 100644 (file)
index 0000000..efcaf7d
--- /dev/null
@@ -0,0 +1,256 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu_test_net.hpp"
+
+#include <ie_memcpy.h>
+#include <blob_factory.hpp>
+
+#include "vpu_ir_dumper.hpp"
+
+namespace  {
+
+std::vector<double> GetParamAsDoubles(const std::string& vals) {
+    std::vector<double> result;
+    std::istringstream stream(vals);
+    std::string str;
+    while (getline(stream, str, ',')) {
+        try {
+            result.push_back(std::stod(str));
+        } catch (...) {
+            THROW_IE_EXCEPTION << "Cannot parse parameter " << str
+                               << ". Value " << vals << " cannot be casted to double.";
+        }
+    }
+    return result;
+}
+template<class To>
+struct PackStaticCastConverter {
+    static To convert(double value) {
+        return static_cast<To>(value);
+    }
+};
+
+struct PackFP16Converter {
+    static int16_t convert(double value) {
+        return InferenceEngine::PrecisionUtils::f32tof16(static_cast<float>(value));
+    }
+};
+
+template<typename T, typename Converter = PackStaticCastConverter<T>>
+std::vector<uint8_t> PackData(const std::vector<double>& values) {
+    std::vector<uint8_t> result(values.size() * sizeof (T));
+    std::vector<T> tmp(values.size());
+    std::transform(values.cbegin(), values.cend(), tmp.begin(), Converter::convert);
+    ie_memcpy(result.data(), result.size(), tmp.data(), tmp.size() * sizeof(T));
+    return result;
+}
+
+std::vector<uint8_t> PackData(const std::vector<double>& values, const InferenceEngine::Precision& precision) {
+    if (precision == InferenceEngine::Precision::I64)
+        return PackData<int64_t>(values);
+    if (precision == InferenceEngine::Precision::FP16)
+        return PackData<int16_t, PackFP16Converter>(values);
+
+    THROW_IE_EXCEPTION << "unsupported pack format '" << precision << "'";
+}
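+
+// Illustrative example (assumed): PackData({1.0, 2.0}, Precision::I64) returns
+// 16 bytes holding int64_t 1 and 2; PackData({1.0}, Precision::FP16) returns
+// 2 bytes holding f32tof16(1.0f). Any other precision throws.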
+
+InferenceEngine::Layout getLayout(const IN_OUT_desc& inDim) {
+    switch(inDim[0].size()) {
+        case 2:
+            return InferenceEngine::HW;
+        case 3:
+            return InferenceEngine::CHW;
+        case 4:
+            return InferenceEngine::NHWC;
+        case 5:
+            return InferenceEngine::NDHWC;
+    }
+    return InferenceEngine::ANY;
+}
+
+}
+
+void VpuTestNet::ReferenceFunctionWrapper::setCallback(VpuTestNet::CallbackBasic&& f, const ParamsStruct& params) {
+    if (f)
+        _callback = std::bind(std::move(f), _input, _output,
+                              params);
+}
+
+void VpuTestNet::ReferenceFunctionWrapper::setCallback(VpuTestNet::CallbackWithWeights&& f, const ParamsStruct& params) {
+    if (f)
+        _callback = std::bind(std::move(f), _input, _output,
+                              _weights, _weightsSize, _biases, _biasesSize,
+                              params);
+}
+
+void VpuTestNet::genInputOutput(VpuTestNet::ReferenceFunctionWrapper& obj, const LayerParams& params) {
+    auto outW = params._outDim[0];
+    if (_callbacks.empty()) {
+        auto newW = params._inDim[0];
+        const InferenceEngine::Layout inputLayout = getLayout(params._inDim);
+        obj._input = InferenceEngine::make_shared_blob<uint16_t>({InferenceEngine::Precision::FP16, newW, inputLayout});
+        obj._input->allocate();
+    } else {
+        auto val = _callbacks.back();
+        ASSERT_EQ(params._inDim[0].size(), val._output->getTensorDesc().getDims().size());
+        obj._input = val._output;
+        auto inW = params._inDim[0];
+        for (size_t i = 0; i < params._outDim[0].size(); ++i) {
+            ASSERT_EQ(inW[i], val._output->getTensorDesc().getDims()[i]);
+        }
+    }
+    const InferenceEngine::Layout outLayout = getLayout(params._outDim);
+    obj._output = make_blob_with_precision(
+            InferenceEngine::TensorDesc(params._outPrecision, {outW}, outLayout));
+    obj._output->allocate();
+}
+
+VpuTestNet::ReferenceFunctionWrapper& VpuTestNet::addLayerImpl(const LayerParams& params) {
+    _layers.push_back(params);
+    ReferenceFunctionWrapper obj;
+    genInputOutput(obj, params);
+    obj._weightsSize= params._weightsSize;
+    obj._biasesSize = params._biasesSize;
+    if (params._weightsSize) {
+        WeightsBlob* weights = new WeightsBlob({InferenceEngine::Precision::U8,
+                                               {(params._weightsSize) * sizeof(uint16_t)},
+                                               InferenceEngine::C});
+        weights->allocate();
+        obj._weightsPtr = WeightsBlob::Ptr(weights);
+        obj._weights = weights->data().as<uint16_t *>();
+    }
+    if (params._biasesSize) {
+        WeightsBlob* biases = new WeightsBlob({InferenceEngine::Precision::U8,
+                                              {(params._biasesSize) * sizeof(uint16_t)},
+                                              InferenceEngine::C});
+        biases->allocate();
+        obj._biasesPtr = WeightsBlob::Ptr(biases);
+        obj._biases = biases->data().as<uint16_t *>();
+    }
+    _callbacks.push_back(obj);
+    return *_callbacks.rbegin();
+}
+
+void VpuTestNet::addLayer(const VpuTestNet::LayerParams& params) {
+    addLayerImpl(params);
+}
+
+void VpuTestNet::addLayer(const VpuTestNet::LayerParams& params, VpuTestNet::CallbackBasic&& callback) {
+    addLayerImpl(params).setCallback(std::move(callback), params._params);
+}
+void VpuTestNet::addLayer(const VpuTestNet::LayerParams& params, VpuTestNet::CallbackWithWeights&& callback) {
+    addLayerImpl(params).setCallback(std::move(callback), params._params);
+}
+
+void VpuTestNet::run() const {
+    for (auto& elem : _callbacks) {
+        if (elem._callback)
+            elem._callback();
+    }
+}
+
+void VpuTestNet::clear() {
+    _callbacks.clear();
+    _layers.clear();
+}
+
+VpuTestNet::NetworkSerializedData VpuTestNet::genNetwork(IRVersion version) {
+    IE_ASSERT(!_layers.empty());
+    IRDumperNetwork IRDumper(version);
+    IRDumper.addInput("input"  , _layers.begin()->_inDim);
+    const size_t inputsSize = _layers.begin()->_inDim.size();
+
+    size_t testNetIndex = 0;
+    for (auto& elem : _layers) {
+        auto & layer = IRDumper.addLayer(elem._layerName + "_" + std::to_string(testNetIndex),
+                                         elem._layerType, elem._inDim, elem._outDim);
+        layer._outputPrecision = elem._outPrecision;
+
+        auto params = elem._params;
+        if (!params.empty()) {
+            if (version == IRVersion::v10) {
+                static const std::map<std::string, std::vector<std::string>> constLayerParams {
+                    {"Transpose", {"order"}},
+                    {"Pad"      , {"pads_begin", "pads_end", "pad_value"}},
+                };
+                auto paramsIt = constLayerParams.find(elem._layerType);
+                if (paramsIt != constLayerParams.cend()) {
+                    for (const auto& paramName : paramsIt->second) {
+                        if (params.find(paramName) == params.cend())
+                            continue;
+
+                        const auto paramValues = GetParamAsDoubles(params[paramName]);
+                        IRWeightsDescription weights;
+                        weights._precision = InferenceEngine::Precision::I64;
+                        if (paramName == "pad_value")
+                            weights._precision = InferenceEngine::Precision::FP16;
+
+                        weights._data = PackData(paramValues, weights._precision);
+
+                        weights._desc = {paramValues.size()};
+                        if (paramValues.size() == 1)
+                            weights._isScalar = true;
+
+                        weights._description = paramName;
+                        params.erase(paramName);
+                        layer._paramWeights.emplace_back(std::move(weights));
+                    }
+                }
+            }
+            layer._dataParams = params;
+        }
+        if (elem._weightsSize) {
+            IE_ASSERT(layer._weights.empty());
+            layer._weights._data.resize(elem._weightsSize * sizeof(int16_t));
+            if (!elem._weightsDim.empty())
+                layer._weights._desc = elem._weightsDim[0];
+
+            if (elem._fillWeights) {
+                auto& refLayer = _callbacks[testNetIndex];
+                elem._fillWeights(reinterpret_cast<uint16_t*>(layer._weights._data.data()), elem._weightsSize);
+                ie_memcpy(refLayer._weights, refLayer._weightsSize * sizeof(uint16_t), layer._weights._data.data(), elem._weightsSize * sizeof(uint16_t));
+            }
+        }
+        if (elem._biasesSize) {
+            IE_ASSERT(layer._biases.empty());
+            layer._biases._data.resize(elem._biasesSize * sizeof(int16_t));
+            if (!elem._biasesDim.empty())
+                layer._biases._desc = elem._biasesDim[0];
+
+            if (elem._fillBiases) {
+                auto& refLayer = _callbacks[testNetIndex];
+                elem._fillBiases(reinterpret_cast<uint16_t*>(layer._biases._data.data()), elem._biasesSize);
+                ie_memcpy(refLayer._biases, refLayer._biasesSize * sizeof(uint16_t), layer._biases._data.data(), elem._biasesSize * sizeof(uint16_t));
+            }
+        }
+        ++testNetIndex;
+    }
+
+    IRDumper.addOutput("output", _layers.rbegin()->_outDim);
+    IRDumper.finalize();
+
+    // separate lines here for debugging purpose.
+    auto modelNode = IRDumper.dump();
+    auto modelText = formatXmlNode(modelNode);
+    return {std::move(modelText), IRDumper.getWeights()};
+}
+
+void VpuTestNet::setWeightsCallbackForLayer(size_t index, VpuTestNet::CalcWeights&& callback) {
+    _layers[index]._fillWeights = std::move(callback);
+}
+
+void VpuTestNet::setBiasesCallbackForLayer(size_t index, VpuTestNet::CalcWeights&& callback) {
+    _layers[index]._fillBiases = std::move(callback);
+}
+
+InferenceEngine::Blob::Ptr VpuTestNet::getFirstInput() const {
+    IE_ASSERT(!empty());
+    return _callbacks.begin()->_input;
+}
+
+InferenceEngine::Blob::Ptr VpuTestNet::getLastOutput() const {
+    IE_ASSERT(!empty());
+    return _callbacks.rbegin()->_output;
+}
diff --git a/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_test_net.hpp b/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_test_net.hpp
new file mode 100644 (file)
index 0000000..abe29d9
--- /dev/null
@@ -0,0 +1,134 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <ie_common.h>
+#include <ie_blob.h>
+
+#include "vpu_test_common_definitions.hpp"
+#include "single_layer_common.hpp"
+#include "myriad_layers_reference_functions.hpp"
+
+class VpuTestNet
+{
+public:
+    using CallbackBasic = std::function<void(InferenceEngine::Blob::Ptr inTensor,
+                                             InferenceEngine::Blob::Ptr outTensor,
+                                             const ParamsStruct& params)>;
+
+    using CallbackWithWeights = std::function<void(const InferenceEngine::Blob::Ptr src,
+                                                   InferenceEngine::Blob::Ptr dst,
+                                                   const uint16_t *weights,
+                                                   size_t weightsSize,
+                                                   const uint16_t *biases,
+                                                   size_t biasSize,
+                                                   const ParamsStruct& params)>;
+
+
+    using CalcWeights = std::function<void(uint16_t* ptr, size_t weightsSize)>;
+
+    class LayerParams {
+    public:
+        std::string _layerType;
+        std::string _layerName;
+        ParamsStruct _params;
+        size_t _weightsSize = 0;
+        size_t _biasesSize = 0;
+        CalcWeights _fillWeights;
+        CalcWeights _fillBiases ;
+        IN_OUT_desc _inDim;
+        IN_OUT_desc _outDim;
+        IN_OUT_desc _weightsDim;
+        IN_OUT_desc _biasesDim;
+        InferenceEngine::Precision _outPrecision = InferenceEngine::Precision::FP16;
+    };
+
+    class LayerInitParams : public LayerParams {
+    public:
+        LayerInitParams(const std::string& layerType) { _layerType = layerType; _layerName = layerType + "_TEST"; }
+
+        LayerInitParams& name(const std::string& name)
+            { _layerName = name; return *this;}
+
+        LayerInitParams& params(ParamsStruct params)
+            { _params = std::move(params); return *this;}
+        LayerInitParams& weights(const size_t weightsSize)
+            { _weightsSize = weightsSize; return *this;}
+        LayerInitParams& biases(const size_t biasesSize)
+            { _biasesSize = biasesSize; return *this;}
+
+        LayerInitParams& in(IN_OUT_desc inDim)
+            { _inDim = std::move(inDim); return *this;}
+        LayerInitParams& out(IN_OUT_desc outDim)
+            { _outDim = std::move(outDim); return *this;}
+        LayerInitParams& weightsDim(IN_OUT_desc weightsDim)
+            { _weightsDim = std::move(weightsDim); return *this;}
+        LayerInitParams& biasesDim(IN_OUT_desc biasesDim)
+            { _biasesDim = std::move(biasesDim); return *this;}
+
+        LayerInitParams& fillWeights(CalcWeights && fillWeightsCallback)
+            { _fillWeights = std::move(fillWeightsCallback); return *this;}
+        LayerInitParams& fillBiases(CalcWeights && fillBiasesCallback)
+            { _fillBiases = std::move(fillBiasesCallback); return *this;}
+
+        LayerInitParams& outPrecision(const InferenceEngine::Precision outPrecision)
+            { _outPrecision = outPrecision; return *this;}
+    };
+
+    /* This is a limited implementation of the functionality required for graph */
+    /* generation: it builds linear chains of layers, to support testing of     */
+    /* functions with one input and one output (see the sketch below).          */
+    void addLayer(const LayerParams& params);
+    void addLayer(const LayerParams& params, CallbackBasic&& callback);
+    void addLayer(const LayerParams& params, CallbackWithWeights&& callback);
+
+    void run() const;
+
+    void clear();
+    bool empty() const {
+        return _layers.empty() && _callbacks.empty();
+    }
+
+    struct NetworkSerializedData {
+        std::string model;
+        WeightsBlob::Ptr weights;
+    };
+
+    NetworkSerializedData genNetwork(IRVersion version);
+    void setWeightsCallbackForLayer(size_t index, CalcWeights&& callback);
+    void setBiasesCallbackForLayer(size_t index, CalcWeights&& callback);
+
+    InferenceEngine::Blob::Ptr getFirstInput() const;
+    InferenceEngine::Blob::Ptr getLastOutput() const;
+
+private:
+    class ReferenceFunctionWrapper {
+    public:
+        std::function<void()> _callback;
+        InferenceEngine::Blob::Ptr _input;
+        InferenceEngine::Blob::Ptr _output;
+        WeightsBlob::Ptr _weightsPtr;
+        WeightsBlob::Ptr _biasesPtr;
+        uint16_t* _weights = nullptr;
+        uint16_t* _biases = nullptr;
+        size_t _weightsSize = 0;
+        size_t _biasesSize = 0;
+
+    public:
+        void setCallback(CallbackBasic&& f, const ParamsStruct& params);
+        void setCallback(CallbackWithWeights&& f, const ParamsStruct& params);
+    };
+
+private:
+    void genInputOutput(ReferenceFunctionWrapper& obj,
+                        const LayerParams& params);
+
+    ReferenceFunctionWrapper& addLayerImpl(const LayerParams& params);
+
+private:
+    std::vector<LayerParams> _layers;
+    std::vector<ReferenceFunctionWrapper> _callbacks;
+};
+
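A minimal usage sketch of the interface above, for orientation — it assumes ParamsStruct is a string-to-string map, IN_OUT_desc is a vector of dimension vectors, and that IRVersion provides an enumerator such as v10; the reference callback body is left hypothetical:

    VpuTestNet net;
    net.addLayer(VpuTestNet::LayerInitParams("ReLU")
                     .in({{1, 3, 224, 224}})
                     .out({{1, 3, 224, 224}}),
                 [](InferenceEngine::Blob::Ptr in,
                    InferenceEngine::Blob::Ptr out,
                    const ParamsStruct& params) {
                     /* compute the layer's reference result into `out` here */
                 });
    auto data = net.genNetwork(IRVersion::v10);  // serialized IR model plus weights blob
    net.run();                                   // executes the registered reference callbacks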
diff --git a/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_tests_config.hpp b/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_tests_config.hpp
new file mode 100644 (file)
index 0000000..a983cda
--- /dev/null
@@ -0,0 +1,31 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <vector>
+#include <string>
+
+#include <gtest/gtest.h>
+
+#include "functional_test_utils/skip_tests_config.hpp"
+
+namespace vpu {
+namespace tests {
+
+const char* pluginName();
+const char* pluginNameShort();
+const char* deviceName();
+bool deviceForceReset();
+
+}  // namespace tests
+}  // namespace vpu
+
+// The IE macros give us no way to pass the device name as a variable,
+// so we provide these two replacements for PLUGING_CASE_WITH_SUFFIX.
+#define VPU_PLUGING_CASE_WITH_SUFFIX(_suffix, _test, _params) \
+    INSTANTIATE_TEST_CASE_P(VPU_run##_suffix, _test, ::testing::Combine(::testing::Values(::vpu::tests::deviceName()), _params) )
+
+#define DISABLED_VPU_PLUGING_CASE_WITH_SUFFIX(_suffix, _test, _params) \
+    INSTANTIATE_TEST_CASE_P(DISABLED_VPU_run##_suffix, _test, ::testing::Combine(::testing::Values(::vpu::tests::deviceName()), _params) )
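For orientation, a minimal sketch of how these replacement macros are meant to be used — the parameterized fixture MyriadLayerTest and its parameter generator are hypothetical:

    // Instantiates MyriadLayerTest under the name VPU_runSmoke, combining the
    // device name from ::vpu::tests::deviceName() with the remaining parameters.
    VPU_PLUGING_CASE_WITH_SUFFIX(Smoke, MyriadLayerTest, ::testing::Values(1, 2, 4));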
index 27ef66b..622af44 100644 (file)
@@ -111,16 +111,6 @@ buildSingleLayerNetworkCommon(InferenceEngine::details::IFormatParser *parser,
     return result;
 }
 
-inline std::string getTestDeviceName(std::string libraryName) {
-    if (libraryName == "MKLDNNPlugin") {
-        return "CPU";
-    } else if (libraryName == "clDNNPlugin") {
-        return "GPU";
-    } else {
-        return libraryName;
-    }
-}
-
 void GenRandomDataCommon(InferenceEngine::Blob::Ptr blob);
 
 class BufferWrapper {
index 2725631..154307c 100644 (file)
@@ -14,35 +14,24 @@ const char *getModelPathNonFatal() noexcept;
 std::string get_data_path();
 
 inline const char *getModelPathNonFatalDefault() noexcept {
-#ifdef MODELS_PATH
-    const char *models_path = std::getenv("MODELS_PATH");
-
-    if (models_path == nullptr && MODELS_PATH == nullptr) {
-        return nullptr;
-    }
-
-    if (models_path == nullptr) {
-        return MODELS_PATH;
+    if (const auto envVar = std::getenv("MODELS_PATH")) {
+        return envVar;
     }
 
-    return models_path;
+#ifdef MODELS_PATH
+    return MODELS_PATH;
 #else
     return nullptr;
 #endif
 };
 
 inline std::string get_data_path_default() {
-#ifdef DATA_PATH
-    const char *data_path = std::getenv("DATA_PATH");
-
-    if (data_path == NULL) {
-        if (DATA_PATH != NULL) {
-            data_path = DATA_PATH;
-        } else {
-            return nullptr;
-        }
+    if (const auto envVar = std::getenv("DATA_PATH")) {
+        return envVar;
     }
-    return std::string(data_path);
+
+#ifdef DATA_PATH
+    return DATA_PATH;
 #else
     return nullptr;
 #endif
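After this refactoring both helpers resolve paths in the same order: the environment variable wins, the compile-time macro is the fallback, and only then is a null/empty result produced. A hypothetical session, assuming the binary was built with MODELS_PATH defined as "/opt/models":

    MODELS_PATH=/home/user/models ./ie_tests   # getModelPathNonFatalDefault() -> "/home/user/models"
    ./ie_tests                                 # getModelPathNonFatalDefault() -> "/opt/models"

Note that the retained `return nullptr;` in the #else branch of get_data_path_default constructs a std::string from a null pointer, which is undefined behavior if that branch is ever taken.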
index c7a8150..8a6188d 100644 (file)
@@ -173,7 +173,8 @@ static std::vector<std::shared_ptr<BaseTestCreator>>& getCreators() {
             std::make_shared<LayerTestCreator<InferenceEngine::ReduceLayer>>("ReduceSumSquare"),
             std::make_shared<LayerTestCreator<InferenceEngine::TopKLayer>>("TopK"),
             std::make_shared<LayerTestCreator<InferenceEngine::NonMaxSuppressionLayer>>("NonMaxSuppression"),
-            std::make_shared<LayerTestCreator<InferenceEngine::ScatterUpdateLayer>>("ScatterUpdate")
+            std::make_shared<LayerTestCreator<InferenceEngine::ScatterUpdateLayer>>("ScatterUpdate"),
+            std::make_shared<LayerTestCreator<InferenceEngine::ScatterElementsUpdateLayer>>("ScatterElementsUpdate")
     };
     return creators;
 }
index cbbd110..6711bc2 100644 (file)
@@ -235,3 +235,73 @@ inline InferenceEngine::InputInfo::Ptr getFirstInput(InferenceEngine::ICNNNetwor
     //ASSERT_GT(inputs.size(), 0);
     return inputs.begin()->second;
 }
+
+/**
+ * @brief Copies an 8-bit RGB image to the blob.
+ *
+ * Throws an exception in case of a dimension or input-size mismatch.
+ *
+ * @tparam data_t Type of the target blob
+ * @param RGB8 8-bit RGB image
+ * @param RGB8_size Size of the image
+ * @param blob Target blob to write image to
+ */
+template <typename data_t>
+void copyFromRGB8(uint8_t* RGB8, size_t RGB8_size, InferenceEngine::TBlob<data_t>* blob) {
+    InferenceEngine::SizeVector dims = blob->getTensorDesc().getDims();
+    if (4 != dims.size())
+        THROW_IE_EXCEPTION << "Cannot write data to input blob! Blob has incorrect dimensions size " << dims.size();
+    size_t num_channels = dims[1];  // because RGB
+    size_t num_images = dims[0];
+    size_t w = dims[3];
+    size_t h = dims[2];
+    size_t nPixels = w * h;
+
+    if (RGB8_size != w * h * num_channels * num_images)
+        THROW_IE_EXCEPTION << "input pixels mismatch, expecting " << w * h * num_channels * num_images
+                           << " bytes, got: " << RGB8_size;
+
+    std::vector<data_t*> dataArray;
+    for (unsigned int n = 0; n < num_images; n++) {
+        for (unsigned int i = 0; i < num_channels; i++) {
+            if (!n && !i && dataArray.empty()) {
+                dataArray.push_back(blob->data());
+            } else {
+                dataArray.push_back(dataArray.at(n * num_channels + i - 1) + nPixels);
+            }
+        }
+    }
+    for (size_t n = 0; n < num_images; n++) {
+        size_t n_num_channels = n * num_channels;
+        size_t n_num_channels_nPixels = n_num_channels * nPixels;
+        for (size_t i = 0; i < nPixels; i++) {
+            size_t i_num_channels = i * num_channels + n_num_channels_nPixels;
+            for (size_t j = 0; j < num_channels; j++) {
+                dataArray.at(n_num_channels + j)[i] = RGB8[i_num_channels + j];
+            }
+        }
+    }
+}
+
+/**
+ * @brief Splits the RGB channels into a float, I16, or U8 blob.
+ *
+ * The image buffer is assumed to be packed with no support for strides.
+ *
+ * @param imgBufRGB8 Packed 24-bit RGB image (3 bytes per pixel: R-G-B)
+ * @param lengthbytesSize Size of the RGB image in bytes, equal to the number of pixels times 3 (the number of channels)
+ * @param input Blob that receives the image split into 3 channels
+ */
+inline void ConvertImageToInput(unsigned char* imgBufRGB8, size_t lengthbytesSize, InferenceEngine::Blob& input) {
+    InferenceEngine::TBlob<float>* float_input = dynamic_cast<InferenceEngine::TBlob<float>*>(&input);
+    if (float_input != nullptr)
+        copyFromRGB8(imgBufRGB8, lengthbytesSize, float_input);
+
+    InferenceEngine::TBlob<short>* short_input = dynamic_cast<InferenceEngine::TBlob<short>*>(&input);
+    if (short_input != nullptr)
+        copyFromRGB8(imgBufRGB8, lengthbytesSize, short_input);
+
+    InferenceEngine::TBlob<uint8_t>* byte_input = dynamic_cast<InferenceEngine::TBlob<uint8_t>*>(&input);
+    if (byte_input != nullptr)
+        copyFromRGB8(imgBufRGB8, lengthbytesSize, byte_input);
+}
\ No newline at end of file
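A minimal usage sketch for the two helpers above — the buffer contents and blob shape are illustrative only:

    // Packed R-G-B bytes for one 224x224 image (filling it is out of scope here).
    std::vector<uint8_t> rgb(1 * 3 * 224 * 224);
    InferenceEngine::TBlob<float> blob(
        InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32,
                                    {1, 3, 224, 224},
                                    InferenceEngine::Layout::NCHW));
    blob.allocate();
    ConvertImageToInput(rgb.data(), rgb.size(), blob);  // splits into per-channel planes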
index 8702871..fe392e6 100644 (file)
@@ -9,7 +9,6 @@
 
 #include <gtest/gtest.h>
 
-#include <ie_builders.hpp>
 #include <ie_precision.hpp>
 
 #include "single_layer_common.hpp"
index f83425d..a87543c 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright (C) 2018-2020 Intel Corporation
+# Copyright (C) 2019 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 #
 
index 1a968f7..dac4a97 100644 (file)
@@ -21,25 +21,11 @@ file(GLOB
         inference_engine_tests/transformations/*.cpp
         inference_engine_tests/transformations/*.hpp
         cnn_network/*.cpp
-        builders/*.cpp
-        # TODO: apeskov: Please fix issue CVS
-        # shape_infer/*.cpp
-        shape_infer/built-in/*.cpp
         topology_verification_tests/*.cpp
         stress_tests/*.cpp
         cpp_api/*.cpp
         )
 
-# disable deprecated warnings for NN Builder
-
-function(nn_builder_disable_warnings)
-    disable_deprecated_warnings()
-    file(GLOB NN_BUILDER_TESTS_SRC builders/*.cpp)
-    set_source_files_properties(${NN_BUILDER_TESTS_SRC} PROPERTIES COMPILE_FLAGS "${ie_c_cxx_deprecated}")
-endfunction()
-
-nn_builder_disable_warnings()
-
 if (ENABLE_GNA)
     file(GLOB
             GNA_TESTS
diff --git a/inference-engine/tests_deprecated/unit/builders/argmax_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/argmax_layer_test.cpp
deleted file mode 100644 (file)
index 55e0848..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-#include <builders/ie_argmax_layer.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class ArgMaxLayerBuilderTest : public BuilderTestCommon {};
-
-TEST_F(ArgMaxLayerBuilderTest, getExistsLayerFromNetworkBuilder) {
-    Builder::Network network("network");
-    Builder::ArgMaxLayer argMaxLayer("ArgMax layer");
-    argMaxLayer.setAxis(1);
-    argMaxLayer.setOutMaxVal(0);
-    argMaxLayer.setTopK(20);
-    size_t ind = 0;
-    ASSERT_NO_THROW(ind = network.addLayer(argMaxLayer));
-    Builder::ArgMaxLayer layerFromNetwork(network.getLayer(ind));
-    ASSERT_EQ(argMaxLayer.getAxis(), layerFromNetwork.getAxis());
-    ASSERT_EQ(argMaxLayer.getOutMaxVal(), layerFromNetwork.getOutMaxVal());
-    ASSERT_EQ(argMaxLayer.getTopK(), layerFromNetwork.getTopK());
-}
-
-TEST_F(ArgMaxLayerBuilderTest, cannotAddLayerWithWrongAxis) {
-    Builder::Network network("network");
-    Builder::ArgMaxLayer argMaxLayer("ArgMax layer");
-    argMaxLayer.setAxis(500);  // here
-    argMaxLayer.setOutMaxVal(0);
-    argMaxLayer.setTopK(20);
-    ASSERT_THROW(network.addLayer(argMaxLayer), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(ArgMaxLayerBuilderTest, cannotAddLayerWithWrongOutMaxVal) {
-    Builder::Network network("network");
-    Builder::ArgMaxLayer argMaxLayer("ArgMax layer");
-    argMaxLayer.setAxis(1);
-    argMaxLayer.setOutMaxVal(500);  // here
-    argMaxLayer.setTopK(20);
-    ASSERT_THROW(network.addLayer(argMaxLayer), InferenceEngine::details::InferenceEngineException);
-}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/unit/builders/batch_normalization_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/batch_normalization_layer_test.cpp
deleted file mode 100644 (file)
index 64ebd71..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-#include <builders/ie_batch_normalization_layer.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class BatchNormalizationLayerBuilderTest : public BuilderTestCommon {};
-
-//TEST_F(BatchNormalizationLayerBuilderTest, cannotCreateBatchNormalizationWithoutWeightOrBiases) {
-//    ASSERT_THROW(((Builder::Layer)Builder::BatchNormalizationLayer("in1")), InferenceEngine::details::InferenceEngineException);
-//    ASSERT_THROW(((Builder::Layer)Builder::BatchNormalizationLayer("in1")
-//            .setWeights(generateBlob(Precision::FP32, {3}, Layout::C))), InferenceEngine::details::InferenceEngineException);
-//    ASSERT_THROW(((Builder::Layer)Builder::BatchNormalizationLayer("in1")
-//            .setBiases(generateBlob(Precision::FP32, {3}, Layout::C))), InferenceEngine::details::InferenceEngineException);
-//}
-
-TEST_F(BatchNormalizationLayerBuilderTest, getExistsLayerFromNetworkBuilder) {
-    Builder::Network network("Test");
-    idx_t weightsId = network.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {3}, Layout::C)));
-    idx_t biasesId = network.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {3}, Layout::C)));
-    Builder::BatchNormalizationLayer bnBuilder("bn");
-    idx_t bnId = network.addLayer({{0}, {weightsId}, {biasesId}}, bnBuilder);
-    Builder::BatchNormalizationLayer bnBuilderFromNetwork(network.getLayer(bnId));
-    ASSERT_EQ(bnBuilderFromNetwork.getEpsilon(), bnBuilder.getEpsilon());
-    bnBuilderFromNetwork.setEpsilon(2);
-    ASSERT_NE(bnBuilderFromNetwork.getEpsilon(), bnBuilder.getEpsilon());
-    ASSERT_EQ(bnBuilderFromNetwork.getEpsilon(), network.getLayer(bnId)->getParameters()["epsilon"].as<float>());
-}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/unit/builders/builder_test.hpp b/inference-engine/tests_deprecated/unit/builders/builder_test.hpp
deleted file mode 100644 (file)
index e1d545b..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <string.h>
-#include <ie_builders.hpp>
-#include <blob_factory.hpp>
-
-#include "tests_common.hpp"
-
-
-class BuilderTestCommon : public TestsCommon {
-public:
-    InferenceEngine::Blob::Ptr generateBlob(InferenceEngine::Precision precision,
-                                            InferenceEngine::SizeVector dims, InferenceEngine::Layout layout) {
-        InferenceEngine::Blob::Ptr blob = make_blob_with_precision(InferenceEngine::TensorDesc(precision, dims, layout));
-        blob->allocate();
-        fill_data(blob);
-        return blob;
-    }
-
-    template<class T>
-    InferenceEngine::Blob::Ptr generateBlob(InferenceEngine::Precision precision,
-                                            InferenceEngine::SizeVector dims, InferenceEngine::Layout layout,
-                                            std::vector<T> data) {
-        auto blob = generateBlob(precision, dims, layout);
-        auto *blbData = blob->buffer().as<T *>();
-        for (size_t i = 0; i < data.size(); i++) {
-            blbData[i] = data[i];
-        }
-        return blob;
-    }
-};
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/unit/builders/clamp_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/clamp_layer_test.cpp
deleted file mode 100644 (file)
index 22380d8..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-#include <builders/ie_clamp_layer.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class ClampLayerBuilderTest : public BuilderTestCommon {};
-
-TEST_F(ClampLayerBuilderTest, getExistsLayerFromNetworkBuilder) {
-    Builder::Network net("network");
-    Builder::ClampLayer clampLayer("clampLayer");
-    clampLayer.setMinValue(0.1).setMaxValue(0.2);
-    size_t ind = net.addLayer(clampLayer);
-    Builder::ClampLayer layerFromNet(net.getLayer(ind));
-    ASSERT_EQ(layerFromNet.getMinValue(), clampLayer.getMinValue());
-    ASSERT_EQ(layerFromNet.getMaxValue(), clampLayer.getMaxValue());
-}
-
-TEST_F(ClampLayerBuilderTest, cannotCreateLayerWithWrongMinValue) {
-    Builder::Network net("network");
-    Builder::ClampLayer clampLayer("clampLayer");
-    clampLayer.setMinValue(0).setMaxValue(0.2);
-    ASSERT_NO_THROW(net.addLayer(clampLayer));
-}
-
-TEST_F(ClampLayerBuilderTest, cannotCreateLayerWithWrongMaxValue) {
-    Builder::Network net("network");
-    Builder::ClampLayer clampLayer("clampLayer");
-    clampLayer.setMinValue(10).setMaxValue(-0.2);
-    ASSERT_THROW(net.addLayer(clampLayer), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(ClampLayerBuilderTest, cannotCreateLayerWithWrongShapes) {
-    Builder::Network net("network");
-    Builder::Layer::Ptr fakeClampLayerPtr = std::make_shared<Builder::Layer>("Clamp", "Clamp layer");
-    fakeClampLayerPtr->getInputPorts().push_back(Port({1, 1, 1, 1}));
-    fakeClampLayerPtr->getOutputPorts().push_back(Port({1, 1, 1, 2}));
-    Builder::ClampLayer clampLayer(fakeClampLayerPtr);
-    clampLayer.setMinValue(0.0f).setMaxValue(1.0f);
-    ASSERT_THROW(net.addLayer(clampLayer), InferenceEngine::details::InferenceEngineException);
-}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/unit/builders/concat_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/concat_layer_test.cpp
deleted file mode 100644 (file)
index 1f6770c..0000000
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-#include <builders/ie_concat_layer.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class ConcatLayerBuilderTest : public BuilderTestCommon {};
-
-TEST_F(ConcatLayerBuilderTest, getExistsLayerFromNetworkBuilderAxis) {
-    Builder::Network network("network");
-    Builder::ConcatLayer layer("concat layer");
-
-    layer.setAxis(0);
-    layer.setInputPorts({Port({1, 2, 55, 55}), Port({3, 2, 55, 55})});
-    layer.setOutputPort(Port({1 + 3, 2, 55, 55}));
-
-    size_t ind = 0;
-    ASSERT_NO_THROW(ind = network.addLayer(layer));
-    network.getLayer(ind)->validate(false);
-    ASSERT_NO_THROW(network.getLayer(ind)->validate(false));
-    Builder::ConcatLayer layerFromNet(network.getLayer(ind));
-
-    ASSERT_EQ(layer.getAxis(), layerFromNet.getAxis());
-    ASSERT_EQ(layer.getInputPorts(), layerFromNet.getInputPorts());
-    ASSERT_EQ(layer.getOutputPort(), layerFromNet.getOutputPort());
-}
-
-TEST_F(ConcatLayerBuilderTest, cannotCreateLayerWithNoInputPorts) {
-    Builder::Network network("network");
-    Builder::ConcatLayer layer("concat layer");
-
-    layer.setAxis(1);
-    layer.setOutputPort(Port({1, 2 + 4, 55, 55}));
-    // here should be layer.setInputPort(...)
-
-    size_t ind = 0;
-    ASSERT_NO_THROW(ind = network.addLayer(layer));
-    ASSERT_THROW(network.getLayer(ind)->validate(false), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(ConcatLayerBuilderTest, cannotCreateLayerWithOneInputPort) {
-    Builder::Network network("network");
-    Builder::ConcatLayer layer("concat layer");
-
-    layer.setAxis(1);
-    layer.setInputPorts({Port({1, 2, 55, 55})});  // here
-    layer.setOutputPort(Port({1, 2 + 4, 55, 55}));
-
-    size_t ind = 0;
-    ASSERT_NO_THROW(ind = network.addLayer(layer));
-    ASSERT_THROW(network.getLayer(ind)->validate(false), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(ConcatLayerBuilderTest, cannotCreateLayerWithWrongAxis) {
-    Builder::Network network("network");
-    Builder::ConcatLayer layer("concat layer");
-
-    layer.setAxis(50);  // here
-    layer.setInputPorts({Port({1, 2, 55, 55}), Port({3, 2, 55, 55})});
-    layer.setOutputPort(Port({1 + 3, 2, 55, 55}));
-
-    size_t ind = 0;
-    ASSERT_NO_THROW(ind = network.addLayer(layer));
-    ASSERT_THROW(network.getLayer(ind)->validate(false), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(ConcatLayerBuilderTest, cannotCreateLayerWithUnalignedPorts1) {
-    Builder::Network network("network");
-    Builder::ConcatLayer layer("concat layer");
-
-    layer.setAxis(0);
-    layer.setInputPorts({Port({1, 2, 55, 55}), Port({3, 2, 55, 55})});
-    layer.setOutputPort(Port({1 + 3, 2, 55, 155}));  // should be {1 + 3, 2, 55, 55}
-
-    size_t ind = 0;
-    ASSERT_NO_THROW(ind = network.addLayer(layer));
-    ASSERT_THROW(network.getLayer(ind)->validate(false), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(ConcatLayerBuilderTest, cannotCreateLayerWithUnalignedPorts2) {
-    Builder::Network network("network");
-    Builder::ConcatLayer layer("concat layer");
-
-    layer.setAxis(0);
-    layer.setInputPorts({Port({1, 2, 55, 55}), Port({3, 2, 55, 55})});
-    layer.setOutputPort(Port({1 + 3, 2, 155, 55}));  // should be {1 + 3, 2, 55, 55}
-
-    size_t ind = 0;
-    ASSERT_NO_THROW(ind = network.addLayer(layer));
-    ASSERT_THROW(network.getLayer(ind)->validate(false), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(ConcatLayerBuilderTest, cannotCreateLayerWithUnalignedPorts3) {
-    Builder::Network network("network");
-    Builder::ConcatLayer layer("concat layer");
-
-    layer.setAxis(0);
-    layer.setInputPorts({Port({1, 2, 55, 55}), Port({3, 2, 55, 55})});
-    layer.setOutputPort(Port({100, 2, 55, 55}));  // should be {1 + 3, 2, 55, 55}
-
-    size_t ind = 0;
-    ASSERT_NO_THROW(ind = network.addLayer(layer));
-    ASSERT_THROW(network.getLayer(ind)->validate(false), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(ConcatLayerBuilderTest, cannotCreateLayerWithUnalignedPorts4) {
-    Builder::Network network("network");
-    Builder::ConcatLayer layer("concat layer");
-
-    layer.setAxis(1);
-    layer.setInputPorts({Port({1, 2, 55, 55}), Port({3, 2, 55, 55})});
-    layer.setOutputPort(Port({1, 100, 55, 55}));  // should be {1, 2 + 4, 55, 55}
-
-    size_t ind = 0;
-    ASSERT_NO_THROW(ind = network.addLayer(layer));
-    ASSERT_THROW(network.getLayer(ind)->validate(false), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(ConcatLayerBuilderTest, cannotCreateLayerWithDifferentInputPorts1) {
-    Builder::Network network("network");
-    Builder::ConcatLayer layer("concat layer");
-
-    layer.setAxis(0);
-    layer.setInputPorts({Port({1, 2, 55, 55}), Port({3, 2, 55, 155})});  // here
-    layer.setOutputPort(Port({1 + 3, 4, 55, 55}));
-
-    size_t ind = 0;
-    ASSERT_NO_THROW(ind = network.addLayer(layer));
-    ASSERT_THROW(network.getLayer(ind)->validate(false), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(ConcatLayerBuilderTest, cannotCreateLayerWithDifferentInputPorts2) {
-    Builder::Network network("network");
-    Builder::ConcatLayer layer("concat layer");
-
-    layer.setAxis(0);
-    layer.setInputPorts({Port({1, 2, 55, 55}), Port({3, 2, 155, 55})});  // here
-    layer.setOutputPort(Port({1 + 3, 4, 55, 55}));
-
-    size_t ind = 0;
-    ASSERT_NO_THROW(ind = network.addLayer(layer));
-    ASSERT_THROW(network.getLayer(ind)->validate(false), InferenceEngine::details::InferenceEngineException);
-}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/unit/builders/const_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/const_layer_test.cpp
deleted file mode 100644 (file)
index 0ed4e31..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-#include <builders/ie_const_layer.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class ConstLayerBuilderTest : public BuilderTestCommon {};
-
-TEST_F(ConstLayerBuilderTest, getExistsLayerFromNetworkBuilder) {
-    Builder::Network net("network");
-    Builder::ConstLayer layer("const layer");
-    layer.setData(generateBlob(Precision::FP32, {3}, Layout::C));
-    const size_t ind = net.addLayer(layer);
-    ASSERT_NO_THROW(net.getLayer(ind)->validate(false));
-}
-
-TEST_F(ConstLayerBuilderTest, cannotCreateLayerWithoutData) {
-    Builder::Network net("network");
-    Builder::ConstLayer layer("const layer");
-    ASSERT_THROW(net.addLayer(layer),
-            InferenceEngine::details::InferenceEngineException);
-}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/unit/builders/convolution_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/convolution_layer_test.cpp
deleted file mode 100644 (file)
index a828737..0000000
+++ /dev/null
@@ -1,307 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-#include <builders/ie_convolution_layer.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class ConvolutionLayerBuilderTest : public BuilderTestCommon {};
-
-TEST_F(ConvolutionLayerBuilderTest, cannotCreateConvolutionWithoutWeight) {
-    Builder::Network network("Test");
-
-    Builder::ConvolutionLayer convBuilder("Convolution");
-    convBuilder.setStrides({4, 4});
-    convBuilder.setKernel({11, 11});
-    convBuilder.setOutDepth(96);
-    convBuilder.setInputPort(Port({1, 3, 225, 225}));
-    convBuilder.setDilation({1, 1});
-    size_t ind = network.addLayer(convBuilder);
-    ASSERT_THROW(network.getLayer(ind)->validate(false), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(ConvolutionLayerBuilderTest, getExistsLayerFromNetworkBuilderWithInputPort) {
-    Builder::Network network("Test");
-    Builder::ConvolutionLayer convBuilder("Convolution");
-
-    convBuilder.setStrides({4, 4});
-    convBuilder.setKernel({11, 11});
-    convBuilder.setOutDepth(96);
-    convBuilder.setInputPort(Port({1, 3, 225, 225}));
-    convBuilder.setDilation({1, 1});
-
-    idx_t convId = network.addLayer(convBuilder);
-
-    idx_t weightsId = network.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {96, 3, 11, 11}, Layout::OIHW)));
-    network.connect({weightsId}, {convId, 1});
-
-    idx_t biasesId = network.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {96}, Layout::C)));
-    network.connect({biasesId}, {convId, 2});
-
-    Builder::ConvolutionLayer convBuilderFromNetwork(network.getLayer(convId));
-
-    ASSERT_EQ(convBuilderFromNetwork.getStrides(), convBuilder.getStrides());
-    ASSERT_EQ(convBuilderFromNetwork.getKernel(), convBuilder.getKernel());
-    ASSERT_EQ(convBuilderFromNetwork.getPaddingsEnd(), convBuilder.getPaddingsEnd());
-    ASSERT_EQ(convBuilderFromNetwork.getPaddingsBegin(), convBuilder.getPaddingsBegin());
-    ASSERT_EQ(convBuilderFromNetwork.getOutDepth(), convBuilder.getOutDepth());
-    ASSERT_EQ(convBuilderFromNetwork.getDilation(), convBuilder.getDilation());
-}
-
-TEST_F(ConvolutionLayerBuilderTest, getExistsLayerFromNetworkBuilderWithoutInputPort) {
-    Builder::Network network("Test");
-    Builder::ConvolutionLayer convBuilder("Convolution");
-
-    convBuilder.setStrides({4, 4});
-    convBuilder.setKernel({11, 11});
-    convBuilder.setOutDepth(96);
-    convBuilder.setDilation({1, 1});
-
-    idx_t convId = network.addLayer(convBuilder);
-
-    idx_t weightsId = network.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {96, 3, 11, 11}, Layout::OIHW)));
-    network.connect({weightsId}, {convId, 1});
-
-    idx_t biasesId = network.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {96}, Layout::C)));
-    network.connect({biasesId}, {convId, 2});
-
-    Builder::ConvolutionLayer convBuilderFromNetwork(network.getLayer(convId));
-
-    ASSERT_EQ(convBuilderFromNetwork.getStrides(), convBuilder.getStrides());
-    ASSERT_EQ(convBuilderFromNetwork.getKernel(), convBuilder.getKernel());
-    ASSERT_EQ(convBuilderFromNetwork.getPaddingsEnd(), convBuilder.getPaddingsEnd());
-    ASSERT_EQ(convBuilderFromNetwork.getPaddingsBegin(), convBuilder.getPaddingsBegin());
-    ASSERT_EQ(convBuilderFromNetwork.getOutDepth(), convBuilder.getOutDepth());
-    ASSERT_EQ(convBuilderFromNetwork.getDilation(), convBuilder.getDilation());
-}
-
-TEST_F(ConvolutionLayerBuilderTest, cannotCreateConvolutionWithWrongNumberOfInputChannels) {
-    Builder::Network network("Test");
-    Builder::ConvolutionLayer convBuilder("Convolution");
-
-    convBuilder.setStrides({4, 4});
-    convBuilder.setKernel({11, 11});
-    convBuilder.setOutDepth(96);
-    convBuilder.setInputPort(Port({1, 64, 225, 225}));  // here
-
-    idx_t convId = network.addLayer(convBuilder);
-
-    idx_t weightsId = network.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {96, 3, 11, 11}, Layout::OIHW)));
-    network.connect({weightsId}, {convId, 1});
-
-    idx_t biasesId = network.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {96}, Layout::C)));
-    network.connect({biasesId}, {convId, 2});
-
-    ASSERT_THROW(network.getLayer(convId)->validate(false), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(ConvolutionLayerBuilderTest, canCreateCorrcetConvolution) {
-    Builder::Network network("Test");
-    Builder::ConvolutionLayer convBuilder("Convolution");
-
-    convBuilder.setStrides({4, 4});
-    convBuilder.setKernel({11, 11});
-    convBuilder.setOutDepth(96);
-    convBuilder.setInputPort(Port({1, 3, 225, 225}));  // here
-
-    idx_t convId = network.addLayer(convBuilder);
-
-    idx_t weightsId = network.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {96, 3, 11, 11}, Layout::OIHW)));
-    network.connect({weightsId}, {convId, 1});
-
-    idx_t biasesId = network.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {96}, Layout::C)));
-    network.connect({biasesId}, {convId, 2});
-
-    ASSERT_NO_THROW(network.getLayer(convId)->validate(false));
-}
-
-TEST_F(ConvolutionLayerBuilderTest, cannotCreateConvolutionWithGroup) {
-    Builder::Network network("Test");
-    Builder::ConvolutionLayer convBuilder("Convolution");
-
-    convBuilder.setStrides({4, 4});
-    convBuilder.setKernel({11, 11});
-    convBuilder.setOutDepth(96);
-    convBuilder.setGroup(2);
-    convBuilder.setInputPort(Port({1, 6, 225, 225}));
-
-    idx_t convId = network.addLayer(convBuilder);
-
-    idx_t weightsId = network.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {96, 6, 11, 11}, Layout::OIHW)));
-    // should be {96, 6 / 2, 11, 11}
-    network.connect({weightsId}, {convId, 1});
-
-    idx_t biasesId = network.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {96}, Layout::C)));
-    network.connect({biasesId}, {convId, 2});
-
-    ASSERT_THROW(network.getLayer(convId)->validate(false), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(ConvolutionLayerBuilderTest, canCreateConvolution) {
-    Builder::Network network("Test");
-    Builder::ConvolutionLayer convBuilder("Convolution");
-
-    convBuilder.setStrides({4, 4});
-    convBuilder.setKernel({11, 11});
-    convBuilder.setOutDepth(96);
-    convBuilder.setGroup(2);
-    convBuilder.setInputPort(Port({1, 6, 225, 225}));  // here
-
-    idx_t convId = network.addLayer(convBuilder);
-
-    idx_t weightsId = network.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {96, 3, 11, 11}, Layout::OIHW)));
-    network.connect({weightsId}, {convId, 1});
-
-    idx_t biasesId = network.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {96}, Layout::C)));
-    network.connect({biasesId}, {convId, 2});
-
-    ASSERT_NO_THROW(network.getLayer(convId)->validate(false));
-}
-
-TEST_F(ConvolutionLayerBuilderTest, cannotCreateConvolutionWithWrongOutDepth) {
-    Builder::Network network("Test");
-    Builder::ConvolutionLayer convBuilder("Convolution");
-
-    convBuilder.setStrides({4, 4});
-    convBuilder.setKernel({11, 11});
-    convBuilder.setOutDepth(4);  // here
-    convBuilder.setInputPort(Port({1, 3, 225, 225}));
-
-    idx_t convId = network.addLayer(convBuilder);
-
-    idx_t weightsId = network.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {96, 3, 11, 11}, Layout::OIHW)));
-    network.connect({weightsId}, {convId, 1});
-
-    idx_t biasesId = network.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {96}, Layout::C)));
-    network.connect({biasesId}, {convId, 2});
-
-    ASSERT_THROW(network.getLayer(convId)->validate(false), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(ConvolutionLayerBuilderTest, cannotCreateConvolutionWithWrongStrides) {
-    Builder::Network network("Test");
-    Builder::ConvolutionLayer convBuilder("Convolution");
-
-    convBuilder.setStrides({4, 0});  // here
-    convBuilder.setKernel({11, 11});
-    convBuilder.setOutDepth(96);
-    convBuilder.setInputPort(Port({1, 3, 225, 225}));
-    convBuilder.setPaddingsEnd({0, 0});
-    convBuilder.setPaddingsBegin({0, 0});
-    convBuilder.setDilation({0, 0});
-    ASSERT_THROW(network.addLayer(convBuilder), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(ConvolutionLayerBuilderTest, cannotCreateConvolutionWithWrongKernel1) {
-    Builder::Network network("Test");
-    Builder::ConvolutionLayer convBuilder("Convolution");
-
-    convBuilder.setStrides({4, 4});
-    convBuilder.setKernel({11, 0});  // here
-    convBuilder.setOutDepth(96);
-    convBuilder.setInputPort(Port({1, 3, 225, 225}));
-
-    ASSERT_THROW(network.addLayer(convBuilder), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(ConvolutionLayerBuilderTest, cannotCreateConvolutionWithWrongKernel2) {
-    Builder::Network network("Test");
-    Builder::ConvolutionLayer convBuilder("Convolution");
-
-    convBuilder.setStrides({4, 4});
-    convBuilder.setKernel({11, 11, 11});  // here
-    convBuilder.setOutDepth(96);
-    convBuilder.setInputPort(Port({1, 3, 225, 225}));
-
-    ASSERT_THROW(network.addLayer(convBuilder), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(ConvolutionLayerBuilderTest, cannotCreateConvolutionWithWrongDilation1) {
-    Builder::Network network("Test");
-    Builder::ConvolutionLayer convBuilder("Convolution");
-
-    convBuilder.setStrides({4, 4});
-    convBuilder.setKernel({11, 11});
-    convBuilder.setOutDepth(96);
-    convBuilder.setInputPort(Port({1, 3, 225, 225}));
-    convBuilder.setDilation({1, 0});  // here
-
-    ASSERT_THROW(network.addLayer(convBuilder), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(ConvolutionLayerBuilderTest, cannotCreateConvolutionWithWrongDilation2) {
-    Builder::Network network("Test");
-    Builder::ConvolutionLayer convBuilder("Convolution");
-
-    convBuilder.setStrides({4, 4});
-    convBuilder.setKernel({11, 11});
-    convBuilder.setOutDepth(96);
-    convBuilder.setInputPort(Port({1, 3, 225, 225}));
-    convBuilder.setDilation({1, 1, 1});  // here
-
-    ASSERT_THROW(network.addLayer(convBuilder), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(ConvolutionLayerBuilderTest, canCreateLayerWithNumberOfGroupDividingNumberOfInputChannels) {
-    Builder::Network network("Test");
-    Builder::ConvolutionLayer convLayer("Convolution");
-
-    size_t weightsId = network.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {96, 2, 11, 11}, Layout::OIHW)));
-    size_t biasesId = network.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {96}, Layout::C)));
-
-    convLayer.setStrides({4, 4});
-    convLayer.setKernel({11, 11});
-    convLayer.setOutDepth(96);
-    convLayer.setInputPort(Port({1, 6, 225, 225}));
-    convLayer.setDilation({1, 1});
-
-    convLayer.setGroup(3);
-    size_t convId = network.addLayer(convLayer);
-    network.connect({weightsId}, {convId, 1});
-    network.connect({biasesId}, {convId, 2});
-    ASSERT_NO_THROW(network.getLayer(convId)->validate(false));
-}
-
-TEST_F(ConvolutionLayerBuilderTest, canCreateLayerWithWeightsNotAvailableForGroup) {
-    Builder::Network network("Test");
-    Builder::ConvolutionLayer convLayer("Convolution");
-
-    size_t weightsId = network.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {96, 5, 11, 11}, Layout::OIHW)));
-    size_t biasesId = network.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {96}, Layout::C)));
-
-    convLayer.setStrides({4, 4});
-    convLayer.setKernel({11, 11});
-    convLayer.setOutDepth(96);
-    convLayer.setInputPort(Port({1, 6, 225, 225}));
-    convLayer.setDilation({1, 1});
-
-    convLayer.setGroup(3);
-    ASSERT_THROW(network.addLayer({{weightsId}, {biasesId}}, convLayer),
-                 InferenceEngine::details::InferenceEngineException);  // 6 / 3 != 5
-}
-
-TEST_F(ConvolutionLayerBuilderTest, cannotCreateLayerWithNumberOfGroupNotDividingNumberOfInputChannels) {
-    Builder::Network network("Test");
-    Builder::ConvolutionLayer convLayer("Convolution");
-
-    size_t weightsId = network.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {96, 2, 11, 11}, Layout::OIHW)));
-    size_t biasesId = network.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {96}, Layout::C)));
-
-    convLayer.setStrides({4, 4});
-    convLayer.setKernel({11, 11});
-    convLayer.setOutDepth(96);
-    convLayer.setInputPort(Port({1, 6, 225, 225}));
-    convLayer.setDilation({1, 1});
-
-    convLayer.setGroup(4);
-    ASSERT_THROW(network.addLayer({{weightsId}, {biasesId}}, convLayer),
-                 InferenceEngine::details::InferenceEngineException);  // 6 % 4 == 2
-}
-
diff --git a/inference-engine/tests_deprecated/unit/builders/crop_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/crop_layer_test.cpp
deleted file mode 100644 (file)
index f89c2f8..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-#include <builders/ie_crop_layer.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class CropLayerBuilderTest : public BuilderTestCommon {};
-
-TEST_F(CropLayerBuilderTest, getExistsLayerFromNetworkBuilder) {
-    Builder::Network network("network");
-    Builder::CropLayer cropLayer("Crop layer");
-    std::vector<Port> input_ports;
-    input_ports.push_back(Port({1, 21, 44, 44}));
-    input_ports.push_back(Port({1, 21, 44, 44}));
-    cropLayer.setInputPorts(input_ports);
-    cropLayer.setOutputPort(Port({1, 21, 44, 44}));
-    cropLayer.setAxis({2, 3});
-    cropLayer.setOffset({0, 0});
-    size_t ind = 0;
-    ASSERT_NO_THROW(ind = network.addLayer(cropLayer));
-    Builder::CropLayer layerFromNet(network.getLayer(ind));
-    ASSERT_EQ(layerFromNet.getAxis(), cropLayer.getAxis());
-    ASSERT_EQ(layerFromNet.getOffset(), cropLayer.getOffset());
-}
-
-TEST_F(CropLayerBuilderTest, cannotCreateLayerWithOneInputShape) {
-    Builder::Network network("network");
-    Builder::CropLayer cropLayer("Crop layer");
-    std::vector<Port> input_ports;
-    input_ports.push_back(Port({1, 21, 44, 44}));  // here
-    cropLayer.setInputPorts(input_ports);
-    cropLayer.setOutputPort(Port({1, 21, 44, 44}));
-    cropLayer.setAxis({2, 3});
-    cropLayer.setOffset({0, 0});
-    ASSERT_THROW(network.addLayer(cropLayer), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(CropLayerBuilderTest, cannotCreateLayerWithThreeInputShapes) {
-    Builder::Network network("network");
-    Builder::CropLayer cropLayer("Crop layer");
-    std::vector<Port> input_ports;
-    input_ports.push_back(Port({1, 21, 44, 44}));
-    input_ports.push_back(Port({1, 21, 44, 44}));
-    input_ports.push_back(Port({1, 21, 44, 44}));  // here
-    cropLayer.setInputPorts(input_ports);
-    cropLayer.setOutputPort(Port({1, 21, 44, 44}));
-    cropLayer.setAxis({2, 3});
-    cropLayer.setOffset({0, 0});
-    ASSERT_THROW(network.addLayer(cropLayer), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(CropLayerBuilderTest, cannotCreateLayerWithDifferentSizeOfAxisAndOffset) {
-    Builder::Network network("network");
-    Builder::CropLayer cropLayer("Crop layer");
-    std::vector<Port> input_ports;
-    input_ports.push_back(Port({1, 21, 44, 44}));
-    input_ports.push_back(Port({1, 21, 44, 44}));
-    cropLayer.setInputPorts(input_ports);
-    cropLayer.setOutputPort(Port({1, 21, 44, 44}));
-    cropLayer.setAxis({2, 3});
-    cropLayer.setOffset({0, 0, 0});  // here
-    ASSERT_THROW(network.addLayer(cropLayer), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(CropLayerBuilderTest, cannotCreateLayerWithSoBigOffset) {
-    Builder::Network network("network");
-    Builder::CropLayer cropLayer("Crop layer");
-    std::vector<Port> input_ports;
-    input_ports.push_back(Port({1, 21, 44, 44}));
-    input_ports.push_back(Port({1, 21, 34, 34}));
-    cropLayer.setInputPorts(input_ports);
-    cropLayer.setOutputPort(Port({1, 21, 34, 34}));
-    cropLayer.setAxis({2, 3});
-    cropLayer.setOffset({0, 50});  // here
-    ASSERT_THROW(network.addLayer(cropLayer), InferenceEngine::details::InferenceEngineException);
-}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/unit/builders/ctc_greedy_decoder_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/ctc_greedy_decoder_layer_test.cpp
deleted file mode 100644 (file)
index 26c3f5c..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-#include <builders/ie_ctc_greedy_decoder_layer.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class CTCGreedyDecoderLayerBuilderTest : public BuilderTestCommon {};
-
-TEST_F(CTCGreedyDecoderLayerBuilderTest, getExistsLayerFromNetworkBuilder) {
-    Builder::Network network("network");
-    Builder::CTCGreedyDecoderLayer ctcGreedyDecoderLayer("CTCGreedyDecoder");
-    ctcGreedyDecoderLayer.setInputPorts({Port({88, 1, 71}), Port({88, 1})});
-    ctcGreedyDecoderLayer.setOutputPort(Port({1, 88, 1, 1}));
-    size_t ind = 0;
-    ASSERT_NO_THROW(ind = network.addLayer(ctcGreedyDecoderLayer));
-    Builder::CTCGreedyDecoderLayer layerFromNet(network.getLayer(ind));
-    ASSERT_EQ(ctcGreedyDecoderLayer.getInputPorts(), layerFromNet.getInputPorts());
-    ASSERT_EQ(ctcGreedyDecoderLayer.getOutputPort(), layerFromNet.getOutputPort());
-}
-
-TEST_F(CTCGreedyDecoderLayerBuilderTest, cannotCreateLayerWithoutInputPorts) {
-    Builder::Network network("network");
-    Builder::CTCGreedyDecoderLayer ctcGreedyDecoderLayer("CTCGreedyDecoder");
-    ctcGreedyDecoderLayer.setOutputPort(Port({1, 88, 1, 1}));
-    ASSERT_THROW(network.addLayer(ctcGreedyDecoderLayer), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(CTCGreedyDecoderLayerBuilderTest, cannotCreateLayerWithThreeInputPorts) {
-    Builder::Network network("network");
-    Builder::CTCGreedyDecoderLayer ctcGreedyDecoderLayer("CTCGreedyDecoder");
-    ctcGreedyDecoderLayer.setInputPorts({Port({88, 1, 71}), Port({88, 1}), Port({88, 1})});
-    ctcGreedyDecoderLayer.setOutputPort(Port({1, 88, 1, 1}));
-    ASSERT_THROW(network.addLayer(ctcGreedyDecoderLayer), InferenceEngine::details::InferenceEngineException);
-}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/unit/builders/deconvolution_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/deconvolution_layer_test.cpp
deleted file mode 100644 (file)
index 9db119f..0000000
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-#include <builders/ie_deconvolution_layer.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class DeconvolutionLayerBuilderTest : public BuilderTestCommon {};
-
-TEST_F(DeconvolutionLayerBuilderTest, cannotCreateConvolutionWithoutWeight) {
-    Builder::Network network("Test");
-
-    Builder::DeconvolutionLayer deconvBuilder("Deconvolution");
-    deconvBuilder.setStrides({4, 4});
-    deconvBuilder.setKernel({11, 11});
-    deconvBuilder.setOutDepth(96);
-    deconvBuilder.setInputPort(Port({1, 3, 225, 225}));
-    deconvBuilder.setDilation({1, 1});
-    size_t ind = network.addLayer(deconvBuilder);
-    ASSERT_THROW(network.getLayer(ind)->validate(false), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(DeconvolutionLayerBuilderTest, getExistsLayerFromNetworkBuilderWithInputPort) {
-    Builder::Network network("Test");
-    Builder::DeconvolutionLayer deconvBuilder("Deconvolution");
-
-    deconvBuilder.setStrides({4, 4});
-    deconvBuilder.setKernel({11, 11});
-    deconvBuilder.setOutDepth(96);
-    deconvBuilder.setInputPort(Port({1, 3, 225, 225}));
-    deconvBuilder.setDilation({1, 1});
-
-    idx_t convId = network.addLayer(deconvBuilder);
-
-    idx_t weightsId = network.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {96, 3, 11, 11}, Layout::OIHW)));
-    network.connect({weightsId}, {convId, 1});
-
-    idx_t biasesId = network.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {96}, Layout::C)));
-    network.connect({biasesId}, {convId, 2});
-
-    Builder::DeconvolutionLayer deconvBuilderFromNetwork(network.getLayer(convId));
-
-    ASSERT_EQ(deconvBuilderFromNetwork.getStrides(), deconvBuilder.getStrides());
-    ASSERT_EQ(deconvBuilderFromNetwork.getKernel(), deconvBuilder.getKernel());
-    ASSERT_EQ(deconvBuilderFromNetwork.getPaddingsEnd(), deconvBuilder.getPaddingsEnd());
-    ASSERT_EQ(deconvBuilderFromNetwork.getPaddingsBegin(), deconvBuilder.getPaddingsBegin());
-    ASSERT_EQ(deconvBuilderFromNetwork.getOutDepth(), deconvBuilder.getOutDepth());
-    ASSERT_EQ(deconvBuilderFromNetwork.getDilation(), deconvBuilder.getDilation());
-}
-
-TEST_F(DeconvolutionLayerBuilderTest, getExistsLayerFromNetworkBuilderWithoutInputPort) {
-    Builder::Network network("Test");
-    Builder::DeconvolutionLayer deconvBuilder("Deconvolution");
-
-    deconvBuilder.setStrides({4, 4});
-    deconvBuilder.setKernel({11, 11});
-    deconvBuilder.setOutDepth(96);
-    deconvBuilder.setDilation({1, 1});
-
-    idx_t convId = network.addLayer(deconvBuilder);
-
-    idx_t weightsId = network.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {96, 3, 11, 11}, Layout::OIHW)));
-    network.connect({weightsId}, {convId, 1});
-
-    idx_t biasesId = network.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {96}, Layout::C)));
-    network.connect({biasesId}, {convId, 2});
-
-    Builder::DeconvolutionLayer deconvBuilderFromNetwork(network.getLayer(convId));
-
-    ASSERT_EQ(deconvBuilderFromNetwork.getStrides(), deconvBuilder.getStrides());
-    ASSERT_EQ(deconvBuilderFromNetwork.getKernel(), deconvBuilder.getKernel());
-    ASSERT_EQ(deconvBuilderFromNetwork.getPaddingsEnd(), deconvBuilder.getPaddingsEnd());
-    ASSERT_EQ(deconvBuilderFromNetwork.getPaddingsBegin(), deconvBuilder.getPaddingsBegin());
-    ASSERT_EQ(deconvBuilderFromNetwork.getOutDepth(), deconvBuilder.getOutDepth());
-    ASSERT_EQ(deconvBuilderFromNetwork.getDilation(), deconvBuilder.getDilation());
-}
-
-TEST_F(DeconvolutionLayerBuilderTest, cannotCreateConvolutionWithWrongNumberOfInputChannels) {
-    Builder::Network network("Test");
-    Builder::DeconvolutionLayer deconvBuilder("Deconvolution");
-
-    deconvBuilder.setStrides({4, 4});
-    deconvBuilder.setKernel({11, 11});
-    deconvBuilder.setOutDepth(96);
-    deconvBuilder.setInputPort(Port({1, 64, 225, 225}));  // here
-
-    idx_t convId = network.addLayer(deconvBuilder);
-
-    idx_t weightsId = network.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {96, 3, 11, 11}, Layout::OIHW)));
-    network.connect({weightsId}, {convId, 1});
-
-    idx_t biasesId = network.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {96}, Layout::C)));
-    network.connect({biasesId}, {convId, 2});
-
-    ASSERT_THROW(network.getLayer(convId)->validate(false), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(DeconvolutionLayerBuilderTest, canCreateCorrcetConvolution) {
-    Builder::Network network("Test");
-    Builder::DeconvolutionLayer deconvBuilder("Deconvolution");
-
-    deconvBuilder.setStrides({4, 4});
-    deconvBuilder.setKernel({11, 11});
-    deconvBuilder.setOutDepth(96);
-    deconvBuilder.setInputPort(Port({1, 3, 225, 225}));  // here
-
-    idx_t convId = network.addLayer(deconvBuilder);
-
-    idx_t weightsId = network.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {96, 3, 11, 11}, Layout::OIHW)));
-    network.connect({weightsId}, {convId, 1});
-
-    idx_t biasesId = network.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {96}, Layout::C)));
-    network.connect({biasesId}, {convId, 2});
-
-    ASSERT_NO_THROW(network.getLayer(convId)->validate(false));
-}
-
-TEST_F(DeconvolutionLayerBuilderTest, cannotCreateConvolutionWithGroup) {
-    Builder::Network network("Test");
-    Builder::DeconvolutionLayer deconvBuilder("Deconvolution");
-
-    deconvBuilder.setStrides({4, 4});
-    deconvBuilder.setKernel({11, 11});
-    deconvBuilder.setOutDepth(96);
-    deconvBuilder.setGroup(2);
-    deconvBuilder.setInputPort(Port({1, 6, 225, 225}));
-
-    idx_t convId = network.addLayer(deconvBuilder);
-
-    idx_t weightsId = network.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {96, 6, 11, 11}, Layout::OIHW)));
-    // should be {96, 6 / 2, 11, 11}
-    network.connect({weightsId}, {convId, 1});
-
-    idx_t biasesId = network.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {96}, Layout::C)));
-    network.connect({biasesId}, {convId, 2});
-
-    ASSERT_THROW(network.getLayer(convId)->validate(false), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(DeconvolutionLayerBuilderTest, canCreateConvolution) {
-    Builder::Network network("Test");
-    Builder::DeconvolutionLayer deconvBuilder("Deconvolution");
-
-    deconvBuilder.setStrides({4, 4});
-    deconvBuilder.setKernel({11, 11});
-    deconvBuilder.setOutDepth(96);
-    deconvBuilder.setGroup(2);
-    deconvBuilder.setInputPort(Port({1, 6, 225, 225}));  // here
-
-    idx_t convId = network.addLayer(deconvBuilder);
-
-    idx_t weightsId = network.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {96, 3, 11, 11}, Layout::OIHW)));
-    network.connect({weightsId}, {convId, 1});
-
-    idx_t biasesId = network.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {96}, Layout::C)));
-    network.connect({biasesId}, {convId, 2});
-
-    ASSERT_NO_THROW(network.getLayer(convId)->validate(false));
-}
-
-TEST_F(DeconvolutionLayerBuilderTest, cannotCreateConvolutionWithWrongOutDepth) {
-    Builder::Network network("Test");
-    Builder::DeconvolutionLayer deconvBuilder("Deconvolution");
-
-    deconvBuilder.setStrides({4, 4});
-    deconvBuilder.setKernel({11, 11});
-    deconvBuilder.setOutDepth(4);  // here
-    deconvBuilder.setInputPort(Port({1, 3, 225, 225}));
-
-    idx_t convId = network.addLayer(deconvBuilder);
-
-    idx_t weightsId = network.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {96, 3, 11, 11}, Layout::OIHW)));
-    network.connect({weightsId}, {convId, 1});
-
-    idx_t biasesId = network.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {96}, Layout::C)));
-    network.connect({biasesId}, {convId, 2});
-
-    ASSERT_THROW(network.getLayer(convId)->validate(false), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(DeconvolutionLayerBuilderTest, cannotCreateConvolutionWithWrongStrides) {
-    Builder::Network network("Test");
-    Builder::DeconvolutionLayer deconvBuilder("Deconvolution");
-
-    deconvBuilder.setStrides({4, 0});  // here
-    deconvBuilder.setKernel({11, 11});
-    deconvBuilder.setOutDepth(96);
-    deconvBuilder.setInputPort(Port({1, 3, 225, 225}));
-    deconvBuilder.setPaddingsEnd({0, 0});
-    deconvBuilder.setPaddingsBegin({0, 0});
-    deconvBuilder.setDilation({0, 0});
-    ASSERT_THROW(network.addLayer(deconvBuilder), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(DeconvolutionLayerBuilderTest, cannotCreateConvolutionWithWrongKernel1) {
-    Builder::Network network("Test");
-    Builder::DeconvolutionLayer deconvBuilder("Deconvolution");
-
-    deconvBuilder.setStrides({4, 4});
-    deconvBuilder.setKernel({11, 0});  // here
-    deconvBuilder.setOutDepth(96);
-    deconvBuilder.setInputPort(Port({1, 3, 225, 225}));
-
-    ASSERT_THROW(network.addLayer(deconvBuilder), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(DeconvolutionLayerBuilderTest, cannotCreateConvolutionWithWrongKernel2) {
-    Builder::Network network("Test");
-    Builder::DeconvolutionLayer convBuilder("Deconvolution");
-
-    convBuilder.setStrides({4, 4});
-    convBuilder.setKernel({11, 11, 11});  // here
-    convBuilder.setOutDepth(96);
-    convBuilder.setInputPort(Port({1, 3, 225, 225}));
-
-    ASSERT_THROW(network.addLayer(convBuilder), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(DeconvolutionLayerBuilderTest, cannotCreateConvolutionWithWrongDilation1) {
-    Builder::Network network("Test");
-    Builder::DeconvolutionLayer deconvBuilder("Deconvolution");
-
-    deconvBuilder.setStrides({4, 4});
-    deconvBuilder.setKernel({11, 11});
-    deconvBuilder.setOutDepth(96);
-    deconvBuilder.setInputPort(Port({1, 3, 225, 225}));
-    deconvBuilder.setDilation({1, 0});  // here
-
-    ASSERT_THROW(network.addLayer(deconvBuilder), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(DeconvolutionLayerBuilderTest, cannotCreateConvolutionWithWrongDilation2) {
-    Builder::Network network("Test");
-    Builder::DeconvolutionLayer convBuilder("Deconvolution");
-
-    convBuilder.setStrides({4, 4});
-    convBuilder.setKernel({11, 11});
-    convBuilder.setOutDepth(96);
-    convBuilder.setInputPort(Port({1, 3, 225, 225}));
-    convBuilder.setDilation({1, 1, 1});  // here
-
-    ASSERT_THROW(network.addLayer(convBuilder), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(DeconvolutionLayerBuilderTest, canCreateLayerWithNumberOfGroupDividingNumberOfInputChannels) {
-    Builder::Network network("Test");
-    Builder::DeconvolutionLayer deconvBuilder("Deconvolution");
-
-    size_t weightsId = network.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {96, 2, 11, 11}, Layout::OIHW)));
-    size_t biasesId = network.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {96}, Layout::C)));
-
-    deconvBuilder.setStrides({4, 4});
-    deconvBuilder.setKernel({11, 11});
-    deconvBuilder.setOutDepth(96);
-    deconvBuilder.setInputPort(Port({1, 6, 225, 225}));
-    deconvBuilder.setDilation({1, 1});
-
-    deconvBuilder.setGroup(3);
-    size_t convId = network.addLayer(deconvBuilder);
-    network.connect({weightsId}, {convId, 1});
-    network.connect({biasesId}, {convId, 2});
-    ASSERT_NO_THROW(network.getLayer(convId)->validate(false));
-}
-
-TEST_F(DeconvolutionLayerBuilderTest, canCreateLayerWithWeightsNotAvailableForGroup) {
-    Builder::Network network("Test");
-    Builder::DeconvolutionLayer deconvBuilder("Deconvolution");
-
-    size_t weightsId = network.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {96, 5, 11, 11}, Layout::OIHW)));
-    size_t biasesId = network.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {96}, Layout::C)));
-
-    deconvBuilder.setStrides({4, 4});
-    deconvBuilder.setKernel({11, 11});
-    deconvBuilder.setOutDepth(96);
-    deconvBuilder.setInputPort(Port({1, 6, 225, 225}));
-    deconvBuilder.setDilation({1, 1});
-
-    deconvBuilder.setGroup(3);
-    ASSERT_THROW(network.addLayer({{weightsId}, {biasesId}}, deconvBuilder),
-                 InferenceEngine::details::InferenceEngineException);  // 6 / 3 != 5
-}
-
-TEST_F(DeconvolutionLayerBuilderTest, cannotCreateLayerWithNumberOfGroupNotDividingNumberOfInputChannels) {
-    Builder::Network network("Test");
-    Builder::DeconvolutionLayer deconvBuilder("Deconvolution");
-
-    size_t weightsId = network.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {96, 2, 11, 11}, Layout::OIHW)));
-    size_t biasesId = network.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {96}, Layout::C)));
-
-    deconvBuilder.setStrides({4, 4});
-    deconvBuilder.setKernel({11, 11});
-    deconvBuilder.setOutDepth(96);
-    deconvBuilder.setInputPort(Port({1, 6, 225, 225}));
-    deconvBuilder.setDilation({1, 1});
-
-    deconvBuilder.setGroup(4);
-    ASSERT_THROW(network.addLayer({{weightsId}, {biasesId}}, deconvBuilder),
-                 InferenceEngine::details::InferenceEngineException);  // 6 % 4 == 2
-}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/unit/builders/detection_output_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/detection_output_layer_test.cpp
deleted file mode 100644 (file)
index 36fdcca..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-#include <builders/ie_detection_output_layer.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class DetectionOutputLayerBuilderTest : public BuilderTestCommon {};
-
-TEST_F(DetectionOutputLayerBuilderTest, getExistsLayerFromNetworkBuilder) {
-    Builder::Network network("network");
-    Builder::DetectionOutputLayer layer("detection output layer");
-    layer.setNumClasses(2);
-    layer.setShareLocation(true);
-    layer.setBackgroudLabelId(-1);
-    layer.setNMSThreshold(0.45);
-    layer.setTopK(400);
-    layer.setCodeType("caffe.PriorBoxParameter.CENTER_SIZE");
-    layer.setVariantEncodedInTarget(false);
-    layer.setKeepTopK(200);
-    layer.setConfidenceThreshold(0.01);
-    size_t ind = 0;
-    ASSERT_NO_THROW(ind = network.addLayer(layer));
-    Builder::DetectionOutputLayer layerFromNet(network.getLayer(ind));
-    ASSERT_EQ(layerFromNet.getName(), layer.getName());
-    ASSERT_EQ(layerFromNet.getNumClasses(), layer.getNumClasses());
-    ASSERT_EQ(layerFromNet.getShareLocation(), layer.getShareLocation());
-    ASSERT_EQ(layerFromNet.getBackgroudLabelId(), layer.getBackgroudLabelId());
-    ASSERT_EQ(layerFromNet.getNMSThreshold(), layer.getNMSThreshold());
-    ASSERT_EQ(layerFromNet.getTopK(), layer.getTopK());
-    ASSERT_EQ(layerFromNet.getCodeType(), layer.getCodeType());
-    ASSERT_EQ(layerFromNet.getVariantEncodedInTarget(), layer.getVariantEncodedInTarget());
-    ASSERT_EQ(layerFromNet.getKeepTopK(), layer.getKeepTopK());
-    ASSERT_EQ(layerFromNet.getConfidenceThreshold(), layer.getConfidenceThreshold());
-}
-
-TEST_F(DetectionOutputLayerBuilderTest, cannotCreateLayerWithWrongNumClasses) {
-    Builder::Network network("network");
-    Builder::DetectionOutputLayer layer("detection output layer");
-    layer.setNumClasses(0);  // here
-    layer.setShareLocation(true);
-    layer.setBackgroudLabelId(-1);
-    layer.setNMSThreshold(0.45);
-    layer.setTopK(400);
-    layer.setCodeType("caffe.PriorBoxParameter.CENTER_SIZE");
-    layer.setVariantEncodedInTarget(false);
-    layer.setKeepTopK(200);
-    layer.setConfidenceThreshold(0.01);
-    ASSERT_THROW(network.addLayer(layer), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(DetectionOutputLayerBuilderTest, cannotCreateLayerWithWrongCodeType) {
-    Builder::Network network("network");
-    Builder::DetectionOutputLayer layer("detection output layer");
-    layer.setNumClasses(2);
-    layer.setShareLocation(true);
-    layer.setBackgroudLabelId(-1);
-    layer.setNMSThreshold(0.45);
-    layer.setTopK(400);
-    layer.setCodeType("trololo");  // here
-    layer.setVariantEncodedInTarget(false);
-    layer.setKeepTopK(200);
-    layer.setConfidenceThreshold(0.01);
-    ASSERT_THROW(network.addLayer(layer), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(DetectionOutputLayerBuilderTest, cannotCreateLayerWithWrongBackLabelId) {
-    Builder::Network network("network");
-    Builder::DetectionOutputLayer layer("detection output layer");
-    layer.setNumClasses(2);
-    layer.setShareLocation(true);
-    layer.setBackgroudLabelId(-100);  // here
-    layer.setNMSThreshold(0.45);
-    layer.setTopK(400);
-    layer.setCodeType("caffe.PriorBoxParameter.CENTER_SIZE");
-    layer.setVariantEncodedInTarget(false);
-    layer.setKeepTopK(200);
-    layer.setConfidenceThreshold(0.01);
-    ASSERT_THROW(network.addLayer(layer), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(DetectionOutputLayerBuilderTest, cannotCreateLayerWithWrongNMSThreshold) {
-    Builder::Network network("network");
-    Builder::DetectionOutputLayer layer("detection output layer");
-    layer.setNumClasses(2);
-    layer.setShareLocation(true);
-    layer.setBackgroudLabelId(-1);
-    layer.setNMSThreshold(-0.02);  // here
-    layer.setTopK(400);
-    layer.setCodeType("caffe.PriorBoxParameter.CENTER_SIZE");
-    layer.setVariantEncodedInTarget(false);
-    layer.setKeepTopK(200);
-    layer.setConfidenceThreshold(0.01);
-    ASSERT_THROW(network.addLayer(layer), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(DetectionOutputLayerBuilderTest, cannotCreateLayerWithWrongConfidenceThreshold) {
-    Builder::Network network("network");
-    Builder::DetectionOutputLayer layer("detection output layer");
-    layer.setNumClasses(2);
-    layer.setShareLocation(true);
-    layer.setBackgroudLabelId(-1);
-    layer.setNMSThreshold(0.45);
-    layer.setTopK(400);
-    layer.setCodeType("caffe.PriorBoxParameter.CENTER_SIZE");
-    layer.setVariantEncodedInTarget(false);
-    layer.setKeepTopK(200);
-    layer.setConfidenceThreshold(-0.1);  // here
-    ASSERT_THROW(network.addLayer(layer), InferenceEngine::details::InferenceEngineException);
-}
diff --git a/inference-engine/tests_deprecated/unit/builders/eltwise_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/eltwise_layer_test.cpp
deleted file mode 100644 (file)
index b338f39..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-#include <builders/ie_eltwise_layer.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class EltwiseLayerBuilderTest : public BuilderTestCommon {};
-
-TEST_F(EltwiseLayerBuilderTest, getExistsLayerFromNetworkBuilder) {
-    Builder::Network net("network");
-    Builder::EltwiseLayer layer("Eltwise layer");
-
-    layer.setInputPorts({Port({1, 2, 3, 4}), Port({1, 2, 3, 4})});
-    layer.setOutputPort(Port({1, 2, 3, 4}));
-    size_t ind = 0;
-    ASSERT_NO_THROW(ind = net.addLayer(layer));
-    Builder::EltwiseLayer layerFromNet(net.getLayer(ind));
-
-    ASSERT_EQ(layer.getInputPorts(), layerFromNet.getInputPorts());
-    ASSERT_EQ(layer.getOutputPort(), layerFromNet.getOutputPort());
-    ASSERT_EQ(layer.getEltwiseType(), layerFromNet.getEltwiseType());
-}
-
-TEST_F(EltwiseLayerBuilderTest, checkOnlineEltwiseTypeChanging) {
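-    // The same builder instance is re-added after each setEltwiseType() call; every EltwiseType must round-trip through set/get and still yield a valid layer.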
-    Builder::Network net("network");
-    Builder::EltwiseLayer layer("Eltwise layer");
-
-    layer.setInputPorts({Port({1, 2, 3}), Port({1, 2, 3})});
-    layer.setOutputPort(Port({1, 2, 3}));
-
-    layer.setEltwiseType(Builder::EltwiseLayer::EltwiseType::MAX);
-    ASSERT_EQ(layer.getEltwiseType(), Builder::EltwiseLayer::EltwiseType::MAX);
-    ASSERT_NO_THROW(net.addLayer(layer));
-
-    layer.setEltwiseType(Builder::EltwiseLayer::EltwiseType::DIV);
-    ASSERT_EQ(layer.getEltwiseType(), Builder::EltwiseLayer::EltwiseType::DIV);
-    ASSERT_NO_THROW(net.addLayer(layer));
-
-    layer.setEltwiseType(Builder::EltwiseLayer::EltwiseType::MIN);
-    ASSERT_EQ(layer.getEltwiseType(), Builder::EltwiseLayer::EltwiseType::MIN);
-    ASSERT_NO_THROW(net.addLayer(layer));
-
-    layer.setEltwiseType(Builder::EltwiseLayer::EltwiseType::MUL);
-    ASSERT_EQ(layer.getEltwiseType(), Builder::EltwiseLayer::EltwiseType::MUL);
-    ASSERT_NO_THROW(net.addLayer(layer));
-
-    layer.setEltwiseType(Builder::EltwiseLayer::EltwiseType::SQUARED_DIFF);
-    ASSERT_EQ(layer.getEltwiseType(), Builder::EltwiseLayer::EltwiseType::SQUARED_DIFF);
-    ASSERT_NO_THROW(net.addLayer(layer));
-
-    layer.setEltwiseType(Builder::EltwiseLayer::EltwiseType::SUB);
-    ASSERT_EQ(layer.getEltwiseType(), Builder::EltwiseLayer::EltwiseType::SUB);
-    ASSERT_NO_THROW(net.addLayer(layer));
-
-    layer.setEltwiseType(Builder::EltwiseLayer::EltwiseType::SUM);
-    ASSERT_EQ(layer.getEltwiseType(), Builder::EltwiseLayer::EltwiseType::SUM);
-    ASSERT_NO_THROW(net.addLayer(layer));
-}
-
-TEST_F(EltwiseLayerBuilderTest, cannotCreateLayerWithOneInputPort) {
-    Builder::Network net("network");
-    Builder::EltwiseLayer layer("Eltwise layer");
-
-    layer.setInputPorts({Port({1, 2, 3, 4})});   // here
-    layer.setOutputPort(Port({1, 2, 3, 4}));
-    ASSERT_THROW(net.addLayer(layer), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(EltwiseLayerBuilderTest, canCreateLayerWithThreeInputPorts) {
-    Builder::Network net("network");
-    Builder::EltwiseLayer layer("Eltwise layer");
-
-    layer.setInputPorts({Port({1, 2, 3, 4}), Port({1, 2, 3, 4}), Port({1, 2, 3, 4})});   // here
-    layer.setOutputPort(Port({1, 2, 3, 4}));
-    ASSERT_NO_THROW(net.addLayer(layer));
-}
-
-TEST_F(EltwiseLayerBuilderTest, cannotCreateLayerWithDifferentInputPorts) {
-    Builder::Network net("network");
-    Builder::EltwiseLayer layer("Eltwise layer");
-
-    layer.setInputPorts({Port({1, 2, 3, 4}), Port({1, 2, 3, 1000})});   // here
-    layer.setOutputPort(Port({1, 2, 3, 4}));
-    ASSERT_THROW(net.addLayer(layer), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(EltwiseLayerBuilderTest, cannotCreateLayerWithDifferentInputAndOutputPorts) {
-    Builder::Network net("network");
-    Builder::EltwiseLayer layer("Eltwise layer");
-
-    layer.setInputPorts({Port({1, 2, 3, 4}), Port({1, 2, 3, 4})});
-    layer.setOutputPort(Port({1, 2, 3, 100}));   // here
-    ASSERT_THROW(net.addLayer(layer), InferenceEngine::details::InferenceEngineException);
-}
diff --git a/inference-engine/tests_deprecated/unit/builders/elu_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/elu_layer_test.cpp
deleted file mode 100644 (file)
index 55a1292..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-#include <builders/ie_elu_layer.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class ELULayerBuilderTest : public BuilderTestCommon {};
-
-TEST_F(ELULayerBuilderTest, getExistsLayerFromNetworkBuilder) {
-    Builder::Network net("network");
-    Builder::ELULayer eluLayer("ELU_layer");
-    eluLayer.setAlpha(100);
-    size_t ind = net.addLayer(eluLayer);
-    Builder::ELULayer layerFromNet(net.getLayer(ind));
-    ASSERT_EQ(eluLayer.getAlpha(), layerFromNet.getAlpha());
-}
-
-TEST_F(ELULayerBuilderTest, cannotCreateLayerWithWrongShapes) {
-    Builder::Network net("network");
-    Builder::Layer::Ptr fakeELULayerPtr = std::make_shared<Builder::Layer>("ELU", "ELU layer");
-    fakeELULayerPtr->getInputPorts().push_back(Port({1, 1, 1, 1}));
-    fakeELULayerPtr->getOutputPorts().push_back(Port({1, 1, 1, 2}));
-    Builder::ELULayer eluLayer(fakeELULayerPtr);
-    eluLayer.setAlpha(100);
-    ASSERT_THROW(net.addLayer(eluLayer), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(ELULayerBuilderTest, cannotCreateLayerWithWrongAlpha) {
-    Builder::Network net("network");
-    Builder::ELULayer eluLayer("ELU_layer");
-    eluLayer.setAlpha(-100);
-    ASSERT_THROW(net.addLayer(eluLayer), InferenceEngine::details::InferenceEngineException);
-}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/unit/builders/input_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/input_layer_test.cpp
deleted file mode 100644 (file)
index 75f80c1..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class InputLayerBuilderTest : public BuilderTestCommon {};
-
-TEST_F(InputLayerBuilderTest, cannotCreateInputWithoutPort) {
-    ASSERT_THROW(((Builder::Layer)Builder::InputLayer("in1")).build(), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(InputLayerBuilderTest, getExistsLayerFromNetworkBuilder) {
-    Builder::Network network("Test");
-    Builder::InputLayer inBuilder("in1");
-    inBuilder.setPort(Port({1, 3, 3, 3}));
-    size_t inId = network.addLayer(inBuilder);
-    ASSERT_EQ(inBuilder.getPort().shape(), Port({1, 3, 3, 3}).shape());
-    Builder::InputLayer inBuilderFromNetwork(network.getLayer(inId));
-    ASSERT_EQ(inBuilderFromNetwork.getPort().shape(), Port({1, 3, 3, 3}).shape());
-    inBuilderFromNetwork.setPort(Port({1, 3, 4, 4}));
-    ASSERT_EQ(inBuilderFromNetwork.getPort().shape(), Port({1, 3, 4, 4}).shape());
-    ASSERT_EQ(network.getLayer(inId)->getOutputPorts()[0].shape(), Port({1, 3, 4, 4}).shape());
-    ASSERT_EQ(inBuilder.getPort().shape(), Port({1, 3, 3, 3}).shape());
-}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/unit/builders/memory_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/memory_layer_test.cpp
deleted file mode 100644 (file)
index 488f44f..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class MemoryLayerBuilderTest : public BuilderTestCommon {};
-
-TEST_F(MemoryLayerBuilderTest, getExistsLayerFromNetworkBuilder) {
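-    // Topology: inLayer and MemoryIn1 feed concat, concat feeds fc0, and fc0 feeds MemoryOut1.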
-    Builder::Network network("Test");
-    Builder::MemoryLayer memoryInBuilder("MemoryIn1"), memoryOutBuilder("MemoryOut1");
-    Builder::ConcatLayer concat("concat");
-    Builder::InputLayer input("inLayer");
-    Builder::FullyConnectedLayer fc("fc0");
-
-    memoryInBuilder.setOutputPort(Port({1, 30}));
-    memoryOutBuilder.setInputPort(Port({1, 30}));
-
-    input.setPort(Port({1, 30}));
-    concat.setInputPorts({Port({1,30}), Port({1, 30})});
-    concat.setOutputPort(Port({1, 60}));
-    fc.setInputPort(Port({1, 60}));
-    fc.setOutputPort(Port({1, 30}));
-
-    size_t inId  = network.addLayer(memoryInBuilder);
-    size_t outId  = network.addLayer(memoryOutBuilder);
-    size_t inId2  = network.addLayer(concat);
-    size_t inId3  = network.addLayer(input);
-    size_t inIdfc = network.addLayer(fc);
-
-    network.connect({inId3}, {inId2, 0});
-    network.connect({inId}, {inId2, 1});
-    network.connect({inId2}, {inIdfc});
-    network.connect({inIdfc}, {outId});
-
-    ASSERT_EQ(memoryInBuilder.getOutputPort().shape(), Port({1, 30}).shape());
-    auto cnn_network = Builder::convertToICNNNetwork(network.build());
-
-    CNNLayerPtr layer;
-    cnn_network->getLayerByName("concat", layer, nullptr);
-    ASSERT_EQ(layer->outData.size(), 1);
-}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/unit/builders/mvn_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/mvn_layer_test.cpp
deleted file mode 100644 (file)
index 4f9af88..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-#include <builders/ie_mvn_layer.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class MVNLayerBuilderTest : public BuilderTestCommon {};
-
-TEST_F(MVNLayerBuilderTest, getExistsLayerFromNetworkBuilder1) {
-    Builder::Network net("network");
-    Builder::MVNLayer mvnLayer("MVN_layer");
-    mvnLayer.setEpsilon(99.9).setAcrossChannels(true).setNormalize(true);
-    size_t ind = net.addLayer(mvnLayer);
-    Builder::MVNLayer layerFromNet(net.getLayer(ind));
-}
-
-TEST_F(MVNLayerBuilderTest, getExistsLayerFromNetworkBuilder2) {
-    Builder::Network net("network");
-    Builder::MVNLayer mvnLayer("MVN_layer");
-    mvnLayer.setEpsilon(99.9).setAcrossChannels(true).setNormalize(false);
-    size_t ind = net.addLayer(mvnLayer);
-    Builder::MVNLayer layerFromNet(net.getLayer(ind));
-}
-
-TEST_F(MVNLayerBuilderTest, getExistsLayerFromNetworkBuilder3) {
-    Builder::Network net("network");
-    Builder::MVNLayer mvnLayer("MVN_layer");
-    mvnLayer.setEpsilon(99.9).setAcrossChannels(false).setNormalize(true);
-    size_t ind = net.addLayer(mvnLayer);
-    Builder::MVNLayer layerFromNet(net.getLayer(ind));
-}
-
-TEST_F(MVNLayerBuilderTest, getExistsLayerFromNetworkBuilder4) {
-    Builder::Network net("network");
-    Builder::MVNLayer mvnLayer("MVN_layer");
-    mvnLayer.setEpsilon(99.9).setAcrossChannels(false).setNormalize(false);
-    size_t ind = net.addLayer(mvnLayer);
-    Builder::MVNLayer layerFromNet(net.getLayer(ind));
-}
-
-TEST_F(MVNLayerBuilderTest, cannotCreateLayerWithWrongEpsilon) {
-    Builder::Network net("network");
-    Builder::MVNLayer mvnLayer("MVN_layer");
-    mvnLayer.setEpsilon(-100).setAcrossChannels(true).setNormalize(true);  // here
-    ASSERT_THROW(net.addLayer(mvnLayer), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(MVNLayerBuilderTest, cannotCreateLayerWithWrongShapes) {
-    Builder::Network net("network");
-    Builder::Layer::Ptr fakeMVNLayerPtr = std::make_shared<Builder::Layer>("MVN", "MVN layer");
-    fakeMVNLayerPtr->getInputPorts().push_back(Port({1, 1, 1, 1}));
-    fakeMVNLayerPtr->getOutputPorts().push_back(Port({1, 1, 1, 2}));
-    Builder::MVNLayer mvnLayer(fakeMVNLayerPtr);
-    mvnLayer.setEpsilon(100).setAcrossChannels(true).setNormalize(true);
-    ASSERT_THROW(net.addLayer(mvnLayer), InferenceEngine::details::InferenceEngineException);
-}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/unit/builders/network_builder_test.cpp b/inference-engine/tests_deprecated/unit/builders/network_builder_test.cpp
deleted file mode 100644 (file)
index 8239e72..0000000
+++ /dev/null
@@ -1,1238 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-#include <cpp/ie_cnn_net_reader.h>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class NetworkBuilderTest : public BuilderTestCommon {
-protected:
-    std::vector<std::string> alexNetNames = {
-            "in1",
-            "mean",
-            "conv1",
-            "relu1",
-            "norm1",
-            "pool1",
-            "conv2",
-            "relu2",
-            "norm2",
-            "pool2",
-            "conv3",
-            "relu3",
-            "conv4",
-            "relu4",
-            "conv5",
-            "relu5",
-            "pool5",
-            "fc6",
-            "relu6",
-            "fc7",
-            "relu7",
-            "fc8",
-            "prob",
-            "sf_out"
-    };
-
-public:
-
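-    // Assembles an AlexNet-like graph with randomly generated Const weights and biases; non-input output shapes are left empty so they are inferred during build().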
-    Builder::Network prepareAlexnetBuilder(Precision precision = Precision::FP32) {
-        Context ctx;
-        Builder::Network builder(ctx, "AlexNet");
-        idx_t weightsId, biasesId;
-        idx_t layerId = builder.addLayer(Builder::InputLayer(alexNetNames[0]).setPort(Port({1,3, 227, 227})));
-        biasesId = builder.addLayer(Builder::ConstLayer("biases").setData(generateBlob(precision, {3}, Layout::C)));
-        layerId = builder.addLayer({{layerId}}, Builder::ScaleShiftLayer(alexNetNames[1]));
-        builder.connect({biasesId}, {layerId, 2});
-        weightsId = builder.addLayer(Builder::ConstLayer("weights").setData(generateBlob(precision, {96, 3, 11, 11}, Layout::OIHW)));
-        biasesId = builder.addLayer(Builder::ConstLayer("biases").setData(generateBlob(precision, {96}, Layout::C)));
-        layerId = builder.addLayer({{layerId}, {weightsId}, {biasesId}}, Builder::ConvolutionLayer(alexNetNames[2]).setKernel({11, 11})
-                .setStrides({4, 4}).setOutDepth(96));
-        layerId = builder.addLayer({{layerId}}, Builder::ReLULayer(alexNetNames[3]));
-        layerId = builder.addLayer({{layerId}}, Builder::NormLayer(alexNetNames[4]).setAlpha(9.999999747378752e-05f).setBeta(0.75f).setSize(5).setAcrossMaps(true));
-        layerId = builder.addLayer({{layerId}}, Builder::PoolingLayer(alexNetNames[5]).setExcludePad(false).setKernel({3, 3}).setPaddingsBegin({0, 0})
-                .setPaddingsEnd({0, 0}).setPoolingType(Builder::PoolingLayer::PoolingType::MAX).setStrides({2, 2}));
-        weightsId = builder.addLayer(Builder::ConstLayer("weights").setData(generateBlob(precision, {256, 96 / 2, 5, 5}, Layout::OIHW)));
-        biasesId = builder.addLayer(Builder::ConstLayer("biases").setData(generateBlob(precision, {256}, Layout::C)));
-        layerId = builder.addLayer({{layerId}, {weightsId}, {biasesId}}, Builder::ConvolutionLayer(alexNetNames[6]).setKernel({5, 5}).setStrides({1, 1}).setOutDepth(256)
-                .setPaddingsBegin({2, 2}).setPaddingsEnd({2, 2}).setGroup(2).setDilation({1, 1}));
-        layerId = builder.addLayer({{layerId}}, Builder::ReLULayer(alexNetNames[7]));
-        layerId = builder.addLayer({{layerId}}, Builder::NormLayer(alexNetNames[8]).setAlpha(9.999999747378752e-05f).setBeta(0.75f).setSize(5).setAcrossMaps(true));
-        layerId = builder.addLayer({{layerId}}, Builder::PoolingLayer(alexNetNames[9]).setExcludePad(false).setKernel({3, 3}).setPaddingsBegin({0, 0})
-                .setPaddingsEnd({0, 0}).setPoolingType(Builder::PoolingLayer::PoolingType::MAX).setStrides({2, 2}));
-        weightsId = builder.addLayer(Builder::ConstLayer("weights").setData(generateBlob(precision, {256, 384, 3, 3}, Layout::OIHW)));
-        biasesId = builder.addLayer(Builder::ConstLayer("biases").setData(generateBlob(precision, {384}, Layout::C)));
-        layerId = builder.addLayer({{layerId}, {weightsId}, {biasesId}}, Builder::ConvolutionLayer(alexNetNames[10]).setKernel({3, 3})
-                .setStrides({1, 1}).setOutDepth(384).setPaddingsBegin({1, 1}).setPaddingsEnd({1, 1}).setGroup(1).setDilation({1, 1}));
-        layerId = builder.addLayer({{layerId}}, Builder::ReLULayer(alexNetNames[11]));
-        weightsId = builder.addLayer(Builder::ConstLayer("weights").setData(generateBlob(precision, {384, 384 / 2, 3, 3}, Layout::OIHW)));
-        biasesId = builder.addLayer(Builder::ConstLayer("biases").setData(generateBlob(precision, {384}, Layout::C)));
-        layerId = builder.addLayer({{layerId}, {weightsId}, {biasesId}}, Builder::ConvolutionLayer(alexNetNames[12]).setKernel({3, 3})
-                .setStrides({1, 1}).setOutDepth(384).setPaddingsBegin({1, 1}).setPaddingsEnd({1, 1}).setGroup(2).setDilation({1, 1}));
-        layerId = builder.addLayer({{layerId}}, Builder::ReLULayer(alexNetNames[13]));
-        weightsId = builder.addLayer(Builder::ConstLayer("weights").setData(generateBlob(precision, {256, 384 / 2, 3, 3}, Layout::OIHW)));
-        biasesId = builder.addLayer(Builder::ConstLayer("biases").setData(generateBlob(precision, {256}, Layout::C)));
-        layerId = builder.addLayer({{layerId}, {weightsId}, {biasesId}}, Builder::ConvolutionLayer(alexNetNames[14]).setKernel({3, 3})
-                .setStrides({1, 1}).setOutDepth(256).setPaddingsBegin({1, 1}).setPaddingsEnd({1, 1}).setGroup(2).setDilation({1, 1}));
-        layerId = builder.addLayer({{layerId}}, Builder::ReLULayer(alexNetNames[15]));
-        layerId = builder.addLayer({{layerId}}, Builder::PoolingLayer(alexNetNames[16]).setExcludePad(false).setKernel({3, 3}).setPaddingsBegin({0, 0})
-                .setPaddingsEnd({0, 0}).setPoolingType(Builder::PoolingLayer::PoolingType::MAX).setStrides({2, 2}));
-        weightsId = builder.addLayer(Builder::ConstLayer("weights").setData(generateBlob(precision, {4096, 256, 6, 6}, Layout::OIHW)));
-        biasesId = builder.addLayer(Builder::ConstLayer("biases").setData(generateBlob(precision, {4096}, Layout::C)));
-        layerId = builder.addLayer({{layerId}, {weightsId}, {biasesId}}, Builder::FullyConnectedLayer(alexNetNames[17]).setOutputNum(4096));
-        layerId = builder.addLayer({{layerId}}, Builder::ReLULayer(alexNetNames[18]));
-        weightsId = builder.addLayer(Builder::ConstLayer("weights").setData(generateBlob(precision, {4096, 4096}, Layout::NC)));
-        biasesId = builder.addLayer(Builder::ConstLayer("biases").setData(generateBlob(precision, {4096}, Layout::C)));
-        layerId = builder.addLayer({{layerId}, {weightsId}, {biasesId}}, Builder::FullyConnectedLayer(alexNetNames[19]).setOutputNum(4096));
-        layerId = builder.addLayer({{layerId}}, Builder::ReLULayer(alexNetNames[20]));
-        weightsId = builder.addLayer(Builder::ConstLayer("weights").setData(generateBlob(precision, {1000, 4096}, Layout::NC)));
-        biasesId = builder.addLayer(Builder::ConstLayer("biases").setData(generateBlob(precision, {1000}, Layout::C)));
-        layerId = builder.addLayer({{layerId}, {weightsId}, {biasesId}}, Builder::FullyConnectedLayer(alexNetNames[21]).setOutputNum(1000));
-        layerId = builder.addLayer({{layerId}}, Builder::SoftMaxLayer(alexNetNames[22]).setAxis(1));
-
-        idx_t outputId = builder.addLayer({PortInfo(layerId)}, Builder::OutputLayer(alexNetNames[23]));
-        return builder;
-    }
-
-    const INetwork::CPtr createAlexnet() {
-        return prepareAlexnetBuilder().build();
-    }
-
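-    // Cross-checks the builder INetwork against the ICNNNetwork: every CNN data edge must have a matching connection, and any leftover connections may only target parameter ("type") input ports, otherwise an exception is thrown.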
-    void compareWithICNNNetwork(const INetwork& network, const ICNNNetwork& cnnNetwork) {
-        for (const auto& layer : network) {
-            auto connections = network.getLayerConnections(layer->getId());
-            CNNLayerPtr cnnLayer;
-            StatusCode sts = cnnNetwork.getLayerByName(layer->getName().c_str(), cnnLayer, nullptr);
-            if (sts != OK && (layer->getType() == "Output" || layer->getType() == "Const"))
-                continue;
-            else if (sts != OK)
-                THROW_IE_EXCEPTION << "Cannot find CNNLayer by name: " << layer->getName();
-
-            // Output connections
-            for (size_t i = 0; i < cnnLayer->outData.size(); i++) {
-                for (const auto& it : cnnLayer->outData[i]->getInputTo()) {
-                    size_t j = 0;
-                    for (; j < it.second->insData.size(); j++) {
-                        auto lockedData = it.second->insData[j].lock();
-                        if (lockedData && lockedData.get() == cnnLayer->outData[i].get()) {
-                            break;
-                        }
-                    }
-
-                    for (auto conIt = connections.begin(); conIt != connections.end(); conIt++) {
-                        const auto& inputPorts = network.getLayer(conIt->to().layerId())->getInputPorts();
-                        idx_t realPortId(0);
-                        for (size_t q = 0; q < conIt->to().portId() && q < inputPorts.size(); q++) {
-                            if (inputPorts[q].getParameters().find("type") == inputPorts[q].getParameters().end())
-                                realPortId++;
-                        }
-
-                        if (conIt->from().layerId() == layer->getId() && conIt->from().portId() == i &&
-                                network.getLayer(conIt->to().layerId())->getName() == it.second->name &&
-                                realPortId == j) {
-                            connections.erase(conIt);
-                            break;
-                        }
-                    }
-                }
-            }
-
-            // Input connections
-            for (size_t i = 0; i < cnnLayer->insData.size(); i++) {
-                auto inData = cnnLayer->insData[i].lock();
-                if (!inData)
-                    continue;
-                auto creatorLayer = inData->getCreatorLayer().lock();
-                if (!creatorLayer)
-                    continue;
-                size_t j = 0;
-                for (; j < creatorLayer->outData.size(); j++) {
-                    if (creatorLayer->outData[j] && creatorLayer->outData[j].get() == inData.get()) {
-                        break;
-                    }
-                }
-
-                for (auto conIt = connections.begin(); conIt != connections.end(); conIt++) {
-                    if (conIt->to().layerId() == layer->getId() && conIt->from().portId() == j &&
-                        network.getLayer(conIt->from().layerId())->getName() == creatorLayer->name &&
-                        conIt->to().portId() == i) {
-                        connections.erase(conIt);
-                        break;
-                    }
-                }
-            }
-
-            if (connections.size() == 1 && network.getLayer(connections[0].to().layerId())->getType() == "Output")
-                connections.erase(connections.begin());
-
-            bool connectionsConnected = true;
-            for (const auto& connection : connections) {
-                if (connection.to().layerId() != layer->getId()) {
-                    connectionsConnected = false;
-                    break;
-                }
-                const auto& port = layer->getInputPorts()[connection.to().portId()];
-                if (port.getParameters().find("type") == port.getParameters().end()) {
-                    connectionsConnected = false;
-                    break;
-                }
-            }
-
-            if (!connectionsConnected)
-                THROW_IE_EXCEPTION << "Not all connections were connected.";
-        }
-    }
-
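-    // Compares two ICNNNetworks layer by layer (name, type, precision, input/output data and tensor descriptors) and verifies that the input and output maps match by name.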
-    void compareICNNNetworks(const ICNNNetwork::Ptr newNetwork, const ICNNNetwork& oldNetwork) {
-        CNNNetwork network(newNetwork);
-
-        if (newNetwork->layerCount() != oldNetwork.layerCount())
-            THROW_IE_EXCEPTION << "ICNNNetworks have different numbers of layers!";
-        for (const auto& layer : network) {
-            CNNLayerPtr oldLayer;
-            StatusCode sts = oldNetwork.getLayerByName(layer->name.c_str(), oldLayer, nullptr);
-            bool success = sts == OK && layer->name == oldLayer->name &&
-                    layer->type == oldLayer->type &&
-                    layer->insData.size() == oldLayer->insData.size() &&
-                    layer->outData.size() == oldLayer->outData.size() &&
-                    layer->precision == oldLayer->precision;
-
-            for (size_t i = 0; i < layer->insData.size() && success; i++) {
-                auto lockedOldData = oldLayer->insData[i].lock();
-                auto lockedData = layer->insData[i].lock();
-                success = success && lockedOldData->getName() == lockedData->getName() &&
-                          lockedOldData->getTensorDesc() == lockedData->getTensorDesc();
-            }
-            for (size_t i = 0; i < layer->outData.size() && success; i++) {
-                success = success && oldLayer->outData[i]->getName() == layer->outData[i]->getName() &&
-                        oldLayer->outData[i]->getTensorDesc() == layer->outData[i]->getTensorDesc();
-            }
-
-            if (!success)
-                THROW_IE_EXCEPTION << "ICNNNetworks have different layers!";
-        }
-
-        InputsDataMap newInput;
-        OutputsDataMap newOutput;
-        newNetwork->getInputsInfo(newInput);
-        newNetwork->getOutputsInfo(newOutput);
-        InputsDataMap oldInput;
-        OutputsDataMap oldOutput;
-        oldNetwork.getInputsInfo(oldInput);
-        oldNetwork.getOutputsInfo(oldOutput);
-
-        bool success = newInput.size() == oldInput.size();
-        for (const auto& it : newInput) {
-            if (!success)
-                break;
-            success = success && oldInput.find(it.first) != oldInput.end();
-        }
-        if (!success)
-            THROW_IE_EXCEPTION << "ICNNNetworks have different inputs!";
-
-        success = newOutput.size() == oldOutput.size();
-        for (const auto& it : newOutput) {
-            if (!success)
-                break;
-            success = success && oldOutput.find(it.first) != oldOutput.end();
-        }
-        if (!success)
-            THROW_IE_EXCEPTION << "ICNNNetworks have different outputs!";
-    }
-};
-
-TEST_F(NetworkBuilderTest, checkReshapeAlexNet) {
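-    // Expected per-layer port shapes after shape inference; the Input layer has no input ports and the Output layer has no output ports.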
-    std::map<std::string, std::vector<SizeVector>> inPorts = {
-            {alexNetNames[0], {}},
-            {alexNetNames[1], {{1, 3, 227, 227}}},
-            {alexNetNames[2], {{1, 3, 227, 227}}},
-            {alexNetNames[3], {{1, 96, 55, 55}}},
-            {alexNetNames[4], {{1, 96, 55, 55}}},
-            {alexNetNames[5], {{1, 96, 55, 55}}},
-            {alexNetNames[6], {{1, 96, 27, 27}}},
-            {alexNetNames[7], {{1, 256, 27, 27}}},
-            {alexNetNames[8], {{1, 256, 27, 27}}},
-            {alexNetNames[9], {{1, 256, 27, 27}}},
-            {alexNetNames[10], {{1, 256, 13, 13}}},
-            {alexNetNames[11], {{1, 384, 13, 13}}},
-            {alexNetNames[12], {{1, 384, 13, 13}}},
-            {alexNetNames[13], {{1, 384, 13, 13}}},
-            {alexNetNames[14], {{1, 384, 13, 13}}},
-            {alexNetNames[15], {{1, 256, 13, 13}}},
-            {alexNetNames[16], {{1, 256, 13, 13}}},
-            {alexNetNames[17], {{1, 256, 6, 6}}},
-            {alexNetNames[18], {{1, 4096}}},
-            {alexNetNames[19], {{1, 4096}}},
-            {alexNetNames[20], {{1, 4096}}},
-            {alexNetNames[21], {{1, 4096}}},
-            {alexNetNames[22], {{1, 1000}}},
-            {alexNetNames[23], {{1, 1000}}}
-    };
-
-    std::map<std::string, std::vector<SizeVector>> outPorts = {
-            {alexNetNames[0], {{1, 3, 227, 227}}},
-            {alexNetNames[1], {{1, 3, 227, 227}}},
-            {alexNetNames[2], {{1, 96, 55, 55}}},
-            {alexNetNames[3], {{1, 96, 55, 55}}},
-            {alexNetNames[4], {{1, 96, 55, 55}}},
-            {alexNetNames[5], {{1, 96, 27, 27}}},
-            {alexNetNames[6], {{1, 256, 27, 27}}},
-            {alexNetNames[7], {{1, 256, 27, 27}}},
-            {alexNetNames[8], {{1, 256, 27, 27}}},
-            {alexNetNames[9], {{1, 256, 13, 13}}},
-            {alexNetNames[10], {{1, 384, 13, 13}}},
-            {alexNetNames[11], {{1, 384, 13, 13}}},
-            {alexNetNames[12], {{1, 384, 13, 13}}},
-            {alexNetNames[13], {{1, 384, 13, 13}}},
-            {alexNetNames[14], {{1, 256, 13, 13}}},
-            {alexNetNames[15], {{1, 256, 13, 13}}},
-            {alexNetNames[16], {{1, 256, 6, 6}}},
-            {alexNetNames[17], {{1, 4096}}},
-            {alexNetNames[18], {{1, 4096}}},
-            {alexNetNames[19], {{1, 4096}}},
-            {alexNetNames[20], {{1, 4096}}},
-            {alexNetNames[21], {{1, 1000}}},
-            {alexNetNames[22], {{1, 1000}}},
-            {alexNetNames[23], {}}
-    };
-
-    Builder::Network builder = prepareAlexnetBuilder();
-    for (const auto &layer : builder.getLayers()) {
-        if (layer->getType() == "Input") {
-            ASSERT_EQ(outPorts[layer->getName()][0], layer->getOutputPorts()[0].shape());
-        } else if (layer->getType() != "Const") {
-            for (const auto &port : layer->getOutputPorts()) {
-                ASSERT_TRUE(port.shape().empty());
-            }
-        }
-    }
-    INetwork::CPtr graph;
-    ASSERT_NO_THROW(graph = builder.build());
-    for (const auto &layer : *graph) {
-        if (layer->getType() == "Const")
-            continue;
-        for (size_t i = 0; i < layer->getInputPorts().size(); i++) {
-            if (layer->getInputPorts()[i].getParameters().find("type") != layer->getInputPorts()[i].getParameters().end())
-                continue;
-            ASSERT_EQ(inPorts[layer->getName()][i], layer->getInputPorts()[i].shape());
-        }
-        for (size_t i = 0; i < layer->getOutputPorts().size(); i++) {
-            ASSERT_EQ(outPorts[layer->getName()][i], layer->getOutputPorts()[i].shape());
-        }
-    }
-}
-
-TEST_F(NetworkBuilderTest, checkNoImplWithCorrectPorts) {
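-    // A custom layer type with no registered implementation ("TestLayer") is accepted as long as its declared ports match the connected shapes.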
-    Context ctx;
-    Builder::Network builder(ctx, "TestAlexNet");
-    idx_t inId = builder.addLayer(Builder::InputLayer(alexNetNames[0]).setPort(Port({1,3, 227, 227})));
-    idx_t weightsId = builder.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {96, 3, 11, 11}, Layout::OIHW)));
-    idx_t biasesId = builder.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {96}, Layout::C)));
-    idx_t convId = builder.addLayer({{inId}, {weightsId}, {biasesId}}, Builder::ConvolutionLayer(alexNetNames[2]).setKernel({11, 11})
-            .setStrides({4, 4}).setOutDepth(96).setInputPort(Port({1,3, 227, 227})).setOutputPort(Port({1, 96, 55, 55})));
-    idx_t testLayerId = builder.addLayer({PortInfo(convId)}, Builder::Layer("TestLayer", "testPort")
-            .setInputPorts({Port({1, 96, 55, 55})}).setOutputPorts({Port({1, 96, 55, 55})}));
-    idx_t outputId = builder.addLayer({PortInfo(testLayerId)}, Builder::OutputLayer("out").setPort({Port({1, 96, 55, 55})}));
-
-    ASSERT_NO_THROW(builder.build());
-}
-
-TEST_F(NetworkBuilderTest, checkNoImplWithIncorrectPorts) {
-    Context ctx;
-    Builder::Network builder(ctx, "TestAlexNet");
-    idx_t inId = builder.addLayer(Builder::InputLayer(alexNetNames[0]).setPort(Port({1,3, 227, 227})));
-    idx_t weightsId = builder.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {96, 3, 11, 11}, Layout::OIHW)));
-    idx_t biasesId = builder.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {96}, Layout::C)));
-    idx_t convId = builder.addLayer({{inId}, {weightsId}, {biasesId}}, Builder::ConvolutionLayer(alexNetNames[2]).setKernel({11, 11})
-            .setStrides({4, 4}).setOutDepth(96).setInputPort(Port({1,3, 227, 227})).setOutputPort(Port({1, 96, 55, 55})));
-    ASSERT_THROW(builder.addLayer({PortInfo(convId)}, Builder::Layer("TestLayer", "testPort")
-            .setInputPorts({Port({1, 3, 55, 55})}).setOutputPorts({Port({1, 96, 55, 55})})),
-                    InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(NetworkBuilderTest, createNetworkIterator) {
-    const INetwork::CPtr graph = createAlexnet();
-
-    ASSERT_NO_THROW(graph->begin());
-}
-
-TEST_F(NetworkBuilderTest, checkNetworkSize) {
-    const INetwork::CPtr graph = createAlexnet();
-
-    ASSERT_EQ(41, graph->size());
-}
-
-TEST_F(NetworkBuilderTest, iterateNetworkForeach) {
-    const INetwork::CPtr graph = createAlexnet();
-
-    size_t idx = 0;
-    for (const auto& layer : *graph) {
-        if (layer->getType() == "Const")
-            continue;
-        ASSERT_NE(idx, alexNetNames.size());
-        ASSERT_EQ(alexNetNames[idx], layer->getName());
-        idx++;
-    }
-}
-
-TEST_F(NetworkBuilderTest, iterateNetworkFor) {
-    const INetwork::CPtr graph = createAlexnet();
-
-    size_t idx = 0;
-    for (auto it = graph->begin(); it != graph->end(); it++) {
-        if ((*it)->getType() == "Const")
-            continue;
-        ASSERT_EQ(alexNetNames[idx], (*it)->getName());
-        idx++;
-    }
-}
-
-TEST_F(NetworkBuilderTest, convertFromICNNNetwork) {
-    std::string model = R"V0G0N(
-<net name="PVANET" version="2" batch="1">
-    <layers>
-        <layer name="data" type="Input" precision="FP32" id="0">
-            <output>
-                <port id="0">
-                    <dim>1</dim>
-                    <dim>3</dim>
-                    <dim>544</dim>
-                    <dim>992</dim>
-                </port>
-            </output>
-        </layer>
-        <layer name="conv1_1_conv" type="Convolution" precision="FP32" id="2">
-            <convolution_data stride-x="2" stride-y="2" pad-x="3" pad-y="3" kernel-x="7" kernel-y="7" output="16" group="1"/>
-            <input>
-                <port id="2">
-                    <dim>1</dim>
-                    <dim>3</dim>
-                    <dim>544</dim>
-                    <dim>992</dim>
-                </port>
-            </input>
-            <output>
-                <port id="3">
-                    <dim>1</dim>
-                    <dim>16</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </output>
-            <weights offset="0" size="9408"/>
-            <biases offset="9408" size="64"/>
-        </layer>
-        <layer name="conv1_1_neg" type="Power" precision="FP32" id="3">
-            <power_data power="1" scale="-1" shift="0"/>
-            <input>
-                <port id="4">
-                    <dim>1</dim>
-                    <dim>16</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </input>
-            <output>
-                <port id="5">
-                    <dim>1</dim>
-                    <dim>16</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </output>
-        </layer>
-        <layer name="conv1_1_concat" type="Concat" precision="FP32" id="4">
-            <concat_data axis="1"/>
-            <input>
-                <port id="6">
-                    <dim>1</dim>
-                    <dim>16</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-                <port id="7">
-                    <dim>1</dim>
-                    <dim>16</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </input>
-            <output>
-                <port id="8">
-                    <dim>1</dim>
-                    <dim>32</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </output>
-        </layer>
-        <layer name="conv1_1_scale" type="ScaleShift" precision="FP32" id="5">
-            <input>
-                <port id="9">
-                    <dim>1</dim>
-                    <dim>32</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </input>
-            <output>
-                <port id="10">
-                    <dim>1</dim>
-                    <dim>32</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </output>
-            <weights offset="9472" size="128"/>
-            <biases offset="9600" size="128"/>
-        </layer>
-        <layer name="conv1_1_relu" type="ReLU" precision="FP32" id="6">
-            <data negative_slope="0" engine="caffe.ReLUParameter.DEFAULT"/>
-            <input>
-                <port id="11">
-                    <dim>1</dim>
-                    <dim>32</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </input>
-            <output>
-                <port id="12">
-                    <dim>1</dim>
-                    <dim>32</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </output>
-        </layer>
-        <layer name="pool1" type="Pooling" precision="FP32" id="7">
-            <pooling_data kernel-x="3" kernel-y="3" pad-x="0" pad-y="0" stride-x="2" stride-y="2" rounding-type="ceil" pool-method="max"/>
-            <input>
-                <port id="13">
-                    <dim>1</dim>
-                    <dim>32</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </input>
-            <output>
-                <port id="14">
-                    <dim>1</dim>
-                    <dim>32</dim>
-                    <dim>136</dim>
-                    <dim>248</dim>
-                </port>
-            </output>
-        </layer>
-    </layers>
-    <edges>
-        <edge from-layer="0" from-port="0" to-layer="2" to-port="2"/>
-        <edge from-layer="2" from-port="3" to-layer="3" to-port="4"/>
-        <edge from-layer="2" from-port="3" to-layer="4" to-port="6"/>
-        <edge from-layer="3" from-port="5" to-layer="4" to-port="7"/>
-        <edge from-layer="4" from-port="8" to-layer="5" to-port="9"/>
-        <edge from-layer="5" from-port="10" to-layer="6" to-port="11"/>
-        <edge from-layer="6" from-port="12" to-layer="7" to-port="13"/>
-    </edges>
-</net>)V0G0N";
-
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
-    InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {9728}, InferenceEngine::C });
-    weights->allocate();
-    fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
-    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
-
-    net_reader.SetWeights(weights_ptr);
-    INetwork::CPtr network = Builder::Network(net_reader.getNetwork()).build();
-
-    try {
-        compareWithICNNNetwork(*network, net_reader.getNetwork());
-    } catch (InferenceEngine::details::InferenceEngineException &ex) {
-        FAIL() << ex.what();
-    }
-}
-
-TEST_F(NetworkBuilderTest, convertFromICNNNetworkToICNNNetwork) {
-    std::string model = R"V0G0N(
-<net name="PVANET" version="2" batch="1">
-    <layers>
-        <layer name="data" type="Input" precision="FP32" id="0">
-            <output>
-                <port id="0">
-                    <dim>1</dim>
-                    <dim>3</dim>
-                    <dim>544</dim>
-                    <dim>992</dim>
-                </port>
-            </output>
-        </layer>
-        <layer name="conv1_1_conv" type="Convolution" precision="FP32" id="2">
-            <convolution_data stride-x="2" stride-y="2" pad-x="3" pad-y="3" kernel-x="7" kernel-y="7" output="16" group="1"/>
-            <input>
-                <port id="2">
-                    <dim>1</dim>
-                    <dim>3</dim>
-                    <dim>544</dim>
-                    <dim>992</dim>
-                </port>
-            </input>
-            <output>
-                <port id="3">
-                    <dim>1</dim>
-                    <dim>16</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </output>
-            <weights offset="0" size="9408"/>
-            <biases offset="9408" size="64"/>
-        </layer>
-        <layer name="conv1_1_neg" type="Power" precision="FP32" id="3">
-            <power_data power="1" scale="-1" shift="0"/>
-            <input>
-                <port id="4">
-                    <dim>1</dim>
-                    <dim>16</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </input>
-            <output>
-                <port id="5">
-                    <dim>1</dim>
-                    <dim>16</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </output>
-        </layer>
-        <layer name="conv1_1_concat" type="Concat" precision="FP32" id="4">
-            <concat_data axis="1"/>
-            <input>
-                <port id="6">
-                    <dim>1</dim>
-                    <dim>16</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-                <port id="7">
-                    <dim>1</dim>
-                    <dim>16</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </input>
-            <output>
-                <port id="8">
-                    <dim>1</dim>
-                    <dim>32</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </output>
-        </layer>
-        <layer name="conv1_1_scale" type="ScaleShift" precision="FP32" id="5">
-            <input>
-                <port id="9">
-                    <dim>1</dim>
-                    <dim>32</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </input>
-            <output>
-                <port id="10">
-                    <dim>1</dim>
-                    <dim>32</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </output>
-            <weights offset="9472" size="128"/>
-            <biases offset="9600" size="128"/>
-        </layer>
-        <layer name="conv1_1_relu" type="ReLU" precision="FP32" id="6">
-            <data negative_slope="0" engine="caffe.ReLUParameter.DEFAULT"/>
-            <input>
-                <port id="11">
-                    <dim>1</dim>
-                    <dim>32</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </input>
-            <output>
-                <port id="12">
-                    <dim>1</dim>
-                    <dim>32</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </output>
-        </layer>
-        <layer name="pool1" type="Pooling" precision="FP32" id="7">
-            <pooling_data kernel-x="3" kernel-y="3" pad-x="0" pad-y="0" stride-x="2" stride-y="2" rounding-type="ceil" pool-method="max"/>
-            <input>
-                <port id="13">
-                    <dim>1</dim>
-                    <dim>32</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </input>
-            <output>
-                <port id="14">
-                    <dim>1</dim>
-                    <dim>32</dim>
-                    <dim>136</dim>
-                    <dim>248</dim>
-                </port>
-            </output>
-        </layer>
-    </layers>
-    <edges>
-        <edge from-layer="0" from-port="0" to-layer="2" to-port="2"/>
-        <edge from-layer="2" from-port="3" to-layer="3" to-port="4"/>
-        <edge from-layer="2" from-port="3" to-layer="4" to-port="6"/>
-        <edge from-layer="3" from-port="5" to-layer="4" to-port="7"/>
-        <edge from-layer="4" from-port="8" to-layer="5" to-port="9"/>
-        <edge from-layer="5" from-port="10" to-layer="6" to-port="11"/>
-        <edge from-layer="6" from-port="12" to-layer="7" to-port="13"/>
-    </edges>
-</net>)V0G0N";
-
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
-    InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {9728}, InferenceEngine::C });
-    weights->allocate();
-    fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
-    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
-
-    net_reader.SetWeights(weights_ptr);
-    std::shared_ptr<ICNNNetwork> network = Builder::convertToICNNNetwork(Builder::Network(net_reader.getNetwork()).build());
-
-    try {
-        compareICNNNetworks(network, net_reader.getNetwork());
-    } catch (InferenceEngine::details::InferenceEngineException &ex) {
-        FAIL() << ex.what();
-    }
-}
-
-TEST_F(NetworkBuilderTest, connectTwoNetworks) {
-    std::string model = R"V0G0N(
-<net name="PVANET" version="2" batch="1">
-    <layers>
-        <layer name="data" type="Input" precision="FP32" id="0">
-            <output>
-                <port id="0">
-                    <dim>1</dim>
-                    <dim>3</dim>
-                    <dim>544</dim>
-                    <dim>992</dim>
-                </port>
-            </output>
-        </layer>
-        <layer name="conv1_1_conv" type="Convolution" precision="FP32" id="2">
-            <convolution_data stride-x="2" stride-y="2" pad-x="3" pad-y="3" pad-r="3" pad-b="3" kernel-x="7" kernel-y="7" output="16" group="1"/>
-            <input>
-                <port id="2">
-                    <dim>1</dim>
-                    <dim>3</dim>
-                    <dim>544</dim>
-                    <dim>992</dim>
-                </port>
-            </input>
-            <output>
-                <port id="3">
-                    <dim>1</dim>
-                    <dim>16</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </output>
-            <weights offset="0" size="9408"/>
-            <biases offset="9408" size="64"/>
-        </layer>
-        <layer name="conv1_1_neg" type="Power" precision="FP32" id="3">
-            <power_data power="1" scale="-1" shift="0"/>
-            <input>
-                <port id="4">
-                    <dim>1</dim>
-                    <dim>16</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </input>
-            <output>
-                <port id="5">
-                    <dim>1</dim>
-                    <dim>16</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </output>
-        </layer>
-        <layer name="conv1_1_concat" type="Concat" precision="FP32" id="4">
-            <concat_data axis="1"/>
-            <input>
-                <port id="6">
-                    <dim>1</dim>
-                    <dim>16</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-                <port id="7">
-                    <dim>1</dim>
-                    <dim>16</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </input>
-            <output>
-                <port id="8">
-                    <dim>1</dim>
-                    <dim>32</dim>
-                    <dim>272</dim>
-                    <dim>496</dim>
-                </port>
-            </output>
-        </layer>
-    </layers>
-    <edges>
-        <edge from-layer="0" from-port="0" to-layer="2" to-port="2"/>
-        <edge from-layer="2" from-port="3" to-layer="3" to-port="4"/>
-        <edge from-layer="2" from-port="3" to-layer="4" to-port="6"/>
-        <edge from-layer="3" from-port="5" to-layer="4" to-port="7"/>
-    </edges>
-</net>)V0G0N";
-
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
-    InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {9472}, InferenceEngine::C });
-    weights->allocate();
-    fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
-    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
-
-    net_reader.SetWeights(weights_ptr);
-    Builder::Network originalNetwork(net_reader.getNetwork());
-    Builder::Network addNetwork(net_reader.getNetwork());
-
-    // Find output
-    idx_t lastLayerId(0);
-    for (const auto& layer : originalNetwork.getLayers()) {
-        if (layer->getType() != "Output")
-            continue;
-        const auto connections = originalNetwork.getLayerConnections(layer->getId());
-        ASSERT_EQ(1, connections.size());
-        ASSERT_EQ(layer->getId(), connections[0].to().layerId());
-        ASSERT_EQ(0, connections[0].from().portId());
-        lastLayerId = connections[0].from().layerId();
-        originalNetwork.disconnect(connections[0]);
-        originalNetwork.removeLayer(layer->getId());
-        break;
-    }
-
-    std::map<idx_t, idx_t> oldNewId;
-    for (const auto& layer : addNetwork) {
-        if (layer->getType() == "Input") {
-            oldNewId[layer->getId()] = lastLayerId;
-            continue;
-        }
-        auto newLayer = layer;
-        if (newLayer->getType() != "Const") {
-            for (size_t i = 0; i < newLayer->getInputPorts().size(); i++) {
-                newLayer->getInputPorts()[i].setData(std::make_shared<PortData>());
-            }
-            for (size_t i = 0; i < newLayer->getOutputPorts().size(); i++) {
-                newLayer->getOutputPorts()[i].setData(std::make_shared<PortData>());
-            }
-        }
-        oldNewId[layer->getId()] = originalNetwork.addLayer(*newLayer);
-        const auto connections = addNetwork.getLayerConnections(layer->getId());
-        for (const auto& connection : connections) {
-            if (oldNewId.find(connection.from().layerId()) == oldNewId.end() ||
-                    oldNewId.find(connection.to().layerId()) == oldNewId.end())
-                continue;
-            originalNetwork.connect({oldNewId[connection.from().layerId()], connection.from().portId()},
-                    {oldNewId[connection.to().layerId()], connection.to().portId()});
-        }
-
-        if (layer->getType() == "Convolution") {
-            idx_t weightsId = originalNetwork.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {16, 32, 7, 7}, Layout::OIHW)));
-            for (const auto& connection : originalNetwork.getLayerConnections(oldNewId[layer->getId()])) {
-                if (connection.to().layerId() != oldNewId[layer->getId()] || connection.to().portId() != 1)
-                    continue;
-                originalNetwork.removeLayer(connection.from().layerId());
-                originalNetwork.disconnect(connection);
-            }
-            originalNetwork.connect({weightsId}, {oldNewId[layer->getId()], 1});
-        }
-    }
-    ASSERT_NO_THROW(originalNetwork.build());
-}
-
-TEST_F(NetworkBuilderTest, createLayersWithTheSameNames) {
-    InferenceEngine::Builder::Network netBuilder("");
-
-    // Connect the convolutional layer with its inputs and outputs.
-    InferenceEngine::Builder::InputLayer inpLayer("data");
-    inpLayer.setPort(InferenceEngine::Port({1, 1, 10, 10}));
-    auto inpLayerId = netBuilder.addLayer(inpLayer);
-
-    // Create convolutional layer
-    const size_t outCn = 1, inpCn = 1, kernelH = 3, kernelW = 3;
-    InferenceEngine::Builder::ConvolutionLayer ieLayer("conv1");
-
-    ieLayer.setKernel({outCn, inpCn, kernelH, kernelW});
-    ieLayer.setStrides({1, 1, 1, 1});
-    ieLayer.setDilation({1, 1, 1, 1});
-    ieLayer.setPaddingsBegin({0, 0, 0, 0});
-    ieLayer.setPaddingsEnd({0, 0, 0, 0});
-    ieLayer.setGroup(1);
-    ieLayer.setOutDepth(outCn);
-    idx_t weightsId = netBuilder.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {1, 1, 3, 3}, Layout::OIHW)));
-    auto convLayerId = netBuilder.addLayer({{inpLayerId}, {weightsId}}, ieLayer);
-
-    // Connect the convolution layer with its output
-    InferenceEngine::Builder::OutputLayer outLayer("conv1");
-    auto convOutLayerId = netBuilder.addLayer({convLayerId}, outLayer);
-    ASSERT_NE(netBuilder.getLayer(convLayerId)->getName(), netBuilder.getLayer(convOutLayerId)->getName());
-    InferenceEngine::Builder::ReLULayer reLULayer("relu1");
-    reLULayer.setNegativeSlope(0);
-    auto reluLayerId = netBuilder.addLayer({convLayerId}, reLULayer);
-    InferenceEngine::Builder::OutputLayer outReLULayer("relu1");
-    auto reluOutLayerId = netBuilder.addLayer({reluLayerId}, outReLULayer);
-    ASSERT_NE(netBuilder.getLayer(reluLayerId)->getName(), netBuilder.getLayer(reluOutLayerId)->getName());
-
-    ASSERT_NO_THROW(netBuilder.build());
-}
-
-TEST_F(NetworkBuilderTest, RemoveLayerAndBuild) {
-    auto builder = prepateAlexnetBuilder();
-    builder.removeLayer(builder.getLayers()[2]->getId());
-
-    ASSERT_THROW(builder.build(), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(NetworkBuilderTest, CheckConnectionsData) {
-    auto builder = prepateAlexnetBuilder();
-
-    for (const auto& connection : builder.getConnections()) {
-        const auto srcPort = builder.getLayer(connection.from().layerId())->getOutputPorts()[connection.from().portId()];
-        const auto dstPort = builder.getLayer(connection.to().layerId())->getInputPorts()[connection.to().portId()];
-
-        ASSERT_EQ(srcPort.getData(), dstPort.getData());
-    }
-}
-
-TEST_F(NetworkBuilderTest, DocumentationExample) {
-    // Create graph with name
-    InferenceEngine::Builder::Network graph("Example1");
-
-    // Create network
-    // In-place add input layer
-    idx_t inputLayerId = graph.addLayer(Builder::InputLayer("in").setPort(Port({1, 3, 22, 22})));
-
-    // In-place add a ReLU layer builder with negative slope 0.1 and connect it to output port 0 of the Input layer builder
-    // Here layerId equals the new Input layer builder's ID; the port index is omitted because 0 is the default ({layerId} == {layerId, 0})
-    idx_t relu1Id = graph.addLayer({{inputLayerId}}, Builder::ReLULayer("relu1").setNegativeSlope(0.1f));
-
-    // In-place add ScaleShift layer builder
-    InferenceEngine::Blob::Ptr blobWithScaleShiftBiases = make_shared_blob<float>(TensorDesc(Precision::FP32, {3}, Layout::C));
-    blobWithScaleShiftBiases->allocate();
-    auto *data = blobWithScaleShiftBiases->buffer().as<float *>();
-    data[0] = 1;
-    data[1] = 2;
-    data[2] = 3;
-    idx_t biasesId = graph.addLayer(Builder::ConstLayer("biases").setData(blobWithScaleShiftBiases));
-    idx_t scaleShiftId = graph.addLayer(Builder::ScaleShiftLayer("scaleShift1"));
-
-    // Connect the ScaleShift layer with relu1
-    graph.connect({relu1Id}, {scaleShiftId}); // Port indexes can also be set explicitly (0 is the default): graph.connect({layerId, outPortIdx}, {scaleShiftId, inPortIdx});
-    graph.connect({biasesId}, {scaleShiftId, 2});
-    // Create ReLU layer with a negative slope 0.2 using generic layer builder and connect it with scaleShift
-    idx_t relu2Id = graph.addLayer({{scaleShiftId}}, Builder::Layer("ReLU", "relu2").setParameters({{"negative_slope", 0.2f}}).setOutputPorts({Port()}).setInputPorts({Port()}));
-
-    // Every branch in the graph must end with an Output layer, so create one
-    idx_t outId = graph.addLayer({{relu2Id, 0}}, Builder::OutputLayer("out"));
-
-    // Build original network
-    InferenceEngine::INetwork::CPtr finalNetwork = graph.build();
-    std::shared_ptr<InferenceEngine::ICNNNetwork> cnnNetwork = InferenceEngine::Builder::convertToICNNNetwork(finalNetwork);
-
-    // Modify network
-    // Remove relu2 layer from the topology
-    std::vector<InferenceEngine::Connection> connections = graph.getLayerConnections(relu2Id);
-    for (const auto& connection : connections) {
-        graph.disconnect(connection);
-    }
-    graph.removeLayer(relu2Id);
-
-    // Connect scaleShift1 and out
-    graph.connect({scaleShiftId}, {outId});
-    // Build network without relu2
-    InferenceEngine::INetwork::CPtr changedNetwork = graph.build();
-}
-
-TEST_F(NetworkBuilderTest, CreateFullyConnectedWithoutBiases) {
-    Builder::Network builder("network");
-    Builder::FullyConnectedLayer fcBuilder("FullyConnected");
-
-    SizeVector inputDims = {1, 2, 16, 16}; // 512 elements (2 KB as FP32)
-
-    idx_t layerId = builder.addLayer(Builder::InputLayer("input").setPort(Port(inputDims)));
-
-    idx_t weightsId = builder.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32,
-                                                                                           {1024, 2, 16, 16}, Layout::OIHW)));
-
-    layerId = builder.addLayer({{layerId}, {weightsId} }, Builder::FullyConnectedLayer("FullyConnected").setOutputNum(1024 * 1));
-
-    builder.addLayer({PortInfo(layerId)}, Builder::OutputLayer("output"));
-
-    ASSERT_NO_THROW(std::shared_ptr<InferenceEngine::ICNNNetwork> cnnNetwork = InferenceEngine::Builder::convertToICNNNetwork(builder.build()));
-}
-
-TEST_F(NetworkBuilderTest, CreateAndConvertNetworkWithoutWeightsWithConst) {
-    Builder::Network builder("network");
-
-    idx_t layerId = builder.addLayer(Builder::InputLayer("input").setPort(Port({1, 1, 10, 10})));
-    layerId = builder.addLayer({layerId}, Builder::PoolingLayer("pool").setKernel({2, 2}).setStrides({2, 2})
-            .setPoolingType(Builder::PoolingLayer::PoolingType::MAX));
-    builder.addLayer({layerId}, Builder::OutputLayer("output"));
-
-
-    layerId = builder.addLayer(Builder::ConstLayer("constWA").setData(generateBlob(Precision::FP16, {1}, Layout::C)));
-    builder.addLayer({layerId}, Builder::OutputLayer("output_const"));
-
-    auto cnnNetwork = InferenceEngine::CNNNetwork(InferenceEngine::Builder::convertToICNNNetwork(builder.build()));
-    ASSERT_EQ(Precision::FP16, cnnNetwork.getPrecision());
-}
-
-TEST_F(NetworkBuilderTest, CreateAndConvertNetworkWithoutWeights) {
-    Builder::Network builder("network");
-
-    idx_t layerId = builder.addLayer(Builder::InputLayer("input").setPort(Port({1, 1, 10, 10}, Precision::FP16)));
-    layerId = builder.addLayer({layerId}, Builder::PoolingLayer("pool").setKernel({2, 2}).setStrides({2, 2})
-            .setPoolingType(Builder::PoolingLayer::PoolingType::MAX));
-    builder.addLayer({layerId}, Builder::OutputLayer("output"));
-
-    auto cnnNetwork = InferenceEngine::CNNNetwork(InferenceEngine::Builder::convertToICNNNetwork(builder.build()));
-    ASSERT_EQ(Precision::FP16, cnnNetwork.getPrecision());
-}
-
-TEST_F(NetworkBuilderTest, CreateAndNetworkWithPadLayer) {
-    Builder::Network builder("network");
-
-    idx_t layerId = builder.addLayer(Builder::InputLayer("input").setPort(Port({1, 2, 3, 4})));
-    Builder::Layer padLayer("Pad", "padding");
-    padLayer.getParameters()["pads_begin"] = std::vector<int>({0, 0, 1, 1});
-    padLayer.getParameters()["pads_end"] = std::vector<int>({0, 0, 1, 1});
-    padLayer.getParameters()["pad_mode"] = std::string("constant");
-    padLayer.getParameters()["pad_value"] = 0;
-    padLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
-    padLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
-    layerId = builder.addLayer({layerId}, padLayer);
-    builder.addLayer({layerId}, Builder::OutputLayer("output"));
-
-    ASSERT_NO_THROW(InferenceEngine::CNNNetwork(InferenceEngine::Builder::convertToICNNNetwork(builder.build())));
-}
-
-TEST_F(NetworkBuilderTest, CreateLSTMFromBuilder) {
-    std::string model = R"V0G0N(
-<net name="LSTMTINet" precision="FP32" version="2" batch="1">
-    <layers>
-        <layer name="Input0" precision="FP32" type="Input" id="0">
-            <output>
-                <port id="0">
-                    <dim>1</dim>
-                    <dim>3</dim>
-                    <dim>10</dim>
-                </port>
-            </output>
-        </layer>
-        <layer name="Input1" precision="FP32" type="Input" id="1">
-            <output>
-                <port id="1">
-                    <dim>1</dim>
-                    <dim>5</dim>
-                </port>
-            </output>
-        </layer>
-        <layer name="Input2" precision="FP32" type="Input" id="2">
-            <output>
-                <port id="2">
-                    <dim>1</dim>
-                    <dim>5</dim>
-                </port>
-            </output>
-        </layer>
-        <layer name="RNN3" precision="FP32" type="RNN" id="3">
-            <data axis="1" direction="Backward" hidden_size="5"></data>
-            <input>
-                <port id="3">
-                    <dim>1</dim>
-                    <dim>3</dim>
-                    <dim>10</dim>
-                </port>
-                <port id="4">
-                    <dim>1</dim>
-                    <dim>5</dim>
-                </port>
-                <port id="5">
-                    <dim>1</dim>
-                    <dim>5</dim>
-                </port>
-            </input>
-            <output>
-                <port id="6">
-                    <dim>1</dim>
-                    <dim>3</dim>
-                    <dim>5</dim>
-                </port>
-                <port id="7">
-                    <dim>1</dim>
-                    <dim>5</dim>
-                </port>
-                <port id="8">
-                    <dim>1</dim>
-                    <dim>5</dim>
-                </port>
-            </output>
-
-            <weights offset="0" size="1200"></weights>
-            <biases offset="1200" size="80"></biases>
-        </layer>
-    </layers>
-    <edges>
-        <edge from-layer="0" from-port="0" to-layer="3" to-port="3"></edge>
-        <edge from-layer="1" from-port="1" to-layer="3" to-port="4"></edge>
-        <edge from-layer="2" from-port="2" to-layer="3" to-port="5"></edge>
-    </edges>
-</net>
-    )V0G0N";
-
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
-    Builder::Network builder("LSTMTINet");
-    idx_t in0 = builder.addLayer(Builder::InputLayer("Input0").setPort(Port({1, 3, 10})));
-    idx_t in1 = builder.addLayer(Builder::InputLayer("Input1").setPort(Port({1, 5})));
-    idx_t in2 = builder.addLayer(Builder::InputLayer("Input2").setPort(Port({1, 5})));
-    idx_t weightId = builder.addLayer(Builder::ConstLayer("weights").setData(generateBlob(Precision::FP32, {300}, Layout::C)));
-    idx_t biasesId = builder.addLayer(Builder::ConstLayer("biases").setData(generateBlob(Precision::FP32, {20}, Layout::C)));
-    idx_t lstm = builder.addLayer({{in0}, {weightId}, {biasesId}},
-            Builder::LSTMSequenceLayer("RNN3")
-            .setDirection("Backward")
-            .setHiddenSize(5));
-    builder.getLayer(lstm)->getOutputPorts()[0].setShape({1, 3, 5});
-    builder.getLayer(lstm)->getOutputPorts()[1].setShape({1, 5});
-    builder.getLayer(lstm)->getOutputPorts()[2].setShape({1, 5});
-    builder.connect({in1}, {lstm, 4});
-    builder.connect({in2}, {lstm, 5});
-
-    builder.addLayer({{lstm, 0}}, Builder::OutputLayer("output0"));
-    builder.addLayer({{lstm, 1}}, Builder::OutputLayer("output1"));
-    builder.addLayer({{lstm, 2}}, Builder::OutputLayer("output2"));
-    const auto network = Builder::convertToICNNNetwork(builder.build());
-    try {
-        compareICNNNetworks(network, net_reader.getNetwork());
-    } catch (InferenceEngine::details::InferenceEngineException &ex) {
-        FAIL() << ex.what();
-    }
-}
-
-TEST_F(NetworkBuilderTest, Fp16AlexNetInputPrecision) {
-    auto cnnNetwork = Builder::convertToICNNNetwork(prepateAlexnetBuilder(Precision::FP16).build());
-
-    OutputsDataMap outputs;
-    InputsDataMap inputs;
-
-    cnnNetwork->getInputsInfo(inputs);
-    cnnNetwork->getOutputsInfo(outputs);
-
-    auto input = inputs.begin()->second;
-    auto output = outputs.begin()->second;
-    ASSERT_EQ(Precision::FP32, input->getPrecision());
-    ASSERT_EQ(Precision::FP32, output->getPrecision());
-}
-
-TEST_F(NetworkBuilderTest, CheckPreProcessAlexNet) {
-    auto cnnNetwork = Builder::convertToICNNNetwork(createAlexnet());
-
-    InputsDataMap inputs;
-
-    cnnNetwork->getInputsInfo(inputs);
-
-    auto input = inputs.begin()->second;
-    ASSERT_NE(input->getPreProcess().getResizeAlgorithm(), ResizeAlgorithm::RESIZE_BILINEAR);
-    input->getPreProcess().setResizeAlgorithm(ResizeAlgorithm::RESIZE_BILINEAR);
-
-    auto newCnnNetwork = Builder::convertToICNNNetwork(Builder::Network(*cnnNetwork).build());
-    newCnnNetwork->getInputsInfo(inputs);
-    input = inputs.begin()->second;
-    ASSERT_EQ(input->getPreProcess().getResizeAlgorithm(), ResizeAlgorithm::RESIZE_BILINEAR);
-}
-
-TEST_F(NetworkBuilderTest, ReshapeNetworkTest) {
-    Builder::ReshapeLayer("WA");
-
-    std::string model = R"V0G0N(
-<net name="Reshape" version="2" batch="1">
-    <layers>
-        <layer name="data" type="Input" precision="FP32" id="0">
-            <output>
-                <port id="0">
-                    <dim>1</dim>
-                    <dim>1000</dim>
-                    <dim>1</dim>
-                    <dim>1</dim>
-                </port>
-            </output>
-        </layer>
-        <layer id="1" name="flatten" precision="FP32" type="Reshape">
-                       <data axis="1"/>
-                       <input>
-                               <port id="0">
-                                       <dim>1</dim>
-                                       <dim>1000</dim>
-                                       <dim>1</dim>
-                                       <dim>1</dim>
-                               </port>
-                       </input>
-                       <output>
-                               <port id="1">
-                                       <dim>1</dim>
-                                       <dim>1000</dim>
-                               </port>
-                       </output>
-               </layer>
-    </layers>
-    <edges>
-        <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
-    </edges>
-</net>)V0G0N";
-
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-    auto network = Builder::convertToICNNNetwork(Builder::Network(net_reader.getNetwork()).build());
-
-    CNNLayerPtr layer;
-    network->getLayerByName("flatten", layer, nullptr);
-    ASSERT_EQ(layer->outData[0]->getDims().size(), 2);
-    try {
-        compareICNNNetworks(network, net_reader.getNetwork());
-    } catch (InferenceEngine::details::InferenceEngineException &ex) {
-        FAIL() << ex.what();
-    }
-}
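
The removed tests above all drive the same deprecated Builder workflow: add an Input layer, chain layer builders by ID, terminate every branch with an Output layer, then call build() and convert. A minimal sketch of that workflow, assuming the pre-removal <ie_builders.hpp> API (buildMinimalNetwork is an illustrative name):

    #include <ie_builders.hpp>
    #include <memory>

    using namespace InferenceEngine;

    std::shared_ptr<ICNNNetwork> buildMinimalNetwork() {
        Builder::Network builder("network");
        // Input -> MAX pooling -> Output, mirroring CreateAndConvertNetworkWithoutWeights.
        idx_t layerId = builder.addLayer(Builder::InputLayer("input").setPort(Port({1, 1, 10, 10})));
        layerId = builder.addLayer({layerId}, Builder::PoolingLayer("pool")
                .setKernel({2, 2}).setStrides({2, 2})
                .setPoolingType(Builder::PoolingLayer::PoolingType::MAX));
        builder.addLayer({layerId}, Builder::OutputLayer("output"));
        // build() validates ports and connections; the converter yields the runtime network.
        return Builder::convertToICNNNetwork(builder.build());
    }
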
diff --git a/inference-engine/tests_deprecated/unit/builders/norm_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/norm_layer_test.cpp
deleted file mode 100644 (file)
index e499697..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-#include <builders/ie_norm_layer.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class NormLayerBuilderTest : public BuilderTestCommon {};
-
-TEST_F(NormLayerBuilderTest, getExistsLayerFromNetworkBuilderWithAcrossMapsEqualTrue) {
-    Builder::Network net("Test");
-    auto layer = Builder::NormLayer("NormLayer").setAlpha(9.999999747378752e-05f).setBeta(0.75f).setSize(5).setAcrossMaps(true).setPort(Port({10, 10, 100, 100}));
-    size_t id = net.addLayer(layer);
-    Builder::NormLayer layerFromNetwork(net.getLayer(id));
-    ASSERT_EQ(layer.getAlpha(), layerFromNetwork.getAlpha());
-    ASSERT_EQ(layer.getBeta(), layerFromNetwork.getBeta());
-    ASSERT_EQ(layer.getAcrossMaps(), layerFromNetwork.getAcrossMaps());
-}
-
-TEST_F(NormLayerBuilderTest, getExistsLayerFromNetworkBuilderWithAcrossMapsEqualFalse) {
-    Builder::Network net("Test");
-    auto layer = Builder::NormLayer("NormLayer").setAlpha(9.999999747378752e-05f).setBeta(0.75f).setSize(5).setAcrossMaps(false).setPort(Port({10, 10, 100, 100}));
-    size_t id = net.addLayer(layer);
-    Builder::NormLayer layerFromNetwork(net.getLayer(id));
-    ASSERT_EQ(layer.getAlpha(), layerFromNetwork.getAlpha());
-    ASSERT_EQ(layer.getBeta(), layerFromNetwork.getBeta());
-    ASSERT_EQ(layer.getAcrossMaps(), layerFromNetwork.getAcrossMaps());
-}
-
-TEST_F(NormLayerBuilderTest, cannotCreateNormLayerWithWrongAlpha) {
-    Builder::Network net("Test");
-    auto layer = Builder::NormLayer("NormLayer").setAlpha(0).setBeta(0.75f).setSize(5).setAcrossMaps(true).setPort(Port({10, 10, 100, 100}));
-    ASSERT_THROW(net.addLayer(layer), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(NormLayerBuilderTest, cannotCreateNormLayerWithWrongBeta) {
-    Builder::Network net("Test");
-    auto layer = Builder::NormLayer("NormLayer").setAlpha(1).setBeta(0).setSize(5).setAcrossMaps(true).setPort(Port({10, 10, 100, 100}));
-    ASSERT_THROW(net.addLayer(layer), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(NormLayerBuilderTest, cannotCreateNormLayerWithWrongSize) {
-    Builder::Network net("Test");
-    auto layer = Builder::NormLayer("NormLayer").setAlpha(1).setBeta(1).setSize(0).setAcrossMaps(true).setPort(Port({10, 10, 100, 100}));
-    ASSERT_THROW(net.addLayer(layer), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(NormLayerBuilderTest, cannotCreateLayerWithWrongShapes) {
-    Builder::Network net("network");
-    Builder::Layer::Ptr fakeNormLayerPtr = std::make_shared<Builder::Layer>("Norm", "Norm layer");
-    fakeNormLayerPtr->getInputPorts().push_back(Port({1, 1, 1, 1}));
-    fakeNormLayerPtr->getOutputPorts().push_back(Port({1, 1, 1, 2}));
-    Builder::NormLayer normLayer(fakeNormLayerPtr);
-    normLayer.setAlpha(1).setBeta(0).setSize(5).setAcrossMaps(true);
-    ASSERT_THROW(net.addLayer(normLayer), InferenceEngine::details::InferenceEngineException);
-}
-
diff --git a/inference-engine/tests_deprecated/unit/builders/normalize_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/normalize_layer_test.cpp
deleted file mode 100644 (file)
index 1732b8d..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-#include <builders/ie_normalize_layer.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class NormalizeLayerBuilderTest : public BuilderTestCommon {};
-
-TEST_F(NormalizeLayerBuilderTest, getExistsLayerFromNetworkBuilder1) {
-    Builder::Network net("network");
-    Builder::NormalizeLayer normalizeLayer("normalizeLayer");
-    normalizeLayer.setEpsilon(0.1).setChannelShared(true).setAcrossMaps(true);
-    size_t ind = net.addLayer(normalizeLayer);
-    Builder::NormalizeLayer layerFromNet(net.getLayer(ind));
-    ASSERT_EQ(normalizeLayer.getEpsilon(), layerFromNet.getEpsilon());
-}
-
-TEST_F(NormalizeLayerBuilderTest, getExistsLayerFromNetworkBuilder2) {
-    Builder::Network net("network");
-    Builder::NormalizeLayer normalizeLayer("normalizeLayer");
-    normalizeLayer.setEpsilon(0.1).setChannelShared(true).setAcrossMaps(false);
-    size_t ind = net.addLayer(normalizeLayer);
-    Builder::NormalizeLayer layerFromNet(net.getLayer(ind));
-    ASSERT_EQ(normalizeLayer.getEpsilon(), layerFromNet.getEpsilon());
-}
-
-TEST_F(NormalizeLayerBuilderTest, getExistsLayerFromNetworkBuilder3) {
-    Builder::Network net("network");
-    Builder::NormalizeLayer normalizeLayer("normalizeLayer");
-    normalizeLayer.setEpsilon(0.1).setChannelShared(false).setAcrossMaps(true);
-    size_t ind = net.addLayer(normalizeLayer);
-    Builder::NormalizeLayer layerFromNet(net.getLayer(ind));
-    ASSERT_EQ(normalizeLayer.getEpsilon(), layerFromNet.getEpsilon());
-}
-
-TEST_F(NormalizeLayerBuilderTest, getExistsLayerFromNetworkBuilder4) {
-    Builder::Network net("network");
-    Builder::NormalizeLayer normalizeLayer("normalizeLayer");
-    normalizeLayer.setEpsilon(0.1).setChannelShared(false).setAcrossMaps(false);
-    size_t ind = net.addLayer(normalizeLayer);
-    Builder::NormalizeLayer layerFromNet(net.getLayer(ind));
-    ASSERT_EQ(normalizeLayer.getEpsilon(), layerFromNet.getEpsilon());
-}
-
-TEST_F(NormalizeLayerBuilderTest, cannotCreateLayerWithWrongEpsilon1) {
-    Builder::Network net("network");
-    Builder::NormalizeLayer normalizeLayer("normalizeLayer");
-    normalizeLayer.setEpsilon(0).setChannelShared(true).setAcrossMaps(true);
-    ASSERT_THROW(net.addLayer(normalizeLayer), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(NormalizeLayerBuilderTest, cannotCreateLayerWithWrongEpsilon2) {
-    Builder::Network net("network");
-    Builder::NormalizeLayer normalizeLayer("normalizeLayer");
-    normalizeLayer.setEpsilon(0).setChannelShared(true).setAcrossMaps(false);
-    ASSERT_THROW(net.addLayer(normalizeLayer), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(NormalizeLayerBuilderTest, cannotCreateLayerWithWrongEpsilon3) {
-    Builder::Network net("network");
-    Builder::NormalizeLayer normalizeLayer("normalizeLayer");
-    normalizeLayer.setEpsilon(0).setChannelShared(false).setAcrossMaps(true);
-    ASSERT_THROW(net.addLayer(normalizeLayer), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(NormalizeLayerBuilderTest, cannotCreateLayerWithWrongEpsilon4) {
-    Builder::Network net("network");
-    Builder::NormalizeLayer normalizeLayer("normalizeLayer");
-    normalizeLayer.setEpsilon(0).setChannelShared(false).setAcrossMaps(false);
-    ASSERT_THROW(net.addLayer(normalizeLayer), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(NormalizeLayerBuilderTest, cannotCreateLayerWithWrongShapes) {
-    Builder::Network net("network");
-    Builder::Layer::Ptr fakeNormalizeLayerPtr = std::make_shared<Builder::Layer>("Normalize", "Normalize layer");
-    fakeNormalizeLayerPtr->getInputPorts().push_back(Port({1, 1, 1, 1}));
-    fakeNormalizeLayerPtr->getOutputPorts().push_back(Port({1, 1, 1, 2}));
-    Builder::NormalizeLayer normalizeLayer(fakeNormalizeLayerPtr);
-    normalizeLayer.setEpsilon(0.1).setChannelShared(true).setAcrossMaps(true);
-    ASSERT_THROW(net.addLayer(normalizeLayer), InferenceEngine::details::InferenceEngineException);
-}
diff --git a/inference-engine/tests_deprecated/unit/builders/output_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/output_layer_test.cpp
deleted file mode 100644 (file)
index 9f93ba6..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-#include <builders/ie_output_layer.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class OutputLayerBuilderTest : public BuilderTestCommon {};
-
-TEST_F(OutputLayerBuilderTest, getExistsLayerFromNetworkBuilder) {
-    Builder::Network network("network");
-    Builder::OutputLayer layer("output layer");
-    layer.setPort(Port({1, 1, 1, 1}));
-    size_t ind = network.addLayer(layer);
-    Builder::OutputLayer layerFromNet(network.getLayer(ind));
-    ASSERT_EQ(layer.getPort().shape(), layerFromNet.getPort().shape());
-    ASSERT_EQ(layer.getPort().shape(), Port({1, 1, 1, 1}).shape());
-}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/unit/builders/relu6_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/relu6_layer_test.cpp
deleted file mode 100644 (file)
index 2773ff0..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-#include <builders/ie_relu6_layer.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class ReLU6LayerBuilderTest : public BuilderTestCommon {};
-
-TEST_F(ReLU6LayerBuilderTest, getExistsLayerFromNetworkBuilder) {
-    Builder::Network net("network");
-    Builder::ReLU6Layer relu6Layer("relu6layer");
-    relu6Layer.setN(100);
-    size_t ind = net.addLayer(relu6Layer);
-    Builder::ReLU6Layer layerFromNet(net.getLayer(ind));
-    ASSERT_EQ(relu6Layer.getN(), layerFromNet.getN());
-}
-
-TEST_F(ReLU6LayerBuilderTest, cannotCreateLayerWithWrongShapes) {
-    Builder::Network net("network");
-    Builder::Layer::Ptr fakeReLU6LayerPtr = std::make_shared<Builder::Layer>("ReLU6", "ReLU6 layer");
-    fakeReLU6LayerPtr->getInputPorts().push_back(Port({1, 1, 1, 1}));
-    fakeReLU6LayerPtr->getOutputPorts().push_back(Port({1, 1, 1, 2}));
-    Builder::ReLU6Layer reLU6Layer(fakeReLU6LayerPtr);
-    reLU6Layer.setN(10);
-    ASSERT_THROW(net.addLayer(reLU6Layer), InferenceEngine::details::InferenceEngineException);
-}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/unit/builders/relu_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/relu_layer_test.cpp
deleted file mode 100644 (file)
index 0752845..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-#include <builders/ie_relu_layer.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class ReLULayerBuilderTest : public BuilderTestCommon {};
-
-TEST_F(ReLULayerBuilderTest, getExistsLayerFromNetworkBuilder) {
-    Builder::Network net("network");
-    Builder::ReLULayer reluLayer("ReLU_layer");
-    reluLayer.setNegativeSlope(100);
-    size_t ind = net.addLayer(reluLayer);
-    Builder::ReLULayer layerFromNet(net.getLayer(ind));
-    ASSERT_EQ(reluLayer.getNegativeSlope(), layerFromNet.getNegativeSlope());
-}
-
-TEST_F(ReLULayerBuilderTest, cannotCreateLayerWithWrongNegativeSlope) {
-    Builder::Network net("network");
-    Builder::ReLULayer reluLayer("ReLU_layer");
-    reluLayer.setNegativeSlope(-10);
-    ASSERT_NO_THROW(net.addLayer(reluLayer));
-}
-
-TEST_F(ReLULayerBuilderTest, cannotCreateLayerWithWrongShapes) {
-    Builder::Network net("network");
-    Builder::Layer::Ptr fakeReLULayerPtr = std::make_shared<Builder::Layer>("ReLU", "ReLU layer");
-    fakeReLULayerPtr->getInputPorts().push_back(Port({1, 1, 1, 1}));
-    fakeReLULayerPtr->getOutputPorts().push_back(Port({1, 1, 1, 2}));
-    Builder::ReLULayer reluLayer(fakeReLULayerPtr);
-    reluLayer.setNegativeSlope(100);
-    ASSERT_THROW(net.addLayer(reluLayer), InferenceEngine::details::InferenceEngineException);
-}
diff --git a/inference-engine/tests_deprecated/unit/builders/resample_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/resample_layer_test.cpp
deleted file mode 100644 (file)
index c4c1abf..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class ResampleLayerBuilderTest : public BuilderTestCommon {};
-
-TEST_F(ResampleLayerBuilderTest, checkTypeParameter) {
-    InferenceEngine::Builder::Layer ieLayer("Resample", "upsample");
-    ieLayer.getParameters()["type"] = std::string("caffe.ResampleParameter.NEAREST");
-    ieLayer.getParameters()["antialias"] = false;
-    ieLayer.getParameters()["factor"] = 2.0f;
-    ieLayer.getParameters()["width"] = 10;
-    ieLayer.getParameters()["height"] = 10;
-
-    ASSERT_EQ("Resample", ieLayer.getType());
-    ASSERT_EQ("caffe.ResampleParameter.NEAREST", ieLayer.getParameters()["type"].as<std::string>());
-
-    InferenceEngine::Builder::ResampleLayer resampleLayer("upsample");
-    resampleLayer.setResampleType("caffe.ResampleParameter.NEAREST");
-    resampleLayer.setAntialias(false);
-    resampleLayer.setFactor(2);
-    resampleLayer.setWidth(10);
-    resampleLayer.setHeight(10);
-    ASSERT_EQ("Resample", resampleLayer.getType());
-    ASSERT_EQ("caffe.ResampleParameter.NEAREST", resampleLayer.getResampleType());
-}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/unit/builders/split_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/split_layer_test.cpp
deleted file mode 100644 (file)
index 09e613a..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class SplitLayerBuilderTest : public BuilderTestCommon {};
-
-TEST_F(SplitLayerBuilderTest, CreateIdentitySplitLayer) {
-    Builder::Network builder("network");
-    SizeVector shape = {1, 4, 3, 4};
-    idx_t layerId = builder.addLayer(Builder::InputLayer("input").setPort(Port(shape, Precision::FP16)));
-    layerId = builder.addLayer({layerId}, Builder::SplitLayer("identity").setOutputPorts({Port()}));
-    builder.addLayer({layerId}, Builder::OutputLayer("output"));
-
-    const auto network = builder.build();
-    ASSERT_EQ(shape, network->getLayer(layerId)->getOutputPorts()[0].shape());
-}
-
-TEST_F(SplitLayerBuilderTest, CreateSplitLayerWithTwoOutputs) {
-    Builder::Network builder("network");
-    SizeVector shape = {1, 4, 3, 4};
-    SizeVector outShape = {1, 2, 3, 4};
-    idx_t layerId = builder.addLayer(Builder::InputLayer("input").setPort(Port(shape, Precision::FP16)));
-    layerId = builder.addLayer({layerId}, Builder::SplitLayer("split").setOutputPorts({Port(), Port()}));
-    builder.addLayer({{layerId}}, Builder::OutputLayer("output1"));
-    builder.addLayer({{layerId, 1}}, Builder::OutputLayer("output2"));
-
-    const auto network = builder.build();
-    ASSERT_EQ(outShape, network->getLayer(layerId)->getOutputPorts()[0].shape());
-    ASSERT_EQ(outShape, network->getLayer(layerId)->getOutputPorts()[1].shape());
-}
-
-TEST_F(SplitLayerBuilderTest, CreateSplitLayerWithTwoOutputsAndOneInitialized) {
-    Builder::Network builder("network");
-    SizeVector shape = {1, 4, 3, 4};
-    SizeVector outShape1 = {1, 3, 3, 4};
-    SizeVector outShape2 = {1, 1, 3, 4};
-    idx_t layerId = builder.addLayer(Builder::InputLayer("input").setPort(Port(shape, Precision::FP16)));
-    layerId = builder.addLayer({layerId}, Builder::SplitLayer("split").setOutputPorts({Port(outShape1), Port()}));
-    builder.addLayer({{layerId}}, Builder::OutputLayer("output1"));
-    builder.addLayer({{layerId, 1}}, Builder::OutputLayer("output2"));
-
-    const auto network = builder.build();
-    ASSERT_EQ(outShape1, network->getLayer(layerId)->getOutputPorts()[0].shape());
-    ASSERT_EQ(outShape2, network->getLayer(layerId)->getOutputPorts()[1].shape());
-}
-
-TEST_F(SplitLayerBuilderTest, CreateSplitLayerWithTwoOutputsAxis3) {
-    Builder::Network builder("network");
-    SizeVector shape = {1, 4, 3, 4};
-    SizeVector outShape = {1, 4, 3, 2};
-    idx_t layerId = builder.addLayer(Builder::InputLayer("input").setPort(Port(shape, Precision::FP16)));
-    layerId = builder.addLayer({layerId}, Builder::SplitLayer("split").setAxis(3).setOutputPorts({Port(), Port()}));
-    builder.addLayer({{layerId}}, Builder::OutputLayer("output1"));
-    builder.addLayer({{layerId, 1}}, Builder::OutputLayer("output2"));
-
-    const auto network = builder.build();
-    ASSERT_EQ(outShape, network->getLayer(layerId)->getOutputPorts()[0].shape());
-    ASSERT_EQ(outShape, network->getLayer(layerId)->getOutputPorts()[1].shape());
-}
-
-TEST_F(SplitLayerBuilderTest, CreateSplitLayerWithTwoOutputsAxis3AndOneInitialized) {
-    Builder::Network builder("network");
-    SizeVector shape = {1, 4, 3, 4};
-    SizeVector outShape1 = {1, 4, 3, 1};
-    SizeVector outShape2 = {1, 4, 3, 3};
-    idx_t layerId = builder.addLayer(Builder::InputLayer("input").setPort(Port(shape, Precision::FP16)));
-    layerId = builder.addLayer({layerId}, Builder::SplitLayer("split").setAxis(3).setOutputPorts({Port(outShape1), Port()}));
-    builder.addLayer({{layerId}}, Builder::OutputLayer("output1"));
-    builder.addLayer({{layerId, 1}}, Builder::OutputLayer("output2"));
-
-    const auto network = builder.build();
-    ASSERT_EQ(outShape1, network->getLayer(layerId)->getOutputPorts()[0].shape());
-    ASSERT_EQ(outShape2, network->getLayer(layerId)->getOutputPorts()[1].shape());
-}
\ No newline at end of file
diff --git a/inference-engine/tests_deprecated/unit/builders/tanh_layer_test.cpp b/inference-engine/tests_deprecated/unit/builders/tanh_layer_test.cpp
deleted file mode 100644 (file)
index fc6d310..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <string.h>
-#include <ie_builders.hpp>
-#include <builders/ie_tanh_layer.hpp>
-
-#include "builder_test.hpp"
-
-using namespace testing;
-using namespace InferenceEngine;
-
-class TanHLayerBuilderTest : public BuilderTestCommon {};
-
-TEST_F(TanHLayerBuilderTest, getExistsLayerFromNetworkBuilder) {
-    Builder::Network net("network");
-    Builder::TanHLayer tanhLayer("TanH_layer");
-    size_t ind = net.addLayer(tanhLayer);
-    Builder::TanHLayer layerFromNet(net.getLayer(ind));
-}
-
-TEST_F(TanHLayerBuilderTest, cannotCreateLayerWithWrongShapes) {
-    Builder::Network net("network");
-    Builder::Layer::Ptr fakeTanHLayerPtr = std::make_shared<Builder::Layer>("TanH", "TanH layer");
-    fakeTanHLayerPtr->getInputPorts().push_back(Port({1, 1, 1, 1}));
-    fakeTanHLayerPtr->getOutputPorts().push_back(Port({1, 1, 1, 2}));
-    Builder::TanHLayer tanhLayer(fakeTanHLayerPtr);
-    ASSERT_THROW(net.addLayer(tanhLayer), InferenceEngine::details::InferenceEngineException);
-}
\ No newline at end of file
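
The five deleted *_layer_test.cpp files above share one validation pattern: Network::addLayer must throw when a layer's input and output port shapes disagree. A condensed sketch of that pattern, assuming the removed builder headers (expectShapeMismatchThrow is an illustrative name):

    #include <ie_builders.hpp>
    #include <builders/ie_relu_layer.hpp>
    #include <stdexcept>

    void expectShapeMismatchThrow() {
        using namespace InferenceEngine;
        Builder::Network net("network");
        auto fake = std::make_shared<Builder::Layer>("ReLU", "ReLU layer");
        fake->getInputPorts().push_back(Port({1, 1, 1, 1}));
        fake->getOutputPorts().push_back(Port({1, 1, 1, 2}));  // deliberately inconsistent
        try {
            net.addLayer(Builder::ReLULayer(fake));            // must reject the layer
        } catch (const details::InferenceEngineException&) {
            return;                                            // expected path
        }
        throw std::logic_error("addLayer accepted mismatched port shapes");
    }
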
index 47e8baa..df6d134 100644 (file)
@@ -4,7 +4,6 @@
 
 #include <gtest/gtest.h>
 #include <ie_layer_validators.hpp>
-#include <shape_infer/built_in_shape_infer_general_test.hpp>
 #include <memory>
 #include <ie_data.h>
 
index 6b3b351..1ffba47 100644 (file)
@@ -4,10 +4,10 @@
 
 #include <gtest/gtest.h>
 #include <parsers.h>
-#include <cpp/ie_cnn_net_reader.h>
 #include <ie_cnn_net_reader_impl.h>
 #include <gmock/gmock-more-actions.h>
 #include "cnn_network_impl.hpp"
+#include <ie_core.hpp>
 #include <thread>
 
 #include "unit_test_utils/mocks/mock_icnn_network.hpp"
@@ -2290,10 +2290,9 @@ TEST_F(CNNNetReaderImplTest, ReadInThreads) {
     std::vector<std::thread> threads;
     for (int i = 0; i < 20; i++) {
         threads.push_back(std::thread([i, model]{
-                    CNNNetReader networkReader;
+                    InferenceEngine::Core core;
                     /** Read network model **/
-                    networkReader.ReadNetwork(model.data(), model.length());
-                    CNNNetwork network = networkReader.getNetwork();
+                    CNNNetwork network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr());
                     // -----------------------------------------------------------------------------------------------------
 
                     // --------------------------- 3. Configure input & output ---------------------------------------------
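
The hunk above is the template for the remaining hunks in this diff: each CNNNetReader call site collapses into a single Core::ReadNetwork call. A before/after sketch, assuming the model text and weights blob are already in memory (readFromMemory is an illustrative name):

    #include <ie_core.hpp>
    #include <string>

    InferenceEngine::CNNNetwork readFromMemory(const std::string& model,
                                               const InferenceEngine::Blob::CPtr& weights) {
        // Old (removed): CNNNetReader reader; reader.ReadNetwork(model.data(), model.length());
        //                reader.SetWeights(weights); CNNNetwork network = reader.getNetwork();
        // New: one call takes both the model text and the weights blob; pass an empty
        // Blob::CPtr() when the IR carries no weights.
        InferenceEngine::Core core;
        return core.ReadNetwork(model, weights);
    }
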
index 5aa776b..7c42c3d 100644 (file)
@@ -6,7 +6,6 @@
 #include <gtest/gtest.h>
 #include <layer_transform.hpp>
 #include <frontend/model_quantizer.hpp>
-#include <cpp/ie_cnn_net_reader.h>
 #include "frontend/layer_quantizer.hpp"
 #include "gna_matcher.hpp"
 
@@ -63,17 +62,16 @@ TEST_F(I8QuantisationTest, canQuantizeActivation){
 }
 
 TEST_F(I8QuantisationTest, inputPrecisionIs16Bits){
-
     ModelQuantizer<QuantI8> q;
 
-    CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(Fc2DOutputModel().data(), Fc2DOutputModel().length()));
-
     auto weights = make_shared_blob<uint8_t >({ Precision::U8, {440}, C });
     weights->allocate();
     fillWeights(weights);
-    net_reader.SetWeights(weights);
-    auto newNet = q.quantize(net_reader.getNetwork(), 1000);
+
+    Core ie;
+    auto network = ie.ReadNetwork(Fc2DOutputModel(), weights);
+
+    auto newNet = q.quantize(network, 1000);
     InputsDataMap inputs;
     newNet->getInputsInfo(inputs);
     auto inputLayer = inputs.begin()->second->getInputData()->getInputTo().begin()->second->insData.front().lock()->getCreatorLayer().lock();
@@ -82,33 +80,29 @@ TEST_F(I8QuantisationTest, inputPrecisionIs16Bits){
 }
 
 TEST_F(I8QuantisationTest, failIfFCDimensionIs1){
-
     ModelQuantizer<QuantI8> q;
 
-    CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(FCOnlyModel().data(), FCOnlyModel().length()));
-
     auto weights = make_shared_blob<uint8_t >({ Precision::U8, {440}, C });
     weights->allocate();
     fillWeights(weights);
-    net_reader.SetWeights(weights);
+    
+    Core ie;
+    auto network = ie.ReadNetwork(FCOnlyModel(), weights);
 
-    ASSERT_ANY_THROW(q.quantize(net_reader.getNetwork(), 1000));
+    ASSERT_ANY_THROW(q.quantize(network, 1000));
 }
 
 TEST_F(I8QuantisationTest, outputAffinePrecisionIs32Bits){
-
     ModelQuantizer<QuantI8> q;
 
-    CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(Fc2DOutputModel().data(), Fc2DOutputModel().length()));
-
     auto weights = make_shared_blob<uint8_t >({ Precision::U8, {440}, C });
     weights->allocate();
     fillWeights(weights);
-    net_reader.SetWeights(weights);
+    
+    Core ie;
+    auto network = ie.ReadNetwork(Fc2DOutputModel(), weights);
 
-    auto newNet = q.quantize(net_reader.getNetwork(), 1000);
+    auto newNet = q.quantize(network, 1000);
     InputsDataMap inputs;
     newNet->getInputsInfo(inputs);
     auto affineDataPtr = inputs.begin()->second->getInputData()->getInputTo().begin()->second->outData.front();
@@ -119,55 +113,51 @@ TEST_F(I8QuantisationTest, outputAffinePrecisionIs32Bits){
 TEST_F(I8QuantisationTest, fp16tofp32_on_fullyConnected_model) {
     ModelQuantizer<QuantI8> q;
 
-    CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(FCOnlyModelFP16().data(), FCOnlyModelFP16().length()));
-
     auto weights = make_shared_blob<uint8_t>({ Precision::U8, {220}, Layout::C });
     weights->allocate();
     fillWeights(weights);
-    net_reader.SetWeights(weights);
+    
+    Core ie;
+    auto network = ie.ReadNetwork(FCOnlyModelFP16(), weights);
 
-    q.quantize(net_reader.getNetwork(), 1000);
+    q.quantize(network, 1000);
 }
 
 TEST_F(I8QuantisationTest, LSTMCell_quantize) {
     ModelQuantizer<QuantI8> q;
 
-    CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(LSTMCellOnlyModel().data(), LSTMCellOnlyModel().length()));
-
     auto weights = make_shared_blob<uint8_t>({ Precision::U8, {33664}, C });
     weights->allocate();
     fillWeights(weights);
-    net_reader.SetWeights(weights);
+    
+    Core ie;
+    auto network = ie.ReadNetwork(LSTMCellOnlyModel(), weights);
 
-    ASSERT_NO_THROW(q.quantize(net_reader.getNetwork(), 1000));
+    ASSERT_NO_THROW(q.quantize(network, 1000));
 }
 
 TEST_F(I8QuantisationTest, LSTMCell_unaligned_quantize) {
     ModelQuantizer<QuantI8> q;
 
-    CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(LSTMCellOnlyModelUnaligned().data(), LSTMCellOnlyModelUnaligned().length()));
-
     auto weights = make_shared_blob<uint8_t>({ Precision::U8, {3480}, C });
     weights->allocate();
     fillWeights(weights);
-    net_reader.SetWeights(weights);
+    
+    Core ie;
+    auto network = ie.ReadNetwork(LSTMCellOnlyModelUnaligned(), weights);
 
-    ASSERT_NO_THROW(q.quantize(net_reader.getNetwork(), 1000));
+    ASSERT_NO_THROW(q.quantize(network, 1000));
 }
 
 TEST_F(I8QuantisationTest, TI_quantize) {
     ModelQuantizer<QuantI8> q;
 
-    CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(TIModelWithLSTMCell2().data(), TIModelWithLSTMCell2().length()));
-
     auto weights = make_shared_blob<uint8_t>({ Precision::U8, {249748}, C });
     weights->allocate();
     fillWeights(weights);
-    net_reader.SetWeights(weights);
+        
+    Core ie;
+    auto network = ie.ReadNetwork(TIModelWithLSTMCell2(), weights);
 
-    ASSERT_NO_THROW(q.quantize(net_reader.getNetwork(), 1000));
+    ASSERT_NO_THROW(q.quantize(network, 1000));
 }
index a4e2f86..758df6d 100644 (file)
@@ -77,14 +77,12 @@ void GNAPropagateMatcher :: match() {
         OutputsDataMap  outputsInfo;
 
         auto loadNetworkFromIR = [&] () -> InferenceEngine::CNNNetwork {
-            CNNNetReader net_reader;
-
-            net_reader.ReadNetwork(_env.model.data(), _env.model.length());
+            Core net_reader;
             auto weights_fake = make_shared_blob<uint8_t>(TensorDesc(Precision::U8,
-                    SizeVector({std::numeric_limits<uint32_t>::max()}), Layout::C), make_shared<NullAllocator>());
-            net_reader.SetWeights(weights_fake);
+                    SizeVector({std::numeric_limits<uint32_t>::max()/2}), Layout::C));
+            weights_fake->allocate();
 
-            auto net_original = net_reader.getNetwork();
+            auto net_original = net_reader.ReadNetwork(_env.model, weights_fake);
             size_t weightsSize = 0;
             std::vector<std::string> dataBlobs = {
                     "weights",
@@ -120,7 +118,10 @@ void GNAPropagateMatcher :: match() {
             } else {
                 fillWeights(weights);
             }
-            net_reader.SetWeights(weights);
+
+            auto net = net_reader.ReadNetwork(_env.model, weights);
+            sortedLayers = details::CNNNetSortTopologically(net);
+            sortedLayers.insert(sortedLayers.end(), tiBodies.begin(), tiBodies.end());
 
             for (auto &pattern : _env.weightsByLayerFillPattern) {
                 for (auto &layer : sortedLayers) {
@@ -134,7 +135,8 @@ void GNAPropagateMatcher :: match() {
                     }
                 }
             }
-            return net_reader.getNetwork();
+
+            return net;
         };
 
         auto loadCNNNetwork = [&] (CNNNetwork net_original) {
@@ -429,15 +431,15 @@ void GNAPluginCreationMatcher :: match() {
 void GNAPluginAOTMatcher :: match() {
     // matching gna_propagate forward call.
     MockICNNNetwork net;
-    CNNNetReader net_reader;
-    ASSERT_NO_THROW_IE_EXCEPTION(net_reader.ReadNetwork(_env.model.data(), _env.model.length()));
-
+    
     size_t weightsSize = 440*3;
-
     auto weights = make_shared_blob<uint8_t >({ Precision::U8, {weightsSize}, Layout::C });
     weights->allocate();
     fillWeights(weights);
-    net_reader.SetWeights(weights);
+
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW_IE_EXCEPTION(network = core.ReadNetwork(_env.model, weights));
 
     GNAPlugin plugin(_env.config);
 
@@ -449,7 +451,6 @@ void GNAPluginAOTMatcher :: match() {
     output.allocate();
 
     if (_env.cb) {
-        auto network = net_reader.getNetwork();
         _env.cb(network);
     }
 
@@ -459,7 +460,7 @@ void GNAPluginAOTMatcher :: match() {
     EXPECT_CALL(mockApi, GNAAlloc(_,_,_)).WillOnce(DoAll(SetArgPointee<2>(10000), Return(&data.front())));
     EXPECT_CALL(mockApi, GNADeviceOpenSetThreads(_, _)).WillOnce(Return(1));
 #endif
-    plugin.LoadNetwork(net_reader.getNetwork());
+    plugin.LoadNetwork(network);
     plugin.Export(_env.exportedModelFileName);
 }
 
@@ -471,22 +472,21 @@ void GNADumpXNNMatcher::load(GNAPlugin & plugin) {
 
     auto loadNetworkFromIR = [&]() {
         MockICNNNetwork net;
-        CNNNetReader net_reader;
-        ASSERT_NO_THROW_IE_EXCEPTION(net_reader.ReadNetwork(_env.model.data(), _env.model.length()));
 
         size_t weightsSize = 440 * 3;
-
         auto weights = make_shared_blob<uint8_t>({ Precision::U8, {weightsSize}, Layout::C });
         weights->allocate();
         fillWeights(weights);
-        net_reader.SetWeights(weights);
+
+        InferenceEngine::Core core;
+        InferenceEngine::CNNNetwork network;
+        ASSERT_NO_THROW_IE_EXCEPTION(network = core.ReadNetwork(_env.model, weights));
 
         if (_env.cb) {
-            auto network = net_reader.getNetwork();
             _env.cb(network);
         }
 
-        plugin.LoadNetwork(net_reader.getNetwork());
+        plugin.LoadNetwork(network);
     };
 
     auto loadNetworkFromAOT = [&]() {
@@ -539,7 +539,6 @@ void GNADumpXNNMatcher::match() {
 }
 
 void GNAQueryStateMatcher :: match() {
-
    //  TODO : avoid copy pastes
     GNACppApi mockApi;
     std::vector<uint8_t> data(10000);
@@ -547,22 +546,21 @@ void GNAQueryStateMatcher :: match() {
     std::shared_ptr<IExecutableNetworkInternal> executer;
     auto loadNetworkFromIR = [&]() {
         MockICNNNetwork net;
-        CNNNetReader net_reader;
-        ASSERT_NO_THROW_IE_EXCEPTION(net_reader.ReadNetwork(_env.model.data(), _env.model.length()));
 
         size_t weightsSize = 440 * 3;
-
         auto weights = make_shared_blob<uint8_t>({ Precision::U8, {weightsSize}, Layout::C });
         weights->allocate();
         fillWeights(weights);
-        net_reader.SetWeights(weights);
+        
+        InferenceEngine::Core core;
+        InferenceEngine::CNNNetwork network;
+        ASSERT_NO_THROW_IE_EXCEPTION(network = core.ReadNetwork(_env.model, weights));
 
         if (_env.cb) {
-            auto network = net_reader.getNetwork();
             _env.cb(network);
         }
 
-        executer.reset(new GNAExecutableNetwork(net_reader.getNetwork(), _env.config));
+        executer.reset(new GNAExecutableNetwork(network, _env.config));
     };
 
     auto loadNetworkFromAOT = [&]() {
index c2441aa..a2489c1 100644 (file)
@@ -9,6 +9,7 @@
 #include "frontend/model_quantizer.hpp"
 #include "frontend/layer_quantizer.hpp"
 #include "gna_matcher.hpp"
+#include <ie_core.hpp>
 
 using namespace InferenceEngine;
 using namespace GNAPluginNS;
@@ -82,18 +83,16 @@ TEST_F(I16QuantisationTest, canQuantizeActivation){
 }
 
 TEST_F(I16QuantisationTest, outputAffinePrecisionIs32Bits){
-
     ModelQuantizer<QuantI16> q;
 
-    CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(Fc2DOutputModel().data(), Fc2DOutputModel().length()));
-
     auto weights = make_shared_blob<uint8_t>({ Precision::U8, {440}, C });
     weights->allocate();
     fillWeights(weights);
-    net_reader.SetWeights(weights);
 
-    auto newNet = q.quantize(net_reader.getNetwork(), 1000);
+    Core ie;
+    auto network = ie.ReadNetwork(Fc2DOutputModel(), weights);
+
+    auto newNet = q.quantize(network, 1000);
     InputsDataMap inputs;
     newNet->getInputsInfo(inputs);
     auto affineDataPtr = inputs.begin()->second->getInputData()->getInputTo().begin()->second->outData.front();
@@ -105,29 +104,26 @@ TEST_F(I16QuantisationTest, outputAffinePrecisionIs32Bits){
 TEST_F(I16QuantisationTest, canQuantizeLstmLikeTopology) {
     ModelQuantizer<QuantI16> q;
 
-    CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(affineToMemoryModel().data(), affineToMemoryModel().length()));
-
     auto weights = setWeights(make_shared_blob<uint8_t >({ Precision::U8, {440}, C }));
     //std::fill_n(weights->buffer().as<float*>(), weights->byteSize()/sizeof(float), 0);
-    net_reader.SetWeights(weights);
 
-    ASSERT_NO_THROW(q.quantize(net_reader.getNetwork(), 1000));
+    Core ie;
+    auto network = ie.ReadNetwork(affineToMemoryModel(), weights);
+
+    ASSERT_NO_THROW(q.quantize(network, 1000));
 }
 
 TEST_F(I16QuantisationTest, DISABLED_outputScaleFactorForAffineIsCorrect){
-
     ModelQuantizer<QuantI16> q;
 
-    CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(Fc2DOutputModel().data(), Fc2DOutputModel().length()));
-
     auto weights = make_shared_blob<uint8_t >({ Precision::U8, {440}, C });
     weights->allocate();
     fillWeights(weights, {100});
-    net_reader.SetWeights(weights);
 
-    auto newNet = q.quantize(net_reader.getNetwork(), 1000);
+    Core ie;
+    auto network = ie.ReadNetwork(Fc2DOutputModel(), weights);
+
+    auto newNet = q.quantize(network, 1000);
     InputsDataMap inputs;
     newNet->getInputsInfo(inputs);
     auto affineLayerPtr = inputs.begin()->second->getInputData()->getInputTo().begin()->second;
@@ -350,15 +346,14 @@ TEST_F(I16QuantisationTest, DISABLED_noPermutationOfWeightsBetweenConvAndAffineI
 TEST_F(I16QuantisationTest, fp16tofp32_on_fullyConnected_model) {
     ModelQuantizer<QuantI16> q;
 
-    CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(FCOnlyModelFP16().data(), FCOnlyModelFP16().length()));
-
     auto weights = make_shared_blob<uint8_t>({ Precision::U8, {220}, Layout::C });
     weights->allocate();
     fillWeights(weights);
-    net_reader.SetWeights(weights);
+    
+    Core ie;
+    auto network = ie.ReadNetwork(FCOnlyModelFP16(), weights);
 
-    q.quantize(net_reader.getNetwork(), 1000);
+    q.quantize(network, 1000);
 }
 
 
@@ -388,29 +383,27 @@ TEST_F(I16QuantisationTest, ConcatWithConstInputPropagatedForward) {
 TEST_F(I16QuantisationTest, LSTMCell_quantize) {
     ModelQuantizer<QuantI16> q;
 
-    CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(LSTMCellOnlyModel().data(), LSTMCellOnlyModel().length()));
-
     auto weights = make_shared_blob<uint8_t>({ Precision::U8, {33664}, C });
     weights->allocate();
     fillWeights(weights);
-    net_reader.SetWeights(weights);
 
-    ASSERT_NO_THROW(q.quantize(net_reader.getNetwork(), 1000));
+    Core ie;
+    auto network = ie.ReadNetwork(LSTMCellOnlyModel(), weights);
+
+    ASSERT_NO_THROW(q.quantize(network, 1000));
 }
 
 TEST_F(I16QuantisationTest, LSTMCell_unaligned_quantize) {
     ModelQuantizer<QuantI16> q;
 
-    CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(LSTMCellOnlyModelUnaligned().data(), LSTMCellOnlyModelUnaligned().length()));
-
     auto weights = make_shared_blob<uint8_t>({ Precision::U8, {3480}, C });
     weights->allocate();
     fillWeights(weights);
-    net_reader.SetWeights(weights);
+    
+    Core ie;
+    auto network = ie.ReadNetwork(LSTMCellOnlyModelUnaligned(), weights);
 
-    ASSERT_NO_THROW(q.quantize(net_reader.getNetwork(), 1000));
+    ASSERT_NO_THROW(q.quantize(network, 1000));
 }
 
 TEST_F(I16QuantisationTest, EltwisetWithConstInputPropagatedForward) {
@@ -434,15 +427,14 @@ TEST_F(I16QuantisationTest, ConcatWithDifferentInputScaleFactorsPropagateForward
 TEST_F(I16QuantisationTest, TI_quantize) {
     ModelQuantizer<QuantI16> q;
 
-    CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(TIModelWithLSTMCell2().data(), TIModelWithLSTMCell2().length()));
-
     auto weights = make_shared_blob<uint8_t>({ Precision::U8, {249748}, C });
     weights->allocate();
     fillWeights(weights);
-    net_reader.SetWeights(weights);
+    
+    Core ie;
+    auto network = ie.ReadNetwork(TIModelWithLSTMCell2(), weights);
 
-    ASSERT_NO_THROW(q.quantize(net_reader.getNetwork(), 1000));
+    ASSERT_NO_THROW(q.quantize(network, 1000));
 }
 
 TEST_F(I16QuantisationTest, TI_PropagateForward) {
index cda569f..99d1212 100644 (file)
@@ -4,7 +4,7 @@
 
 #include <gtest/gtest.h>
 #include <ie_iextension.h>
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 #include <ie_common.h>
 #include <ie_layers.h>
 #include <tests_common.hpp>
@@ -236,11 +236,12 @@ TEST_F(MKLDNNConstantPropagationTests, ConcatAfterConstLayers) {
         </Net>
         )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork(), extMgr);
+    graph.CreateGraph(network, extMgr);
 
     InferenceEngine::SizeVector dims_src1 = {1, 2, 10, 5};
 
@@ -259,7 +260,7 @@ TEST_F(MKLDNNConstantPropagationTests, ConcatAfterConstLayers) {
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in2", src2));
 
     InferenceEngine::OutputsDataMap out;
-    out = net_reader.getNetwork().getOutputsInfo();
+    out = network.getOutputsInfo();
     InferenceEngine::BlobMap outputBlobs;
 
     auto it = out.begin();
index 69b4c3b..b2d3064 100644 (file)
@@ -7,7 +7,6 @@
 #include "ie_blob.h"
 #include "blob_factory.hpp"
 #include "utils/blob_dump.h"
-#include <cpp/ie_cnn_net_reader.h>
 
 using namespace InferenceEngine;
 using namespace MKLDNNPlugin;
index 20fd255..627b13f 100644 (file)
@@ -11,7 +11,7 @@
 #include "details/ie_cnn_network_tools.h"
 #include "common_test_utils/xml_net_builder/xml_net_builder.hpp"
 #include "graph_tools.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 #include <string>
 #include <map>
@@ -57,11 +57,8 @@ public:
     }
 
     CNNNetwork net() {
-        CNNNetReader net_reader;
-        net_reader.ReadNetwork(model.data(), model.length());
-        net_reader.SetWeights(weights);
-
-        return net_reader.getNetwork();
+        InferenceEngine::Core core;
+        return core.ReadNetwork(model, weights);
     }
 };
 
index abb23c1..7e986ab 100644 (file)
@@ -11,7 +11,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 
 using namespace ::testing;
@@ -155,15 +155,16 @@ protected:
             broadcast_test_params p = ::testing::WithParamInterface<broadcast_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             // Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             // Input Data
index 731b59a..b12376a 100644 (file)
@@ -7,7 +7,7 @@
 #include "mkldnn_graph.h"
 
 #include "test_graph.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
@@ -102,11 +102,12 @@ protected:
             bucketize_test_params p = ::testing::WithParamInterface<bucketize_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             auto& nodes = graph.getNodes();
             nodes = graph.getNodes();
@@ -140,7 +141,7 @@ protected:
             input_blob_map["BoundariesValues"] = boundaries;
 
             // prepare output blob map
-            InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+            InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
             InferenceEngine::BlobMap output_blob_map;
             for (auto iter = out.begin(); iter != out.end(); iter++) {
                 std::pair<std::string, InferenceEngine::DataPtr> item = *iter;
index 2659869..18703e8 100644 (file)
@@ -11,7 +11,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 
 using namespace ::testing;
@@ -199,15 +199,16 @@ protected:
             depth_to_space_test_params p = ::testing::WithParamInterface<depth_to_space_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             // Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -311,15 +312,16 @@ protected:
             depth_to_space_test_params p = ::testing::WithParamInterface<depth_to_space_test_params>::GetParam();
             std::string model = getModel(p);
             //std::cout << model;
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             // Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -443,15 +445,16 @@ protected:
             depth_to_space_test_params p = ::testing::WithParamInterface<depth_to_space_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             // Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index 1fa8a38..d2d5bc9 100644 (file)
@@ -11,7 +11,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 
 using namespace ::testing;
@@ -93,15 +93,16 @@ protected:
             fill_test_params p = ::testing::WithParamInterface<fill_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             // Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             // Input Data
index b47beb3..bb5e607 100644 (file)
@@ -11,7 +11,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 
 using namespace ::testing;
@@ -159,11 +159,12 @@ protected:
             gather_test_params p = ::testing::WithParamInterface<gather_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             auto& nodes = graph.getNodes();
             nodes = graph.getNodes();
@@ -190,7 +191,7 @@ protected:
 
             // Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -409,11 +410,12 @@ protected:
             gatherTF_test_params p = ::testing::WithParamInterface<gatherTF_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             // Input Indexes
             InferenceEngine::Blob::Ptr srcIdx;
@@ -434,7 +436,7 @@ protected:
 
             //  Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
             InferenceEngine::TBlob<float>::Ptr output;
@@ -612,11 +614,12 @@ protected:
             gatherTF_test_params p = ::testing::WithParamInterface<gatherTF_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             // Input Indexes
             InferenceEngine::Blob::Ptr srcIdx;
@@ -648,7 +651,7 @@ protected:
 
             //  Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
             InferenceEngine::TBlob<float>::Ptr output;
index f06dc67..48d3186 100644 (file)
@@ -8,7 +8,7 @@
 #include "test_graph.hpp"
 
 #include <ie_iextension.h>
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 #include <ie_plugin_config.hpp>
 #include <mkldnn_extension_mngr.h>
 #include "tests_common.hpp"
@@ -573,11 +573,12 @@ TEST_F(MKLDNNGraphGenericTests, DontCreateGPUGenericPrimitive) {
     MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
     extMgr->AddExtension(extension);
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
     MKLDNNGraphTestClass graph;
-    ASSERT_THROW(graph.CreateGraph(net_reader.getNetwork(), extMgr), InferenceEngine::details::InferenceEngineException);
+    ASSERT_THROW(graph.CreateGraph(network, extMgr), InferenceEngine::details::InferenceEngineException);
 }
 
 TEST_F(MKLDNNGraphGenericTests, ExecuteConstGenericPrimitive) {
@@ -621,11 +622,12 @@ TEST_F(MKLDNNGraphGenericTests, ExecuteConstGenericPrimitive) {
     MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
     extMgr->AddExtension(extension);
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork(), extMgr);
+    graph.CreateGraph(network, extMgr);
 
     InferenceEngine::SizeVector dims_src = {1, 3, 5, 5};
 
@@ -643,7 +645,7 @@ TEST_F(MKLDNNGraphGenericTests, ExecuteConstGenericPrimitive) {
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
     InferenceEngine::OutputsDataMap out;
-    out = net_reader.getNetwork().getOutputsInfo();
+    out = network.getOutputsInfo();
     InferenceEngine::BlobMap outputBlobs;
 
     std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -708,11 +710,12 @@ TEST_F(MKLDNNGraphGenericTests, ExecuteGenericPrimitive) {
     MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
     extMgr->AddExtension(extension);
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork(), extMgr);
+    graph.CreateGraph(network, extMgr);
 
     InferenceEngine::SizeVector dims_src = {1, 3, 5, 5};
 
@@ -730,7 +733,7 @@ TEST_F(MKLDNNGraphGenericTests, ExecuteGenericPrimitive) {
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
     InferenceEngine::OutputsDataMap out;
-    out = net_reader.getNetwork().getOutputsInfo();
+    out = network.getOutputsInfo();
     InferenceEngine::BlobMap outputBlobs;
 
     std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -844,11 +847,12 @@ TEST_F(MKLDNNGraphGenericTests, ExecuteGenericPrimitiveWithTwoOutputs) {
     MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
     extMgr->AddExtension(extension);
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork(), extMgr);
+    graph.CreateGraph(network, extMgr);
 
     InferenceEngine::SizeVector dims_src = {1, 3, 5, 5};
 
@@ -868,7 +872,7 @@ TEST_F(MKLDNNGraphGenericTests, ExecuteGenericPrimitiveWithTwoOutputs) {
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
     InferenceEngine::OutputsDataMap out;
-    out = net_reader.getNetwork().getOutputsInfo();
+    out = network.getOutputsInfo();
     InferenceEngine::BlobMap outputBlobs;
 
     auto it = out.begin();
@@ -953,11 +957,12 @@ TEST_F(MKLDNNGraphGenericTests, ExecuteGenericInPlaceConcat) {
     MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
     extMgr->AddExtension(extension);
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork(), extMgr);
+    graph.CreateGraph(network, extMgr);
 
     InferenceEngine::SizeVector dims_src1 = {1, 3, 5, 5};
 
@@ -984,7 +989,7 @@ TEST_F(MKLDNNGraphGenericTests, ExecuteGenericInPlaceConcat) {
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in2", src2));
 
     InferenceEngine::OutputsDataMap out;
-    out = net_reader.getNetwork().getOutputsInfo();
+    out = network.getOutputsInfo();
     InferenceEngine::BlobMap outputBlobs;
 
     auto it = out.begin();
@@ -1097,11 +1102,12 @@ TEST_F(MKLDNNGraphGenericTests, ExecuteGenericInPlaceSplit) {
     MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
     extMgr->AddExtension(extension);
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork(), extMgr);
+    graph.CreateGraph(network, extMgr);
 
     InferenceEngine::SizeVector dims_src = {1, 4, 4, 4};
 
@@ -1121,7 +1127,7 @@ TEST_F(MKLDNNGraphGenericTests, ExecuteGenericInPlaceSplit) {
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
     InferenceEngine::OutputsDataMap out;
-    out = net_reader.getNetwork().getOutputsInfo();
+    out = network.getOutputsInfo();
     InferenceEngine::BlobMap outputBlobs;
     auto it = out.begin();
 
@@ -1193,11 +1199,12 @@ TEST_F(MKLDNNGraphGenericTests, ExecuteGenericPrimitiveWithDynamicBatch) {
     MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
     extMgr->AddExtension(extension);
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork(), extMgr);
+    graph.CreateGraph(network, extMgr);
 
     InferenceEngine::SizeVector dims_src = {2, 3, 5, 5};
 
@@ -1215,7 +1222,7 @@ TEST_F(MKLDNNGraphGenericTests, ExecuteGenericPrimitiveWithDynamicBatch) {
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
     InferenceEngine::OutputsDataMap out;
-    out = net_reader.getNetwork().getOutputsInfo();
+    out = network.getOutputsInfo();
     InferenceEngine::BlobMap outputBlobs;
 
     std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -1341,11 +1348,12 @@ TEST_F(MKLDNNGraphGenericTests, ExecuteNotInLineGRN) {
         <edge from-layer="2" from-port="4" to-layer="3" to-port="6"/>
     </edges>
 </net>)V0G0N";
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     InferenceEngine::SizeVector dims_src = {1, 3, 2, 2};
 
@@ -1363,7 +1371,7 @@ TEST_F(MKLDNNGraphGenericTests, ExecuteNotInLineGRN) {
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("data", src));
 
     InferenceEngine::OutputsDataMap out;
-    out = net_reader.getNetwork().getOutputsInfo();
+    out = network.getOutputsInfo();
     InferenceEngine::BlobMap outputBlobs;
 
     std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -1480,11 +1488,12 @@ TEST_F(MKLDNNGraphGenericTests, ExecuteInLineGRN) {
     </edges>
 </net>)V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     InferenceEngine::SizeVector dims_src = {1, 3, 2, 2};
 
@@ -1503,7 +1512,7 @@ TEST_F(MKLDNNGraphGenericTests, ExecuteInLineGRN) {
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("data2", src2));
 
     InferenceEngine::OutputsDataMap out;
-    out = net_reader.getNetwork().getOutputsInfo();
+    out = network.getOutputsInfo();
     InferenceEngine::BlobMap outputBlobs;
 
     std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index 582a424..9d5c190 100644 (file)
@@ -11,7 +11,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 
 using namespace ::testing;
@@ -188,11 +188,12 @@ protected:
             interp_test_params p = ::testing::WithParamInterface<interp_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             auto& nodes = graph.getNodes();
             nodes = graph.getNodes();
@@ -224,7 +225,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index 98f62d2..32bf55e 100644 (file)
@@ -11,7 +11,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 
 using namespace ::testing;
@@ -195,11 +195,12 @@ protected:
             log_softmax_test_params p = ::testing::WithParamInterface<log_softmax_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             // Input Data
             InferenceEngine::Blob::Ptr srcData = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.in_out, InferenceEngine::TensorDesc::getLayoutByDims(p.in_out) });
@@ -214,7 +215,7 @@ protected:
 
             // Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index 566f038..570abc9 100644 (file)
@@ -10,7 +10,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 
 using namespace ::testing;
@@ -225,11 +225,12 @@ protected:
             math_test_params p = ::testing::WithParamInterface<math_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             // Input Data
             InferenceEngine::Blob::Ptr srcData = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.in_out, InferenceEngine::TensorDesc::getLayoutByDims(p.in_out) });
@@ -248,7 +249,7 @@ protected:
 
             // Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index 5528477..1df5cb2 100644 (file)
@@ -12,7 +12,7 @@
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
 #include "ir_gen_helper.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 #include <nodes/base.hpp>
 #include <cpu_isa_traits.hpp>
@@ -260,8 +260,9 @@ protected:
             mvn_test_params p = ::testing::WithParamInterface<mvn_test_params>::GetParam();
             std::string model = getModel(p);
 
-            CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
             auto defaultExtensions = std::make_shared<InferenceEngine::Extensions::Cpu::MKLDNNExtensions<mkldnn::impl::cpu::cpu_isa_t::isa_any>>();
@@ -270,7 +271,7 @@ protected:
 
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork(), extMgr);
+            graph.CreateGraph(network, extMgr);
 
             auto& nodes = graph.getNodes();
             nodes = graph.getNodes();
@@ -312,7 +313,7 @@ protected:
             srcs.insert(std::pair<std::string, Blob::Ptr>("in1", src));
 
             OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             BlobMap outputBlobs;
 
             std::pair<std::string, DataPtr> item = *out.begin();
@@ -528,11 +529,12 @@ protected:
             mvn_test_params p = ::testing::WithParamInterface<mvn_test_params>::GetParam();
             std::string model = getModel(p);
 
-            CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             auto& nodes = graph.getNodes();
             nodes = graph.getNodes();
@@ -580,7 +582,7 @@ protected:
             srcs.insert(std::pair<std::string, Blob::Ptr>("in1", src));
 
             OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             BlobMap outputBlobs;
 
             std::pair<std::string, DataPtr> item = *out.begin();
index 3e23f6b..9643fa8 100644 (file)
@@ -11,7 +11,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 
 using namespace ::testing;
@@ -420,11 +420,12 @@ protected:
             nmsTF_test_params p = ::testing::WithParamInterface<nmsTF_test_params>::GetParam();
             std::string model = getModel(p);
             //std::cout << model << std::endl;
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             //  Input
             InferenceEngine::BlobMap srcs;
@@ -491,7 +492,7 @@ protected:
 
             //  Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
             InferenceEngine::TBlob<int32_t>::Ptr output;
index 27a890d..925ae5d 100644 (file)
@@ -8,15 +8,26 @@
 #include "mkldnn_graph.h"
 
 #include "test_graph.hpp"
+#include "ir_gen_helper.hpp"
 
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
+#include <nodes/base.hpp>
+#include <cpu_isa_traits.hpp>
+
+
+using namespace InferenceEngine;
 using namespace ::testing;
 using namespace std;
 using namespace mkldnn;
+using namespace single_layer_tests;
+
+using namespace Extensions;
+using namespace ::Cpu;
+using namespace mkldnn::impl;
 
 struct normalize_test_params {
     struct {
@@ -28,14 +39,20 @@ struct normalize_test_params {
     int across_spatial;
     int channel_shared;
     float eps;
+    bool isBlockedFormat;
 
     size_t num_prim_desc;
 
     MKLDNNPlugin::impl_desc_type selectedType;
 
+    Precision prec_in;
+    Precision prec_out;
+
     std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
 };
 
+extern InferenceEngine::IExtensionPtr make_FakeExtensions();
+
 template <typename data_t>
 void ref_normalize(const InferenceEngine::TBlob<data_t> &src, InferenceEngine::TBlob<data_t> &dst, normalize_test_params prm, const float *weights) {
     int B = static_cast<int>(src.getTensorDesc().getDims()[0]);
@@ -49,39 +66,53 @@ void ref_normalize(const InferenceEngine::TBlob<data_t> &src, InferenceEngine::T
     data_t *dst_data = dst.data();
     
     for (int b = 0; b < B; b++) {
-        const float *src_data_b = src_data + b * C * H * W;
-        float *dst_data_b = dst_data + b * C * H * W;
+        const data_t *src_data_b = src_data + b * C * H * W;
+        data_t *dst_data_b = dst_data + b * C * H * W;
         if (prm.across_spatial) {
-            float sqrt_sum = eps;
+            float sqrt_sum = 0.f;
             for (int i = 0; i < H * W * C; i++) {
                 sqrt_sum += (src_data_b[i] * src_data_b[i]);
             }
 
-            sqrt_sum = std::sqrt(sqrt_sum);
+            sqrt_sum = std::sqrt(sqrt_sum) + eps;
 
             for (int c = 0; c < C; c++) {
                 float s = prm.channel_shared ? weights[0] : weights[c];
                 for (int hw = 0; hw < H * W; hw++) {
-                    dst_data_b[c * H * W + hw] = (src_data_b[c * H * W + hw] / sqrt_sum) * s;
+                    float dst_value = (src_data_b[c * H * W + hw] / sqrt_sum) * s;
+                    if (prm.prec_out == Precision::FP32) {
+                        dst_data_b[c * H * W + hw] = dst_value;
+                    } else if (prm.prec_out == Precision::U8) {
+                        dst_data_b[c * H * W + hw] = (dst_value > 0) ? roundf(dst_value) : 0;
+                    } else if (prm.prec_out == Precision::I8) {
+                        dst_data_b[c * H * W + hw] = roundf(dst_value);
+                    }
                 }
             }
         } else {
             for(int i = 0; i<H*W; i++) {
                 int offset = i;
 
-                float norm = eps;
+                float norm = 0.f;
                 for (int c = 0; c < C; c++) {
-                    const float *src_data_b_c = src_data_b + c * W * H;
+                    const data_t *src_data_b_c = src_data_b + c * W * H;
                     norm += src_data_b_c[offset] * src_data_b_c[offset];
                 }
 
-                norm = std::sqrt(norm);
+                norm = std::sqrt(norm) + eps;
 
                 for (int c = 0; c < C; c++) {
-                    const float *src_data_b_c = src_data_b + c * W * H;
-                    float *dst_data_b_c = dst_data_b + c * W * H;
-
-                    dst_data_b_c[offset] = prm.channel_shared ? (src_data_b_c[offset] / norm * weights[0]) : (src_data_b_c[offset] / norm * weights[c]);
+                    const data_t *src_data_b_c = src_data_b + c * W * H;
+                    data_t *dst_data_b_c = dst_data_b + c * W * H;
+
+                    float dst_value = prm.channel_shared ? (src_data_b_c[offset] / norm * weights[0]) : (src_data_b_c[offset] / norm * weights[c]);
+                    if (prm.prec_out == Precision::FP32) {
+                        dst_data_b_c[offset] = dst_value;
+                    } else if (prm.prec_out == Precision::U8) {
+                        dst_data_b_c[offset] = (dst_value > 0) ? roundf(dst_value) : 0;
+                    } else if (prm.prec_out == Precision::I8) {
+                        dst_data_b_c[offset] = roundf(dst_value);
+                    }
                 }
             }
         }
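Note the epsilon change in ref_normalize above: eps used to seed the accumulator inside the square root and is now added after it, i.e. sqrt(sum + eps) became sqrt(sum) + eps. The two differ most for near-zero inputs; a standalone check, not part of the patch:

    #include <cmath>
    #include <cstdio>

    int main() {
        float eps = 0.001f;
        float sum = 0.f;                        // all-zero input channel
        float old_norm = std::sqrt(eps + sum);  // ~0.0316
        float new_norm = std::sqrt(sum) + eps;  // 0.001
        std::printf("%g vs %g\n", old_norm, new_norm);
        return 0;
    }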
@@ -102,10 +133,8 @@ class MKLDNNCPUExtNormalizeTests: public TestsCommon, public WithParamInterface<
                 </port>
             </output>
         </layer>
-        <layer name="normalize" id="1" type="Normalize" precision="FP32">
-            <data across_spatial="_AS_" channel_shared="_CS_" eps="_EPS_" />
-            <weights offset="0" size="_WS_" />
 
+        <layer name="fakeLayer" id="1" type="_FL_" precision="FP32">
             <input>
                 <port id="1">
                     <dim>_IN_</dim>
@@ -123,9 +152,31 @@ class MKLDNNCPUExtNormalizeTests: public TestsCommon, public WithParamInterface<
                 </port>
             </output>
         </layer>
+        <layer name="normalize" id="2" type="Normalize" precision="FP32">
+            <data across_spatial="_AS_" channel_shared="_CS_" eps="_EPS_" />
+            <weights offset="0" size="_WS_" />
+
+            <input>
+                <port id="3">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="4">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
     </layers>
     <edges>
         <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
+        <edge from-layer="1" from-port="2" to-layer="2" to-port="3"/>
     </edges>
 </Net>
 )V0G0N";
@@ -143,6 +194,11 @@ class MKLDNNCPUExtNormalizeTests: public TestsCommon, public WithParamInterface<
         REPLACE_WITH_NUM(model, "_WS_", p.in.c*sizeof(float));
         REPLACE_WITH_NUM(model, "_EPS_", p.eps);
 
+        if (p.isBlockedFormat)
+            REPLACE_WITH_STR(model, "_FL_", "FakeLayerBLK");
+        else
+            REPLACE_WITH_STR(model, "_FL_", "FakeLayerPLN");
+
         return model;
     }
 
@@ -156,24 +212,29 @@ protected:
             normalize_test_params p = ::testing::WithParamInterface<normalize_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-            
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+
+            MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
+            auto defaultExtensions = std::make_shared<InferenceEngine::Extensions::Cpu::MKLDNNExtensions<mkldnn::impl::cpu::cpu_isa_t::isa_any>>();
+            extMgr->AddExtension(defaultExtensions);
+            extMgr->AddExtension(make_FakeExtensions());
+
             size_t weightSize = p.in.c*sizeof(float);
-            InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, 
+            InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8,
                 {weightSize}, InferenceEngine::C });
             weights->allocate();
             float center = 0;
             float ampl = 100;
             float omega = 0.5;
             CommonTestUtils::fill_data_sine( weights->data().as<float*>(), weights->size() / sizeof(float), center, ampl, omega);
-                       
+
             InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-            net_reader.SetWeights(weights_ptr);
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network, extMgr);
 
             auto& nodes = graph.getNodes();
             nodes = graph.getNodes();
@@ -205,7 +266,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -221,7 +282,7 @@ protected:
             dst_ref.allocate();
             ref_normalize(*srcPtr, dst_ref, p, weights->readOnly().as<const float*>());
             compare(*output, dst_ref);
-           
+
         } catch (const InferenceEngine::details::InferenceEngineException &e) {
             FAIL() << e.what();
         }
@@ -233,15 +294,345 @@ TEST_P(MKLDNNCPUExtNormalizeTests, TestsNormalize) {}
 INSTANTIATE_TEST_CASE_P(
         TestsNormalize, MKLDNNCPUExtNormalizeTests,
         ::testing::Values(
-                normalize_test_params{{1, 22, 129, 323}, false, false, 0.000001f, 1, MKLDNNPlugin::impl_desc_type::unknown },
-                normalize_test_params{{1, 22, 129, 323}, false, true, 0.000001f, 1, MKLDNNPlugin::impl_desc_type::unknown },
-                normalize_test_params{{5, 1, 128, 256}, false, false, 0.000001f, 1, MKLDNNPlugin::impl_desc_type::unknown },
-                normalize_test_params{{5, 1, 128, 256}, false, true, 0.000001f, 1, MKLDNNPlugin::impl_desc_type::unknown },
-                normalize_test_params{{1, 2, 129, 323}, true, false, 0.000001f, 1, MKLDNNPlugin::impl_desc_type::unknown },
-                normalize_test_params{{1, 2, 129, 323}, true, true, 0.000001f, 1, MKLDNNPlugin::impl_desc_type::unknown }, 
-                normalize_test_params{{2, 1, 21, 21}, true, false, 0.000001f, 1, MKLDNNPlugin::impl_desc_type::unknown },
-                normalize_test_params{{2, 1, 21, 21}, true, true, 0.000001f, 1, MKLDNNPlugin::impl_desc_type::unknown },
-                normalize_test_params{{2, 1, 21, 21}, true, true, 0.001f, 1, MKLDNNPlugin::impl_desc_type::unknown },
-                normalize_test_params{{1, 35, 101, 127}, true, true, 0.001f, 1, MKLDNNPlugin::impl_desc_type::unknown },
-                normalize_test_params{{1, 35, 101, 127}, true, false, 0.001f, 1, MKLDNNPlugin::impl_desc_type::unknown },
-                normalize_test_params{{1, 128, 320, 320}, false, true, 0.001f, 1, MKLDNNPlugin::impl_desc_type::unknown }));
+                normalize_test_params{{1, 22, 129, 323}, false, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{1, 22, 129, 323}, false, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{5, 1, 128, 256}, false, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{5, 1, 128, 256}, false, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{1, 2, 129, 323}, true, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{1, 2, 129, 323}, true, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{2, 1, 21, 21}, true, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{2, 1, 21, 21}, true, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{2, 1, 21, 21}, true, true, 0.001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{1, 35, 101, 127}, true, true, 0.001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{1, 35, 101, 127}, true, false, 0.001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{1, 128, 320, 320}, false, true, 0.001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{1, 22, 129, 323}, false, false, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{1, 22, 129, 323}, false, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{5, 1, 128, 256}, false, false, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{5, 1, 128, 256}, false, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{1, 2, 129, 323}, true, false, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{1, 2, 129, 323}, true, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{2, 1, 21, 21}, true, false, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{2, 1, 21, 21}, true, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{2, 1, 21, 21}, true, true, 0.001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{1, 35, 101, 127}, true, true, 0.001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{1, 35, 101, 127}, true, false, 0.001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+                normalize_test_params{{1, 128, 320, 320}, false, true, 0.001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 }
+                ));
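Each initializer row above fills normalize_test_params in declaration order; one row annotated as a sketch (the dims order n, c, h, w is inferred from getModel's substitutions, not spelled out in the hunk):

    normalize_test_params{
        {1, 22, 129, 323},                       // in: n, c, h, w
        false, false,                            // across_spatial, channel_shared
        0.000001f,                               // eps
        false,                                   // isBlockedFormat
        3,                                       // num_prim_desc
        MKLDNNPlugin::impl_desc_type::unknown,   // selectedType
        Precision::FP32, Precision::FP32         // prec_in, prec_out
    };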
+
+static std::string precToStr(Precision prec) {
+    return prec == Precision::U8 ? "U8" : prec == Precision::I8 ? "I8" : "FP32";
+}
+
+template <typename data_t>
+static void fill_int_data(data_t *data, int size, bool is_signed) {
+    for (int i = 0 ; i < size; i++) {
+        data[i] = i * 13 % 21 - 10 * is_signed;
+    }
+}
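fill_int_data keeps values in a small integer range (i * 13 % 21 spans [0, 20], shifted to [-10, 10] when is_signed), presumably so the integer-precision paths see exactly representable floats. A quick standalone check:

    float buf[8];
    fill_int_data(buf, 8, /*is_signed=*/true);
    // buf == {-10, 3, -5, 8, 0, -8, 5, -3}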
+
+class FakeLayerImpl_Normalize: public Cpu::ExtLayerBase,
+                     public WithParamInterface<normalize_test_params> {
+public:
+    explicit FakeLayerImpl_Normalize(const CNNLayer* layer) {
+        try {
+            is_blocked = layer->GetParamAsBool("is_blocked");
+            addConfig(layer);
+        } catch (InferenceEngine::details::InferenceEngineException &ex) {
+            errorMsg = ex.what();
+        }
+    }
+
+    bool is_blocked;
+
+    void addConfig(const CNNLayer* layer) {
+        LayerConfig config;
+
+        // Fill tensor parameters into config
+        auto fill_port = [&] (std::vector<DataConfig>& port, const DataPtr& data) {
+            auto div_up = [](const int a, const int b) -> int {
+                if (!b)
+                    return 0;
+                return (a + b - 1) / b;
+            };
+            if (!data) THROW_IE_EXCEPTION << "Cannot get input data!";
+
+            DataConfig dataConfig;
+            dataConfig.inPlace = 0;
+            dataConfig.constant = false;
+
+            const TensorDesc& data_desc = data->getTensorDesc();
+            const SizeVector& data_dims = data_desc.getDims();
+
+            InferenceEngine::Precision precision = data_desc.getPrecision();
+            Layout layout;
+            if (is_blocked) {
+                int blk_size = cpu::mayiuse(cpu::avx512_common) ? 16 : 8;
+
+                std::vector<size_t> blocks = data_dims;
+                std::vector<size_t> order(blocks.size());
+                for (size_t i = 0; i < order.size(); i++) order[i] = i;
+
+                order.push_back(1);
+                blocks[1] = div_up(blocks[1], blk_size);
+                blocks.push_back(blk_size);
+
+                dataConfig.desc = TensorDesc(precision, data_dims, {blocks, order});
+            } else {
+                dataConfig.desc = TensorDesc(precision, data_dims, data_dims.size() == 5 ? NDHWC : NHWC);
+            }
+
+            port.push_back(dataConfig);
+        };
+
+        fill_port(config.inConfs, layer->insData[0].lock());
+        fill_port(config.outConfs, layer->outData[0]);
+        config.inConfs[0].desc.setPrecision(config.outConfs[0].desc.getPrecision());
+        confs.push_back(config);
+    }
+
+    StatusCode execute(std::vector<Blob::Ptr>& inputs, std::vector<Blob::Ptr>& outputs,
+                       ResponseDesc *resp) noexcept override {
+        return OK;
+    }
+};
+
+REG_FACTORY_FOR(Cpu::ImplFactory<FakeLayerImpl_Normalize>, FakeLayer_Normalize);
+
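In the blocked branch of addConfig above, the channel dimension is divided by blk_size and the block size appended as a trailing dimension. Worked through for dims {2, 33, 129, 323} on an AVX-512 machine (blk_size = 16; values illustrative):

    InferenceEngine::SizeVector dims   = {2, 33, 129, 323};    // NCHW
    InferenceEngine::SizeVector blocks = {2, 3, 129, 323, 16}; // div_up(33, 16) == 3
    InferenceEngine::SizeVector order  = {0, 1, 2, 3, 1};      // channel split appended
    InferenceEngine::TensorDesc desc(
        InferenceEngine::Precision::FP32, dims, {blocks, order});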
+class MKLDNNCPUExtNormalizeTests_Blocked: public TestsCommon, public WithParamInterface<normalize_test_params> {
+    std::string model_t = R"V0G0N(
+        <layer name="fakeLayer1" id="1" type="FakeLayer_Normalize">
+            <data is_blocked="_IS_BLOCKED_"/>
+            <input>
+                <port id="1">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="2" precision="_PREC_IN_">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="normalize" id="2" type="Normalize">
+            <data across_spatial="_AS_" channel_shared="_CS_" eps="_EPS_" />
+            <weights offset="0" size="_WS_" />
+
+            <input>
+                <port id="3">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="4" precision="_PREC_OUT_">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="fakeLayer2" id="3" type="FakeLayer_Normalize">
+            <data is_blocked="_IS_BLOCKED_"/>
+            <input>
+                <port id="5">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </input>
+            <output>
+                <port id="6" precision="_PREC_OUT_">
+                    <dim>_IN_</dim>
+                    <dim>_IC_</dim>
+                    <dim>_IH_</dim>
+                    <dim>_IW_</dim>
+                </port>
+            </output>
+        </layer>
+)V0G0N";
+
+    std::string edges_t = R"V0G0N(
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
+        <edge from-layer="1" from-port="2" to-layer="2" to-port="3"/>
+        <edge from-layer="2" from-port="4" to-layer="3" to-port="5"/>
+)V0G0N";
+
+    std::string getModel(normalize_test_params p) {
+        std::string model = model_t;
+        REPLACE_WITH_NUM(model, "_IW_", p.in.w);
+        REPLACE_WITH_NUM(model, "_IH_", p.in.h);
+        REPLACE_WITH_NUM(model, "_IC_", p.in.c);
+        REPLACE_WITH_NUM(model, "_IN_", p.in.n);
+
+        REPLACE_WITH_NUM(model, "_AS_", p.across_spatial);
+        REPLACE_WITH_NUM(model, "_CS_", p.channel_shared);
+
+        REPLACE_WITH_NUM(model, "_WS_", p.in.c*sizeof(float));
+        REPLACE_WITH_NUM(model, "_EPS_", p.eps);
+        REPLACE_WITH_STR(model, "_PREC_IN_", precToStr(p.prec_in));
+        REPLACE_WITH_STR(model, "_PREC_OUT_", precToStr(p.prec_out));
+        REPLACE_WITH_NUM(model, "_IS_BLOCKED_", p.isBlockedFormat);
+
+        model = IRTemplateGenerator::getIRTemplate("Normalize_Only", {p.in.n, p.in.c, p.in.h, p.in.w}, "FP32", model, edges_t, 7);
+
+        return model;
+    }
+
+protected:
+    virtual void TearDown() {
+    }
+
+    virtual void SetUp() {
+        try {
+            TestsCommon::SetUp();
+            normalize_test_params p = ::testing::WithParamInterface<normalize_test_params>::GetParam();
+            std::string model = getModel(p);
+
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+
+            size_t weightSize = p.in.c*sizeof(float);
+            InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8,
+                {weightSize}, InferenceEngine::C });
+            weights->allocate();
+            float center = 0;
+            float ampl = 100;
+            float omega = 0.5;
+            CommonTestUtils::fill_data_sine( weights->data().as<float*>(), weights->size() / sizeof(float), center, ampl, omega);
+
+            InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
+
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
+
+            MKLDNNGraphTestClass graph;
+            graph.CreateGraph(network);
+
+            auto& nodes = graph.getNodes();
+            nodes = graph.getNodes();
+            for (auto &node : nodes) {
+                if (node->getName() == "normalize") {
+                    ASSERT_LE(p.num_prim_desc, node->getSupportedPrimitiveDescriptors().size());
+                    for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) {
+                        p.comp.at(j)(node->getSupportedPrimitiveDescriptors().at(j));
+                    }
+                    ASSERT_NE(nullptr, node->getSelectedPrimitiveDescriptor());
+                    ASSERT_EQ(p.selectedType,
+                              node->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType);
+                }
+            }
+
+            InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w};
+            InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float>({InferenceEngine::Precision::FP32, dims_src, NCHW});
+            src->allocate();
+            if (p.prec_in == Precision::U8) {
+                fill_int_data(src->buffer().as<float *>(), src->size(), false);
+            } else if (p.prec_in == Precision::I8) {
+                fill_int_data(src->buffer().as<float *>(), src->size(), true);
+            } else {
+                fill_data(src->buffer(), src->size());
+            }
+
+            auto * srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
+
+            if (srcPtr == nullptr)
+                FAIL() << "Cannot cast blob to TBlob<float>.";
+
+            InferenceEngine::BlobMap srcs;
+            srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
+
+            InferenceEngine::OutputsDataMap out;
+            out = network.getOutputsInfo();
+            InferenceEngine::BlobMap outputBlobs;
+
+            std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
+
+            InferenceEngine::TBlob<float>::Ptr output;
+            output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
+            output->allocate();
+            outputBlobs[item.first] = output;
+
+            graph.Infer(srcs, outputBlobs);
+
+            InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
+            dst_ref.allocate();
+            ref_normalize(*srcPtr, dst_ref, p, weights->readOnly().as<const float*>());
+            compare(*output, dst_ref);
+
+        } catch (const InferenceEngine::details::InferenceEngineException &e) {
+            FAIL() << e.what();
+        }
+    }
+};
+
+TEST_P(MKLDNNCPUExtNormalizeTests_Blocked, TestsNormalize) {}
+
+INSTANTIATE_TEST_CASE_P(
+        TestsNormalize, MKLDNNCPUExtNormalizeTests_Blocked,
+        ::testing::Values(
+            normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+            normalize_test_params{{2, 33, 129, 323}, true, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+            normalize_test_params{{2, 67, 77, 78}, false, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+            normalize_test_params{{2, 67, 77, 78}, false, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+
+            normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::I8 },
+            normalize_test_params{{2, 33, 129, 323}, true, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::I8 },
+            normalize_test_params{{2, 67, 77, 78}, false, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::I8 },
+            normalize_test_params{{2, 67, 77, 78}, false, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::I8 },
+
+            normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::U8 },
+            normalize_test_params{{2, 33, 129, 323}, true, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::U8 },
+            normalize_test_params{{2, 67, 77, 78}, false, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::U8 },
+            normalize_test_params{{2, 67, 77, 78}, false, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::U8 },
+
+            normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::I8 },
+            normalize_test_params{{2, 33, 129, 323}, true, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::I8 },
+            normalize_test_params{{2, 67, 77, 78}, false, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::I8 },
+            normalize_test_params{{2, 67, 77, 78}, false, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::I8 },
+
+            normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::FP32 },
+            normalize_test_params{{2, 33, 129, 323}, true, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::FP32 },
+            normalize_test_params{{2, 67, 77, 78}, false, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::FP32 },
+            normalize_test_params{{2, 67, 77, 78}, false, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::FP32 },
+
+            normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::U8 },
+            normalize_test_params{{2, 33, 129, 323}, true, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::U8 },
+            normalize_test_params{{2, 67, 77, 78}, false, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::U8 },
+            normalize_test_params{{2, 67, 77, 78}, false, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::U8 },
+
+            normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::FP32 },
+            normalize_test_params{{2, 33, 129, 323}, true, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::FP32 },
+            normalize_test_params{{2, 67, 77, 78}, false, true, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::FP32 },
+            normalize_test_params{{2, 67, 77, 78}, false, false, 0.000001f, false, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::FP32 },
+
+            normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+            normalize_test_params{{2, 67, 77, 78}, false, false, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::FP32 },
+
+            normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::I8 },
+            normalize_test_params{{2, 67, 77, 78}, false, false, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::I8 },
+
+            normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::U8 },
+            normalize_test_params{{2, 67, 77, 78}, false, false, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::FP32, Precision::U8 },
+
+            normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::I8 },
+            normalize_test_params{{2, 67, 77, 78}, false, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::I8 },
+
+            normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::FP32 },
+            normalize_test_params{{2, 67, 77, 78}, false, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::I8, Precision::FP32 },
+
+            normalize_test_params{{2, 33, 129, 323}, true, true, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::U8 },
+            normalize_test_params{{2, 67, 77, 78}, false, false, 0.000001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::U8 },
+
+            normalize_test_params{{2, 33, 129, 323}, true, true, 0.0001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::FP32 },
+            normalize_test_params{{2, 67, 77, 78}, false, false, 0.0001f, true, 3, MKLDNNPlugin::impl_desc_type::unknown, Precision::U8, Precision::FP32 }
+        ));
\ No newline at end of file
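The suite above uses the value-parameterized GTest idiom shared by all of these
files: an empty TEST_P body plus an INSTANTIATE_TEST_CASE_P list of parameter
structs, with the real work done in the fixture. (Note that the final two fused
U8-to-FP32 cases relax eps from 1e-6 to 1e-4.) A minimal sketch of the idiom,
with hypothetical names:

    // Minimal value-parameterized test: all inputs live in one aggregate,
    // TEST_P stays empty, and the Values(...) list drives the fixture.
    #include <gtest/gtest.h>

    struct demo_params {
        int size;
        float eps;
    };

    class DemoParamTest : public ::testing::TestWithParam<demo_params> {};

    TEST_P(DemoParamTest, CompareWithReference) {
        demo_params p = GetParam();
        ASSERT_GT(p.size, 0);
        ASSERT_NEAR(0.0f, 0.0f, p.eps);  // stands in for the reference compare
    }

    INSTANTIATE_TEST_CASE_P(DemoSuite, DemoParamTest,
            ::testing::Values(demo_params{128, 1e-6f},
                              demo_params{256, 1e-4f}));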
index 3a4ae3c..9c22a2a 100644 (file)
@@ -13,7 +13,7 @@
 #include "tests_common.hpp"
 
 #include "single_layer_common.hpp"
-#include "cpp/ie_cnn_net_reader.h"
+#include <ie_core.hpp>
 
 using namespace ::testing;
 using namespace InferenceEngine;
@@ -27,10 +27,10 @@ struct one_hot_base_params {
 };
 
 struct one_hot_test_params : one_hot_base_params {
-    std::string libraryName;
+    std::string device_name;
 
     one_hot_test_params(std::string name, one_hot_base_params params) :
-            one_hot_base_params(params), libraryName(name) {}
+            one_hot_base_params(params), device_name(name) {}
 };
 
 class OneHotOnly1dTest: public TestsCommon,
@@ -109,9 +109,10 @@ protected:
             one_hot_test_params p = ::testing::WithParamInterface<one_hot_test_params>::GetParam();
             std::string model = getModel(p);
 
-            CNNNetReader net_reader;
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
             try {
-                net_reader.ReadNetwork(model.data(), model.length());
+                network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr());
             } catch (InferenceEngine::details::InferenceEngineException &e) {
                 FAIL() << e.what();
             } catch (std::exception &e) {
@@ -119,11 +120,11 @@ protected:
             }
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             // Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -247,9 +248,10 @@ protected:
             one_hot_test_params p = ::testing::WithParamInterface<one_hot_test_params>::GetParam();
             std::string model = getModel(p);
 
-            CNNNetReader net_reader;
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
             try {
-                net_reader.ReadNetwork(model.data(), model.length());
+                network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr());
             } catch (InferenceEngine::details::InferenceEngineException &e) {
                 FAIL() << e.what();
             } catch (std::exception &e) {
@@ -257,11 +259,11 @@ protected:
             }
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             // Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -398,9 +400,10 @@ protected:
             one_hot_test_params p = ::testing::WithParamInterface<one_hot_test_params>::GetParam();
             std::string model = getModel(p);
 
-            CNNNetReader net_reader;
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
             try {
-                net_reader.ReadNetwork(model.data(), model.length());
+                network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr());
             } catch (InferenceEngine::details::InferenceEngineException &e) {
                 FAIL() << e.what();
             } catch (std::exception &e) {
@@ -408,11 +411,11 @@ protected:
             }
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             // Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -555,9 +558,10 @@ protected:
             one_hot_test_params p = ::testing::WithParamInterface<one_hot_test_params>::GetParam();
             std::string model = getModel(p);
 
-            CNNNetReader net_reader;
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
             try {
-                net_reader.ReadNetwork(model.data(), model.length());
+                network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr());
             } catch (InferenceEngine::details::InferenceEngineException &e) {
                 FAIL() << e.what();
             } catch (std::exception &e) {
@@ -565,11 +569,11 @@ protected:
             }
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             // Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -726,9 +730,10 @@ protected:
             one_hot_test_params p = ::testing::WithParamInterface<one_hot_test_params>::GetParam();
             std::string model = getModel(p);
 
-            CNNNetReader net_reader;
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
             try {
-                net_reader.ReadNetwork(model.data(), model.length());
+                network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr());
             } catch (InferenceEngine::details::InferenceEngineException &e) {
                 FAIL() << e.what();
             } catch (std::exception &e) {
@@ -736,11 +741,11 @@ protected:
             }
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             // Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -807,34 +812,34 @@ protected:
 #define case_5d_4 one_hot_base_params({ {1, 3, 2, 3}, {2, 1, 3, 4, 3}, 3, 4, 1.0f, 0.0f })
 
 one_hot_test_params one_hot_only_1d_test_cases[] = {
-    one_hot_test_params("MKLDNNPlugin", case_1d_0),
-    one_hot_test_params("MKLDNNPlugin", case_1d_1)
+    one_hot_test_params("CPU", case_1d_0),
+    one_hot_test_params("CPU", case_1d_1)
 };
 
 one_hot_test_params one_hot_only_2d_test_cases[] = {
-    one_hot_test_params("MKLDNNPlugin", case_2d_0),
-    one_hot_test_params("MKLDNNPlugin", case_2d_1),
-    one_hot_test_params("MKLDNNPlugin", case_2d_2),
+    one_hot_test_params("CPU", case_2d_0),
+    one_hot_test_params("CPU", case_2d_1),
+    one_hot_test_params("CPU", case_2d_2),
 };
 
 one_hot_test_params one_hot_only_3d_test_cases[] = {
-    one_hot_test_params("MKLDNNPlugin", case_3d_0),
-    one_hot_test_params("MKLDNNPlugin", case_3d_1),
-    one_hot_test_params("MKLDNNPlugin", case_3d_2),
+    one_hot_test_params("CPU", case_3d_0),
+    one_hot_test_params("CPU", case_3d_1),
+    one_hot_test_params("CPU", case_3d_2),
 };
 one_hot_test_params one_hot_only_4d_test_cases[] = {
-    one_hot_test_params("MKLDNNPlugin", case_4d_0),
-    one_hot_test_params("MKLDNNPlugin", case_4d_1),
-    one_hot_test_params("MKLDNNPlugin", case_4d_2),
-    one_hot_test_params("MKLDNNPlugin", case_4d_3)
+    one_hot_test_params("CPU", case_4d_0),
+    one_hot_test_params("CPU", case_4d_1),
+    one_hot_test_params("CPU", case_4d_2),
+    one_hot_test_params("CPU", case_4d_3)
 };
 
 one_hot_test_params one_hot_only_5d_test_cases[] = {
-    one_hot_test_params("MKLDNNPlugin", case_5d_0),
-    one_hot_test_params("MKLDNNPlugin", case_5d_1),
-    one_hot_test_params("MKLDNNPlugin", case_5d_2),
-    one_hot_test_params("MKLDNNPlugin", case_5d_3),
-    one_hot_test_params("MKLDNNPlugin", case_5d_4)
+    one_hot_test_params("CPU", case_5d_0),
+    one_hot_test_params("CPU", case_5d_1),
+    one_hot_test_params("CPU", case_5d_2),
+    one_hot_test_params("CPU", case_5d_3),
+    one_hot_test_params("CPU", case_5d_4)
 };
 
 TEST_P(OneHotOnly1dTest, TestsOneHot) {}
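Every file in this series makes the same reader substitution, so it is worth
spelling out once: CNNNetReader::ReadNetwork(char*, size_t) followed by
getNetwork() becomes a single Core::ReadNetwork(model, weights) call, where an
empty Blob::CPtr() stands in for IRs that carry no external weights. A
condensed before/after sketch (the helper name is ours):

    #include <ie_core.hpp>
    #include <string>

    InferenceEngine::CNNNetwork read_ir(const std::string &model) {
        // Old API, removed in this snapshot:
        //   InferenceEngine::CNNNetReader net_reader;
        //   net_reader.ReadNetwork(model.data(), model.length());
        //   return net_reader.getNetwork();
        InferenceEngine::Core core;
        return core.ReadNetwork(model, InferenceEngine::Blob::CPtr());
    }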
index e24ccca..0a55c53 100644 (file)
@@ -11,7 +11,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 
 using namespace ::testing;
@@ -123,15 +123,16 @@ protected:
             range_test_params p = ::testing::WithParamInterface<range_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             // Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             // Input Data
index 8da9cbe..c172334 100644 (file)
@@ -11,7 +11,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 
 using namespace ::testing;
@@ -329,15 +329,16 @@ protected:
             reduce_test_params p = ::testing::WithParamInterface<reduce_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             // Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index 77681b3..20600a1 100644 (file)
@@ -11,7 +11,7 @@
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
 #include "ir_gen_helper.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 #include <nodes/base.hpp>
 #include <cpu_isa_traits.hpp>
@@ -76,13 +76,13 @@ void ref_resample(const InferenceEngine::TBlob<data_t> &src, InferenceEngine::TB
                 for (size_t oz = 0; oz < OD; oz++) {
                     for (size_t oy = 0; oy < OH; oy++) {
                         for (size_t ox = 0; ox < OW; ox++) {
-                            float ix = ox * fx + fx / 2.0f - 0.5f;
-                            float iy = oy * fy + fy / 2.0f - 0.5f;
-                            float iz = oz * fz + fz / 2.0f - 0.5f;
+                            float ix = ox * fx;
+                            float iy = oy * fy;
+                            float iz = oz * fz;
 
-                            size_t ix_r = static_cast<size_t>(round(ix));
-                            size_t iy_r = static_cast<size_t>(round(iy));
-                            size_t iz_r = static_cast<size_t>(round(iz));
+                            size_t ix_r = static_cast<size_t>(std::floor(ix));
+                            size_t iy_r = static_cast<size_t>(std::floor(iy));
+                            size_t iz_r = static_cast<size_t>(std::floor(iz));
 
                             out_ptr[oz * OH * OW + oy * OW + ox] = in_ptr[iz_r * IH * IW + iy_r * IW + ix_r];
                         }
@@ -257,16 +257,17 @@ protected:
             resample_test_params p = ::testing::WithParamInterface<resample_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
             MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
             auto defaultExtensions = std::make_shared<InferenceEngine::Extensions::Cpu::MKLDNNExtensions<mkldnn::impl::cpu::cpu_isa_t::isa_any>>();
             extMgr->AddExtension(defaultExtensions);
             extMgr->AddExtension(make_FakeExtensions());
 
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
+
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork(), extMgr);
+            graph.CreateGraph(network, extMgr);
 
             auto& nodes = graph.getNodes();
             nodes = graph.getNodes();
@@ -304,7 +305,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
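The resample hunk above changes the reference math as well as the reader: the
nearest-neighbour source index drops the half-pixel offset and rounding in
favour of a plain floor. Distilled into one helper (assuming fx/fy/fz are
in/out scale factors, as in the loop above):

    // New reference: src = floor(dst * scale); no +scale/2 - 0.5 shift and
    // no round(). The old behaviour is kept in the comment for contrast.
    #include <cmath>
    #include <cstddef>

    inline size_t nn_src_index(size_t out_index, float scale) {
        return static_cast<size_t>(std::floor(out_index * scale));
        // old: static_cast<size_t>(round(out_index * scale + scale / 2.0f - 0.5f));
    }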
index ff26376..926d8f2 100644 (file)
@@ -11,8 +11,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
-
+#include <ie_core.hpp>
 
 using namespace ::testing;
 using namespace std;
@@ -149,16 +148,17 @@ protected:
             TestsCommon::SetUp();
             reverse_sequence_test_params p = ::testing::WithParamInterface<reverse_sequence_test_params>::GetParam();
             std::string model = getModel(p);
-            ////std::cout << model;
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             // Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index 0670283..1ab0823 100644 (file)
@@ -11,7 +11,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 
 using namespace ::testing;
@@ -118,11 +118,12 @@ protected:
             scatterTF_test_params p = ::testing::WithParamInterface<scatterTF_test_params>::GetParam();
             std::string model = getModel(p);
             //std::cout << model << std::endl;
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             //  Input Data
             InferenceEngine::Blob::Ptr srcData = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.inDataDim, InferenceEngine::TensorDesc::getLayoutByDims(p.inDataDim) });
@@ -163,7 +164,7 @@ protected:
 
             //  Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
             InferenceEngine::TBlob<float>::Ptr output;
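The scatter test builds its input blobs by deriving a layout from the
dimension count, the pattern visible in the hunk above. A self-contained
sketch of that step (the helper name is ours):

    #include <ie_core.hpp>

    InferenceEngine::Blob::Ptr make_fp32_blob(const InferenceEngine::SizeVector &dims) {
        InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, dims,
                InferenceEngine::TensorDesc::getLayoutByDims(dims));
        auto blob = InferenceEngine::make_shared_blob<float>(desc);
        blob->allocate();  // the test then fills the buffer with test data
        return blob;
    }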
index 4afc193..a154be8 100644 (file)
@@ -11,7 +11,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 
 using namespace ::testing;
@@ -168,15 +168,17 @@ protected:
             SizeVector inputShape;
             std::tie(conditionType, conditionShape, inputShape) = ::testing::WithParamInterface<select_test_params>::GetParam();
             std::string model = getModel(conditionType, conditionShape, inputShape);
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             // Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index 4e22b83..ce4a30c 100644 (file)
@@ -11,7 +11,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 
 using namespace ::testing;
@@ -128,16 +128,17 @@ protected:
             TestsCommon::SetUp();
             shuffle_channels_test_params p = ::testing::WithParamInterface<shuffle_channels_test_params>::GetParam();
             std::string model = getModel(p);
-            ////std::cout << model;
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             // Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index bacf825..7510958 100644 (file)
@@ -11,7 +11,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 #include <algorithm>
 #include <vector>
@@ -310,11 +310,12 @@ protected:
             sparse_fill_empty_rows_test_params p = ::testing::WithParamInterface<sparse_fill_empty_rows_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             auto& nodes = graph.getNodes();
             nodes = graph.getNodes();
@@ -360,7 +361,7 @@ protected:
 
             // Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap output_blobs;
             auto iter = out.begin();
 
index 0f15e6f..adde6ec 100644 (file)
@@ -11,7 +11,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 #include <algorithm>
 #include <vector>
@@ -91,11 +91,12 @@ protected:
             sparse_segment_reduce_test_params p = ::testing::WithParamInterface<sparse_segment_reduce_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             auto& nodes = graph.getNodes();
             nodes = graph.getNodes();
@@ -135,7 +136,7 @@ protected:
             input_blob_map["InputSegmentIds"] = input_segment_ids;
 
             // prepare output blob map
-            InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+            InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
             InferenceEngine::BlobMap output_blob_map;
             for (auto iter = out.begin(); iter != out.end(); iter++) {
                 std::pair<std::string, InferenceEngine::DataPtr> item = *iter;
index f0bdb9e..cb820b4 100644 (file)
@@ -11,7 +11,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 #include <algorithm>
 #include <vector>
@@ -104,11 +104,12 @@ protected:
             sparse_to_dense_test_params p = ::testing::WithParamInterface<sparse_to_dense_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             auto& nodes = graph.getNodes();
             nodes = graph.getNodes();
@@ -156,7 +157,7 @@ protected:
             input_blob_map["InputDefaultValue"] = input_default_value;
 
             // prepare output blob map
-            InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+            InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
             InferenceEngine::BlobMap output_blob_map;
             for (auto iter = out.begin(); iter != out.end(); iter++) {
                 std::pair<std::string, InferenceEngine::DataPtr> item = *iter;
index d4e261f..3da2b0c 100644 (file)
@@ -11,7 +11,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 #include <algorithm>
 #include <vector>
@@ -112,11 +112,12 @@ protected:
             sparse_weighted_reduce_test_params p = ::testing::WithParamInterface<sparse_weighted_reduce_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             auto& nodes = graph.getNodes();
             nodes = graph.getNodes();
@@ -183,7 +184,7 @@ protected:
             }
 
             // prepare output blob map
-            InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+            InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
             InferenceEngine::BlobMap output_blob_map;
             for (auto iter = out.begin(); iter != out.end(); iter++) {
                 std::pair<std::string, InferenceEngine::DataPtr> item = *iter;
index cfd262d..0a3e121 100644 (file)
@@ -11,7 +11,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 
 using namespace ::testing;
@@ -320,15 +320,16 @@ protected:
             strided_slice_test_params p = ::testing::WithParamInterface<strided_slice_test_params>::GetParam();
             std::string model = getModel(p);
             ////std::cout << model;
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             // Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index 23b35ac..75a53e7 100644 (file)
 #include "tests_common.hpp"
 #include <stdio.h>
 
-#include <cpp/ie_cnn_net_reader.h>
 #include <ie_core.hpp>
 #include <ie_plugin_config.hpp>
 
-
-#include <ie_plugin_ptr.hpp>
-
 #include "single_layer_common.hpp"
 #include "tests_common.hpp"
 #include <algorithm>
@@ -227,15 +223,16 @@ protected:
             topk_test_params p = ::testing::WithParamInterface<topk_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             // Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             auto it = out.begin();
@@ -426,11 +423,12 @@ protected:
             topk_test_params p = ::testing::WithParamInterface<topk_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             // Input Data
             InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.in_shape,
@@ -459,7 +457,7 @@ protected:
 
             // Output Data
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
             auto it = out.begin();
             std::pair<std::string, InferenceEngine::DataPtr> item = *it;
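After ReadNetwork, each of these tests walks getOutputsInfo() to allocate its
output blobs; the hunks only show the map being fetched, so here is the full
shape of that step as a sketch (helper name is ours; the allocation details
follow the common fixture code, which sits outside the hunks shown):

    #include <ie_core.hpp>

    InferenceEngine::BlobMap allocate_outputs(const InferenceEngine::CNNNetwork &network) {
        InferenceEngine::BlobMap outputBlobs;
        for (const auto &item : network.getOutputsInfo()) {
            auto output = InferenceEngine::make_shared_blob<float>(
                    item.second->getTensorDesc());
            output->allocate();
            outputBlobs[item.first] = output;
        }
        return outputBlobs;
    }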
index 3825335..962af31 100644 (file)
@@ -11,7 +11,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 #include <algorithm>
 #include <vector>
@@ -95,11 +95,12 @@ protected:
             unique_test_params p = ::testing::WithParamInterface<unique_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             auto& nodes = graph.getNodes();
             nodes = graph.getNodes();
@@ -126,7 +127,7 @@ protected:
             input_blob_map["InputValues"] = input;
 
             // prepare output blob map
-            InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+            InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
             InferenceEngine::BlobMap output_blob_map;
             for (auto iter = out.begin(); iter != out.end(); iter++) {
                 std::pair<std::string, InferenceEngine::DataPtr> item = *iter;
index bc2285b..2a235b7 100644 (file)
@@ -10,7 +10,7 @@
 #include <mkldnn_extension_utils.h>
 #include <cnn_network_impl.hpp>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 #include <ie_plugin_config.hpp>
 
 using namespace ::testing;
@@ -208,11 +208,12 @@ protected:
             activation_test_params p = ::testing::WithParamInterface<activation_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
                 if (nodes[i]->getType() == MKLDNNPlugin::Activation) {
@@ -250,7 +251,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -322,9 +323,10 @@ protected:
             if (MB < 2)
                 MB = 2;
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-            InferenceEngine::CNNNetwork network = net_reader.getNetwork();
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
+
             auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
             ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
             InferenceEngine::ResponseDesc resp;
@@ -333,7 +335,7 @@ protected:
 
             MKLDNNGraphTestClass graph;
             graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             InferenceEngine::SizeVector dims_src = p.dims;
             InferenceEngine::Layout layout = InferenceEngine::ANY;
@@ -359,7 +361,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index 43616f2..b7edba0 100644 (file)
 
 #include "single_layer_common.hpp"
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 #include <ie_plugin_config.hpp>
+#include <ie_system_conf.h>
 
 using namespace ::testing;
-using namespace std;
 using namespace mkldnn;
 
 struct batchnorm_scaleshift_test_params {
@@ -181,9 +181,6 @@ protected:
             batchnorm_scaleshift_test_params p = ::testing::WithParamInterface<batchnorm_scaleshift_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
             InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, 
                 {p.in.c * 4 * sizeof(float)}, InferenceEngine::C });
             weights->allocate();
@@ -195,10 +192,13 @@ protected:
                 }
             }
             InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
-            net_reader.SetWeights(weights_ptr);
+
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
                 if ((nodes[i]->getType() == MKLDNNPlugin::Depthwise && nodes[i]->getCnnLayer()->type == "ScaleShift")
@@ -225,7 +225,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -250,13 +250,17 @@ protected:
 
 TEST_P(MKLDNNGraphBatchNormScaleShiftTests, TestsBatchNormWithScaleShift) {}
 
+using namespace MKLDNNPlugin;
+
+const size_t expect_num_impl = InferenceEngine::with_cpu_x86_avx2() ? 5 : 4;
+
 INSTANTIATE_TEST_CASE_P(
         TestsBatchNormWithScaleShift, MKLDNNGraphBatchNormScaleShiftTests,
         ::testing::Values(
-                batchnorm_scaleshift_test_params{{1, 32, 128, 256}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::jit},
-                batchnorm_scaleshift_test_params{{4, 3, 227, 227}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::jit},
-                batchnorm_scaleshift_test_params{{1, 32, 128, 256}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                batchnorm_scaleshift_test_params{{4, 3, 227, 227}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}));
+                batchnorm_scaleshift_test_params{{1, 32, 128, 256}, 1e-6, 2, expect_num_impl, jit},
+                batchnorm_scaleshift_test_params{{4, 3,  227, 227}, 1e-6, 2, expect_num_impl, jit},
+                batchnorm_scaleshift_test_params{{1, 32, 128, 256}, 1e-6, 2, expect_num_impl, ref, {ref_any}},
+                batchnorm_scaleshift_test_params{{4, 3,  227, 227}, 1e-6, 2, expect_num_impl, ref, {ref_any}}));
 
 
 class MKLDNNGraphDynBatchBatchNormScaleShiftTests: public MKLDNNGraphBatchNormScaleShiftTests {
@@ -270,9 +274,6 @@ protected:
             if (MB < 2)
                 MB = 2;
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
             InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, 
                 {p.in.c * 4 * sizeof(float)}, InferenceEngine::C });
             weights->allocate();
@@ -284,8 +285,11 @@ protected:
                 }
             }
             InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
-            net_reader.SetWeights(weights_ptr);
-            InferenceEngine::CNNNetwork network = net_reader.getNetwork();
+
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
+
             auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
             ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
             InferenceEngine::ResponseDesc resp;
@@ -295,7 +299,7 @@ protected:
 
             MKLDNNGraphTestClass graph;
             graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w};
             InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float>({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW});
@@ -310,7 +314,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
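The INSTANTIATE_TEST_CASE_P change in this file (and in the batch-norm file
that follows) ties the expected number of supported primitive descriptors to
the host ISA: with_cpu_x86_avx2() decides whether the extra jit implementation
is registered, replacing the hard-coded count of 5. The runtime check reduces
to:

    #include <ie_system_conf.h>
    #include <cstddef>

    inline size_t expected_impl_count() {
        // one extra jit descriptor is available on AVX2-capable CPUs
        return InferenceEngine::with_cpu_x86_avx2() ? 5 : 4;
    }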
index 6501132..2faade0 100644 (file)
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include <cnn_network_impl.hpp>
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 #include <ie_plugin_config.hpp>
 #include "tests_common.hpp"
+#include "ie_system_conf.h"
 
 using namespace ::testing;
-using namespace std;
+using namespace MKLDNNPlugin;
 using namespace mkldnn;
 
 struct batchnorm4D_test_params {
@@ -148,10 +149,7 @@ protected:
             batchnorm4D_test_params p = ::testing::WithParamInterface<batchnorm4D_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
-            InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::FP32, 
+            InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, 
                 {p.in.c * 2 * sizeof(float)}, InferenceEngine::C });
             weights->allocate();
             fill_data(weights->buffer(), weights->size() / sizeof(float));
@@ -164,10 +162,12 @@ protected:
 
             InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-            net_reader.SetWeights(weights_ptr);
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
@@ -197,7 +197,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -223,14 +223,15 @@ protected:
 
 TEST_P(MKLDNNGraphBatchNormTests, TestsBatchNorm) {}
 
+const size_t expect_num_impl = InferenceEngine::with_cpu_x86_avx2() ? 5 : 4;
 
 INSTANTIATE_TEST_CASE_P(
         TestsBatchNorm, MKLDNNGraphBatchNormTests,
         ::testing::Values(
-                batchnorm4D_test_params{{1, 32, 128, 256}, 1e-6, 5, MKLDNNPlugin::impl_desc_type::jit},
-                batchnorm4D_test_params{{3, 3, 128, 256}, 1e-6, 5, MKLDNNPlugin::impl_desc_type::jit},
-                batchnorm4D_test_params{{1, 32, 128, 256}, 1e-6, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                batchnorm4D_test_params{{3, 3, 128, 256}, 1e-6, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}));
+                batchnorm4D_test_params{{1, 32, 128, 256}, 1e-6, expect_num_impl, jit},
+                batchnorm4D_test_params{{3, 3,  128, 256}, 1e-6, expect_num_impl, jit},
+                batchnorm4D_test_params{{1, 32, 128, 256}, 1e-6, expect_num_impl, ref, {ref_any}},
+                batchnorm4D_test_params{{3, 3,  128, 256}, 1e-6, expect_num_impl, ref, {ref_any}}));
 
 class MKLDNNGraphDynBatchBatchNormTests: public MKLDNNGraphBatchNormTests {
 protected:
@@ -244,9 +245,6 @@ protected:
             if (MB < 2)
                 MB = 2;
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
             InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, 
                 {p.in.c * 4 * sizeof(float)}, InferenceEngine::C });
             weights->allocate();
@@ -258,8 +256,11 @@ protected:
                 }
             }
             InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
-            net_reader.SetWeights(weights_ptr);
-            InferenceEngine::CNNNetwork network = net_reader.getNetwork();
+
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
+
             auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
             ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
             InferenceEngine::ResponseDesc resp;
@@ -268,7 +269,7 @@ protected:
 
             MKLDNNGraphTestClass graph;
             graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w};
             InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float>({InferenceEngine::Precision::FP32, dims_src, InferenceEngine::NCHW});
@@ -283,7 +284,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
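For networks whose IR references external weights, the migration replaces
net_reader.SetWeights(weights_ptr) with passing the same U8 blob straight to
ReadNetwork, as the batch-norm hunks above do. A compact sketch (the helper
name and the byte-count parameter are ours):

    #include <ie_core.hpp>
    #include <string>
    #include <cstddef>

    InferenceEngine::CNNNetwork read_ir_with_weights(const std::string &model,
                                                     size_t weight_bytes) {
        auto weights = InferenceEngine::make_shared_blob<uint8_t>(
                {InferenceEngine::Precision::U8, {weight_bytes}, InferenceEngine::C});
        weights->allocate();
        // ... fill weights->buffer() exactly as the fixture does ...
        InferenceEngine::Core core;
        return core.ReadNetwork(model, weights);
    }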
index 2454cd9..2e5d388 100644 (file)
@@ -12,7 +12,7 @@
 #include <mkldnn_extension_utils.h>
 #include <unordered_set>
 #include <cnn_network_impl.hpp>
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 #include <ie_plugin_config.hpp>
 #include "tests_common.hpp"
 
@@ -111,11 +111,12 @@ protected:
             concat_test_params p = ::testing::WithParamInterface<concat_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
                 if (nodes[i]->getType() == MKLDNNPlugin::Concatenation) {
@@ -153,7 +154,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in2", src2));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -318,9 +319,10 @@ protected:
             if (MB < 2)
                 MB = 2;
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-            InferenceEngine::CNNNetwork network = net_reader.getNetwork();
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
+
             auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
             ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
             InferenceEngine::ResponseDesc resp;
@@ -329,7 +331,7 @@ protected:
 
             MKLDNNGraphTestClass graph;
             graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             InferenceEngine::SizeVector dims_src1 = p.in1;
             InferenceEngine::SizeVector dims_src2 = p.in2;
@@ -355,7 +357,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in2", src2));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -664,11 +666,12 @@ protected:
             two_concat_test_params p = ::testing::WithParamInterface<two_concat_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             InferenceEngine::SizeVector dims_src1 = p.in1;
             InferenceEngine::SizeVector dims_src2 = p.in2;
@@ -701,7 +704,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in3", src3));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             for (auto & it : out) {
@@ -946,11 +949,12 @@ protected:
             TestsCommon::SetUp();
             std::string model = model_t;
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             InferenceEngine::SizeVector dims_src1 = {1, 3, 2, 2};
             InferenceEngine::SizeVector dims_src2 = {1, 2, 2, 2};
@@ -971,7 +975,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in2", src2));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             for (auto & it : out) {
@@ -1102,8 +1106,8 @@ protected:
             concat_test_params p = ::testing::WithParamInterface<concat_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_THROW(net_reader.ReadNetwork(model.data(), model.length()), 
+            InferenceEngine::Core core;
+            ASSERT_THROW(core.ReadNetwork(model, InferenceEngine::Blob::CPtr()), 
                          InferenceEngine::details::InferenceEngineException);
         } catch (const InferenceEngine::details::InferenceEngineException &e) {
             FAIL() << e.what();
index f7c83c2..b812394 100644 (file)
@@ -12,7 +12,7 @@
 #include <mkldnn_extension_utils.h>
 #include <cnn_network_impl.hpp>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 #define XBYAK_NO_OP_NAMES
 #define XBYAK_UNDEF_JNL
@@ -244,9 +244,6 @@ protected:
             conv_test_params p = ::testing::WithParamInterface<conv_test_params>::GetParam();
             std::string model = getModel(p);
 
-            CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
             size_t blob_size = p.out_c * p.dims[1] / p.grp_c;
             for (auto k : p.kernel) {
                 blob_size *= k;
@@ -260,8 +257,9 @@ protected:
 
             TBlob<uint8_t>::Ptr weights_ptr = TBlob<uint8_t>::Ptr(weights);
 
-            net_reader.SetWeights(weights_ptr);
-            CNNNetwork network = net_reader.getNetwork();
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
             MKLDNNGraphTestClass graph;
             graph.CreateGraph(network);
@@ -428,9 +426,6 @@ protected:
             if (dims[0] < 2)
                 dims[0] = 2;
 
-            CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
             size_t blob_size = p.out_c * dims[1] / p.grp_c;
             for (auto k : p.kernel) {
                 blob_size *= k;
@@ -441,8 +436,10 @@ protected:
             fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
             TBlob<uint8_t>::Ptr weights_ptr = TBlob<uint8_t>::Ptr(weights);
 
-            net_reader.SetWeights(weights_ptr);
-            CNNNetwork network = net_reader.getNetwork();
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
+
             auto implNet = dynamic_cast<details::CNNNetworkImpl *>(&((ICNNNetwork&)network));
             ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
             ResponseDesc resp;
index de1b4e7..d06993d 100644 (file)
@@ -13,7 +13,7 @@
 #include <cnn_network_impl.hpp>
 #include "tests_common.hpp"
 
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 #include <ie_plugin_config.hpp>
 
 using namespace ::testing;
@@ -166,11 +166,12 @@ protected:
             crop_test_params p = ::testing::WithParamInterface<crop_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
@@ -197,7 +198,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -260,9 +261,9 @@ protected:
             if (MB < 2)
                 MB = 2;
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-            InferenceEngine::CNNNetwork network = net_reader.getNetwork();
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
             network.setBatchSize(MB);
 
             MKLDNNGraphTestClass graph;
@@ -283,7 +284,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
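
The dynamic-batch fixtures in this and the following files share the setup in
the hunk above: read the network, raise its batch, then build the MKLDNN graph
with dynamic batching enabled. Condensed into a hypothetical helper (a sketch;
MKLDNNGraphTestClass is the harness from these tests' test_graph.hpp):

#include <ie_core.hpp>
#include <ie_plugin_config.hpp>
#include "test_graph.hpp"

// Sketch of the shared dynamic-batch setup used across these fixtures.
void setUpDynBatchGraph(InferenceEngine::CNNNetwork& network, size_t MB,
                        MKLDNNGraphTestClass& graph) {
    network.setBatchSize(MB);  // raise the batch before graph creation
    graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED,
                        InferenceEngine::PluginConfigParams::YES}});
    graph.CreateGraph(network);
}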
index 4dceecd..d85b94c 100644 (file)
@@ -14,8 +14,9 @@
 #include "ir_gen_helper.hpp"
 #include "tests_common.hpp"
 
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 #include <ie_plugin_config.hpp>
+#include <ie_system_conf.h>
 
 using namespace InferenceEngine;
 using namespace ::testing;
@@ -263,9 +264,6 @@ protected:
             deconv_test_params p = ::testing::WithParamInterface<deconv_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
             size_t blob_size = p.out_c * (p.dims[1] / p.grp_c);
             for (auto k : p.kernel) {
                 blob_size *= k;
@@ -294,10 +292,13 @@ protected:
                 memcpy(model_blob_ptr, blb->buffer().as<uint8_t*>(), blb->byteSize());
                 model_blob_ptr += blb->byteSize();
             }
-            net_reader.SetWeights(model_blob);
+
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, model_blob));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
             auto& nodes = graph.getNodes();
             for (auto &node : nodes) {
                 if (node->getType() == MKLDNNPlugin::Deconvolution) {
@@ -330,7 +331,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -358,10 +359,14 @@ TEST_P(MKLDNNGraphDeconvolutionalTests, TestsDeconvolution) {}
 
 //  deconv_test_params(dims, kernel, strides, pads_begin, pads_end, out_c, grp_c, with_bias, auto_pad, num_prim_desc,
 //                     selectedTypes, preferTypes, comp)
+
+size_t expected_num_prim_desc = InferenceEngine::with_cpu_x86_avx2() ? 3 : 2;
+
+
 INSTANTIATE_TEST_CASE_P(
     TestDeconvolution, MKLDNNGraphDeconvolutionalTests,
     ::testing::Values(
-        /*0*/   deconv_test_params{ {1, 3, 3, 3}, {3, 3}, {1, 1}, {0, 0}, {0, 0}, 2, 1, false, "", 2, {MKLDNNPlugin::impl_desc_type::jit} },
+        /*0*/   deconv_test_params{{1, 3, 3, 3}, {3, 3}, {1, 1}, {0, 0}, {0, 0}, 2, 1, false, "", 2, {MKLDNNPlugin::impl_desc_type::jit} },
                 deconv_test_params{{3, 3, 3, 3}, {4, 3}, {1, 1}, {0, 0}, {0, 0}, 2, 1, false, "", 2, {MKLDNNPlugin::impl_desc_type::jit} },
                 deconv_test_params{{2, 8, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 8, 8, false, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
                 deconv_test_params{{2, 8, 5, 5}, {8, 8}, {4, 4}, {1, 1}, {0, 0}, 8, 8, false, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
@@ -384,20 +389,28 @@ INSTANTIATE_TEST_CASE_P(
                                    {MKLDNNPlugin::impl_desc_type::ref_any}, {MKLDNNPlugin::impl_desc_type::ref_any}},
                 deconv_test_params{{1, 6, 6, 5}, {3, 1}, {1, 1}, {1, 0}, {1, 0}, 9, 3, true, "", 2,
                                    {MKLDNNPlugin::impl_desc_type::ref_any}, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                deconv_test_params{{2, 24, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 24, 3, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit}},
+                deconv_test_params{{2, 24, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 24, 3, true, "",
+                                   InferenceEngine::with_cpu_x86_avx2() ? 4ul : 3ul,
+                                   {MKLDNNPlugin::impl_desc_type::jit}},
         /*10*/  deconv_test_params{{2, 48, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 48, 3, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit}},
                 deconv_test_params{{2, 48, 3, 3}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 192, 3, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit}},
                 deconv_test_params{{2, 24, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 24, 1, true, "", 3, {MKLDNNPlugin::impl_desc_type::jit}},
-                deconv_test_params{{2, 72, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 72, 3, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit}},
+                deconv_test_params{{2, 72, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 72, 3, true, "",
+                                   InferenceEngine::with_cpu_x86_avx2() ? 4ul : 3ul,
+                                   {MKLDNNPlugin::impl_desc_type::jit}},
                 deconv_test_params{{1, 12, 2, 2}, {4, 4}, {2, 2}, {1, 1}, {1, 1}, 12, 12, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit}},
-                deconv_test_params{{1, 32, 5, 5}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, 16, 1, true, "", 2, {MKLDNNPlugin::impl_desc_type::jit}},
+// In case of SSE or pure AVX there is no JIT implementation
+//                deconv_test_params{{1, 32, 5, 5}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, 16, 1, true, "",
+//                                   2, {MKLDNNPlugin::impl_desc_type::jit}},
                 deconv_test_params{{1, 48, 3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, 96, 3, true, "", 2, {MKLDNNPlugin::impl_desc_type::jit}},
         // 5D
         /*17*/  deconv_test_params{{1, 2, 8, 5, 5}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, 4, 1, true, "", 4,
                                    {MKLDNNPlugin::impl_desc_type::ref_any}, {MKLDNNPlugin::impl_desc_type::ref_any} },
                 deconv_test_params{{1, 6, 5, 5, 5}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, 9, 3, true, "", 2,
                                    {MKLDNNPlugin::impl_desc_type::ref_any}, {MKLDNNPlugin::impl_desc_type::ref_any} },
-                deconv_test_params{{2, 24, 5, 5, 5}, {4, 4, 4}, {2, 2, 1}, {1, 1, 1}, {0, 0, 0}, 24, 3, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit}},
+                deconv_test_params{{2, 24, 5, 5, 5}, {4, 4, 4}, {2, 2, 1}, {1, 1, 1}, {0, 0, 0}, 24, 3, true, "",
+                                   InferenceEngine::with_cpu_x86_avx2() ? 4ul : 3ul,
+                                   {MKLDNNPlugin::impl_desc_type::jit}},
                 deconv_test_params{{2, 48, 5, 5, 5}, {4, 4, 4}, {2, 2, 1}, {1, 1, 1}, {0, 0, 0}, 48, 3, true, "", 4, {MKLDNNPlugin::impl_desc_type::jit}}
         // Blocked, with biases
         // TODO support on jit
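
The InferenceEngine::with_cpu_x86_avx2() ternaries above all encode the same
rule: the JIT deconvolution kernel contributes one extra primitive descriptor
only on AVX2-capable hosts, so the expected count drops from 4 to 3 elsewhere.
Factored into a hypothetical helper (sketch only):

#include <ie_system_conf.h>

// Mirrors the ternaries in the parameter table: the AVX2 JIT kernel adds one
// primitive descriptor on top of the baseline set.
inline size_t expectedPrimDescCount(size_t base) {
    return InferenceEngine::with_cpu_x86_avx2() ? base + 1 : base;
}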
@@ -445,9 +458,6 @@ protected:
             size_t MB = p.dims[0];
             if (MB < 2)
                 MB = 2;
-
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
             
             size_t blob_size = 1;
             for (auto k : p.kernel) {
@@ -477,9 +487,11 @@ protected:
                 memcpy(model_blob_ptr, blb->buffer().as<uint8_t*>(), blb->byteSize());
                 model_blob_ptr += blb->byteSize();
             }
-            net_reader.SetWeights(model_blob);
+
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, model_blob));
 
-            InferenceEngine::CNNNetwork network = net_reader.getNetwork();
             auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
             ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
             InferenceEngine::ResponseDesc resp;
@@ -489,7 +501,7 @@ protected:
 
             MKLDNNGraphTestClass graph;
             graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float>(
                     {InferenceEngine::Precision::FP32, p.dims, InferenceEngine::TensorDesc::getLayoutByDims(p.dims)});
@@ -504,7 +516,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
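
When a model carries several weight blobs (weights plus biases per layer), the
tests now pack them into the single blob that Core::ReadNetwork accepts, as in
the memcpy loops above. A self-contained sketch of that packing, with
packWeights as a hypothetical name:

#include <ie_core.hpp>
#include <cstring>
#include <vector>

// Concatenates per-layer blobs into one U8 blob, mirroring the memcpy loops
// that feed Core::ReadNetwork in the deconvolution tests.
InferenceEngine::TBlob<uint8_t>::Ptr packWeights(
        const std::vector<InferenceEngine::Blob::Ptr>& blobs) {
    size_t total = 0;
    for (const auto& b : blobs) total += b->byteSize();
    auto packed = InferenceEngine::make_shared_blob<uint8_t>(
            {InferenceEngine::Precision::U8, {total}, InferenceEngine::C});
    packed->allocate();
    uint8_t* dst = packed->buffer().as<uint8_t*>();
    for (const auto& b : blobs) {
        std::memcpy(dst, b->buffer().as<uint8_t*>(), b->byteSize());
        dst += b->byteSize();
    }
    return packed;
}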
index 50b830e..2c11585 100644 (file)
 #include <mkldnn_extension_utils.h>
 #include <cnn_network_impl.hpp>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 #include <ie_plugin_config.hpp>
+#include <ie_system_conf.h>
 
-using namespace ::testing;
-using namespace std;
+using namespace MKLDNNPlugin;
 using namespace mkldnn;
+using namespace ::testing;
+
+using std::vector;
+using std::function;
 
 struct depthwise_test_params {
-    mkldnn::algorithm alg;
+    algorithm alg;
 
     // Formats: NC, NCHW, NCDHW
     vector<size_t> dims;
@@ -184,9 +188,6 @@ protected:
             depthwise_test_params p = ::testing::WithParamInterface<depthwise_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
             size_t weightSize = 2 * p.dims[1] * sizeof(float);
             InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, 
                 {weightSize}, InferenceEngine::C });
@@ -195,10 +196,12 @@ protected:
 
             InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-            net_reader.SetWeights(weights_ptr);
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
                 if (nodes[i]->getType() == MKLDNNPlugin::Depthwise) {
@@ -233,7 +236,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -259,24 +262,26 @@ protected:
 
 TEST_P(MKLDNNGraphDepthwiseTests, TestsDepthwise) {}
 
+const size_t num_2d_impl = InferenceEngine::with_cpu_x86_avx2() ? 3 : 2;
+
 INSTANTIATE_TEST_CASE_P(
         TestsDepthwise, MKLDNNGraphDepthwiseTests,
         ::testing::Values(
                 // 2D
-                depthwise_test_params{depthwise_scale_shift, {128, 32}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
-                depthwise_test_params{depthwise_scale_shift, {4, 3}, true, 3, MKLDNNPlugin::impl_desc_type::jit},
-                depthwise_test_params{depthwise_scale_shift, {1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
-                depthwise_test_params{depthwise_scale_shift, {37, 35}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
-                depthwise_test_params{depthwise_prelu, {128, 32}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
-                depthwise_test_params{depthwise_prelu, {4, 3}, true, 3, MKLDNNPlugin::impl_desc_type::jit},
-                depthwise_test_params{depthwise_prelu, {1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
-                depthwise_test_params{depthwise_prelu, {37, 35}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
-                depthwise_test_params{depthwise_scale_shift, {128, 32}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                depthwise_test_params{depthwise_scale_shift, {4, 3}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                depthwise_test_params{depthwise_scale_shift, {1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                depthwise_test_params{depthwise_prelu, {128, 32}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                depthwise_test_params{depthwise_prelu, {4, 3}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                depthwise_test_params{depthwise_prelu, {1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+                depthwise_test_params{depthwise_scale_shift, {128, 32}, false, num_2d_impl, jit},
+                depthwise_test_params{depthwise_scale_shift, {4,   3 }, true,  num_2d_impl, jit},
+                depthwise_test_params{depthwise_scale_shift, {1,   1 }, false, num_2d_impl, jit},
+                depthwise_test_params{depthwise_scale_shift, {37,  35}, false, num_2d_impl, jit},
+                depthwise_test_params{depthwise_prelu,       {128, 32}, false, num_2d_impl, jit},
+                depthwise_test_params{depthwise_prelu,       {4,   3 }, true,  num_2d_impl, jit},
+                depthwise_test_params{depthwise_prelu,       {1,   1 }, false, num_2d_impl, jit},
+                depthwise_test_params{depthwise_prelu,       {37,  35}, false, num_2d_impl, jit},
+                depthwise_test_params{depthwise_scale_shift, {128, 32}, false, num_2d_impl, ref, {ref_any}},
+                depthwise_test_params{depthwise_scale_shift, {4,   3 }, true,  num_2d_impl, ref, {ref_any}},
+                depthwise_test_params{depthwise_scale_shift, {1,   1 }, false, num_2d_impl, ref, {ref_any}},
+                depthwise_test_params{depthwise_prelu,       {128, 32}, false, num_2d_impl, ref, {ref_any}},
+                depthwise_test_params{depthwise_prelu,       {4,   3 }, true,  num_2d_impl, ref, {ref_any}},
+                depthwise_test_params{depthwise_prelu,       {1,   1 }, false, num_2d_impl, ref, {ref_any}},
                 // 4D
                 depthwise_test_params{depthwise_scale_shift, {1, 32, 128, 256}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
                 depthwise_test_params{depthwise_scale_shift, {4, 3, 228, 228}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
@@ -329,9 +334,6 @@ protected:
             if (MB < 2)
                 MB = 2;
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
             InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, 
                 {p.dims[1] * 4 * sizeof(float)}, InferenceEngine::C });
             weights->allocate();
@@ -343,8 +345,11 @@ protected:
                 }
             }
             InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
-            net_reader.SetWeights(weights_ptr);
-            InferenceEngine::CNNNetwork network = net_reader.getNetwork();
+
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
+
             auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
             ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
             InferenceEngine::ResponseDesc resp;
@@ -354,7 +359,7 @@ protected:
 
             MKLDNNGraphTestClass graph;
             graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             InferenceEngine::SizeVector dims_src = p.dims;
             InferenceEngine::Layout layout = InferenceEngine::ANY;
@@ -378,7 +383,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index 236aaa4..4e4dfbf 100644 (file)
@@ -16,7 +16,7 @@
 #include <mkldnn_extension_utils.h>
 #include <cnn_network_impl.hpp>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 using namespace ::testing;
 using namespace std;
@@ -367,11 +367,12 @@ protected:
             eltwise_test_params p = ::testing::WithParamInterface<eltwise_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
@@ -445,7 +446,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in3", src3));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -640,11 +641,12 @@ protected:
             eltwise_test_params p = ::testing::WithParamInterface<eltwise_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
@@ -684,7 +686,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in2", src2));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -791,9 +793,10 @@ protected:
             if (MB < 2)
                 MB = 2;
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-            InferenceEngine::CNNNetwork network = net_reader.getNetwork();
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
+
             auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
             ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
             InferenceEngine::ResponseDesc resp;
@@ -802,7 +805,7 @@ protected:
 
             MKLDNNGraphTestClass graph;
             graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             InferenceEngine::SizeVector dims_src1 = p.dims1;
             InferenceEngine::Layout layout1 = InferenceEngine::ANY;
@@ -866,7 +869,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in3", src3));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -990,11 +993,12 @@ protected:
             precisions_test_2params p = ::testing::WithParamInterface<precisions_test_2params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            ASSERT_NO_THROW(graph.CreateGraph(net_reader.getNetwork()));
+            ASSERT_NO_THROW(graph.CreateGraph(network));
 
             auto& nodes = graph.getNodes();
             nodes = graph.getNodes();
index bf26ef1..5c4da1c 100644 (file)
@@ -13,7 +13,7 @@
 #include <cnn_network_impl.hpp>
 #include "tests_common.hpp"
 
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 #include <ie_plugin_config.hpp>
 
 using namespace ::testing;
@@ -156,9 +156,6 @@ protected:
             fc_test_params p = ::testing::WithParamInterface<fc_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
             size_t weights_size = p.out_c;
             for (int i = 1; i < p.in_dims.size(); i++) {
                 weights_size *= p.in_dims[i];
@@ -170,10 +167,12 @@ protected:
             fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
             InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-            net_reader.SetWeights(weights_ptr);
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
                 if (nodes[i]->getType() == MKLDNNPlugin::FullyConnected) {
@@ -210,7 +209,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -260,9 +259,6 @@ class MKLDNNGraphDynBatchFullyConnectedTests: public MKLDNNGraphFullyConnectedTe
             if (MB < 2)
                 MB = 2;
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
             size_t weights_size = p.out_c;
             for (int i = 1; i < p.in_dims.size(); i++) {
                 weights_size *= p.in_dims[i];
@@ -272,8 +268,11 @@ class MKLDNNGraphDynBatchFullyConnectedTests: public MKLDNNGraphFullyConnectedTe
             weights->allocate();
             fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
             InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
-            net_reader.SetWeights(weights_ptr);
-            InferenceEngine::CNNNetwork network = net_reader.getNetwork();
+
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
+
             auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
             ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
             InferenceEngine::ResponseDesc resp;
@@ -282,7 +281,7 @@ class MKLDNNGraphDynBatchFullyConnectedTests: public MKLDNNGraphFullyConnectedTe
 
             MKLDNNGraphTestClass graph;
             graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             InferenceEngine::SizeVector dims_src = p.in_dims;
             InferenceEngine::Layout layout = InferenceEngine::ANY;
@@ -308,7 +307,7 @@ class MKLDNNGraphDynBatchFullyConnectedTests: public MKLDNNGraphFullyConnectedTe
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index ea37a55..53b9c05 100644 (file)
@@ -12,7 +12,7 @@
 #include <mkldnn_extension_utils.h>
 #include <cnn_network_impl.hpp>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 #include <ie_plugin_config.hpp>
 
 using namespace ::testing;
@@ -213,11 +213,12 @@ protected:
             gemm_test_params p = ::testing::WithParamInterface<gemm_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
@@ -268,7 +269,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in3", src3));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -398,9 +399,10 @@ protected:
             if (MB < 2)
                 MB = 2;
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-            InferenceEngine::CNNNetwork network = net_reader.getNetwork();
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
+
             auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
             ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
             InferenceEngine::ResponseDesc resp;
@@ -409,7 +411,7 @@ protected:
 
             MKLDNNGraphTestClass graph;
             graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             auto m_A = p.transposeA ? p.K : p.M;
             auto n_A = p.transposeA ? p.M : p.K;
@@ -448,7 +450,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in3", src3));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -572,11 +574,12 @@ protected:
             gemm_test_params p = ::testing::WithParamInterface<gemm_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
@@ -618,7 +621,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in2", src2));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index 92961e3..27ea656 100644 (file)
@@ -10,7 +10,7 @@
 
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 
 using namespace ::testing;
@@ -135,11 +135,12 @@ protected:
             input_test_params p = ::testing::WithParamInterface<input_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
@@ -271,9 +272,6 @@ protected:
             TestsCommon::SetUp();
             std::string model = model_t;
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
             InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, 
                 {72}, InferenceEngine::C });
             weights->allocate();
@@ -300,17 +298,19 @@ protected:
             }
             InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-            net_reader.SetWeights(weights_ptr);
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
             auto& nodes = graph.getNodes();
             ASSERT_LE(3, nodes.size());
 
             InferenceEngine::BlobMap srcs;
             srcs["in1"] = src1;
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -431,11 +431,12 @@ protected:
             input_layout_test_params p = ::testing::WithParamInterface<input_layout_test_params>::GetParam();
             std::string model = model_t;
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, { 1, 3, 2, 2 }, p.layout);
             InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float>(desc);
@@ -444,7 +445,7 @@ protected:
             InferenceEngine::BlobMap srcs;
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("input", src));
 
-            InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+            InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
             InferenceEngine::TBlob<float>::Ptr output;
             output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
index bd0c15c..f20a99b 100644 (file)
@@ -7,7 +7,7 @@
 #include <gmock/gmock-spec-builders.h>
 #include "mkldnn_graph.h"
 #include "mkldnn_exec_network.h"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 #include "test_graph.hpp"
 
@@ -43,18 +43,18 @@ public:
 
 class MKLDNNGraphLeaksTests: public ::testing::Test {
 protected:
-    void addOutputToEachNode(InferenceEngine::CNNNetReader& net_reader, std::vector<std::string>& new_outputs,
+    void addOutputToEachNode(InferenceEngine::CNNNetwork& network, std::vector<std::string>& new_outputs,
                              InferenceEngine::CNNLayerPtr cnnLayer) {
-        auto outputs = net_reader.getNetwork().getOutputsInfo();
+        auto outputs = network.getOutputsInfo();
         if (outputs.find(cnnLayer->name) != outputs.end())
             return;
 
-        net_reader.getNetwork().addOutput(cnnLayer->name);
+        network.addOutput(cnnLayer->name);
         new_outputs.push_back(cnnLayer->name);
 
         for (const auto &layer : cnnLayer->outData) {
             for (const auto &data : layer->getInputTo()) {
-                addOutputToEachNode(net_reader, new_outputs, data.second);
+                addOutputToEachNode(network, new_outputs, data.second);
             }
         }
     }
@@ -72,7 +72,6 @@ protected:
 
 TEST_F(MKLDNNGraphLeaksTests, MKLDNN_not_release_outputs_fp32) {
     try {
-        InferenceEngine::CNNNetReader net_reader;
         std::string model = "<net name=\"LeNet\" version=\"2\" batch=\"1\">\n"
                 "    <layers>\n"
                 "        <layer name=\"data\" type=\"Input\" precision=\"FP32\" id=\"0\">\n"
@@ -243,39 +242,39 @@ TEST_F(MKLDNNGraphLeaksTests, MKLDNN_not_release_outputs_fp32) {
                 "</net>";
 
         size_t weights_size = 1724320;
-        net_reader.ReadNetwork(model.c_str(), model.size());
 
         InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {weights_size}, InferenceEngine::C });
         weights->allocate();
         fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
         InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-        net_reader.SetWeights(weights_ptr);
+        InferenceEngine::Core core;
+        InferenceEngine::CNNNetwork network;
+        ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
-        auto outputs = net_reader.getNetwork().getOutputsInfo();
+        auto outputs = network.getOutputsInfo();
         std::vector<std::string> new_outputs;
 
-        for (auto input : net_reader.getNetwork().getInputsInfo()) {
+        for (auto input : network.getInputsInfo()) {
             for (const auto &layer : input.second->getInputData()->getInputTo()) {
-                addOutputToEachNode(net_reader, new_outputs, layer.second);
+                addOutputToEachNode(network, new_outputs, layer.second);
             }
         }
 
-        ASSERT_NE(1, net_reader.getNetwork().getOutputsInfo().size());
+        ASSERT_NE(1, network.getOutputsInfo().size());
 
         std::shared_ptr<MKLDNNTestEngine> score_engine(new MKLDNNTestEngine());
         InferenceEngine::IExecutableNetwork::Ptr exeNetwork1;
-        ASSERT_NO_THROW(score_engine->LoadNetwork(exeNetwork1, net_reader.getNetwork(), {}));
+        ASSERT_NO_THROW(score_engine->LoadNetwork(exeNetwork1, network, {}));
 
         size_t modified_outputs_size = score_engine->getGraph(exeNetwork1).GetOutputNodes().size();
 
-        InferenceEngine::CNNNetReader net_reader2;
-        net_reader2.ReadNetwork(model.c_str(), model.size());
-        net_reader2.SetWeights(weights_ptr);
-        ASSERT_EQ(1, net_reader2.getNetwork().getOutputsInfo().size());
+        InferenceEngine::CNNNetwork network2;
+        ASSERT_NO_THROW(network2 = core.ReadNetwork(model, weights_ptr));
+        ASSERT_EQ(1, network2.getOutputsInfo().size());
 
         InferenceEngine::IExecutableNetwork::Ptr exeNetwork2;
-        ASSERT_NO_THROW(score_engine->LoadNetwork(exeNetwork2, net_reader2.getNetwork(), {}));
+        ASSERT_NO_THROW(score_engine->LoadNetwork(exeNetwork2, network2, {}));
 
         size_t original_outputs_size = score_engine->getGraph(exeNetwork2).GetOutputNodes().size();
 
index 1b64986..b6a65d6 100644 (file)
@@ -12,7 +12,7 @@
 #include <mkldnn_extension_utils.h>
 #include <cnn_network_impl.hpp>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 #include <ie_plugin_config.hpp>
 
 using namespace ::testing;
@@ -144,11 +144,12 @@ protected:
             lrn_test_params p = ::testing::WithParamInterface<lrn_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
                 if (nodes[i]->getType() == MKLDNNPlugin::Lrn) {
@@ -179,7 +180,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -246,9 +247,10 @@ protected:
             if (MB < 2)
                 MB = 2;
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-            InferenceEngine::CNNNetwork network = net_reader.getNetwork();
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
+
             auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
             ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
             InferenceEngine::ResponseDesc resp;
@@ -257,7 +259,7 @@ protected:
 
             MKLDNNGraphTestClass graph;
             graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w};
 
@@ -274,7 +276,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index 8aa7573..577222f 100644 (file)
@@ -14,7 +14,7 @@
 #include "tests_common.hpp"
 #include <nodes/base.hpp>
 
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 #include <ie_plugin_config.hpp>
 
 using namespace ::testing;
@@ -255,11 +255,12 @@ protected:
             permute_test_params p = initialize_permute_test_params();
             std::string model = getModel(p);
 
-            CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            Core core;
+            CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
                 if (nodes[i]->getType() == MKLDNNPlugin::Permute) {
@@ -284,7 +285,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             BlobMap outputBlobs;
 
             auto item = *out.begin();
@@ -545,9 +546,10 @@ protected:
                 MB = 2;
             p.dims[0] = MB;
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-            InferenceEngine::CNNNetwork network = net_reader.getNetwork();
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
+
             auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
             ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
             InferenceEngine::ResponseDesc resp;
@@ -556,7 +558,7 @@ protected:
 
             MKLDNNGraphTestClass graph;
             graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float>({InferenceEngine::Precision::FP32, p.dims, InferenceEngine::TensorDesc::getLayoutByDims(p.dims)});
             src->allocate();
@@ -571,7 +573,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index a388c6e..a9ccb11 100644 (file)
@@ -22,7 +22,7 @@
 #include "ir_gen_helper.hpp"
 #include <math.h>
 
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 using namespace InferenceEngine;
 using namespace ::testing;
@@ -294,11 +294,12 @@ protected:
             pooling_test_params p = ::testing::WithParamInterface<pooling_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
                 if (nodes[i]->getType() == MKLDNNPlugin::Pooling) {
@@ -335,7 +336,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -439,9 +440,10 @@ protected:
             if (MB < 2)
                 MB = 2;
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-            InferenceEngine::CNNNetwork network = net_reader.getNetwork();
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
+
             auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
             ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
             InferenceEngine::ResponseDesc resp;
@@ -450,7 +452,7 @@ protected:
 
             MKLDNNGraphTestClass graph;
             graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
 
             InferenceEngine::Layout layout = ANY;
@@ -476,7 +478,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index 28b72dd..f382a2e 100644 (file)
@@ -13,7 +13,7 @@
 #include <cnn_network_impl.hpp>
 #include "tests_common.hpp"
 
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 #include <ie_plugin_config.hpp>
 
 using namespace ::testing;
@@ -114,11 +114,12 @@ protected:
             power_test_params p = ::testing::WithParamInterface<power_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
                 if (nodes[i]->getType() == MKLDNNPlugin::Power) {
@@ -146,7 +147,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -271,9 +272,10 @@ protected:
             if (MB < 2)
                 MB = 2;
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-            InferenceEngine::CNNNetwork network = net_reader.getNetwork();
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
+
             auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
             ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
             InferenceEngine::ResponseDesc resp;
@@ -282,7 +284,7 @@ protected:
 
             MKLDNNGraphTestClass graph;
             graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w};
 
@@ -299,7 +301,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index 7743e10..a234a2d 100644 (file)
@@ -11,7 +11,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 
 using namespace ::testing;
@@ -140,11 +140,12 @@ protected:
             relu_test_params p = ::testing::WithParamInterface<relu_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
                 if (nodes[i]->getType() == MKLDNNPlugin::Activation) {
@@ -181,7 +182,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index 9d61427..b601fe2 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <mkldnn_extension_mngr.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 #include "unit_test_utils/mocks/mock_error_listener.hpp"
 
@@ -82,9 +82,6 @@ TEST_F(MKLDNNGraphReorderTests, CreateReorder) {
 </Net>
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8,
                                                                                    {(1 * 1 * 17 * 9 / 1 + 17)
                                                       * sizeof(float)}, InferenceEngine::C });
@@ -92,10 +89,12 @@ TEST_F(MKLDNNGraphReorderTests, CreateReorder) {
     fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-    net_reader.SetWeights(weights_ptr);
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     auto& nodes = graph.getNodes();
     for (int i = 0; i < nodes.size(); i++) {
@@ -194,9 +193,6 @@ TEST_F(MKLDNNGraphReorderTests, CreateInPlaceReorder) {
 </Net>
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {24}, InferenceEngine::C });
     weights->allocate();
     float *data = weights->buffer().as<float *>();
@@ -205,12 +201,15 @@ TEST_F(MKLDNNGraphReorderTests, CreateInPlaceReorder) {
         data[i] = 2;
     }
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
+
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
-    net_reader.SetWeights(weights_ptr);
-    net_reader.getNetwork().addOutput("reshape1");
+    network.addOutput("reshape1");
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     InferenceEngine::SizeVector dims_src = {1, 9, 16, 32};
 
@@ -231,7 +230,7 @@ TEST_F(MKLDNNGraphReorderTests, CreateInPlaceReorder) {
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
     InferenceEngine::OutputsDataMap out;
-    out = net_reader.getNetwork().getOutputsInfo();
+    out = network.getOutputsInfo();
     InferenceEngine::BlobMap outputBlobs;
 
     auto it = out.begin();
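When the topology does need weights, the tests build a flat U8 blob sized in bytes and hand it straight to ReadNetwork; the reader slices it according to the per-layer offsets recorded in the IR. A sketch of that setup, assuming make_shared_blob in place of the raw TBlob allocation used above (byte count and fill value are illustrative):

    #include <ie_core.hpp>
    #include <algorithm>
    #include <string>

    InferenceEngine::CNNNetwork readWithWeights(const std::string& model,
                                                size_t weightByteSize) {
        auto weights = InferenceEngine::make_shared_blob<uint8_t>(
            {InferenceEngine::Precision::U8, {weightByteSize},
             InferenceEngine::Layout::C});
        weights->allocate();
        // Deterministic filler; the tests use their fill_data() helpers instead.
        std::fill_n(weights->buffer().as<uint8_t*>(), weightByteSize, 0);

        InferenceEngine::Core core;
        return core.ReadNetwork(model, weights);
    }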
index 5b36e80..7ffadd2 100644 (file)
@@ -12,7 +12,7 @@
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
 
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 using namespace ::testing;
 using namespace std;
@@ -116,11 +116,12 @@ protected:
             reshape_test_params p = ::testing::WithParamInterface<reshape_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
                 if (nodes[i]->getType() == MKLDNNPlugin::Reshape) {
@@ -146,7 +147,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index 4c416d6..01ef525 100644 (file)
@@ -11,8 +11,8 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
-
+#include <ie_core.hpp>
+#include <ie_system_conf.h>
 
 using namespace ::testing;
 using namespace std;
@@ -233,11 +233,12 @@ protected:
             roi_pooling_test_params p = ::testing::WithParamInterface<roi_pooling_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
                 if (nodes[i]->getType() == MKLDNNPlugin::ROIPooling) {
@@ -276,7 +277,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in2", roi));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -302,9 +303,16 @@ protected:
 
 TEST_P(MKLDNNGraphRoiPoolingTests, TestsRoiPooling) {}
 
+const size_t expect_num_impl = InferenceEngine::with_cpu_x86_avx2() ? 5 : 4;
 
 INSTANTIATE_TEST_CASE_P(
         TestsRoiPooling, MKLDNNGraphRoiPoolingTests,
         ::testing::Values(
                 roi_pooling_test_params{
-                        {1, 256, 39, 64}, {150, 5}, 6, 6, 0.0625f, 5, MKLDNNPlugin::impl_desc_type::jit}));
+                        {1, 256, 39, 64},  // in1
+                        {150, 5},          // in2
+                        6, 6,              // pool H and W
+                        0.0625f,           // spatial_scale
+                        expect_num_impl,   // num_prim_desc (platform dependent)
+                        MKLDNNPlugin::impl_desc_type::jit
+                }));
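The expected number of primitive descriptors is now computed at run time rather than hard-coded, because the jit implementation is only registered on AVX2-capable CPUs. <ie_system_conf.h> exposes the CPU-feature predicates used for this; a sketch of the dispatch (the sse42 fallback branch is an assumption for illustration, the 5-vs-4 counts mirror the instantiation above):

    #include <ie_system_conf.h>

    size_t expectedRoiPoolingImpls() {
        if (InferenceEngine::with_cpu_x86_avx2())
            return 5;   // jit_avx2 kernel adds one implementation
        if (InferenceEngine::with_cpu_x86_sse42())
            return 4;   // sse42 plus reference paths
        return 4;       // reference paths only
    }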
index a62a46e..e7ea4f2 100644 (file)
@@ -7,7 +7,7 @@
 #include "mkldnn_graph.h"
 
 #include "test_graph.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
@@ -374,11 +374,12 @@ protected:
             simplernms_test_params p = ::testing::WithParamInterface<simplernms_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
                 if (nodes[i]->getType() == MKLDNNPlugin::SimplerNMS) {
@@ -433,7 +434,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in3", src_info));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index a75cc8a..af696c0 100644 (file)
@@ -13,7 +13,7 @@
 #include <cnn_network_impl.hpp>
 #include "tests_common.hpp"
 
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 #include <ie_plugin_config.hpp>
 
 using namespace ::testing;
@@ -235,11 +235,13 @@ protected:
             TestsCommon::SetUp();
             softmax_test_params p = ::testing::WithParamInterface<softmax_test_params>::GetParam();
             std::string model = getModel(p);
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+

+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
                 if (nodes[i]->getType() == MKLDNNPlugin::SoftMax) {
@@ -276,7 +278,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -335,9 +337,10 @@ protected:
             if (MB < 2)
                 MB = 2;
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-            InferenceEngine::CNNNetwork network = net_reader.getNetwork();
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
+
             auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
             ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
             InferenceEngine::ResponseDesc resp;
@@ -346,7 +349,7 @@ protected:
 
             MKLDNNGraphTestClass graph;
             graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             InferenceEngine::Layout layout = InferenceEngine::ANY;
             switch (p.dims.size()) {
@@ -371,7 +374,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
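The *DynBatch variants keep the same read pattern but additionally enable dynamic batching on the test graph and raise the network's batch before graph creation. A condensed sketch of that scaffold, assuming MKLDNNGraphTestClass from the local test_graph.hpp (helper name illustrative):

    #include <ie_core.hpp>
    #include <ie_plugin_config.hpp>

    void buildDynBatchGraph(const std::string& model, size_t maxBatch) {
        InferenceEngine::Core core;
        InferenceEngine::CNNNetwork network =
            core.ReadNetwork(model, InferenceEngine::Blob::CPtr());
        network.setBatchSize(maxBatch);  // upper bound the graph is built for

        MKLDNNGraphTestClass graph;
        graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED,
                            InferenceEngine::PluginConfigParams::YES}});
        graph.CreateGraph(network);
        // The tests then infer at both the full and a reduced batch and
        // compare against a reference computed per batch value.
    }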
index 1e9c5bc..f1e4138 100644 (file)
@@ -13,7 +13,7 @@
 #include <cnn_network_impl.hpp>
 #include "tests_common.hpp"
 
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 #include <ie_plugin_config.hpp>
 
 using namespace ::testing;
@@ -168,11 +168,12 @@ protected:
             split_test_params p = ::testing::WithParamInterface<split_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            net_reader.ReadNetwork(model.data(), model.length());
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr());
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
                 if (nodes[i]->getType() == MKLDNNPlugin::Split) {
@@ -186,7 +187,7 @@ protected:
             }
             ASSERT_LE(3, nodes.size());
 
-            InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float>(net_reader.getNetwork().getInputsInfo().begin()->second->getTensorDesc());
+            InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float>(network.getInputsInfo().begin()->second->getTensorDesc());
             src->allocate();
             fill_data(src->buffer(), src->size());
 
@@ -199,7 +200,7 @@ protected:
                 FAIL() << "Cannot cast blob to TBlob<float>.";
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
             std::vector<InferenceEngine::TBlob<float>> dst_refs;
             for (auto& item : out) {
@@ -411,9 +412,10 @@ protected:
             if (MB < 2)
                 MB = 2;
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-            InferenceEngine::CNNNetwork network = net_reader.getNetwork();
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
+
             auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
             ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
             InferenceEngine::ResponseDesc resp;
@@ -422,9 +424,9 @@ protected:
 
             MKLDNNGraphTestClass graph;
             graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
-            InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float>(net_reader.getNetwork().getInputsInfo().begin()->second->getTensorDesc());
+            InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float>(network.getInputsInfo().begin()->second->getTensorDesc());
             src->allocate();
             fill_data(src->buffer(), src->size());
 
@@ -437,7 +439,7 @@ protected:
                 FAIL() << "Cannot cast blob to TBlob<float>.";
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
             auto it = out.begin();
 
index 0b0691d..4fb88dc 100644 (file)
@@ -12,7 +12,7 @@
 #include <mkldnn_extension_utils.h>
 #include <cnn_network_impl.hpp>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 #include <ie_plugin_config.hpp>
 
 
@@ -132,11 +132,12 @@ protected:
             tile_test_params p = ::testing::WithParamInterface<tile_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
             auto& nodes = graph.getNodes();
             for (int i = 0; i < nodes.size(); i++) {
                 if (nodes[i]->getType() == MKLDNNPlugin::Tile) {
@@ -164,7 +165,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -216,9 +217,10 @@ protected:
             if (MB < 2)
                 MB = 2;
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-            InferenceEngine::CNNNetwork network = net_reader.getNetwork();
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
+
             auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
             ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
             InferenceEngine::ResponseDesc resp;
@@ -227,7 +229,7 @@ protected:
 
             MKLDNNGraphTestClass graph;
             graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w};
 
@@ -244,7 +246,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
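After graph creation, every test runs the same check: named input blobs in, first declared output out. A sketch of that repeated fragment as a helper (the input name "in1" matches the generated IRs above; MKLDNNGraphTestClass is the local test helper):

    #include <ie_core.hpp>

    void inferFirstOutput(MKLDNNGraphTestClass& graph,
                          InferenceEngine::CNNNetwork& network,
                          const InferenceEngine::Blob::Ptr& src) {
        InferenceEngine::BlobMap srcs;
        srcs["in1"] = src;

        InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
        auto item = *out.begin();

        auto output = InferenceEngine::make_shared_blob<float>(
            item.second->getTensorDesc());
        output->allocate();

        InferenceEngine::BlobMap outputBlobs;
        outputBlobs[item.first] = output;

        graph.Infer(srcs, outputBlobs);
        // Callers compare `output` against a reference implementation.
    }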
index fd2a045..73f02e7 100644 (file)
@@ -11,7 +11,7 @@
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
 #include "ir_gen_helper.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 using namespace ::testing;
 using namespace std;
@@ -149,9 +149,6 @@ protected:
             conv_concat_params p = ::testing::WithParamInterface<conv_concat_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
             size_t blob_size = p.conv.out_c * p.in[1] / p.conv.group;
             for (size_t i = 0; i < p.conv.kernel.size(); i++) {
                 blob_size *= p.conv.kernel[i];
@@ -181,9 +178,11 @@ protected:
                 memcpy(model_blob_ptr, blb->buffer().as<uint8_t*>(), blb->byteSize());
                 model_blob_ptr += blb->byteSize();
             }
-            net_reader.SetWeights(model_blob);
+
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, model_blob));
 
-            auto network = net_reader.getNetwork();
             MKLDNNGraphTestClass graph;
             graph.CreateGraph(network);
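The conv/deconv-concat tests synthesize weights per layer and then concatenate them into the single flat blob ReadNetwork expects; the blob order must match the offsets the generated IR records. A sketch of that packing step (function name illustrative):

    #include <ie_core.hpp>
    #include <cstring>
    #include <vector>

    InferenceEngine::Blob::Ptr packBlobs(
            const std::vector<InferenceEngine::Blob::Ptr>& parts) {
        size_t total = 0;
        for (const auto& blb : parts)
            total += blb->byteSize();

        auto packed = InferenceEngine::make_shared_blob<uint8_t>(
            {InferenceEngine::Precision::U8, {total}, InferenceEngine::Layout::C});
        packed->allocate();

        uint8_t* dst = packed->buffer().as<uint8_t*>();
        for (const auto& blb : parts) {
            std::memcpy(dst, blb->buffer().as<uint8_t*>(), blb->byteSize());
            dst += blb->byteSize();
        }
        return packed;
    }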
 
index 7a36ce1..7dddde6 100644 (file)
@@ -11,7 +11,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 using namespace ::testing;
 using namespace std;
@@ -237,9 +237,6 @@ protected:
             conv_depthwise_fusing_test_params p = ::testing::WithParamInterface<conv_depthwise_fusing_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
             size_t conv_w_size = p.conv.krn_w * p.conv.krn_h * p.conv.out_c * p.in.c / p.conv.grp_c + p.conv.out_c; // conv weights + biases
 
             size_t array_size =  p.isBroadcast ? 1 : p.conv.out_c;
@@ -251,10 +248,12 @@ protected:
             CommonTestUtils::fill_data_sine((float *) weights->buffer(), weights->size() / sizeof(float), 5, 10, 0.5);
             InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-            net_reader.SetWeights(weights_ptr);
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             auto& nodes = graph.getNodes();
             nodes = graph.getNodes();
@@ -289,7 +288,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index bf61b3f..8768b78 100644 (file)
@@ -12,7 +12,7 @@
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
 #include "ir_gen_helper.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 #include "common_test_utils/common_layers_params.hpp"
 
 using namespace ::testing;
@@ -270,9 +270,6 @@ protected:
             deconv_concat_params p = ::testing::WithParamInterface<deconv_concat_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
             size_t blob_size = p.deconv.out_c * (p.in[1] / p.deconv.group);
             for (int i = 0 ; i < p.deconv.kernel.size(); i++) {
                 blob_size *= p.deconv.kernel[i];
@@ -301,9 +298,11 @@ protected:
                 memcpy(model_blob_ptr, blb->buffer().as<uint8_t*>(), blb->byteSize());
                 model_blob_ptr += blb->byteSize();
             }
-            net_reader.SetWeights(model_blob);
 
-            auto network = net_reader.getNetwork();
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, model_blob));
+
             MKLDNNGraphTestClass graph;
             graph.CreateGraph(network);
 
index 1ee95a3..5a81b3e 100644 (file)
@@ -11,7 +11,7 @@
 #include "single_layer_common.hpp"
 #include <mkldnn_extension_utils.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 #include <ie_plugin_config.hpp>
 
 using namespace ::testing;
@@ -266,9 +266,6 @@ protected:
             dw_conv_fusing_test_params p = ::testing::WithParamInterface<dw_conv_fusing_test_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
             size_t conv1_w_size = p.conv1.krn_w * p.conv1.krn_h * p.conv1.out_c * p.in.c / p.conv1.grp_c + p.conv1.out_c; // conv1 weights + biases
             size_t conv2_w_size = p.conv2.krn_w * p.conv2.krn_h * p.conv2.out_c * p.conv1.out_c / p.conv2.grp_c + p.conv2.out_c; // conv2 weights + biases
 
@@ -278,10 +275,12 @@ protected:
             fill_data((float *) weights->buffer(), weights->size() / sizeof(float), 1);
             InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-            net_reader.SetWeights(weights_ptr);
+            InferenceEngine::Core core;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
             MKLDNNGraphTestClass graph;
-            graph.CreateGraph(net_reader.getNetwork());
+            graph.CreateGraph(network);
 
             InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w};
 
@@ -298,7 +297,7 @@ protected:
             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
 
             InferenceEngine::OutputsDataMap out;
-            out = net_reader.getNetwork().getOutputsInfo();
+            out = network.getOutputsInfo();
             InferenceEngine::BlobMap outputBlobs;
 
             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
index d0f582b..e6764ba 100644 (file)
@@ -10,7 +10,7 @@
 #include <mkldnn_extension_utils.h>
 #include <mkldnn_extension_mngr.h>
 #include "tests_common.hpp"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 #include "../test_graph.hpp"
 
 
@@ -90,21 +90,19 @@ TEST_F(MKLDNNGraphOptimizationTests, TestNoFuseConvSumWithOneInput) {
 
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {48}, InferenceEngine::C });
     weights->allocate();
     float * data = weights->buffer();
 
     fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
-
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-    net_reader.SetWeights(weights_ptr);
+    InferenceEngine::Core ie;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = ie.ReadNetwork(model, weights_ptr));
 
     MKLDNNGraphTestClass graph;
-    ASSERT_NO_THROW(graph.CreateGraph(net_reader.getNetwork()));
+    ASSERT_NO_THROW(graph.CreateGraph(network));
 
     bool fused = true;
     auto& nodes = graph.getNodes();
@@ -205,21 +203,19 @@ TEST_F(MKLDNNGraphOptimizationTests, DISABLED_TestNoCrashForFuseConvSumAndInput)
 
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {48}, InferenceEngine::C });
     weights->allocate();
     float * data = weights->buffer();
 
     fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
-
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-    net_reader.SetWeights(weights_ptr);
+    InferenceEngine::Core ie;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = ie.ReadNetwork(model, weights_ptr));
 
     MKLDNNGraphTestClass graph;
-    ASSERT_NO_THROW(graph.CreateGraph(net_reader.getNetwork()));
+    ASSERT_NO_THROW(graph.CreateGraph(network));
 
     bool fused = false;
     auto& nodes = graph.getNodes();
@@ -413,21 +409,19 @@ TEST_F(MKLDNNGraphOptimizationTests, TestNoFuseCustomActivation) {
     MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
     extMgr->AddExtension(extension);
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {139776}, InferenceEngine::C });
     weights->allocate();
     float * data = weights->buffer();
 
     fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
-
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-    net_reader.SetWeights(weights_ptr);
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
     MKLDNNGraphTestClass graph;
-    ASSERT_NO_THROW(graph.CreateGraph(net_reader.getNetwork(), extMgr));
+    ASSERT_NO_THROW(graph.CreateGraph(network, extMgr));
 
     bool fused = true;
     auto& nodes = graph.getNodes();
index ff3ea1d..1e2c22d 100644 (file)
@@ -9,12 +9,10 @@
 #include "tests_common.hpp"
 #include "../test_graph.hpp"
 #include <ie_ir_reader.hpp>
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
+#include <ie_system_conf.h>
 
-// to fix compilation in Debug mode
-IE_SUPPRESS_DEPRECATED_START
-#include <ie_builders.hpp>
-IE_SUPPRESS_DEPRECATED_END
+#include <ngraph/ngraph.hpp>
 
 using namespace ::testing;
 using namespace std;
@@ -171,21 +169,17 @@ TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReorders) {
     </edges>
 </net>)V0G0N";
 
-
-
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {9728}, InferenceEngine::C });
     weights->allocate();
     fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-    net_reader.SetWeights(weights_ptr);
-
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     size_t reorders_num = 0;
     auto& nodes = graph.getNodes();
@@ -283,19 +277,17 @@ TEST_F(MKLDNNGraphStructureTests, TestRedundantReorderBeforeConvWithC_3) {
 </net>
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {37936}, InferenceEngine::C });
     weights->allocate();
     fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-    net_reader.SetWeights(weights_ptr);
-
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     size_t reorders_num = 0;
     auto& nodes = graph.getNodes();
@@ -309,7 +301,8 @@ TEST_F(MKLDNNGraphStructureTests, TestRedundantReorderBeforeConvWithC_3) {
             }
         }
     }
-    ASSERT_EQ(reorders_num, 3);
+    size_t expected = InferenceEngine::with_cpu_x86_avx2() ? 3 : 1;
+    ASSERT_EQ(reorders_num, expected);
 }
 
 TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReordersBeforeConcat) {
@@ -441,9 +434,6 @@ TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReordersBeforeConcat) {
 </net>
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {2432}, InferenceEngine::C });
     weights->allocate();
     float * data = weights->buffer();
@@ -460,10 +450,12 @@ TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReordersBeforeConcat) {
 
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-    net_reader.SetWeights(weights_ptr);
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     size_t reorders_num = 0;
     auto& nodes = graph.getNodes();
@@ -484,7 +476,7 @@ TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReordersBeforeConcat) {
     InferenceEngine::BlobMap srcs;
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("data", src));
 
-    InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
 
     InferenceEngine::BlobMap outputBlobs;
     std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -514,8 +506,8 @@ TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReordersBeforeConcat) {
     compare(*output, *dstOut);
 
     // Compare for batch2
-    net_reader.getNetwork().setBatchSize(2);
-    graph.CreateGraph(net_reader.getNetwork());
+    network.setBatchSize(2);
+    graph.CreateGraph(network);
     desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, {2, 3, 7, 7}, InferenceEngine::NCHW);
 
     InferenceEngine::Blob::Ptr srcBatch = InferenceEngine::make_shared_blob<float>(desc);
@@ -530,7 +522,7 @@ TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReordersBeforeConcat) {
 
     srcs.clear();
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("data", srcBatch));
-    out = net_reader.getNetwork().getOutputsInfo();
+    out = network.getOutputsInfo();
 
     outputBlobs.clear();
     item = *out.begin();
@@ -723,19 +715,18 @@ TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReordersBeforeDWConvolution) {
 </net>
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {288}, InferenceEngine::C });
     weights->allocate();
     fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
 
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-    net_reader.SetWeights(weights_ptr);
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     size_t reorders_num = 0;
     auto& nodes = graph.getNodes();
@@ -744,7 +735,8 @@ TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReordersBeforeDWConvolution) {
             reorders_num++;
         }
     }
-    ASSERT_EQ(reorders_num, 2);
+    size_t expected = InferenceEngine::with_cpu_x86_avx2() ? 2 : 3;
+    ASSERT_EQ(reorders_num, expected);
     InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, {2, 3, 5, 5}, InferenceEngine::NCHW);
     InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float>(desc);
     src->allocate();
@@ -758,7 +750,7 @@ TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReordersBeforeDWConvolution) {
     InferenceEngine::BlobMap srcs;
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("input", src));
 
-    InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
 
     InferenceEngine::BlobMap outputBlobs;
     std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -900,19 +892,18 @@ TEST_F(MKLDNNGraphStructureTests, DISABLED_TestNoRedundantReordersBeforeDWDeconv
 </net>
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {5664}, InferenceEngine::C });
     weights->allocate();
     fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
 
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-    net_reader.SetWeights(weights_ptr);
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     size_t reorders_num = 0;
     auto& nodes = graph.getNodes();
@@ -931,7 +922,7 @@ TEST_F(MKLDNNGraphStructureTests, DISABLED_TestNoRedundantReordersBeforeDWDeconv
     InferenceEngine::BlobMap srcs;
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("input", src));
 
-    InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
 
     InferenceEngine::BlobMap outputBlobs;
     InferenceEngine::DataPtr item = out["deconv1"];
@@ -1059,11 +1050,12 @@ TEST_F(MKLDNNGraphStructureTests, TestSeveralOutputToNextLayer) {
 </net>
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     size_t reorders_num = 0;
     auto& nodes = graph.getNodes();
@@ -1081,7 +1073,7 @@ TEST_F(MKLDNNGraphStructureTests, TestSeveralOutputToNextLayer) {
     InferenceEngine::BlobMap srcs;
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("data", src));
 
-    InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
 
     InferenceEngine::BlobMap outputBlobs;
     std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -1203,11 +1195,12 @@ TEST_F(MKLDNNGraphStructureTests, TestOutputAfterInplacePlusConcat) {
 </net>
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-    MKLDNNPlugin::MKLDNNExecNetwork::Ptr execNetwork(new MKLDNNPlugin::MKLDNNExecNetwork(net_reader.getNetwork(), {}, {}));
-    InferenceEngine::InputsDataMap _networkInputs = net_reader.getNetwork().getInputsInfo();
-    InferenceEngine::OutputsDataMap _networkOutputs = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
+    MKLDNNPlugin::MKLDNNExecNetwork::Ptr execNetwork(new MKLDNNPlugin::MKLDNNExecNetwork(network, {}, {}));
+    InferenceEngine::InputsDataMap _networkInputs = network.getInputsInfo();
+    InferenceEngine::OutputsDataMap _networkOutputs = network.getOutputsInfo();
     execNetwork->setNetworkInputs(_networkInputs);
     execNetwork->setNetworkOutputs(_networkOutputs);
     InferenceEngine::IInferRequest::Ptr inferRequest;
@@ -1223,7 +1216,7 @@ TEST_F(MKLDNNGraphStructureTests, TestOutputAfterInplacePlusConcat) {
     InferenceEngine::StatusCode sts = inferRequest->SetBlob("data", src, &resp);
     ASSERT_EQ(InferenceEngine::OK, sts) << resp.msg;
 
-    InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
 
     std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
 
@@ -1711,19 +1704,19 @@ TEST_F(MKLDNNGraphStructureTests, TestResnetPart) {
 )V0G0N";
 
     std::string model = modelB + modelE;
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
 
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {1643424}, InferenceEngine::C });
     weights->allocate();
     fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-    net_reader.SetWeights(weights_ptr);
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
-    MKLDNNPlugin::MKLDNNExecNetwork::Ptr execNetwork(new MKLDNNPlugin::MKLDNNExecNetwork(net_reader.getNetwork(), {}, {}));
-    InferenceEngine::InputsDataMap _networkInputs = net_reader.getNetwork().getInputsInfo();
-    InferenceEngine::OutputsDataMap _networkOutputs = net_reader.getNetwork().getOutputsInfo();
+    MKLDNNPlugin::MKLDNNExecNetwork::Ptr execNetwork(new MKLDNNPlugin::MKLDNNExecNetwork(network, {}, {}));
+    InferenceEngine::InputsDataMap _networkInputs = network.getInputsInfo();
+    InferenceEngine::OutputsDataMap _networkOutputs = network.getOutputsInfo();
     execNetwork->setNetworkInputs(_networkInputs);
     execNetwork->setNetworkOutputs(_networkOutputs);
     InferenceEngine::IInferRequest::Ptr inferRequest;
@@ -1739,7 +1732,7 @@ TEST_F(MKLDNNGraphStructureTests, TestResnetPart) {
     InferenceEngine::StatusCode sts = inferRequest->SetBlob("input", src, &resp);
     ASSERT_EQ(InferenceEngine::OK, sts) << resp.msg;
 
-    InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
 
     std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
 
@@ -1868,11 +1861,12 @@ TEST_F(MKLDNNGraphStructureTests, TestConcatAfterConcat) {
 </net>
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-    MKLDNNPlugin::MKLDNNExecNetwork::Ptr execNetwork(new MKLDNNPlugin::MKLDNNExecNetwork(net_reader.getNetwork(), {}, {}));
-    InferenceEngine::InputsDataMap _networkInputs = net_reader.getNetwork().getInputsInfo();
-    InferenceEngine::OutputsDataMap _networkOutputs = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
+    MKLDNNPlugin::MKLDNNExecNetwork::Ptr execNetwork(new MKLDNNPlugin::MKLDNNExecNetwork(network, {}, {}));
+    InferenceEngine::InputsDataMap _networkInputs = network.getInputsInfo();
+    InferenceEngine::OutputsDataMap _networkOutputs = network.getOutputsInfo();
     execNetwork->setNetworkInputs(_networkInputs);
     execNetwork->setNetworkOutputs(_networkOutputs);
     InferenceEngine::IInferRequest::Ptr inferRequest;
@@ -1900,7 +1894,7 @@ TEST_F(MKLDNNGraphStructureTests, TestConcatAfterConcat) {
     sts = inferRequest->SetBlob("data3", src3, &resp);
     ASSERT_EQ(InferenceEngine::OK, sts) << resp.msg;
 
-    InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
 
     std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
 
@@ -2048,11 +2042,12 @@ TEST_F(MKLDNNGraphStructureTests, Test2ConcatFromConcat) {
 </net>
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-    MKLDNNPlugin::MKLDNNExecNetwork::Ptr execNetwork(new MKLDNNPlugin::MKLDNNExecNetwork(net_reader.getNetwork(), {}, {}));
-    InferenceEngine::InputsDataMap _networkInputs = net_reader.getNetwork().getInputsInfo();
-    InferenceEngine::OutputsDataMap _networkOutputs = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
+    MKLDNNPlugin::MKLDNNExecNetwork::Ptr execNetwork(new MKLDNNPlugin::MKLDNNExecNetwork(network, {}, {}));
+    InferenceEngine::InputsDataMap _networkInputs = network.getInputsInfo();
+    InferenceEngine::OutputsDataMap _networkOutputs = network.getOutputsInfo();
     execNetwork->setNetworkInputs(_networkInputs);
     execNetwork->setNetworkOutputs(_networkOutputs);
     InferenceEngine::IInferRequest::Ptr inferRequest;
@@ -2089,7 +2084,7 @@ TEST_F(MKLDNNGraphStructureTests, Test2ConcatFromConcat) {
     sts = inferRequest->SetBlob("data4", src4, &resp);
     ASSERT_EQ(InferenceEngine::OK, sts) << resp.msg;
 
-    InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
 
     std::vector<InferenceEngine::TBlob<float>::Ptr> outputs;
     std::vector<InferenceEngine::TBlob<float>::Ptr> refOutputs;
@@ -2238,19 +2233,18 @@ TEST_F(MKLDNNGraphStructureTests, TestResultsAfterGroupedConvWithStrides) {
 </net>
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {3552}, InferenceEngine::C });
     weights->allocate();
     float * data = weights->buffer();
     fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-    net_reader.SetWeights(weights_ptr);
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, {1, 24, 80, 80}, InferenceEngine::NCHW);
     InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float>(desc);
@@ -2260,7 +2254,7 @@ TEST_F(MKLDNNGraphStructureTests, TestResultsAfterGroupedConvWithStrides) {
     InferenceEngine::BlobMap srcs;
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("data", src));
 
-    InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
 
     InferenceEngine::BlobMap outputBlobs;
     std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -2273,8 +2267,8 @@ TEST_F(MKLDNNGraphStructureTests, TestResultsAfterGroupedConvWithStrides) {
     graph.Infer(srcs, outputBlobs);
 
     // Compare for batch2
-    net_reader.getNetwork().setBatchSize(2);
-    graph.CreateGraph(net_reader.getNetwork());
+    network.setBatchSize(2);
+    graph.CreateGraph(network);
     desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, {2, 24, 80, 80}, InferenceEngine::NCHW);
 
     InferenceEngine::Blob::Ptr srcBatch = InferenceEngine::make_shared_blob<float>(desc);
@@ -2289,7 +2283,7 @@ TEST_F(MKLDNNGraphStructureTests, TestResultsAfterGroupedConvWithStrides) {
 
     srcs.clear();
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("data", srcBatch));
-    out = net_reader.getNetwork().getOutputsInfo();
+    out = network.getOutputsInfo();
 
     outputBlobs.clear();
     item = *out.begin();
@@ -2373,19 +2367,19 @@ TEST_F(MKLDNNGraphStructureTests, TestLoadTopologyWithConstLayer) {
 </net>
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {6400}, InferenceEngine::C });
     weights->allocate();
     float * data = weights->buffer();
     fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-    net_reader.SetWeights(weights_ptr);
-    MKLDNNPlugin::MKLDNNExecNetwork::Ptr execNetwork(new MKLDNNPlugin::MKLDNNExecNetwork(net_reader.getNetwork(), {}, {}));
-    InferenceEngine::InputsDataMap _networkInputs = net_reader.getNetwork().getInputsInfo();
-    InferenceEngine::OutputsDataMap _networkOutputs = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
+
+    MKLDNNPlugin::MKLDNNExecNetwork::Ptr execNetwork(new MKLDNNPlugin::MKLDNNExecNetwork(network, {}, {}));
+    InferenceEngine::InputsDataMap _networkInputs = network.getInputsInfo();
+    InferenceEngine::OutputsDataMap _networkOutputs = network.getOutputsInfo();
     execNetwork->setNetworkInputs(_networkInputs);
     execNetwork->setNetworkOutputs(_networkOutputs);
     InferenceEngine::IInferRequest::Ptr inferRequest;
@@ -2401,7 +2395,7 @@ TEST_F(MKLDNNGraphStructureTests, TestLoadTopologyWithConstLayer) {
     InferenceEngine::StatusCode sts = inferRequest->SetBlob("data", src1, &resp);
     ASSERT_EQ(InferenceEngine::OK, sts) << resp.msg;
 
-    InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
 
     std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
 
@@ -2516,9 +2510,6 @@ TEST_F(MKLDNNGraphStructureTests, TestLoadTopologyWithEltwiseBeforeConcat) {
 </net>
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {6400}, InferenceEngine::C });
     weights->allocate();
     float * data = weights->buffer();
@@ -2530,10 +2521,13 @@ TEST_F(MKLDNNGraphStructureTests, TestLoadTopologyWithEltwiseBeforeConcat) {
     }
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-    net_reader.SetWeights(weights_ptr);
-    MKLDNNPlugin::MKLDNNExecNetwork::Ptr execNetwork(new MKLDNNPlugin::MKLDNNExecNetwork(net_reader.getNetwork(), {}, {}));
-    InferenceEngine::InputsDataMap _networkInputs = net_reader.getNetwork().getInputsInfo();
-    InferenceEngine::OutputsDataMap _networkOutputs = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
+
+    MKLDNNPlugin::MKLDNNExecNetwork::Ptr execNetwork(new MKLDNNPlugin::MKLDNNExecNetwork(network, {}, {}));
+    InferenceEngine::InputsDataMap _networkInputs = network.getInputsInfo();
+    InferenceEngine::OutputsDataMap _networkOutputs = network.getOutputsInfo();
     execNetwork->setNetworkInputs(_networkInputs);
     execNetwork->setNetworkOutputs(_networkOutputs);
     InferenceEngine::IInferRequest::Ptr inferRequest;
@@ -2552,7 +2546,7 @@ TEST_F(MKLDNNGraphStructureTests, TestLoadTopologyWithEltwiseBeforeConcat) {
     InferenceEngine::StatusCode sts = inferRequest->SetBlob("data", src1, &resp);
     ASSERT_EQ(InferenceEngine::OK, sts) << resp.msg;
 
-    InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
 
     std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
 
@@ -2574,7 +2568,7 @@ TEST_F(MKLDNNGraphStructureTests, TestLoadTopologyWithEltwiseBeforeConcat) {
     }
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     size_t reorders_num = 0;
     auto& nodes = graph.getNodes();
@@ -2992,18 +2986,18 @@ TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReordersRmnet_SSSSD) {
        </edges>
 </net>
 )V0G0N";
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
 
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {8664}, InferenceEngine::C });
     weights->allocate();
     fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-    net_reader.SetWeights(weights_ptr);
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     size_t reorders_num = 0;
     auto& nodes = graph.getNodes();
@@ -3225,19 +3219,18 @@ TEST_F(MKLDNNGraphStructureTests, TestFailedPartDPN92) {
     </edges>
 </net>)V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {33792}, InferenceEngine::C });
     weights->allocate();
     fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
 
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-    net_reader.SetWeights(weights_ptr);
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, {1, 32, 14, 14}, InferenceEngine::NCHW);
     InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float>(desc);
@@ -3254,7 +3247,7 @@ TEST_F(MKLDNNGraphStructureTests, TestFailedPartDPN92) {
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("data", src1));
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("data2", src2));
 
-    InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
 
     InferenceEngine::BlobMap outputBlobs;
     std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -3273,8 +3266,8 @@ TEST_F(MKLDNNGraphStructureTests, TestFailedPartDPN92) {
     }
 
     // Compare for batch2
-    net_reader.getNetwork().setBatchSize(2);
-    graph.CreateGraph(net_reader.getNetwork());
+    network.setBatchSize(2);
+    graph.CreateGraph(network);
     desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, {2, 32, 14, 14}, InferenceEngine::NCHW);
 
     InferenceEngine::Blob::Ptr src1Batch = InferenceEngine::make_shared_blob<float>(desc);
@@ -3302,7 +3295,7 @@ TEST_F(MKLDNNGraphStructureTests, TestFailedPartDPN92) {
     srcs.clear();
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("data", src1Batch));
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("data2", src2Batch));
-    out = net_reader.getNetwork().getOutputsInfo();
+    out = network.getOutputsInfo();
 
     outputBlobs.clear();
     item = *out.begin();
@@ -3790,20 +3783,17 @@ TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReordersForXceptionTopology) {
 </net>
 )V0G0N";
 
-
-
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {758272}, InferenceEngine::C });
     weights->allocate();
     fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
-
-    net_reader.SetWeights(weights_ptr);
+
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     size_t reorders_num = 0;
     auto& nodes = graph.getNodes();
@@ -3858,20 +3848,17 @@ TEST_F(MKLDNNGraphStructureTests, TestNoRedundantReordersForGrayscaleInput) {
 </net>
 )V0G0N";
 
-
-
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {1280}, InferenceEngine::C });
     weights->allocate();
     fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-    net_reader.SetWeights(weights_ptr);
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     size_t reorders_num = 0;
     auto& nodes = graph.getNodes();
@@ -4034,19 +4021,18 @@ TEST_F(MKLDNNGraphStructureTests, TestFailedPartPlateRecognitionBarrier0001) {
     </edges>
 </net>)V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {3672348}, InferenceEngine::C });
     weights->allocate();
     fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
 
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
-
-    net_reader.SetWeights(weights_ptr);
+
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, {1, 128, 1, 88}, InferenceEngine::NCHW);
     InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float>(desc);
@@ -4056,7 +4042,7 @@ TEST_F(MKLDNNGraphStructureTests, TestFailedPartPlateRecognitionBarrier0001) {
     InferenceEngine::BlobMap srcs;
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("data", src1));
 
-    InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
 
     InferenceEngine::BlobMap outputBlobs;
     std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -4075,8 +4061,8 @@ TEST_F(MKLDNNGraphStructureTests, TestFailedPartPlateRecognitionBarrier0001) {
     }
 
     // Compare for batch2
-    net_reader.getNetwork().setBatchSize(2);
-    graph.CreateGraph(net_reader.getNetwork());
+    network.setBatchSize(2);
+    graph.CreateGraph(network);
     desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, {2, 128, 1, 88}, InferenceEngine::NCHW);
 
     InferenceEngine::Blob::Ptr src1Batch = InferenceEngine::make_shared_blob<float>(desc);
@@ -4091,7 +4077,7 @@ TEST_F(MKLDNNGraphStructureTests, TestFailedPartPlateRecognitionBarrier0001) {
 
     srcs.clear();
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("data", src1Batch));
-    out = net_reader.getNetwork().getOutputsInfo();
+    out = network.getOutputsInfo();
 
     outputBlobs.clear();
     item = *out.begin();
@@ -4213,19 +4199,18 @@ TEST_F(MKLDNNGraphStructureTests, TestFailedVNect0001) {
     </edges>
 </net>)V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
-    InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::FP32, { 1032192 }, InferenceEngine::C });
+    InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, { 1032192 }, InferenceEngine::C });
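+    // Weight blobs are raw byte storage: the TensorDesc precision is now U8 to
+    // match TBlob<uint8_t>, while fill_data below still writes float test
+    // values into that buffer.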
     weights->allocate();
     fill_data((float *)weights->buffer(), weights->size() / sizeof(float));
 
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-    net_reader.SetWeights(weights_ptr);
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
     MKLDNNGraphTestClass graph;
-    ASSERT_NO_THROW(graph.CreateGraph(net_reader.getNetwork()));
+    ASSERT_NO_THROW(graph.CreateGraph(network));
 }
 
 TEST_F(MKLDNNGraphStructureTests, TestFailedVNect0002) {
@@ -4309,19 +4294,18 @@ TEST_F(MKLDNNGraphStructureTests, TestFailedVNect0002) {
 </net>
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
-    InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::FP32, { 43008 }, InferenceEngine::C });
+    InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, { 43008 }, InferenceEngine::C });
     weights->allocate();
     fill_data((float *)weights->buffer(), weights->size() / sizeof(float));
 
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-    net_reader.SetWeights(weights_ptr);
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     size_t outputs_num = 0;
     auto& nodes = graph.getNodes();
@@ -4497,11 +4481,12 @@ TEST_F(MKLDNNGraphStructureTests, TestFailedVNect0003) {
     </edges>
 </net>)V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
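+    // This IR carries no external weights, so an empty Blob::CPtr() is passed.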
 
     MKLDNNGraphTestClass graph;
-    ASSERT_NO_THROW(graph.CreateGraph(net_reader.getNetwork()));
+    ASSERT_NO_THROW(graph.CreateGraph(network));
 }
 
 TEST_F(MKLDNNGraphStructureTests, TestConvolutionDWConvolutionSumFusing) {
@@ -4644,20 +4629,19 @@ TEST_F(MKLDNNGraphStructureTests, TestConvolutionDWConvolutionSumFusing) {
 </net>
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    net_reader.ReadNetwork(model.data(), model.length());
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {8064}, InferenceEngine::C });
     weights->allocate();
     float * data = weights->buffer();
     memset((float *) weights->buffer(), 0, weights->size());
 
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
-
-    net_reader.SetWeights(weights_ptr);
+
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    network = core.ReadNetwork(model, weights_ptr);
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     InferenceEngine::TensorDesc src0_desc(InferenceEngine::Precision::FP32, {1, 32, 300, 600}, InferenceEngine::NCHW);
     InferenceEngine::Blob::Ptr src0 = InferenceEngine::make_shared_blob<float>(src0_desc);
@@ -4684,7 +4668,7 @@ TEST_F(MKLDNNGraphStructureTests, TestConvolutionDWConvolutionSumFusing) {
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("data0", src0));
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("data1", src1));
 
-    InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
 
     InferenceEngine::BlobMap outputBlobs;
     std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -4794,19 +4778,18 @@ TEST_F(MKLDNNGraphStructureTests, TestConstantLayerAsOutput) {
 </net>
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {37912}, InferenceEngine::C });
     weights->allocate();
     fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
 
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-    net_reader.SetWeights(weights_ptr);
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, {1, 3, 10, 10}, InferenceEngine::NCHW);
     InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float>(desc);
@@ -4821,7 +4804,7 @@ TEST_F(MKLDNNGraphStructureTests, TestConstantLayerAsOutput) {
     InferenceEngine::BlobMap srcs;
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("data", src));
 
-    InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
 
     InferenceEngine::BlobMap outputBlobs;
     std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -5146,14 +5129,14 @@ TEST_F(MKLDNNGraphStructureTests, TestGemmConvolutionWithConcat) {
 </net>
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    net_reader.ReadNetwork(model.data(), model.length());
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {52800}, InferenceEngine::C });
     weights->allocate();
     fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
-    net_reader.SetWeights(weights_ptr);
+
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
     auto graphInfer = [](InferenceEngine::CNNNetwork network, InferenceEngine::BlobMap& inBlobs,
             InferenceEngine::BlobMap& outBlobs, std::string primitivesPriority) {
@@ -5164,50 +5147,9 @@ TEST_F(MKLDNNGraphStructureTests, TestGemmConvolutionWithConcat) {
         MKLDNNGraphTestClass graph;
         graph.CreateGraph(network);
         graph.Infer(inBlobs, outBlobs);
-
-#if 0
-        std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perfMap;
-        graph.GetPerfData(perfMap);
-
-        long long totalTime = 0;
-        // Print performance counts
-
-        std::cout << std::endl << "performance counts:" << std::endl << std::endl;
-        for (const auto & it : perfMap) {
-            std::string toPrint(it.first);
-            const int maxLayerName = 30;
-
-            if (it.first.length() >= maxLayerName) {
-                toPrint  = it.first.substr(0, maxLayerName - 4);
-                toPrint += "...";
-            }
-
-
-            std::cout << std::setw(maxLayerName) << std::left << toPrint;
-            switch (it.second.status) {
-                case InferenceEngine::InferenceEngineProfileInfo::EXECUTED:
-                    std::cout << std::setw(15) << std::left << "EXECUTED";
-                    break;
-                case InferenceEngine::InferenceEngineProfileInfo::NOT_RUN:
-                    std::cout << std::setw(15) << std::left << "NOT_RUN";
-                    break;
-                case InferenceEngine::InferenceEngineProfileInfo::OPTIMIZED_OUT:
-                    std::cout << std::setw(15) << std::left << "OPTIMIZED_OUT";
-                    break;
-            }
-            std::cout << std::setw(30) << std::left << "layerType: " + std::string(it.second.layer_type) + " ";
-            std::cout << std::setw(20) << std::left << "realTime: " + std::to_string(it.second.realTime_uSec);
-            std::cout << std::setw(20) << std::left << " cpu: "  + std::to_string(it.second.cpu_uSec);
-            std::cout << " execType: " << it.second.exec_type << std::endl;
-            if (it.second.realTime_uSec > 0) {
-                totalTime += it.second.realTime_uSec;
-            }
-        }
-        std::cout << std::setw(20) << std::left << "Total time: " + std::to_string(totalTime) << " microseconds" << std::endl;
-#endif
     };
 
-    InferenceEngine::InputsDataMap inputsMap = net_reader.getNetwork().getInputsInfo();
+    InferenceEngine::InputsDataMap inputsMap = network.getInputsInfo();
     InferenceEngine::BlobMap inputBlobs;
 
     for (const auto& input : inputsMap) {
@@ -5217,7 +5159,7 @@ TEST_F(MKLDNNGraphStructureTests, TestGemmConvolutionWithConcat) {
         inputBlobs[input.first] = src;
     }
 
-    InferenceEngine::OutputsDataMap outsMap = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::OutputsDataMap outsMap = network.getOutputsInfo();
     InferenceEngine::BlobMap outputBlobs1;
     InferenceEngine::BlobMap outputBlobs2;
     for (const auto& output : outsMap) {
@@ -5230,20 +5172,20 @@ TEST_F(MKLDNNGraphStructureTests, TestGemmConvolutionWithConcat) {
         outputBlobs2[output.first] = dst2;
     }
 
-    graphInfer(net_reader.getNetwork(), inputBlobs, outputBlobs1, "");
-    graphInfer(net_reader.getNetwork(), inputBlobs, outputBlobs2, "cpu:gemm_blas");
+    graphInfer(network, inputBlobs, outputBlobs1, "");
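+    // The run with empty primitivesPriority is the reference; each forced
+    // gemm implementation below must match its output.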
+    graphInfer(network, inputBlobs, outputBlobs2, "cpu:gemm_blas");
     compare(*outputBlobs1.begin()->second, *outputBlobs2.begin()->second);
 
-    graphInfer(net_reader.getNetwork(), inputBlobs, outputBlobs2, "cpu:gemm_avx512");
+    graphInfer(network, inputBlobs, outputBlobs2, "cpu:gemm_avx512");
     compare(*outputBlobs1.begin()->second, *outputBlobs2.begin()->second);
 
-    graphInfer(net_reader.getNetwork(), inputBlobs, outputBlobs2, "cpu:gemm_avx2");
+    graphInfer(network, inputBlobs, outputBlobs2, "cpu:gemm_avx2");
     compare(*outputBlobs1.begin()->second, *outputBlobs2.begin()->second);
 
-    graphInfer(net_reader.getNetwork(), inputBlobs, outputBlobs2, "cpu:gemm_sse42");
+    graphInfer(network, inputBlobs, outputBlobs2, "cpu:gemm_sse42");
     compare(*outputBlobs1.begin()->second, *outputBlobs2.begin()->second);
 
-    graphInfer(net_reader.getNetwork(), inputBlobs, outputBlobs2, "cpu:gemm_any");
+    graphInfer(network, inputBlobs, outputBlobs2, "cpu:gemm_any");
     compare(*outputBlobs1.begin()->second, *outputBlobs2.begin()->second);
 }
 
@@ -5471,14 +5413,14 @@ TEST_F(MKLDNNGraphStructureTests, TestRefPoolingWithConcat) {
 </net>
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    net_reader.ReadNetwork(model.data(), model.length());
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {52800}, InferenceEngine::C });
     weights->allocate();
     fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
-    net_reader.SetWeights(weights_ptr);
+
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
     auto graphInfer = [](InferenceEngine::CNNNetwork network, InferenceEngine::BlobMap& inBlobs,
                          InferenceEngine::BlobMap& outBlobs, std::string primitivesPriority) {
@@ -5489,50 +5431,9 @@ TEST_F(MKLDNNGraphStructureTests, TestRefPoolingWithConcat) {
         MKLDNNGraphTestClass graph;
         graph.CreateGraph(network);
         graph.Infer(inBlobs, outBlobs);
-
-#if 1
-        std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perfMap;
-        graph.GetPerfData(perfMap);
-
-        long long totalTime = 0;
-        // Print performance counts
-
-        std::cout << std::endl << "performance counts:" << std::endl << std::endl;
-        for (const auto & it : perfMap) {
-            std::string toPrint(it.first);
-            const int maxLayerName = 30;
-
-            if (it.first.length() >= maxLayerName) {
-                toPrint  = it.first.substr(0, maxLayerName - 4);
-                toPrint += "...";
-            }
-
-
-            std::cout << std::setw(maxLayerName) << std::left << toPrint;
-            switch (it.second.status) {
-                case InferenceEngine::InferenceEngineProfileInfo::EXECUTED:
-                    std::cout << std::setw(15) << std::left << "EXECUTED";
-                    break;
-                case InferenceEngine::InferenceEngineProfileInfo::NOT_RUN:
-                    std::cout << std::setw(15) << std::left << "NOT_RUN";
-                    break;
-                case InferenceEngine::InferenceEngineProfileInfo::OPTIMIZED_OUT:
-                    std::cout << std::setw(15) << std::left << "OPTIMIZED_OUT";
-                    break;
-            }
-            std::cout << std::setw(30) << std::left << "layerType: " + std::string(it.second.layer_type) + " ";
-            std::cout << std::setw(20) << std::left << "realTime: " + std::to_string(it.second.realTime_uSec);
-            std::cout << std::setw(20) << std::left << " cpu: "  + std::to_string(it.second.cpu_uSec);
-            std::cout << " execType: " << it.second.exec_type << std::endl;
-            if (it.second.realTime_uSec > 0) {
-                totalTime += it.second.realTime_uSec;
-            }
-        }
-        std::cout << std::setw(20) << std::left << "Total time: " + std::to_string(totalTime) << " microseconds" << std::endl;
-#endif
     };
 
-    InferenceEngine::InputsDataMap inputsMap = net_reader.getNetwork().getInputsInfo();
+    InferenceEngine::InputsDataMap inputsMap = network.getInputsInfo();
     InferenceEngine::BlobMap inputBlobs;
 
     for (const auto& input : inputsMap) {
@@ -5542,7 +5443,7 @@ TEST_F(MKLDNNGraphStructureTests, TestRefPoolingWithConcat) {
         inputBlobs[input.first] = src;
     }
 
-    InferenceEngine::OutputsDataMap outsMap = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::OutputsDataMap outsMap = network.getOutputsInfo();
     InferenceEngine::BlobMap outputBlobs1;
     InferenceEngine::BlobMap outputBlobs2;
     for (const auto& output : outsMap) {
@@ -5555,11 +5456,11 @@ TEST_F(MKLDNNGraphStructureTests, TestRefPoolingWithConcat) {
         outputBlobs2[output.first] = dst2;
     }
 
-    graphInfer(net_reader.getNetwork(), inputBlobs, outputBlobs1, "");
-    graphInfer(net_reader.getNetwork(), inputBlobs, outputBlobs2, "cpu:gemm_blas,cpu:ref_any");
+    graphInfer(network, inputBlobs, outputBlobs1, "");
+    graphInfer(network, inputBlobs, outputBlobs2, "cpu:gemm_blas,cpu:ref_any");
     compare(*outputBlobs1.begin()->second, *outputBlobs2.begin()->second);
 
-    graphInfer(net_reader.getNetwork(), inputBlobs, outputBlobs2, "cpu:ref_any");
+    graphInfer(network, inputBlobs, outputBlobs2, "cpu:ref_any");
     compare(*outputBlobs1.begin()->second, *outputBlobs2.begin()->second);
 }
 
@@ -5647,9 +5548,6 @@ TEST_F(MKLDNNGraphStructureTests, TestConvolutionWith2DepthwiseOpFusing) {
 </net>
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    net_reader.ReadNetwork(model.data(), model.length());
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {6724}, InferenceEngine::C });
     weights->allocate();
     float* wdata = weights->buffer();
@@ -5660,10 +5558,12 @@ TEST_F(MKLDNNGraphStructureTests, TestConvolutionWith2DepthwiseOpFusing) {
 
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-    net_reader.SetWeights(weights_ptr);
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     const auto& nodes = graph.getNodes();
     ASSERT_EQ(nodes.size(), 5);
@@ -5690,7 +5590,7 @@ TEST_F(MKLDNNGraphStructureTests, TestConvolutionWith2DepthwiseOpFusing) {
     InferenceEngine::BlobMap srcs;
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("data", src));
 
-    InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
 
     InferenceEngine::BlobMap outputBlobs;
     std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -5788,9 +5688,6 @@ TEST_F(MKLDNNGraphStructureTests, TestConvolutionWith2EltwiseOpFusing) {
 </net>
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    net_reader.ReadNetwork(model.data(), model.length());
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {384}, InferenceEngine::C });
     weights->allocate();
     float* wdata = weights->buffer();
@@ -5800,10 +5697,12 @@ TEST_F(MKLDNNGraphStructureTests, TestConvolutionWith2EltwiseOpFusing) {
 
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-    net_reader.SetWeights(weights_ptr);
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     const auto& nodes = graph.getNodes();
     ASSERT_EQ(nodes.size(), 4);
@@ -5829,7 +5728,7 @@ TEST_F(MKLDNNGraphStructureTests, TestConvolutionWith2EltwiseOpFusing) {
     InferenceEngine::BlobMap srcs;
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("data", src));
 
-    InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
 
     InferenceEngine::BlobMap outputBlobs;
     std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -5930,9 +5829,6 @@ TEST_F(MKLDNNGraphStructureTests, TestGemmConvolutionWith2DepthwiseOpFusing) {
 </net>
 )V0G0N";
 
-    InferenceEngine::CNNNetReader net_reader;
-    net_reader.ReadNetwork(model.data(), model.length());
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, {228}, InferenceEngine::C });
     weights->allocate();
     float* wdata = weights->buffer();
@@ -5943,10 +5839,12 @@ TEST_F(MKLDNNGraphStructureTests, TestGemmConvolutionWith2DepthwiseOpFusing) {
 
     InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
 
-    net_reader.SetWeights(weights_ptr);
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights_ptr));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(net_reader.getNetwork());
+    graph.CreateGraph(network);
 
     const auto& nodes = graph.getNodes();
     ASSERT_EQ(nodes.size(), 3);
@@ -5971,7 +5869,7 @@ TEST_F(MKLDNNGraphStructureTests, TestGemmConvolutionWith2DepthwiseOpFusing) {
     InferenceEngine::BlobMap srcs;
     srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("data", src));
 
-    InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
+    InferenceEngine::OutputsDataMap out = network.getOutputsInfo();
 
     InferenceEngine::BlobMap outputBlobs;
     std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
@@ -6048,9 +5946,6 @@ TEST_F(MKLDNNGraphStructureTests, TestCreateGraphWithSplit) {
         inputData[i] = (i < batchSize) ? channel1Value : channel2Value;
     }
 
-    InferenceEngine::CNNNetReader reader;
-    reader.ReadNetwork(model.data(), model.size());
-
     InferenceEngine::TBlob<uint8_t>* weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, { 228 }, InferenceEngine::C });
     weights->allocate();
     float* weightsData = weights->buffer();
@@ -6059,10 +5954,13 @@ TEST_F(MKLDNNGraphStructureTests, TestCreateGraphWithSplit) {
     }
 
     const InferenceEngine::TBlob<uint8_t>::Ptr weightsPtr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
-    reader.SetWeights(weightsPtr);
+
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weightsPtr));
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(reader.getNetwork());
+    graph.CreateGraph(network);
 
     const auto& nodes = graph.getNodes();
     ASSERT_EQ(nodes.size(), 5);
@@ -6072,7 +5970,7 @@ TEST_F(MKLDNNGraphStructureTests, TestCreateGraphWithSplit) {
     ASSERT_EQ(nodes[3].get()->getType(), MKLDNNPlugin::Type::Output);
     ASSERT_EQ(nodes[4].get()->getType(), MKLDNNPlugin::Type::Output);
 
-    InferenceEngine::OutputsDataMap outputs = reader.getNetwork().getOutputsInfo();
+    InferenceEngine::OutputsDataMap outputs = network.getOutputsInfo();
     const std::pair<std::string, InferenceEngine::DataPtr> splitOutputItem1 {"Split.0", outputs["Split.0"]};
     const std::pair<std::string, InferenceEngine::DataPtr> splitOutputItem2 {"Split.1", outputs["Split.1"]};
 
@@ -6193,9 +6091,6 @@ TEST_F(MKLDNNGraphStructureTests, TestCreateGraphWithFakeOutput) {
     for (int splitFromPortNumber = 1; splitFromPortNumber <= 2; ++splitFromPortNumber) {
         sprintf(model.data(), modelTemplate.c_str(), splitFromPortNumber);
 
-        InferenceEngine::CNNNetReader reader;
-        reader.ReadNetwork(model.data(), model.size());
-
         InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, { 228 }, InferenceEngine::C });
         weights->allocate();
         float* weightsData = weights->buffer();
@@ -6204,12 +6099,15 @@ TEST_F(MKLDNNGraphStructureTests, TestCreateGraphWithFakeOutput) {
         }
 
         const InferenceEngine::TBlob<uint8_t>::Ptr weightsPtr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
-        reader.SetWeights(weightsPtr);
+
+        InferenceEngine::Core core;
+        InferenceEngine::CNNNetwork network;
+        ASSERT_NO_THROW(network = core.ReadNetwork(&model[0], weightsPtr));
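+        // model was filled in place via sprintf, so it is passed as &model[0]
+        // to be read as a C string up to the null terminator rather than the
+        // full std::string length.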
 
         MKLDNNGraphTestClass graph;
-        graph.CreateGraph(reader.getNetwork());
+        graph.CreateGraph(network);
 
-        InferenceEngine::OutputsDataMap outputs = reader.getNetwork().getOutputsInfo();
+        InferenceEngine::OutputsDataMap outputs = network.getOutputsInfo();
         const std::pair<std::string, InferenceEngine::DataPtr> reshapeOutputItem = std::make_pair("Reshape", outputs["Reshape"]);
         const std::string splitOutputName = std::string("Split.") + (splitFromPortNumber == 1 ? "1" : "0");
         const std::pair<std::string, InferenceEngine::DataPtr> splitOutputItem = std::make_pair(splitOutputName, outputs[splitOutputName]);
@@ -6364,10 +6262,6 @@ TEST_F(MKLDNNGraphStructureTests, TestCreateGraphWithMultipleData) {
         inputData[i] = (i < batchSize) ? channel1Value : channel2Value;
     }
 
-
-    InferenceEngine::CNNNetReader reader;
-    reader.ReadNetwork(model.data(), model.size());
-
     InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>({ InferenceEngine::Precision::U8, { 228 }, InferenceEngine::C });
     weights->allocate();
     float* weightsData = weights->buffer();
@@ -6376,12 +6270,15 @@ TEST_F(MKLDNNGraphStructureTests, TestCreateGraphWithMultipleData) {
     }
 
     const InferenceEngine::TBlob<uint8_t>::Ptr weightsPtr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
-    reader.SetWeights(weightsPtr);
+
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weightsPtr));
 
-    reader.getNetwork().addOutput("split");
+    network.addOutput("split");
 
     MKLDNNGraphTestClass graph;
-    graph.CreateGraph(reader.getNetwork());
+    graph.CreateGraph(network);
 
     const auto& nodes = graph.getNodes();
     ASSERT_EQ(nodes.size(), 12);
@@ -6398,7 +6295,7 @@ TEST_F(MKLDNNGraphStructureTests, TestCreateGraphWithMultipleData) {
     ASSERT_EQ(nodes[10]->getType(), MKLDNNPlugin::Type::Output);
     ASSERT_EQ(nodes[11]->getType(), MKLDNNPlugin::Type::Output);
 
-    InferenceEngine::OutputsDataMap outputs = reader.getNetwork().getOutputsInfo();
+    InferenceEngine::OutputsDataMap outputs = network.getOutputsInfo();
     std::vector<std::pair<std::string, InferenceEngine::DataPtr>> outputItems = {
         std::make_pair("reshape1", outputs.find("reshape1")->second),
         std::make_pair("reshape2", outputs.find("reshape2")->second),
@@ -6521,11 +6418,10 @@ TEST_F(MKLDNNGraphStructureTests, TestCreateGraphWithMultipleData_2) {
 
     const auto weights = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, SizeVector{0}, Layout::C));
 
-    InferenceEngine::CNNNetReader reader;
-    reader.ReadNetwork(model.data(), model.size());
-    reader.SetWeights(weights);
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork net;
+    ASSERT_NO_THROW(net = core.ReadNetwork(model, weights));
 
-    auto net = reader.getNetwork();
     net.addOutput("split", 0);
 
     MKLDNNGraphTestClass graph;
@@ -6569,42 +6465,35 @@ TEST_F(MKLDNNGraphStructureTests, TestCreateGraphWithMultipleData_2) {
 }
 
 TEST_F(MKLDNNGraphStructureTests, TestCreateGraphAllDataToConcat) {
-    IE_SUPPRESS_DEPRECATED_START
-
-    using namespace InferenceEngine;
-    // Build the network.
-    Builder::Network netBuilder("");
-
-    // First input layer
-    idx_t inpId = netBuilder.addLayer(InferenceEngine::Builder::InputLayer("input").setPort(InferenceEngine::Port({1, 1, 4, 5})));
-
-    std::vector<size_t> weightsSize = {1, 1, 1, 1};  // OIHW
-    std::vector<float> twos(1, 2);
-    auto weights = make_shared_blob<float>({ Precision::FP32, weightsSize, InferenceEngine::Layout::OIHW }, &twos[0]);
-
-    idx_t weightsId = netBuilder.addLayer({}, Builder::ConstLayer("weights").setData(weights));
-
-    // Convolution layer
-    idx_t firstConvId = netBuilder.addLayer({{inpId}, {weightsId}}, Builder::ConvolutionLayer("conv").setKernel({1, 1})
-            .setStrides({1, 1}).setDilation({1, 1}).setPaddingsBegin({0, 0}).setPaddingsEnd({0, 0}).setGroup(1).setOutDepth(1));
-
-    std::vector<float> threes(1, 3);
-    weights = make_shared_blob<float>({ Precision::FP32, weightsSize, InferenceEngine::Layout::OIHW }, &threes[0]);
-
-    weightsId = netBuilder.addLayer({}, Builder::ConstLayer("weights").setData(weights));
-    // Convolution layer
-    idx_t secondConvId = netBuilder.addLayer({{inpId}, {weightsId}}, Builder::ConvolutionLayer("conv").setKernel({1, 1})
-            .setStrides({1, 1}).setDilation({1, 1}).setPaddingsBegin({0, 0}).setPaddingsEnd({0, 0}).setGroup(1).setOutDepth(1));
-
-    // Concat layer
-    idx_t concatId = netBuilder.addLayer({{inpId}, {firstConvId}, {secondConvId}},
-                                         InferenceEngine::Builder::ConcatLayer("concat").setAxis(1).setInputPorts(std::vector<InferenceEngine::Port>(3)));
-
-    // Output layer
-    InferenceEngine::Builder::OutputLayer outLayer("output");
-    netBuilder.addLayer({concatId}, outLayer);
+    std::shared_ptr<ngraph::Function> function;
+    {
+        ngraph::element::Type elementType = ngraph::element::Type_t::f32;
+        ngraph::Shape shape { 1, 1, 4, 5 };
+        auto input = std::make_shared<ngraph::op::Parameter>(elementType, shape);
+        input->set_friendly_name("input");
+
+        auto weights1 = std::make_shared<ngraph::op::Constant>(
+            elementType, ngraph::Shape{1, 1, 1, 1}, std::vector<float>(1, 2.0f));
+        auto conv1 = std::make_shared<ngraph::op::v1::Convolution>(
+            input, weights1, ngraph::Strides { 1, 1 },
+            ngraph::CoordinateDiff { 0, 0 }, ngraph::CoordinateDiff{0, 0}, ngraph::Strides { 1, 1 });
+
+        auto weights2 = std::make_shared<ngraph::op::Constant>(
+            elementType, ngraph::Shape{1, 1, 1, 1}, std::vector<float>(1, 3.0f));
+        auto conv2 = std::make_shared<ngraph::op::v1::Convolution>(
+            input, weights2, ngraph::Strides { 1, 1 },
+            ngraph::CoordinateDiff { 0, 0 }, ngraph::CoordinateDiff{0, 0}, ngraph::Strides { 1, 1 });
+
+        auto concat = std::make_shared<ngraph::op::Concat>(ngraph::OutputVector { input, conv1, conv2 }, 1);
+        concat->set_friendly_name("concat");
+        auto result = std::make_shared<ngraph::op::Result>(concat);
+
+        ngraph::ResultVector results { result };
+        ngraph::ParameterVector params { input };
+        function = std::make_shared<ngraph::Function>(results, params);
+    }
 
-    auto cnn = CNNNetwork(Builder::convertToICNNNetwork(netBuilder.build()));
+    auto cnn = InferenceEngine::CNNNetwork(function);
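+    // The deprecated Builder::Network construction is replaced by building an
+    // ngraph::Function and wrapping it in CNNNetwork directly.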
 
     // Load the network
     std::vector<size_t> inpSize = {5, 4, 1, 1};
@@ -6638,22 +6527,21 @@ TEST_F(MKLDNNGraphStructureTests, TestCreateGraphAllDataToConcat) {
 }
 
 TEST_F(MKLDNNGraphStructureTests, TestCreateGraphAllDataFromInputToConcat) {
-    using namespace InferenceEngine;
-    // Build the network.
-    Builder::Network netBuilder("");
-
-    // First input layer
-    idx_t inpId = netBuilder.addLayer(InferenceEngine::Builder::InputLayer("input").setPort(InferenceEngine::Port({1, 1, 4, 5})));
-
-    // Concat layer
-    idx_t concatId = netBuilder.addLayer({{inpId}, {inpId}, {inpId}},
-                                         InferenceEngine::Builder::ConcatLayer("concat").setAxis(1).setInputPorts(std::vector<InferenceEngine::Port>(3)));
-
-    // Output layer
-    InferenceEngine::Builder::OutputLayer outLayer("output");
-    netBuilder.addLayer({concatId}, outLayer);
+    std::shared_ptr<ngraph::Function> function;
+    {
+        ngraph::element::Type elementType = ngraph::element::Type_t::f32;
+        auto input = std::make_shared<ngraph::op::Parameter>(elementType, ngraph::Shape { 1, 1, 4, 5 });
+        input->set_friendly_name("input");
+        auto concat = std::make_shared<ngraph::op::Concat>(ngraph::OutputVector { input, input, input }, 1);
+        concat->set_friendly_name("concat");
+        auto result = std::make_shared<ngraph::op::Result>(concat);
+
+        ngraph::ResultVector results { result };
+        ngraph::ParameterVector params { input };
+        function = std::make_shared<ngraph::Function>(results, params);
+    }
 
-    auto cnn = CNNNetwork(Builder::convertToICNNNetwork(netBuilder.build()));
+    auto cnn = InferenceEngine::CNNNetwork(function);
 
     // Load the network
     std::vector<size_t> inpSize = {5, 4, 1, 1};
@@ -6686,8 +6574,6 @@ TEST_F(MKLDNNGraphStructureTests, TestCreateGraphAllDataFromInputToConcat) {
     InferenceEngine::TBlob<float>::Ptr dstOut = InferenceEngine::make_shared_blob<float>(outputBlobs["concat"]->getTensorDesc(), refDst.data());
 
     compare(*outputBlobs["concat"], *dstOut);
-
-    IE_SUPPRESS_DEPRECATED_END
 }
 
 
@@ -6731,60 +6617,53 @@ TEST_F(MKLDNNGraphStructureTests, TestCheckIncorrectScaleShift) {
 </net>
 )V0G0N";
     using namespace InferenceEngine;
-    const auto weights = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, SizeVector{64}, Layout::C));
+    auto weights = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, SizeVector{64}, Layout::C));
+    weights->allocate();
 
-    InferenceEngine::CNNNetReader reader;
-    reader.ReadNetwork(model.data(), model.size());
-    reader.SetWeights(weights);
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(model, weights));
 
     MKLDNNGraphTestClass graph;
-    ASSERT_THROW(graph.CreateGraph(reader.getNetwork()), InferenceEngine::details::InferenceEngineException);
+    ASSERT_THROW(graph.CreateGraph(network), InferenceEngine::details::InferenceEngineException);
 }
 
 TEST_F(MKLDNNGraphStructureTests, TestConcatWithFourInputs) {
-    IE_SUPPRESS_DEPRECATED_START
-
-    using namespace InferenceEngine;
-    // Build the network.
-    Builder::Network netBuilder("");
-
-    // First input layer
-    idx_t inpId = netBuilder.addLayer(InferenceEngine::Builder::InputLayer("input").setPort(InferenceEngine::Port({1, 1, 4, 5})));
-
-    std::vector<size_t> weightsSize = {1, 1, 1, 1};  // OIHW
-    std::vector<float> twos(1, 2);
-    auto weights = make_shared_blob<float>({ Precision::FP32, weightsSize, InferenceEngine::Layout::OIHW }, &twos[0]);
-    idx_t weightsId = netBuilder.addLayer({}, Builder::ConstLayer("weights").setData(weights));
-
-    // Convolution layer
-    idx_t firstConvId = netBuilder.addLayer({{inpId}, {weightsId}}, Builder::ConvolutionLayer("conv").setKernel({1, 1})
-            .setStrides({1, 1}).setDilation({1, 1}).setPaddingsBegin({0, 0}).setPaddingsEnd({0, 0}).setGroup(1).setOutDepth(1));
-
-    std::vector<float> threes(1, 3);
-    weights = make_shared_blob<float>({ Precision::FP32, weightsSize, InferenceEngine::Layout::OIHW }, &threes[0]);
-
-    weightsId = netBuilder.addLayer({}, Builder::ConstLayer("weights").setData(weights));
-    // Convolution layer
-    idx_t secondConvId = netBuilder.addLayer({{inpId}, {weightsId}}, Builder::ConvolutionLayer("conv").setKernel({1, 1})
-            .setStrides({1, 1}).setDilation({1, 1}).setPaddingsBegin({0, 0}).setPaddingsEnd({0, 0}).setGroup(1).setOutDepth(1));
-
-    std::vector<float> four(1, -1);
-    weights = make_shared_blob<float>({ Precision::FP32, weightsSize, InferenceEngine::Layout::OIHW }, &four[0]);
-
-    weightsId = netBuilder.addLayer({}, Builder::ConstLayer("weights").setData(weights));
-    // Convolution layer
-    idx_t thirdConvId = netBuilder.addLayer({{inpId}, {weightsId}}, Builder::ConvolutionLayer("conv").setKernel({1, 1})
-            .setStrides({1, 1}).setDilation({1, 1}).setPaddingsBegin({0, 0}).setPaddingsEnd({0, 0}).setGroup(1).setOutDepth(1));
-
-    // Concat layer
-    idx_t concatId = netBuilder.addLayer({{inpId}, {firstConvId}, {secondConvId}, {thirdConvId}},
-                                         InferenceEngine::Builder::ConcatLayer("concat").setAxis(1).setInputPorts(std::vector<InferenceEngine::Port>(4)));
-
-    // Output layer
-    InferenceEngine::Builder::OutputLayer outLayer("output");
-    netBuilder.addLayer({concatId}, outLayer);
+    std::shared_ptr<ngraph::Function> function;
+    {
+        ngraph::element::Type elementType = ngraph::element::Type_t::f32;
+        ngraph::Shape shape { 1, 1, 4, 5 };
+        auto input = std::make_shared<ngraph::op::Parameter>(elementType, shape);
+        input->set_friendly_name("input");
+
+        auto weights1 = std::make_shared<ngraph::op::Constant>(
+            elementType, ngraph::Shape{1, 1, 1, 1}, std::vector<float>(1, 2.0f));
+        auto conv1 = std::make_shared<ngraph::op::v1::Convolution>(
+            input, weights1, ngraph::Strides { 1, 1 },
+            ngraph::CoordinateDiff { 0, 0 }, ngraph::CoordinateDiff{0, 0}, ngraph::Strides { 1, 1 });
+
+        auto weights2 = std::make_shared<ngraph::op::Constant>(
+            elementType, ngraph::Shape{1, 1, 1, 1}, std::vector<float>(1, 3.0f));
+        auto conv2 = std::make_shared<ngraph::op::v1::Convolution>(
+            input, weights2, ngraph::Strides { 1, 1 },
+            ngraph::CoordinateDiff { 0, 0 }, ngraph::CoordinateDiff{0, 0}, ngraph::Strides { 1, 1 });
+
+        auto weights3 = std::make_shared<ngraph::op::Constant>(
+            elementType, ngraph::Shape{1, 1, 1, 1}, std::vector<float>(1, -1.0f));
+        auto conv3 = std::make_shared<ngraph::op::v1::Convolution>(
+            input, weights3, ngraph::Strides { 1, 1 },
+            ngraph::CoordinateDiff { 0, 0 }, ngraph::CoordinateDiff{0, 0}, ngraph::Strides { 1, 1 });
+
+        auto concat = std::make_shared<ngraph::op::Concat>(ngraph::OutputVector { input, conv1, conv2, conv3 }, 1);
+        concat->set_friendly_name("concat");
+        auto result = std::make_shared<ngraph::op::Result>(concat);
+
+        ngraph::ResultVector results { result };
+        ngraph::ParameterVector params { input };
+        function = std::make_shared<ngraph::Function>(results, params);
+    }
 
-    auto cnn = CNNNetwork(Builder::convertToICNNNetwork(netBuilder.build()));
+    auto cnn = InferenceEngine::CNNNetwork(function);
 
     // Load the network
     std::vector<size_t> inpSize = {5, 4, 1, 1};
@@ -6817,6 +6696,4 @@ TEST_F(MKLDNNGraphStructureTests, TestConcatWithFourInputs) {
     InferenceEngine::TBlob<float>::Ptr dstOut = InferenceEngine::make_shared_blob<float>(outputBlobs["concat"]->getTensorDesc(), refDst.data());
 
     compare(*outputBlobs["concat"], *dstOut);
-
-    IE_SUPPRESS_DEPRECATED_END
 }
index c0232cb..25f55c2 100644 (file)
@@ -87,10 +87,10 @@ TEST_F(VPU_AdjustDataLocationTest, FlushCMX_TwoSpecialConsumers) {
     ASSERT_EQ(data1Consumer->type(), StageType::Copy);
     ASSERT_EQ(data1ConsumerOutput->dataLocation().location, Location::BSS);
     ASSERT_EQ(data1ConsumerOutput->numChildDatas(), 4);
-    ASSERT_TRUE(contains(data1ConsumerOutput->childDataEdges(), [data2](const SharedAllocation& e) { return e->child() == data2; }));
-    ASSERT_TRUE(contains(data1ConsumerOutput->childDataEdges(), [data3](const SharedAllocation& e) { return e->child() == data3; }));
-    ASSERT_TRUE(contains(data1ConsumerOutput->childDataEdges(), [data4](const SharedAllocation& e) { return e->child() == data4; }));
-    ASSERT_TRUE(contains(data1ConsumerOutput->childDataEdges(), [data5](const SharedAllocation& e) { return e->child() == data5; }));
+    ASSERT_TRUE(contains(data1ConsumerOutput->childDataToDataEdges(), [data2](const DataToDataAllocation& e) { return e->child() == data2; }));
+    ASSERT_TRUE(contains(data1ConsumerOutput->childDataToDataEdges(), [data3](const DataToDataAllocation& e) { return e->child() == data3; }));
+    ASSERT_TRUE(contains(data1ConsumerOutput->childDataToDataEdges(), [data4](const DataToDataAllocation& e) { return e->child() == data4; }));
+    ASSERT_TRUE(contains(data1ConsumerOutput->childDataToDataEdges(), [data5](const DataToDataAllocation& e) { return e->child() == data5; }));
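+    // childDataEdges()/SharedAllocation were renamed to
+    // childDataToDataEdges()/DataToDataAllocation in the graph transformer API.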
 }
 
 //
index 5afa381..eeccb27 100644 (file)
@@ -85,7 +85,7 @@ TEST_F(VPU_EliminateCopyTest, OneInputTwoConcats) {
     pipeline.run(model);
 
     const auto& hwOutput = hwStage->output(0);
-    ASSERT_NE(hwOutput->parentDataEdge(), nullptr);
+    ASSERT_NE(hwOutput->parentDataToDataEdge(), nullptr);
     ASSERT_EQ(hwOutput->parentData(), outputCopy1);
 
     ASSERT_EQ(hwOutput->numConsumers(), 2);
index 3571ef9..0f96c3b 100644 (file)
@@ -14,104 +14,104 @@ using namespace InferenceEngine;
 
 using VPU_AddVpuScaleTest = GraphTransformerTest;
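+// Note: the two tests below appear to be disabled because they rely on the
+// deprecated Builder::Network API; their bodies are kept commented out rather
+// than ported.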
 
-TEST_F(VPU_AddVpuScaleTest, CanAddVpuScaleToNetwork) {
-    InitCompileEnv();
+// TEST_F(VPU_AddVpuScaleTest, CanAddVpuScaleToNetwork) {
+//     InitCompileEnv();
 
-    auto& env = CompileEnv::get();
-    CompilationConfig config{};
-    config.irWithVpuScalesDir = "/";
-    env.updateConfig(config);
+//     auto& env = CompileEnv::get();
+//     CompilationConfig config{};
+//     config.irWithVpuScalesDir = "/";
+//     env.updateConfig(config);
 
-    Builder::Network builder("network");
-    Builder::FullyConnectedLayer fcBuilder("FullyConnected");
+//     Builder::Network builder("network");
+//     Builder::FullyConnectedLayer fcBuilder("FullyConnected");
 
-    fcBuilder.setOutputNum(1024 * 1);
-    SizeVector inputDims = {1, 2, 16, 16};
+//     fcBuilder.setOutputNum(1024 * 1);
+//     SizeVector inputDims = {1, 2, 16, 16};
 
-    idx_t layerId = builder.addLayer(Builder::InputLayer("input").setPort(Port(inputDims)));
+//     idx_t layerId = builder.addLayer(Builder::InputLayer("input").setPort(Port(inputDims)));
 
-    Blob::Ptr blob = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, {1024, 2, 16, 16}, Layout::OIHW));
-    blob->allocate();
+//     Blob::Ptr blob = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, {1024, 2, 16, 16}, Layout::OIHW));
+//     blob->allocate();
 
-    idx_t weightsId = builder.addLayer(Builder::ConstLayer("weights").setData(blob));
-    layerId = builder.addLayer({{layerId}, {weightsId}}, fcBuilder);
-    builder.addLayer({PortInfo(layerId)}, Builder::OutputLayer("output"));
+//     idx_t weightsId = builder.addLayer(Builder::ConstLayer("weights").setData(blob));
+//     layerId = builder.addLayer({{layerId}, {weightsId}}, fcBuilder);
+//     builder.addLayer({PortInfo(layerId)}, Builder::OutputLayer("output"));
 
-    auto network = Builder::convertToICNNNetwork(builder.build());
+//     auto network = Builder::convertToICNNNetwork(builder.build());
 
-    CNNLayerPtr layer;
-    network->getLayerByName("FullyConnected", layer, nullptr);
+//     CNNLayerPtr layer;
+//     network->getLayerByName("FullyConnected", layer, nullptr);
 
-    EXPECT_EQ(layer->params.find("vpu_scale"), layer->params.end());
+//     EXPECT_EQ(layer->params.find("vpu_scale"), layer->params.end());
 
-    auto model = frontEnd->buildInitialModel(*network);
+//     auto model = frontEnd->buildInitialModel(*network);
 
-    auto middleEnd = passManager->buildMiddleEnd();
+//     auto middleEnd = passManager->buildMiddleEnd();
 
-    middleEnd->run(model);
+//     middleEnd->run(model);
 
-    EXPECT_NE(layer->params.find("vpu_scale"), layer->params.end());
-}
+//     EXPECT_NE(layer->params.find("vpu_scale"), layer->params.end());
+// }
 
-TEST_F(VPU_AddVpuScaleTest, VpuScaleFromIrChangesWeights) {
-    InitCompileEnv();
-    const auto& env = CompileEnv::get();
-    CompilationConfig config{};
-    config.irWithVpuScalesDir = "/";
-    env.updateConfig(config);
+// TEST_F(VPU_AddVpuScaleTest, VpuScaleFromIrChangesWeights) {
+//     InitCompileEnv();
+//     const auto& env = CompileEnv::get();
+//     CompilationConfig config{};
+//     config.irWithVpuScalesDir = "/";
+//     env.updateConfig(config);
 
-    Builder::Network netBuilder("network");
+//     Builder::Network netBuilder("network");
 
-    Blob::Ptr weightsBlob = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, {1, 1, 1, 1}, Layout::NCHW));
-    weightsBlob->allocate();
-    auto buf = weightsBlob->buffer().as<ie_fp16*>();
+//     Blob::Ptr weightsBlob = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, {1, 1, 1, 1}, Layout::NCHW));
+//     weightsBlob->allocate();
+//     auto buf = weightsBlob->buffer().as<ie_fp16*>();
 
-    for (size_t i = 0; i < weightsBlob->size(); ++i) {
-        buf[i] = PrecisionUtils::f32tof16(1.f);
-    }
+//     for (size_t i = 0; i < weightsBlob->size(); ++i) {
+//         buf[i] = PrecisionUtils::f32tof16(1.f);
+//     }
 
-    idx_t layerId = netBuilder.addLayer(Builder::InputLayer("input").setPort(Port({1, 1, 1, 1})));
-    size_t weightsId = netBuilder.addLayer(Builder::ConstLayer("weights").setData(weightsBlob));
+//     idx_t layerId = netBuilder.addLayer(Builder::InputLayer("input").setPort(Port({1, 1, 1, 1})));
+//     size_t weightsId = netBuilder.addLayer(Builder::ConstLayer("weights").setData(weightsBlob));
 
-    const auto convBuilder = Builder::ConvolutionLayer("Convolution").setStrides({1, 1}).setKernel({1, 1})
-            .setOutDepth(1).setInputPort(Port({1, 1, 1, 1}));
+//     const auto convBuilder = Builder::ConvolutionLayer("Convolution").setStrides({1, 1}).setKernel({1, 1})
+//             .setOutDepth(1).setInputPort(Port({1, 1, 1, 1}));
 
-    layerId = netBuilder.addLayer({{layerId}, {weightsId}}, convBuilder);
-    netBuilder.addLayer({PortInfo(layerId)}, Builder::OutputLayer("output"));
+//     layerId = netBuilder.addLayer({{layerId}, {weightsId}}, convBuilder);
+//     netBuilder.addLayer({PortInfo(layerId)}, Builder::OutputLayer("output"));
 
-    auto network = Builder::convertToICNNNetwork(netBuilder.build());
+//     auto network = Builder::convertToICNNNetwork(netBuilder.build());
 
-    CNNLayerPtr layer;
-    network->getLayerByName("Convolution", layer, nullptr);
+//     CNNLayerPtr layer;
+//     network->getLayerByName("Convolution", layer, nullptr);
 
-    auto model = frontEnd->buildInitialModel(*network);
-    auto middleEnd = passManager->buildMiddleEnd();
+//     auto model = frontEnd->buildInitialModel(*network);
+//     auto middleEnd = passManager->buildMiddleEnd();
 
-    auto checkWeightWasChanged = [this, network, layer](const float scale) {
-        layer->params["vpu_scale"] = toString(scale);
-        auto model = frontEnd->buildInitialModel(*network);
-        auto middleEnd = passManager->buildMiddleEnd();
-        middleEnd->run(model);
-        for (const auto& stage : model->getStages()) {
-            if (stage->name() == "Convolution") {
-                auto content = stage->input(1)->content()->get<ie_fp16>();
-                EXPECT_EQ(scale, PrecisionUtils::f16tof32(content[0]));
-            }
-        }
-    };
+//     auto checkWeightWasChanged = [this, network, layer](const float scale) {
+//         layer->params["vpu_scale"] = toString(scale);
+//         auto model = frontEnd->buildInitialModel(*network);
+//         auto middleEnd = passManager->buildMiddleEnd();
+//         middleEnd->run(model);
+//         for (const auto& stage : model->getStages()) {
+//             if (stage->name() == "Convolution") {
+//                 auto content = stage->input(1)->content()->get<ie_fp16>();
+//                 EXPECT_EQ(scale, PrecisionUtils::f16tof32(content[0]));
+//             }
+//         }
+//     };
 
-    checkWeightWasChanged(32);
-    checkWeightWasChanged(64);
+//     checkWeightWasChanged(32);
+//     checkWeightWasChanged(64);
 
-    const auto maxVal = std::numeric_limits<float>::infinity();
-    layer->params["vpu_scale"] = toString(maxVal);
-    model = frontEnd->buildInitialModel(*network);
-    middleEnd = passManager->buildMiddleEnd();
-    middleEnd->run(model);
+//     const auto maxVal = std::numeric_limits<float>::infinity();
+//     layer->params["vpu_scale"] = toString(maxVal);
+//     model = frontEnd->buildInitialModel(*network);
+//     middleEnd = passManager->buildMiddleEnd();
+//     middleEnd->run(model);
 
-    for (const auto& stage : model->getStages()) {
-        if (stage->name() == "Convolution") {
-            EXPECT_EQ(stage->attrs().get<float>("scaleFactor"), maxVal);
-        }
-    }
-}
+//     for (const auto& stage : model->getStages()) {
+//         if (stage->name() == "Convolution") {
+//             EXPECT_EQ(stage->attrs().get<float>("scaleFactor"), maxVal);
+//         }
+//     }
+// }
index 574478a..096c827 100644 (file)
@@ -26,7 +26,7 @@ TEST_F(GraphTransformerTest, CantConnectInputOutputDatas) {
     model->attrs().set<int>("numOutputs", 1);
 
     ASSERT_ANY_THROW(
-    model->connectDatas()
+    model->connectDataWithData()
         .parent(input)
         .child(output)
         .mode(SharedDataMode::ROI)
index 064a309..3d33163 100644 (file)
@@ -81,8 +81,11 @@ TEST_F(MVNCWatchdogTests, DISABLED_removeDeviceIfXLINKSessionNotIninitialized) {
     std::this_thread::sleep_for(std::chrono::milliseconds(1000));
 }
 
-
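+// On macOS debug builds (__APPLE__ && !NDEBUG) the two watchdog registration
+// tests below are given the DISABLED_ prefix so that gtest skips them.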
+#if defined(__APPLE__) && !defined(NDEBUG)
+TEST_F(MVNCWatchdogTests, DISABLED_canNotBeRegisteredTwice) {
+#else
 TEST_F(MVNCWatchdogTests, canNotBeRegisteredTwice) {
+#endif
 
     d.wd_interval = 10;
 
@@ -110,7 +113,11 @@ TEST_F(MVNCWatchdogTests, canUnRegisterIfInterval0) {
     ASSERT_EQ(WD_ERRNO, watchdog_unregister_device(&ctx));
 }
 
+#if defined(__APPLE__) && !defined(NDEBUG)
+TEST_F(MVNCWatchdogTests, DISABLED_failUnRegisterTwice) {
+#else
 TEST_F(MVNCWatchdogTests, failUnRegisterTwice) {
+#endif
 
     d.wd_interval = 10;
 
index 61c7c27..21370f4 100644 (file)
@@ -10,7 +10,7 @@
 #include <gmock/gmock-generated-matchers.h>
 #include <gmock/gmock-more-actions.h>
 #include "ie_common.h"
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 #include "graph_test_base.hpp"
 #include <memory>
 
@@ -168,7 +168,6 @@ TEST_F(GraphCopyTests, canQuantizeTopology) {
 #endif
 
 TEST(CNNSpecificGraphCopyTests, copyNetworkWithClampLayer) {
-    CNNNetReader netReader;
     //define minimal network with Clamp layer
     const std::string SINGLE_LAYER_MODEL = R"V0G0N(
     <net name="SingleLayer" version="2" batch="1">
@@ -208,9 +207,10 @@ TEST(CNNSpecificGraphCopyTests, copyNetworkWithClampLayer) {
         </edges>
     </net>
     )V0G0N";
-    ASSERT_NO_THROW(netReader.ReadNetwork(SINGLE_LAYER_MODEL.data(), SINGLE_LAYER_MODEL.length()));
-    ASSERT_TRUE(netReader.isParseSuccess());
-    auto network = netReader.getNetwork();
+
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(SINGLE_LAYER_MODEL, InferenceEngine::Blob::CPtr()));
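+    // ReadNetwork throws on a malformed IR, so ASSERT_NO_THROW subsumes the
+    // old isParseSuccess() check.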
 
     //copy the network
     struct EmptyStruct {};
@@ -225,7 +225,6 @@ TEST(CNNSpecificGraphCopyTests, copyNetworkWithClampLayer) {
 }
 
 TEST(CNNSpecificGraphCopyTests, copyPreprocess) {
-    CNNNetReader netReader;
     //define minimal network with a pre-process section
     const std::string SINGLE_LAYER_MODEL = R"V0G0N(
     <net name="SingleLayer" version="2" batch="1">
@@ -276,9 +275,10 @@ TEST(CNNSpecificGraphCopyTests, copyPreprocess) {
         </pre-process>
     </net>
     )V0G0N";
-    ASSERT_NO_THROW(netReader.ReadNetwork(SINGLE_LAYER_MODEL.data(), SINGLE_LAYER_MODEL.length()));
-    ASSERT_TRUE(netReader.isParseSuccess());
-    auto network = netReader.getNetwork();
+
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(SINGLE_LAYER_MODEL, InferenceEngine::Blob::CPtr()));
 
     //copy the network
     struct EmptyStruct {};
@@ -298,7 +298,6 @@ TEST(CNNSpecificGraphCopyTests, copyPreprocess) {
 }
 
 TEST(CNNSpecificGraphCopyTests, copyNetworkWithDeconvolution) {
-    CNNNetReader netReader;
     //define minimal network with deconvolution layer
     const std::string SINGLE_LAYER_MODEL = R"V0G0N(
     <net name="SingleLayer" version="2" batch="1">
@@ -339,9 +338,10 @@ TEST(CNNSpecificGraphCopyTests, copyNetworkWithDeconvolution) {
         </edges>
     </net>
     )V0G0N";
-    ASSERT_NO_THROW(netReader.ReadNetwork(SINGLE_LAYER_MODEL.data(), SINGLE_LAYER_MODEL.length()));
-    ASSERT_TRUE(netReader.isParseSuccess());
-    auto network = netReader.getNetwork();
+
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network;
+    ASSERT_NO_THROW(network = core.ReadNetwork(SINGLE_LAYER_MODEL, InferenceEngine::Blob::CPtr()));
 
     // copy the network
     struct EmptyStruct {};
index 395dd32..eddfd17 100644 (file)
@@ -8,8 +8,6 @@
 #include <random>
 #include <chrono>
 
-#include <cpp/ie_cnn_net_reader.h>
-
 #include <gmock/gmock-spec-builders.h>
 
 #include "unit_test_utils/mocks/mock_allocator.hpp"
index 2061103..55b8829 100644 (file)
@@ -12,7 +12,6 @@
 #include <memory>
 #include <map>
 
-#include <cpp/ie_cnn_net_reader.h>
 #include <cpp/ie_cnn_network.h>
 #include <ie_util_internal.hpp>
 #include <ie_parameter.hpp>
@@ -449,11 +448,8 @@ TEST_F(CNNNGraphImplTests, ReadFromCNNNetReader) {
     </edges>
 </net>
 )V0G0N";
-    CNNNetReader reader;
-    reader.ReadNetwork(model.data(), model.length());
-    ASSERT_TRUE(reader.isParseSuccess());
-    reader.SetWeights(nullptr);
-    CNNNetwork network = reader.getNetwork();
+    InferenceEngine::Core core;
+    CNNNetwork network = core.ReadNetwork(model, InferenceEngine::Blob::CPtr());
     network.begin();
     ASSERT_EQ(2, network.layerCount());
 }
index 1f5e0ef..e0b719c 100644 (file)
@@ -12,6 +12,7 @@
 #include <ngraph/function.hpp>
 #include <ngraph/opsets/opset1.hpp>
 #include <ngraph_ops/convolution_ie.hpp>
+#include <transformations/init_node_info.hpp>
 
 using namespace testing;
 using namespace InferenceEngine;
@@ -29,6 +30,7 @@ TEST_F(ConvertFunctionToCNNNetworkTests, ConvertPReLUNetwork) {
 
         f = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                ngraph::ParameterVector{param1, param2});
+        ngraph::pass::InitNodeInfo().run_on_function(f);
     }
 
     InferenceEngine::CNNNetwork nGraphImpl(f);
@@ -38,7 +40,7 @@ TEST_F(ConvertFunctionToCNNNetworkTests, ConvertPReLUNetwork) {
     } catch (InferenceEngine::details::InferenceEngineException &err) {
         const std::string ref_msg = "Error of validate layer: prelu with type: PReLU. Number of inputs (2) is not equal to expected ones: 1";
         const std::string resp_msg = err.what();
-        ASSERT_TRUE(resp_msg.find(ref_msg) != std::string::npos);
+        ASSERT_TRUE(resp_msg.find(ref_msg) != std::string::npos) << resp_msg;
     }
 }
 
@@ -58,6 +60,7 @@ TEST_F(ConvertFunctionToCNNNetworkTests, ConvertConvolutionNetwork) {
 
         f = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                ngraph::ParameterVector{param1, param2});
+        ngraph::pass::InitNodeInfo().run_on_function(f);
     }
 
     InferenceEngine::CNNNetwork nGraphImpl(f);
diff --git a/inference-engine/tests_deprecated/unit/inference_engine_tests/inference_engine_test.cpp b/inference-engine/tests_deprecated/unit/inference_engine_tests/inference_engine_test.cpp
deleted file mode 100644 (file)
index 2a8a6e9..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-
-#include "inference_engine.hpp"
-
-using namespace std;
-using namespace testing;
-using namespace InferenceEngine;
-
-class InferenceEngineTests : public ::testing::Test {
-public:
-       InferenceEngineTests(): output(TensorDesc(Precision::FP32, C))
-       {
-       }
-
-protected:
-    InferenceEngine::TBlob<float> output;
-    vector<unsigned> results;
-    std::vector<float> values;
-
-    virtual void TearDown() override{
-    }
-
-    virtual void SetUp() override {
-        values = { 0.3f, 0.1f, 0.01f, 0.9f, 0.99f, 0.12f, 0.001f, 0, 0.999f, 0.0000001f };
-        output = TBlob<float>(TensorDesc(Precision::FP32, { 1, 10 }, Layout::NC), &values[0]);
-    }
-
-    InferenceEngine::TBlob<float>::Ptr getCopiedTBlob(InferenceEngine::SizeVector size) {
-        InferenceEngine::TBlob<float>::Ptr blob(new InferenceEngine::TBlob<float>(TensorDesc(Precision::FP32, size,
-                                                                                  TensorDesc::getLayoutByDims(size))));
-        blob->allocate();
-        const size_t arr_size = 4;
-        uint8_t data[arr_size] = { 1, 2, 3, 4 };
-        IE_SUPPRESS_DEPRECATED_START
-        InferenceEngine::copyFromRGB8(&data[0], arr_size, blob.get());
-        IE_SUPPRESS_DEPRECATED_END
-        return blob;
-    }
-};
-
-TEST_F(InferenceEngineTests, checkZeroInput) {
-    InferenceEngine::TBlob<float> output(TensorDesc(Precision::FP32, C));
-    output.allocate();
-
-    IE_SUPPRESS_DEPRECATED_START
-    EXPECT_THROW(InferenceEngine::TopResults(5, output, results), InferenceEngine::details::InferenceEngineException);
-    IE_SUPPRESS_DEPRECATED_END
-}
-
-TEST_F(InferenceEngineTests, testInsertSort) {
-    IE_SUPPRESS_DEPRECATED_START
-    InferenceEngine::TopResults(5, output, results);
-    IE_SUPPRESS_DEPRECATED_END
-
-    ASSERT_EQ(5, results.size());
-    ASSERT_EQ(8, results[0]);
-    ASSERT_EQ(4, results[1]);
-    ASSERT_EQ(3, results[2]);
-    ASSERT_EQ(0, results[3]);
-    ASSERT_EQ(5, results[4]);
-}
-
-TEST_F(InferenceEngineTests, testInsertSortOverDraft) {
-    IE_SUPPRESS_DEPRECATED_START
-    InferenceEngine::TopResults(15, output, results);
-    IE_SUPPRESS_DEPRECATED_END
-    
-    ASSERT_EQ(10, results.size());
-    ASSERT_EQ(8, results[0]);
-    ASSERT_EQ(4, results[1]);
-    ASSERT_EQ(3, results[2]);
-    ASSERT_EQ(0, results[3]);
-    ASSERT_EQ(5, results[4]);
-    ASSERT_EQ(1, results[5]);
-    ASSERT_EQ(2, results[6]);
-    ASSERT_EQ(6, results[7]);
-    ASSERT_EQ(9, results[8]);
-    ASSERT_EQ(7, results[9]);
-}
-
-TEST_F(InferenceEngineTests, testThrowsOnCopyToBadBlob) {
-    ASSERT_THROW(getCopiedTBlob({ 1, 1, 1 }), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(InferenceEngineTests, testThrowsOnCopyToBlobWithBadSize) {
-    ASSERT_THROW(getCopiedTBlob({ 1, 1, 1, 1 }), InferenceEngine::details::InferenceEngineException);
-}
-
-TEST_F(InferenceEngineTests, canCopyToProperBlob) {
-    auto blob = getCopiedTBlob({ 1, 1, 1, 4 });
-    ASSERT_EQ(blob->data()[blob->size() - 1], 4);
-}
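
The deleted inference_engine_test.cpp exercised the deprecated TopResults and copyFromRGB8 helpers; the fixture's ten probabilities pin down TopResults' contract: return the indices of the N largest values, capped at the blob size. A hedged stand-alone equivalent (a hypothetical helper, not part of the Inference Engine API):

    #include <algorithm>
    #include <numeric>
    #include <vector>

    std::vector<unsigned> topResults(size_t n, const std::vector<float>& v) {
        std::vector<unsigned> idx(v.size());
        std::iota(idx.begin(), idx.end(), 0u);  // 0, 1, ..., size-1
        n = std::min(n, idx.size());            // the "OverDraft" case: n > size
        std::partial_sort(idx.begin(), idx.begin() + n, idx.end(),
                          [&](unsigned a, unsigned b) { return v[a] > v[b]; });
        idx.resize(n);
        return idx;  // e.g. {8, 4, 3, 0, 5} for n = 5 on the fixture's values
    }
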
index 4c90e2f..33585ee 100644
@@ -5,7 +5,7 @@
 #include <gtest/gtest.h>
 #include <single_layer_common.hpp>
 
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 #include <net_pass.h>
 
 using namespace ::testing;
@@ -216,13 +216,12 @@ protected:
     }
 
     void testBody(bool isLSTM = false) const {
-        CNNNetReader reader;
+        InferenceEngine::Core core;
 
         // This model contains layers with float attributes.
         // Conversion from string may be affected by locale.
         std::string model = isLSTM ? _model_LSTM : getModel();
-        reader.ReadNetwork(model.data(), model.length());
-        auto net = reader.getNetwork();
+        auto net = core.ReadNetwork(model, InferenceEngine::Blob::CPtr());
 
         if (!isLSTM) {
             auto power_layer = dynamic_pointer_cast<PowerLayer>(net.getLayerByName("power"));
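
This hunk and the ones below apply the same mechanical migration: the two-step deprecated CNNNetReader flow collapses into a single InferenceEngine::Core::ReadNetwork call that takes the model text plus a weights blob (an empty Blob::CPtr for these weightless IR snippets). Condensed, the before/after is:

    // Before (deprecated reader):
    //     CNNNetReader reader;
    //     reader.ReadNetwork(model.data(), model.length());
    //     auto net = reader.getNetwork();
    // After (Core-based API, as in the hunks of this commit):
    InferenceEngine::Core core;
    auto net = core.ReadNetwork(model, InferenceEngine::Blob::CPtr());
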
index 010351c..f572434 100644
@@ -5,7 +5,7 @@
 #include <gtest/gtest.h>
 #include <cpp/ie_cnn_network.h>
 #include <network_serializer.h>
-
+#include <ie_core.hpp>
 
 static const auto model = R"_(
 <?xml version="1.0" encoding="UTF-8"?>
@@ -95,14 +95,8 @@ static const auto model = R"_(
 )_";
 
 TEST(NetworkSerializerTest, TopoSortResultUnique) {
-    auto reader = InferenceEngine::CreateCNNNetReaderPtr();
-
-    InferenceEngine::ResponseDesc resp;
-
-    ASSERT_EQ(InferenceEngine::StatusCode::OK, reader->ReadNetwork(model, std::strlen(model), &resp)) << resp.msg;
-
-    auto network = InferenceEngine::CNNNetwork(reader);
-
+    InferenceEngine::Core ie;
+    auto network = ie.ReadNetwork(model, InferenceEngine::Blob::CPtr());
     auto sorted = InferenceEngine::Serialization::TopologicalSort(network);
 
     std::vector<std::string> actualLayerNames;
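
The same hunk also shows the error-handling shift that rides along with the migration: the deprecated reader reported failures through a StatusCode return plus a ResponseDesc out-parameter, whereas Core::ReadNetwork throws on failure, so status checks become exception assertions (a condensed comparison; names as in the hunks above and below):

    // Old protocol (removed above): status code + ResponseDesc out-parameter.
    //     InferenceEngine::ResponseDesc resp;
    //     ASSERT_EQ(InferenceEngine::StatusCode::OK,
    //               reader->ReadNetwork(model, std::strlen(model), &resp)) << resp.msg;
    // New protocol: ReadNetwork throws, so failures are guarded with an
    // assertion macro, as the next hunk does:
    //     ASSERT_NO_THROW(network = ie.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
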
index 78362fd..ea45be3 100644
@@ -4,7 +4,7 @@
 
 #include <gtest/gtest.h>
 #include <gmock/gmock-spec-builders.h>
-#include <cpp/ie_cnn_net_reader.h>
+#include <ie_core.hpp>
 
 #include <cnn_network_int8_normalizer.hpp>
 #include "tests_common.hpp"
@@ -130,10 +130,9 @@ protected:
             conv_conv_eltwise_params p = ::testing::WithParamInterface<conv_conv_eltwise_params>::GetParam();
             std::string model = getModel(p);
 
-            InferenceEngine::CNNNetReader net_reader;
-            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
-
-            auto network = net_reader.getNetwork();
+            InferenceEngine::Core ie;
+            InferenceEngine::CNNNetwork network;
+            ASSERT_NO_THROW(network = ie.ReadNetwork(model, InferenceEngine::Blob::CPtr()));
 
             int maxSign = 0x7F;
             int maxUnsign = 0xFF;
index cc60d14..fd047af 100644
@@ -17,7 +17,6 @@
 #include <gmock/gmock.h>
 #include <string>
 #include "ie_plugin_dispatcher.hpp"
-#include "ie_plugin_ptr.hpp"
 #include <fstream>
 
 #include "unit_test_utils/mocks/mock_plugin_dispatcher.hpp"
index b22a6b0..9cb8935 100644
@@ -7,7 +7,9 @@
 #include <gmock/gmock-spec-builders.h>
 
 #include <memory>
+#include <tests_utils.hpp>
 #include <details/ie_so_pointer.hpp>
+#include <details/ie_irelease.hpp>
 
 using namespace InferenceEngine::details;
 using namespace ::testing;
@@ -69,3 +71,32 @@ TEST_F(SoPointerTests, assignObjThenLoader) {
 
     soPointer1 = soPointer2;
 }
+
+namespace InferenceEngine {
+
+namespace details {
+
+template<>
+class SOCreatorTrait<InferenceEngine::details::IRelease> {
+public:
+    static constexpr auto name = "CreateIRelease";
+};
+
+}  // namespace details
+
+}  // namespace InferenceEngine
+
+TEST_F(SoPointerTests, UnknownPlugin) {
+    ASSERT_THROW(SOPointer<InferenceEngine::details::IRelease>("UnknownPlugin"), InferenceEngineException);
+}
+
+TEST_F(SoPointerTests, UnknownPluginExceptionStr) {
+    try {
+        SOPointer<InferenceEngine::details::IRelease>("UnknownPlugin");
+    }
+    catch (InferenceEngineException &e) {
+        ASSERT_STR_CONTAINS(e.what(), "Cannot load library 'UnknownPlugin':");
+        ASSERT_STR_DOES_NOT_CONTAIN(e.what(), "path:");
+        ASSERT_STR_DOES_NOT_CONTAIN(e.what(), "from CWD:");
+    }
+}
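
The new tests work because SOCreatorTrait binds an interface type to the factory symbol that SOPointer resolves in the loaded shared library ("CreateIRelease" above); the UnknownPlugin cases then assert that a failed dlopen/LoadLibrary surfaces as an InferenceEngineException whose message names the library but omits the search-path details. The same mechanism extends to any interface; MyInterface below is hypothetical:

    namespace InferenceEngine {
    namespace details {

    // Hypothetical trait: SOPointer<MyInterface>("libfoo") would load libfoo
    // and look up its exported "CreateMyInterface" factory.
    template <>
    class SOCreatorTrait<MyInterface> {
    public:
        static constexpr auto name = "CreateMyInterface";
    };

    }  // namespace details
    }  // namespace InferenceEngine
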
diff --git a/inference-engine/tests_deprecated/unit/shape_infer/adult_test.cpp b/inference-engine/tests_deprecated/unit/shape_infer/adult_test.cpp
deleted file mode 100644
index fb0b821..0000000
+++ /dev/null
@@ -1,684 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <tuple>
-#include "adult_test.hpp"
-#include "debug.h"
-#include <cmath>
-
-using namespace InferenceEngine;
-using namespace details;
-using namespace ShapeInfer;
-using namespace ShapeInferTests;
-
-void BasicTest::SetUp() {
-    auto params = GetParam();
-    type = std::get<0>(params);
-    inOutData = std::get<1>(params);
-}
-
-void BlobTest::SetUp() {
-    auto params = GetParam();
-    type = std::get<0>(params);
-    inOutData = std::get<1>(params);
-    blobsParam = std::get<2>(params);
-}
-
-void ParamsTest::SetUp() {
-    auto params = GetParam();
-    type = std::get<0>(params);
-    inOutData = std::get<1>(params);
-    strParams = std::get<2>(params);
-}
-
-ASITestBuilder CommonTests::assertThat() {
-    return ASITestBuilder().withType(type).withData(inOutData);
-}
-
-std::vector<Precision> StridedSliceTest::getPrecisions() {
-    size_t size = inOutData.inData.size();
-    std::vector<Precision> result;
-    if (!size) THROW_IE_EXCEPTION << "unsupported number of precisions";
-    result.emplace_back(Precision::FP32);
-    for (int i = 1; i < size; i++) {
-        result.emplace_back(Precision::I32);
-    }
-    return result;
-}
-
-std::vector<float> FillTest::refGen(const InOutData& inOutData) {
-    const size_t FILL_DIMS = 0;
-    const size_t FILL_VALUE = 1;
-    float value = inOutData.inData[FILL_VALUE][0];
-    auto shape = inOutData.inData[FILL_DIMS];
-    return std::vector<float>(product(shape), value);
-}
-
-std::vector<float> RangeTest::refGen(const InOutData& inOutData) {
-    std::vector<float> result;
-    float start = inOutData.inData[0][0];
-    float limit = inOutData.inData[1][0];
-    float delta = inOutData.inData[2][0];
-    size_t work_amount_dst = std::floor(std::abs((limit - start) / delta));
-    if (work_amount_dst != product(inOutData.inOutShapes.outDims[0]))
-        THROW_IE_EXCEPTION << "Range indexes exceeds data tensor dimension";
-
-    float dst_value = start;
-    for (size_t iwork = 0; iwork < work_amount_dst; ++iwork, dst_value += delta) {
-        result.push_back(dst_value);
-    }
-    return result;
-}
-
-std::vector<float> BroadcastTest::refGen(const InOutData& inOutData) {
-    const size_t BROADCAST_DIMS = 0;
-    const size_t BROADCAST_VALUE = 1;
-    float value = inOutData.inData[BROADCAST_VALUE][0];
-    auto shape = inOutData.inData[BROADCAST_DIMS];
-    return std::vector<float>(product(shape), value);
-}
-
-TEST_P(BlobTest, impl) {
-    assertThat().constInferResultFor().withBlobs(blobsParam).equals().toData(inOutData.outData);
-}
-
-TEST_P(BasicTest, impl) {
-    assertThat().constInferResultFor().equals().toData(inOutData.outData);
-}
-
-TEST_P(ParamsTest, impl) {
-    assertThat().constInferResultFor().withParams(strParams.data).equals().toData(inOutData.outData);
-}
-
-TEST_P(StridedSliceTest, impl) {
-    assertThat().constInferResultFor().withParams(strParams.data)
-            .withInputPrecisions(getPrecisions()).equals().toData(inOutData.outData);
-}
-
-TEST_P(StridedSliceTest, shapeInfer) {
-    assertThat().shapeInferResultFor().withParams(strParams.data)
-            .withInputPrecisions(getPrecisions())
-            .equals().toShapes(inOutData.inOutShapes.outDims);
-}
-
-TEST_P(BasicAdultTest, impl) {
-    assertThat().shapeInferResultFor().equals().toShapes(inOutData.inOutShapes.outDims);
-}
-
-TEST_P(FillTest, impl) {
-    assertThat().constInferResultFor().withInputPrecisions({Precision::I32, Precision::FP32})
-            .equals().toData({refGen(inOutData)});
-}
-
-TEST_P(FillTest, shapeInfer) {
-    assertThat().shapeInferResultFor().withInputPrecisions({Precision::I32, Precision::FP32})
-            .equals().toShapes(inOutData.inOutShapes.outDims);
-}
-
-TEST_P(RangeTest, impl) {
-    assertThat().constInferResultFor().equals().toData({refGen(inOutData)});
-}
-
-TEST_P(RangeTest, shapeInfer) {
-    assertThat().shapeInferResultFor().equals().toShapes(inOutData.inOutShapes.outDims);
-}
-
-TEST_P(BroadcastTest, impl) {
-    assertThat().constInferResultFor().withInputPrecisions({Precision::FP32, Precision::I32})
-            .equals().toData({refGen(inOutData)});
-}
-
-TEST_P(BroadcastTest, shapeInfer) {
-    assertThat().shapeInferResultFor().withInputPrecisions({Precision::FP32, Precision::I32})
-            .equals().toShapes(inOutData.inOutShapes.outDims);
-}
-
-static std::vector<float> singleInputData = {4.f, 8.f, 12.f, 16.f};
-
-static testing::InOutShapes singleSmallShapes = {{{1, 3}},
-                                                 {{1, 3}}};
-static std::vector<float> singleSmallData = {1.f, 2.f, 4.f};
-
-static testing::InOutShapes singleSmall2Shapes = {{{1, 3}, {1, 3}},
-                                                  {{1, 3}}};
-
-static testing::InOutShapes singleInOutShape = {{{4, 8, 12, 16}},
-                                                {{4}}};
-
-static std::vector<float> fourInARow = {1.f, 2.f, 3.f, 4.f};
-
-static SizeVector threeDeuces = {2, 2, 2};
-
-INSTANTIATE_TEST_CASE_P(
-        CheckOutputDirectly, BlobTest,
-        ::testing::Values(
-                ::testing::make_tuple(LayerType("Const"), InOutDataParam({singleInOutShape, {}, {singleInputData}}),
-                                      BlobsParam(FloatMap{{"custom", singleInputData}}))
-        )
-);
-
-INSTANTIATE_TEST_CASE_P(
-        CheckOutputDirectly, ParamsTest,
-        ::testing::Values(
-                ::testing::make_tuple(LayerType("Power"),
-                                      InOutDataParam({singleSmallShapes,
-                                                      {singleSmallData},
-                                                      {{-2 / 3.f, -2 / 7.f, -2 / 15.f}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"power", "-1"},
-                                                                                             {"scale", "-2"},
-                                                                                             {"shift", "0.5"}}))),
-                ::testing::make_tuple(LayerType("Power"),
-                                      InOutDataParam({singleSmallShapes,
-                                                      {singleSmallData},
-                                                      {{-3.375f, -1.f, 0.f,}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"power", "3"},
-                                                                                             {"scale", "0.5"},
-                                                                                             {"shift", "-2"}}))),
-                ::testing::make_tuple(LayerType("Power"),
-                                      InOutDataParam({singleSmallShapes,
-                                                      {singleSmallData},
-                                                      {{10.f, 10.f, 10.f,}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"power", "1"},
-                                                                                             {"scale", "0"},
-                                                                                             {"shift", "10"}}))),
-                ::testing::make_tuple(LayerType("Tile"),
-                                      InOutDataParam({{{{2, 1, 2}},
-                                                              {threeDeuces}},
-                                                      {fourInARow},
-                                                      {{1.f, 2.f, 1.f, 2.f, 3.f, 4.f, 3.f, 4.f}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis",  "1"},
-                                                                                             {"tiles", "2"}}))),
-                ::testing::make_tuple(LayerType("Tile"),
-                                      InOutDataParam({{{{2, 2, 1}},
-                                                              {threeDeuces}},
-                                                      {fourInARow},
-                                                      {{1.f, 1.f, 2.f, 2.f, 3.f, 3.f, 4.f, 4.f}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis",  "2"},
-                                                                                             {"tiles", "2"}}))),
-                ::testing::make_tuple(LayerType("Tile"),
-                                      InOutDataParam({{{{1, 2, 2}},
-                                                              {threeDeuces}},
-                                                      {fourInARow},
-                                                      {{1.f, 2.f, 3.f, 4.f, 1.f, 2.f, 3.f, 4.f}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis",  "0"},
-                                                                                             {"tiles", "2"}}))),
-                ::testing::make_tuple(LayerType("Reshape"),
-                                      InOutDataParam({{{{1, 2, 2}}, {{4}}},
-                                                      {fourInARow},
-                                                      {fourInARow}}),
-                                      MapParams(MapStrStr())),
-                ::testing::make_tuple(LayerType("Split"),
-                                      InOutDataParam({{{{2, 1, 2}}, {{2, 1, 1}, {2, 1, 1}}},
-                                                      {fourInARow},
-                                                      {{1.f, 3.f},  {2.f,       4.f}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "2"}}))),
-                ::testing::make_tuple(LayerType("Split"),
-                                      InOutDataParam({{{{2, 1, 2}}, {{1, 1, 2}, {1, 1, 2}}},
-                                                      {fourInARow},
-                                                      {{1.f, 2.f},  {3.f,       4.f}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "0"}}))),
-                ::testing::make_tuple(LayerType("Split"),
-                                      InOutDataParam({{{{4, 1, 1}}, {{2, 1, 1}, {1, 1, 1}, {1, 1, 1}}},
-                                                      {fourInARow},
-                                                      {{1.f, 2.f},  {3.f}, {4.f}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "0"}}))),
-                ::testing::make_tuple(LayerType("Concat"),
-                                      InOutDataParam({{{{2, 1, 1}, {2, 1, 1}}, {{2, 1, 2}}},
-                                                      {{1.f,       3.f},       {2.f, 4.f}},
-                                                      {fourInARow}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "2"}}))),
-                ::testing::make_tuple(LayerType("Concat"),
-                                      InOutDataParam({{{{1, 1, 2}, {1, 1, 2}}, {{2, 1, 2}}},
-                                                      {{1.f,       2.f},       {3.f, 4.f}},
-                                                      {fourInARow}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "0"}}))),
-                ::testing::make_tuple(LayerType("Concat"),
-                                      InOutDataParam({{{{2, 1, 1}, {1, 1, 1}, {1, 1, 1}}, {{4, 1, 1}}},
-                                                      {{1.f,       2.f},                  {3.f}, {4.f}},
-                                                      {fourInARow}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "0"}})))
-        )
-);
-
-namespace {
-//  Test data vectors
-std::vector<float> in0 = {0.f, 1.f, 1.f, 0.f};
-std::vector<float> in1 = {0.f, 1.f, 2.f, 1.f};
-std::vector<float> dict = {1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f};
-std::vector<float> dict2D = {1.f, 2.f, 3.f, 4.f}; // 2x2
-std::vector<float> ref_in0_a0_d223 = {1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 7.f, 8.f, 9.f,
-                                      10.f, 11.f, 12.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f}; // 2x2x2x3
-std::vector<float> ref_in1_a2_d223 = {1.f, 2.f, 3.f, 2.f, 4.f, 5.f, 6.f, 5.f, 7.f, 8.f, 9.f, 8.f, 10.f, 11.f, 12.f,
-                                      11.f}; // 2x2x2x2
-std::vector<float> ref_in0_a0_d22 = {1.f, 2.f, 3.f, 4.f, 3.f, 4.f, 1.f, 2.f}; // 2x2x2
-}
-
-INSTANTIATE_TEST_CASE_P(
-        TestsGather, ParamsTest,
-        ::testing::Values(
-                ::testing::make_tuple(LayerType("Gather"),
-                                      InOutDataParam({{{{2, 2}, {1, 4}}, {{1, 4, 2}}},
-                                                      {dict2D,           in0},
-                                                      {ref_in0_a0_d22}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "0"}}))),
-                ::testing::make_tuple(LayerType("Gather"),
-                                      InOutDataParam({{{{2, 2, 3}, {2, 2}}, {{2, 2, 2, 3}}},
-                                                      {dict,                in0},
-                                                      {ref_in0_a0_d223}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "0"}}))),
-                ::testing::make_tuple(LayerType("Gather"),
-                                      InOutDataParam({{{{2, 2, 3}, {2, 2}}, {{2, 2, 2, 3}}},
-                                                      {dict,                in0},
-                                                      {ref_in0_a0_d223}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "-3"}}))),
-                ::testing::make_tuple(LayerType("Gather"),
-                                      InOutDataParam({{{{2, 2, 3}, {2, 2}}, {{2, 2, 2, 2}}},
-                                                      {dict,                in1},
-                                                      {ref_in1_a2_d223}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "2"}})))
-        )
-);
-
-//static testing::InOutShapes eltWiseShapes1 = {{{4}, {1}},
-//                                              {{4}}};
-//static std::vector<std::vector<float>> eltWiseInputs1 = {singleInputData,
-//                                                         {4.f}};
-//
-//static testing::InOutShapes eltWiseShapes2 = {{{2, 3}, {3}},
-//                                              {{2, 3}}};
-//static std::vector<std::vector<float>> eltWiseInputs2 = {{4.f, 8.f, 12.f, 4.f, 8.f, 8.f},
-//                                                         {4.f, 8.f, 4.f}};
-INSTANTIATE_TEST_CASE_P(
-        CheckOutputDirectly, BasicTest,
-        ::testing::Values(
-                ::testing::make_tuple(
-                        LayerType("Shape"),
-                        InOutDataParam({singleInOutShape, {}, {singleInputData}})),
-//                ::testing::make_tuple(
-//                        LayerType("Mul"),
-//                        InOutDataParam({eltWiseShapes1, eltWiseInputs1, {{16.f, 32.f, 48.f, 64.f}}})),
-//                ::testing::make_tuple(
-//                        LayerType("Add"),
-//                        InOutDataParam({eltWiseShapes1, eltWiseInputs1, {{8.f, 12.f, 16.f, 20.f}}})),
-//                ::testing::make_tuple(
-//                        LayerType("Div"),
-//                        InOutDataParam({eltWiseShapes1, eltWiseInputs1, {{1.f, 2.f, 3.f, 4.f}}})),
-//                ::testing::make_tuple(
-//                        LayerType("Mul"),
-//                        InOutDataParam({eltWiseShapes2, eltWiseInputs2, {{16.f, 64.f, 48.f, 16.f, 64.f, 32.f}}})),
-//                ::testing::make_tuple(
-//                        LayerType("Add"),
-//                        InOutDataParam({eltWiseShapes2, eltWiseInputs2, {{8.f, 16.f, 16.f, 8.f, 16.f, 12.f}}})),
-//                ::testing::make_tuple(
-//                        LayerType("Div"),
-//                        InOutDataParam({eltWiseShapes2, eltWiseInputs2, {{1.f, 1.f, 3.f, 1.f, 1.f, 2.f}}})),
-                ::testing::make_tuple(LayerType("Mul"),
-                                      InOutDataParam({singleSmall2Shapes, {singleSmallData, singleSmallData},
-                                                      {{1.f, 4.f, 16.f}}})),
-                ::testing::make_tuple(LayerType("Add"),
-                                      InOutDataParam({singleSmall2Shapes, {singleSmallData, singleSmallData},
-                                                      {{2.f, 4.f, 8.f}}})),
-                ::testing::make_tuple(LayerType("Div"),
-                                      InOutDataParam({singleSmall2Shapes, {singleSmallData, singleSmallData},
-                                                      {{1.f, 1.f, 1.f}}}))
-        )
-);
-
-INSTANTIATE_TEST_CASE_P(
-        SecondInput, BasicAdultTest,
-        ::testing::Combine(::testing::Values(LayerType("Reshape"), LayerType("Interp"), LayerType("Resample")),
-                           ::testing::Values(InOutDataParam({{{{2, 3}, {2}},
-                                                                     {{1, 6}}},
-                                                             {{},    {1.f, 6.f}},
-                                                             {}})))
-);
-
-INSTANTIATE_TEST_CASE_P(
-        DimSemantic, BasicAdultTest,
-        ::testing::Values(
-                ::testing::make_tuple(LayerType("Reshape"),
-                                      InOutDataParam({{{{2, 3}, {2}},
-                                                              {{1, 6}}},
-                                                      {{},    {1.f, -1.f}},
-                                                      {}}))
-        )
-);
-
-INSTANTIATE_TEST_CASE_P(
-        SqueezeUnsqueeze, BasicAdultTest,
-        ::testing::Values(
-                ::testing::make_tuple(LayerType("Unsqueeze"),
-                                      InOutDataParam({{{{3}, {1}},
-                                                              {{1, 3}}},
-                                                      {{},    {0.f}},
-                                                      {}})),
-                ::testing::make_tuple(LayerType("Unsqueeze"),
-                                      InOutDataParam({{{{3}, {3}},
-                                                              {{1, 1, 1, 3}}},
-                                                      {{},    {0.f, 1.f, 2.f}},
-                                                      {}})),
-                ::testing::make_tuple(LayerType("Unsqueeze"),
-                                      InOutDataParam({{{{3}, {3}},
-                                                              {{1, 3, 1, 1}}},
-                                                      {{},    {0.f, 2.f, 3.f}},
-                                                      {}})),
-                ::testing::make_tuple(LayerType("Unsqueeze"),
-                                      InOutDataParam({{{{2, 3}, {2}},
-                                                              {{1, 2, 3, 1}}},
-                                                      {{},    {0.f, 3.f}},
-                                                      {}})),
-                ::testing::make_tuple(LayerType("Unsqueeze"),
-                                      InOutDataParam({{{{2, 3}, {1}},
-                                                              {{2, 1, 3}}},
-                                                      {{},    {1.f}},
-                                                      {}})),
-                ::testing::make_tuple(LayerType("Unsqueeze"),
-                                      InOutDataParam({{{{3}, {1}},
-                                                              {{1, 3}}},
-                                                      {{},    {0.f}},
-                                                      {}})),
-                ::testing::make_tuple(LayerType("Unsqueeze"),
-                                      InOutDataParam({{{{3}, {3}},
-                                                              {{1, 1, 1, 3}}},
-                                                      {{},    {0.f, 1.f, 2.f}},
-                                                      {}})),
-                ::testing::make_tuple(LayerType("Unsqueeze"),
-                                      InOutDataParam({{{{3}, {3}},
-                                                              {{1, 3, 1, 1}}},
-                                                      {{},    {0.f, 2.f, 3.f}},
-                                                      {}})),
-                ::testing::make_tuple(LayerType("Unsqueeze"),
-                                      InOutDataParam({{{{2, 3}, {2}},
-                                                              {{1, 2, 3, 1}}},
-                                                      {{},    {0.f, 3.f}},
-                                                      {}})),
-                ::testing::make_tuple(LayerType("Unsqueeze"),
-                                      InOutDataParam({{{{2, 3}, {1}},
-                                                              {{2, 1, 3}}},
-                                                      {{},    {1.f,}},
-                                                      {}})),
-                ::testing::make_tuple(LayerType("Squeeze"),
-                                      InOutDataParam({{{{1}, {1}},
-                                                              {{}}},
-                                                      {{},    {0.f}},
-                                                      {}})),
-                ::testing::make_tuple(LayerType("Squeeze"),
-                                      InOutDataParam({{{{1, 3, 1}, {1}},
-                                                              {{3, 1}}},
-                                                      {{},    {0.f}},
-                                                      {}})),
-                ::testing::make_tuple(LayerType("Squeeze"),
-                                      InOutDataParam({{{{1, 3, 1}, {1}},
-                                                              {{1, 3}}},
-                                                      {{},    {2.f}},
-                                                      {}})),
-                ::testing::make_tuple(LayerType("Squeeze"),
-                                      InOutDataParam({{{{1, 3, 1}, {2}},
-                                                              {{3}}},
-                                                      {{},    {0.f, 2.f}},
-                                                      {}})),
-                ::testing::make_tuple(LayerType("Squeeze"),
-                                      InOutDataParam({{{{1, 3, 1}, {1}},
-                                                              {{1, 3}}},
-                                                      {{},    {-1.f}},
-                                                      {}})),
-                ::testing::make_tuple(LayerType("Squeeze"),
-                                      InOutDataParam({{{{1, 3, 1, 2}, {2}},
-                                                              {{3, 2}}},
-                                                      {{},    {0.f, 2.f}},
-                                                      {}})),
-                ::testing::make_tuple(LayerType("Squeeze"),
-                                      InOutDataParam({{{{1}, {1}},
-                                                              {{}}},
-                                                      {{},    {0.f}},
-                                                      {}})),
-                ::testing::make_tuple(LayerType("Squeeze"),
-                                      InOutDataParam({{{{1, 3, 1}, {1}},
-                                                              {{1, 3}}},
-                                                      {{},    {2.f}},
-                                                      {}})),
-                ::testing::make_tuple(LayerType("Squeeze"),
-                                      InOutDataParam({{{{1, 3, 1}, {2}},
-                                                              {{3}}},
-                                                      {{},    {0.f, 2.f}},
-                                                      {}})),
-                ::testing::make_tuple(LayerType("Squeeze"),
-                                      InOutDataParam({{{{1, 3, 1}, {1}},
-                                                              {{1, 3}}},
-                                                      {{},    {-1.f}},
-                                                      {}})),
-                ::testing::make_tuple(LayerType("Squeeze"),
-                                      InOutDataParam({{{{1, 3, 1, 2}, {2}},
-                                                              {{3, 2}}},
-                                                      {{},    {0.f, 2.f}},
-                                                      {}}))
-        )
-);
-namespace {
-//  Test data vectors
-std::vector<float> test0 = {0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f};
-std::vector<float> test2 = {0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f};
-std::vector<float> test5 = {5.f, 6.f, 7.f, 8.f};
-std::vector<float> test6 = {0.f, 1.f, 2.f, 3.f, 4.f, 5.f};
-std::vector<float> test8 = {5.f, 4.f, 3.f, 2.f, 1.f};
-std::vector<float> test9 = {5.f, 4.f, 3.f, 2.f, 1.f, 0.f};
-std::vector<float> test10 = {5.f, 4.f, 3.f};
-std::vector<float> test11 = {0.f, 2.f, 4.f, 6.f, 8.f};
-std::vector<float> test12 = {1.f, 3.f, 5.f, 7.f, 9.f};
-std::vector<float> test13 = {9.f, 8.f, 7.f, 6.f, 5.f, 4.f, 3.f, 2.f, 1.f, 0.f};
-std::vector<float> test14 = {9.f, 7.f, 5.f, 3.f, 1.f};
-std::vector<float> test16 = {0.f, 1.f, 3.f, 4.f};
-std::vector<float> test17 = {1.f, 4.f};
-std::vector<float> test19 = {0.f, 1.f, 2.f, 3.f};
-std::vector<float> test20 = {4.f, 5.f, 6.f, 7.f};
-/*
-0. [0,1,2,3,4,5,6,7,8,9], shape=[10]
-1. [0,1,2,3,4,5,6,7,8,9], shape=[10]
-2. [0,1,2,3,4,5,6,7,8], shape=[9]
-3. [0,1,2,3,4,5,6,7,8], shape=[9]
-4. [0,1,2,3,4,5,6,7,8,9], shape=[10]
-5. [5,6,7,8,9], shape=[5]
-6. [0,1,2,3,4,5], shape=[6]
-7. [5,6,7,8,9], shape=[5]
-8. [5,4,3,2,1], shape=[5]
-9. [5,4,3,2,1,0], shape=[6]
-10. [5,4,3], shape=[3]
-11. [0,2,4,6,8], shape=[5]
-12. [1,3,5,7,9], shape=[5]
-13. [9,8,7,6,5,4,3,2,1,0], shape=[10]
-14. [9,7,5,3,1], shape=[5]
-15. [[0,1,2,3,4,5,6,7,8,9]], shape=[1,10]
-16. [[[0,1,2],[3,4,5]]], shape=[1,2,2]
-17. [[[0,1,2],[3,4,5]]], shape=[1,2,1]
-18. [[[0,1,2],[3,4,5]]], shape=[1,1,2,1]
-19. [[[[0,1],[2,3]],[[4,5],[6,7]]]], shape=[1,2,2]
-20. [[[[0,1],[2,3]],[[4,5],[6,7]]]], shape=[1,2,2]
-21. [[[0,1,2],[3,4,5]]], shape=[1,1,2]
-*/
-}
-
-INSTANTIATE_TEST_CASE_P(
-        StridedSlice, StridedSliceTest,
-        ::testing::Values(
-                /* 0 */
-                ::testing::make_tuple(LayerType("StridedSlice"), InOutDataParam({{{{10}, {}, {}, {}}, {{10}}},
-                                                                                 {{test0},            {}, {}, {}},
-                                                                                 {test0}}),
-                                      MapParams(MapStrStr())),
-                ::testing::make_tuple(LayerType("StridedSlice"), InOutDataParam({{{{10}, {1}, {1}, {}}, {{10}}},
-                                                                                 {{test0},              {0.f}, {0.f}, {}},
-                                                                                 {test0}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"end_mask", "0"}}))),
-                ::testing::make_tuple(LayerType("StridedSlice"), InOutDataParam({{{{10}, {1}, {1}, {}}, {{9}}},
-                                                                                 {{test0},              {-1.f}, {-1.f}, {}},
-                                                                                 {test2}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"begin_mask", "0"}}))),
-                ::testing::make_tuple(LayerType("StridedSlice"), InOutDataParam({{{{10}, {1}, {1}, {}}, {{9}}},
-                                                                                 {{test0},              {0.f}, {-1.f}, {}},
-                                                                                 {test2}}),
-                                      MapParams(MapStrStr())),
-                ::testing::make_tuple(LayerType("StridedSlice"), InOutDataParam({{{{10}, {1}, {1}, {}}, {{10}}},
-                                                                                 {{test0},              {0.f}, {10.f}, {}},
-                                                                                 {test0}}),
-                                      MapParams(MapStrStr())),
-/* 5 */
-                ::testing::make_tuple(LayerType("StridedSlice"), InOutDataParam({{{{10}, {1}, {1}, {}}, {{5}}},
-                                                                                 {{test0},              {5.f}, {10.f}, {}},
-                                                                                 {test5}}),
-                                      MapParams(MapStrStr())),
-                ::testing::make_tuple(LayerType("StridedSlice"), InOutDataParam({{{{10}, {1}, {1}, {}}, {{6}}},
-                                                                                 {{test0},              {0.f}, {6.f}, {}},
-                                                                                 {test6}}),
-                                      MapParams(MapStrStr())),
-                ::testing::make_tuple(LayerType("StridedSlice"), InOutDataParam({{{{10}, {1}, {1}, {}}, {{5}}},
-                                                                                 {{test0},              {-5.f}, {10.f}, {}},
-                                                                                 {test5}}),
-                                      MapParams(MapStrStr())),
-                ::testing::make_tuple(LayerType("StridedSlice"), InOutDataParam({{{{10}, {1}, {1}, {1}}, {{5}}},
-                                                                                 {{test0},               {-5.f}, {0.f}, {-1.f}},
-                                                                                 {test8}}),
-                                      MapParams(MapStrStr())),
-                ::testing::make_tuple(LayerType("StridedSlice"), InOutDataParam({{{{10}, {1}, {1}, {1}}, {{6}}},
-                                                                                 {{test0},               {-5.f}, {0.f}, {-1.f}},
-                                                                                 {test9}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"end_mask", "0"}}))
-                ),
-/* 10 */
-                ::testing::make_tuple(LayerType("StridedSlice"), InOutDataParam({{{{10}, {1}, {1}, {1}}, {{3}}},
-                                                                                 {{test0},               {-5.f}, {2.f}, {-1.f}},
-                                                                                 {test10}}),
-                                      MapParams(MapStrStr())),
-                ::testing::make_tuple(LayerType("StridedSlice"), InOutDataParam({{{{10}, {1}, {1}, {1}}, {{5}}},
-                                                                                 {{test0},               {0.f}, {0.f}, {2.f}},
-                                                                                 {test11}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"end_mask", "0"}}))),
-                ::testing::make_tuple(LayerType("StridedSlice"), InOutDataParam({{{{10}, {1}, {1}, {1}}, {{5}}},
-                                                                                 {{test0},               {1.f}, {0.f}, {2.f}},
-                                                                                 {test12}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"end_mask", "0"}}))),
-                ::testing::make_tuple(LayerType("StridedSlice"), InOutDataParam({{{{10}, {1}, {1}, {1}}, {{10}}},
-                                                                                 {{test0},               {-1.f}, {0.f}, {-1.f}},
-                                                                                 {test13}}),
-                                      MapParams(MapStrStr(
-                                              std::map<std::string, std::string>{{"end_mask", "0"}}))),
-                ::testing::make_tuple(LayerType("StridedSlice"), InOutDataParam({{{{10}, {1}, {1}, {1}}, {{5}}},
-                                                                                 {{test0},               {-1.f}, {0.f}, {-2.f}},
-                                                                                 {test14}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"end_mask", "0"}}))),
-/* 15 */
-                ::testing::make_tuple(LayerType("StridedSlice"), InOutDataParam({{{{10}, {1}, {1}, {}}, {{1, 10}}},
-                                                                                 {{test0},              {0.f}, {10.f}, {}},
-                                                                                 {test0}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"new_axis_mask", "1"}}))),
-                ::testing::make_tuple(LayerType("StridedSlice"),
-                                      InOutDataParam({{{{1, 2, 3}, {2}, {2}, {}}, {{1, 2, 2}}},
-                                                      {{test0},                   {0.f, 0.f}, {1.f, 2.f}, {}},
-                                                      {test16}}),
-                                      MapParams(
-                                              MapStrStr(std::map<std::string, std::string>{{"ellipsis_mask", "0,1"}}))),
-                ::testing::make_tuple(LayerType("StridedSlice"),
-                                      InOutDataParam({{{{1, 2, 3}, {4}, {4}, {}}, {{1,   2,   1}}},
-                                                      {{test0},                   {{0.f, 0.f, 0.f, 1.f}}, {2.f, 3.f, 2.f, 2.f}, {}},
-                                                      {test17}}),
-                                      MapParams(
-                                              MapStrStr(std::map<std::string, std::string>{{"new_axis_mask",    "0,0,1,0"},
-                                                                                           {"shrink_axis_mask", "0,0,0,1"}}))),
-                ::testing::make_tuple(LayerType("StridedSlice"),
-                                      InOutDataParam({{{{1, 2, 3}, {3}, {3}, {}}, {{1, 1, 2, 1}}},
-                                                      {{test0},                   {0.f, 0.f, 1.f}, {2.f, 2.f, 2.f}, {}},
-                                                      {test17}}),
-                                      MapParams(MapStrStr(
-                                              std::map<std::string, std::string>{{"ellipsis_mask", "0,1"},
-                                                                                 {"new_axis_mask", "1"}}))),
-                ::testing::make_tuple(LayerType("StridedSlice"),
-                                      InOutDataParam({{{{1, 2, 2, 2}, {1}, {1}, {1}}, {{1, 2, 2}}},
-                                                      {{test0},                       {-1.f}, {0.f}, {-2.f}},
-                                                      {test19}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"begin_mask",       "0,1,0,0"},
-                                                                                             {"end_mask",         "0,1,0,0"},
-                                                                                             {"shrink_axis_mask", "0,1"}}))),
-/* 20 */
-                ::testing::make_tuple(LayerType("StridedSlice"),
-                                      InOutDataParam({{{{1, 2, 2, 2}, {4}, {4}, {}}, {{1, 2, 2}}},
-                                                      {{test0},                      {0.f, 1.f, 0.f, 0.f}, {1.f, 2.f, 2.f, 2.f}, {}},
-                                                      {test20}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"begin_mask",       "0,1,0,0"},
-                                                                                             {"end_mask",         "0,1,0,0"},
-                                                                                             {"shrink_axis_mask", "0,1,0,0"}}))),
-                ::testing::make_tuple(LayerType("StridedSlice"),
-                                      InOutDataParam({{{{1, 2, 3}, {3}, {3}, {}}, {{1, 1, 2}}},
-                                                      {{test0},                   {0.f, 0.f, 1.f}, {2.f, 2.f, 2.f}, {}},
-                                                      {test17}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"ellipsis_mask",    "0,1"},
-                                                                                             {"new_axis_mask",    "1"},
-                                                                                             {"shrink_axis_mask", "0,0,1"}})))
-        )
-);
-
-INSTANTIATE_TEST_CASE_P(
-        Fill, FillTest,
-        ::testing::Values(
-                ::testing::make_tuple(LayerType("Fill"), InOutDataParam({{{{1}, {1}},
-                                                                                 {{1}}},
-                                                                         {{1.f}, {1.f}},
-                                                                         {}})),
-                ::testing::make_tuple(LayerType("Fill"), InOutDataParam({{{{3}, {1}},
-                                                                                 {{1, 3, 1}}},
-                                                                         {{1.f, 3.f, 1.f}, {1.f}},
-                                                                         {}})),
-                ::testing::make_tuple(LayerType("Fill"), InOutDataParam({{{{3}, {1}},
-                                                                                 {{2, 3, 6}}},
-                                                                         {{2.f, 3.f, 6.f}, {-1.f}},
-                                                                         {}})),
-                ::testing::make_tuple(LayerType("Fill"), InOutDataParam({{{{4}, {1}},
-                                                                                 {{1, 3, 1, 2}}},
-                                                                         {{1.f, 3.f, 1.f, 2.f}, {.5f}},
-                                                                         {}})),
-                ::testing::make_tuple(LayerType("Fill"), InOutDataParam({{{{6}, {1}},
-                                                                                 {{4, 3, 2, 5, 4, 2}}},
-                                                                         {{4.f, 3.f, 2.f, 5.f, 4.f, 2.f}, {.25f}},
-                                                                         {}}))
-        )
-);
-
-INSTANTIATE_TEST_CASE_P(
-        Range, RangeTest,
-        ::testing::Values(
-                ::testing::make_tuple(LayerType("Range"), InOutDataParam({{{{1}, {1}, {1}},
-                                                                                  {{5}}},
-                                                                          {{3.f}, {18.f}, {3.f}},
-                                                                          {{}}})),
-                ::testing::make_tuple(LayerType("Range"), InOutDataParam({{{{1}, {1}, {1}},
-                                                                                  {{2}}},
-                                                                          {{3.f}, {1.f}, {-1.f}},
-                                                                          {{}}})),
-                ::testing::make_tuple(LayerType("Range"), InOutDataParam({{{{1}, {1}, {1}},
-                                                                                  {{6}}},
-                                                                          {{3.f}, {-3.f}, {-1.f}},
-                                                                          {{}}})),
-                ::testing::make_tuple(LayerType("Range"), InOutDataParam({{{{1}, {1}, {1}},
-                                                                                  {{5}}},
-                                                                          {{0.f}, {5.f}, {1.f}},
-                                                                          {{}}}))
-        )
-);
-
-INSTANTIATE_TEST_CASE_P(
-        Broadcast, BroadcastTest,
-        ::testing::Values(
-                ::testing::make_tuple(LayerType("Broadcast"), InOutDataParam({{{{3}, {2}},
-                                                                      {{3, 3}}},
-                                                              {{},    {3, 3}},
-                                                              {}})),
-                ::testing::make_tuple(LayerType("Broadcast"), InOutDataParam({{{{16, 50, 1}, {4}},
-                                                                      {{1, 16, 50, 50}}},
-                                                              {{},    {1, 16, 50, 50}},
-                                                              {}})),
-                ::testing::make_tuple(LayerType("Broadcast"), InOutDataParam({{{{1}, {3}},
-                                                                      {{1, 50, 50}}},
-                                                              {{},    {1, 50, 50}},
-                                                              {}}))
-)
-);
\ No newline at end of file
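
The whole suite deleted above was driven by a small fluent DSL: assertThat() builds an ASITestBuilder seeded with the parametrized layer type and data, constInferResultFor()/shapeInferResultFor() choose the matcher, and equals().toData()/toShapes() run the implementation and compare. Worked against the first Power instantiation above (inputs {1, 2, 4}, computing (scale*x + shift)^power; a sketch, not compilable outside the removed harness):

    assertThat()                            // type = "Power", data = singleSmallShapes
        .constInferResultFor()              // constant-folding path
        .withParams({{"power", "-1"}, {"scale", "-2"}, {"shift", "0.5"}})
        .equals()
        .toData({{-2 / 3.f, -2 / 7.f, -2 / 15.f}});  // (1*-2+0.5)^-1 = -2/3, etc.
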
diff --git a/inference-engine/tests_deprecated/unit/shape_infer/adult_test.hpp b/inference-engine/tests_deprecated/unit/shape_infer/adult_test.hpp
deleted file mode 100644
index 01eef21..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <utility>
-
-#include <utility>
-
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <gtest/gtest.h>
-#include <shape_infer/const_infer/ie_const_infer_holder.hpp>
-#include "built_in_shape_infer_general_test.hpp"
-#include "adult_test_utils.hpp"
-
-namespace IE = InferenceEngine;
-
-namespace ShapeInferTests {
-
-class CommonTests : public ::testing::Test {
-protected:
-    ASITestBuilder assertThat();
-
-protected:
-    std::string type;
-    InOutData inOutData;
-};
-
-class BasicTest
-        : public CommonTests,
-          public testing::WithParamInterface<std::tuple<LayerType, InOutDataParam>> {
-protected:
-    void SetUp() override;
-};
-
-class BlobTest
-        : public CommonTests,
-          public testing::WithParamInterface<std::tuple<LayerType, InOutDataParam, BlobsParam>> {
-protected:
-    void SetUp() override;
-
-protected:
-    FloatMap blobsParam;
-};
-
-class ParamsTest
-        : public CommonTests,
-          public testing::WithParamInterface<std::tuple<LayerType, InOutDataParam, MapParams>> {
-protected:
-    void SetUp() override;
-
-protected:
-    MapStrStr strParams;
-};
-
-class BasicAdultTest : public BasicTest {
-};
-
-class StridedSliceTest : public ParamsTest {
-public:
-    std::vector<IE::Precision> getPrecisions();
-};
-
-class FillTest : public BasicTest {
-protected:
-    std::vector<float> refGen(const InOutData& inOutData);
-};
-
-class RangeTest : public BasicTest {
-protected:
-    std::vector<float> refGen(const InOutData& inOutData);
-};
-
-class BroadcastTest : public BasicTest {
-protected:
-    std::vector<float> refGen(const InOutData& inOutData);
-};
-
-}  // namespace ShapeInferTests
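
Of the refGen reference generators declared in this header, RangeTest's (defined in the deleted adult_test.cpp above) captures arange-style semantics: floor(|limit - start| / delta) values starting at start and stepping by delta. A self-contained sketch, checked against the Range instantiations above:

    #include <cmath>
    #include <cstddef>
    #include <vector>

    std::vector<float> rangeRef(float start, float limit, float delta) {
        // Element count mirrors the deleted RangeTest::refGen.
        auto n = static_cast<size_t>(std::floor(std::abs((limit - start) / delta)));
        std::vector<float> out;
        float v = start;
        for (size_t i = 0; i < n; ++i, v += delta) out.push_back(v);
        return out;
    }
    // rangeRef(3, 18, 3)  -> {3, 6, 9, 12, 15}      (output shape {5})
    // rangeRef(3, 1, -1)  -> {3, 2}                 (output shape {2})
    // rangeRef(3, -3, -1) -> {3, 2, 1, 0, -1, -2}   (output shape {6})
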
diff --git a/inference-engine/tests_deprecated/unit/shape_infer/adult_test_utils.cpp b/inference-engine/tests_deprecated/unit/shape_infer/adult_test_utils.cpp
deleted file mode 100644
index e8f36c5..0000000
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <tuple>
-#include "adult_test.hpp"
-#include "adult_test_utils.hpp"
-
-
-using namespace InferenceEngine;
-using namespace details;
-using namespace ShapeInfer;
-
-void BaseMatcher::compareWithRef(const std::vector<InferenceEngine::Blob::Ptr>& outBlobs,
-                                 const std::vector<std::vector<float>>& refData,
-                                 float tolerance) {
-    for (int outIdx = 0; outIdx < outBlobs.size(); outIdx++) {
-        auto* data = outBlobs[outIdx]->buffer().as<float*>();
-        for (int elemIdx = 0; elemIdx < refData[outIdx].size(); elemIdx++) {
-            ASSERT_NEAR(data[elemIdx], refData[outIdx][elemIdx], tolerance);
-        }
-    }
-}
-
-std::vector<IE::Blob::Ptr>
-BaseMatcher::createBlobs(const std::vector<IE::SizeVector>& shapes, const std::vector<IE::Precision>& precisions) {
-    if (shapes.size() != precisions.size())
-        THROW_IE_EXCEPTION << "Vectors of shapes and precisions can't have different sizes";
-    std::vector<Blob::Ptr> blobs;
-    int i = 0;
-    for (const auto& dims : shapes) {
-        // it's assumed that empty dims = empty data = no blob
-        if (!dims.empty()) {
-            TensorDesc inDesc(precisions[i++], dims, TensorDesc::getLayoutByDims(dims));
-            auto blob = make_blob_with_precision(inDesc);
-            blob->allocate();
-            blobs.push_back(blob);
-        }
-    }
-    return blobs;
-}
-
-void BaseMatcher::fillBlobs(const std::vector<IE::Blob::Ptr>& blobs, const std::vector<std::vector<float>>& data) {
-    if (!data.empty()) {
-        for (int blobIdx = 0; blobIdx < blobs.size(); blobIdx++) {
-            auto blob = blobs[blobIdx];
-            // it's assumed that empty dims = empty data = no blob
-            if (!data[blobIdx].empty()) {
-                switch (blob->getTensorDesc().getPrecision()) {
-                    case Precision::FP32: {
-                        auto* buffer = blob->buffer().as<float*>();
-                        for (int dataIdx = 0; dataIdx < blob->size(); dataIdx++) {
-                            buffer[dataIdx] = data[blobIdx][dataIdx];
-                        }
-                    }
-                        break;
-                    case Precision::I32: {
-                        auto* buffer = blob->buffer().as<int32_t*>();
-                        for (int dataIdx = 0; dataIdx < blob->size(); dataIdx++) {
-                            buffer[dataIdx] = static_cast<int32_t>(data[blobIdx][dataIdx]);
-                        }
-                    }
-                        break;
-                    default:
-                        THROW_IE_EXCEPTION << "Unsupported precision " << blob->getTensorDesc().getPrecision() << " to fill blobs";
-                }
-            }
-        }
-    }
-}
-
-void ConstInferMatcher::toData(const std::vector<std::vector<float>>& refData) {
-    auto impl = holder->getConstInferImpl(config.type);
-    ASSERT_NE(nullptr, impl);
-    auto outBlobs = createBlobs(config.inOutData.inOutShapes.outDims, config.outPrecisions);
-    auto inBlobs = createBlobs(config.inOutData.inOutShapes.inDims, config.inPrecisions);
-    fillBlobs(inBlobs, config.inOutData.inData);
-    auto blobs = config.initBlobs(config.floatBlobData);
-    std::vector<Blob::CPtr> inCBlobs;
-    std::copy(inBlobs.begin(), inBlobs.end(), back_inserter(inCBlobs));
-    ASSERT_NO_THROW(impl->infer(inCBlobs, config.strParams, blobs, outBlobs));
-    compareWithRef(outBlobs, refData);
-}
-
-void ShapeInferMatcher::toShapes(const std::vector<IE::SizeVector>& refShape) {
-    siHolder.reset(new IE::ShapeInfer::BuiltInShapeInferHolder());
-    IE::IShapeInferImpl::Ptr impl;
-    std::vector<IE::SizeVector> outShapes;
-    sts = siHolder->getShapeInferImpl(impl, config.type.c_str(), &desc);
-    ASSERT_NE(nullptr, impl);
-    auto inBlobs = createBlobs(config.inOutData.inOutShapes.inDims, config.inPrecisions);
-    fillBlobs(inBlobs, config.inOutData.inData);
-    std::vector<Blob::CPtr> inCBlobs;
-    std::copy(inBlobs.begin(), inBlobs.end(), back_inserter(inCBlobs));
-    auto blobs = config.initBlobs(config.floatBlobData);
-    sts = impl->inferShapes(inCBlobs, config.strParams, blobs, outShapes, &desc);
-    ASSERT_EQ(sts, IE::OK) << desc.msg;
-    ASSERT_EQ(config.inOutData.inOutShapes.outDims, outShapes);
-}
-
-InitBlobsFunc ASITestBuilder::defaultBlobInit() {
-    return [](const FloatMap& blobDataMap) -> BlobMap {
-        BlobMap blobs;
-        for (const auto& it : blobDataMap) {
-            std::string blobName;
-            std::vector<float> data;
-            std::tie(blobName, data) = it;
-            SizeVector blobDims = {data.size()};
-            auto blob = make_shared_blob<float>(Precision::FP32, TensorDesc::getLayoutByDims(blobDims), blobDims,
-                                                data);
-            blobs[blobName] = blob;
-        }
-        return blobs;
-    };
-}
-
-MatcherConfigurator<ConstInferMatcher> ASITestBuilder::constInferResultFor() {
-    return MatcherConfigurator<ConstInferMatcher>(config);
-}
-
-MatcherConfigurator<ShapeInferMatcher> ASITestBuilder::shapeInferResultFor() {
-    return MatcherConfigurator<ShapeInferMatcher>(config);
-}
diff --git a/inference-engine/tests_deprecated/unit/shape_infer/adult_test_utils.hpp b/inference-engine/tests_deprecated/unit/shape_infer/adult_test_utils.hpp
deleted file mode 100644 (file)
index 3f14001..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <utility>
-
-#include <utility>
-
-#pragma once
-
-#include <gtest/gtest.h>
-#include <shape_infer/const_infer/ie_const_infer_holder.hpp>
-#include "built_in_shape_infer_general_test.hpp"
-
-namespace IE = InferenceEngine;
-
-struct InOutData {
-    testing::InOutShapes inOutShapes;
-    std::vector<std::vector<float>> inData;
-    std::vector<std::vector<float>> outData;
-};
-
-using FloatMap = std::map<std::string, std::vector<float>>;
-using InitBlobsFunc = std::function<IE::BlobMap(const FloatMap& inOutData)>;
-
-struct ASIConfig {
-    InOutData inOutData;
-    std::string type;
-    FloatMap floatBlobData;
-    std::map<std::string, std::string> strParams;
-    InitBlobsFunc initBlobs;
-    std::vector<IE::Precision> inPrecisions;
-    std::vector<IE::Precision> outPrecisions;
-};
-
-class BaseMatcher {
-public:
-    explicit BaseMatcher(ASIConfig config) : config(std::move(config)) {}
-
-protected:
-    void compareWithRef(const std::vector<IE::Blob::Ptr>& outBlobs,
-                        const std::vector<std::vector<float>>& refData,
-                        float tolerance = 0.0001);
-
-    std::vector<IE::Blob::Ptr>
-    createBlobs(const std::vector<IE::SizeVector>& shapes, const std::vector<IE::Precision>& precisions);
-
-    void fillBlobs(const std::vector<IE::Blob::Ptr>& blobs, const std::vector<std::vector<float>>& data);
-
-    ASIConfig config;
-};
-
-class ConstInferMatcher : public BaseMatcher {
-public:
-    explicit ConstInferMatcher(const ASIConfig& config) : BaseMatcher(config) {}
-
-    void toData(const std::vector<std::vector<float>>& refData);
-
-private:
-    std::shared_ptr<IE::ShapeInfer::ConstInferHolder> holder;
-};
-
-class ShapeInferMatcher : public BaseMatcher {
-public:
-    explicit ShapeInferMatcher(const ASIConfig& config) : BaseMatcher(config) {}
-
-    void toShapes(const std::vector<IE::SizeVector>& refShape);
-
-private:
-    std::unique_ptr<IE::ShapeInfer::BuiltInShapeInferHolder> siHolder;
-    IE::StatusCode sts;
-    IE::ResponseDesc desc;
-};
-
-template<typename M>
-class MatcherConfigurator {
-public:
-    explicit MatcherConfigurator(ASIConfig config) : config(std::move(config)) {}
-
-    MatcherConfigurator& withParams(const std::map<std::string, std::string>& params) {
-        config.strParams = params;
-        return *this;
-    }
-
-    MatcherConfigurator& withInputPrecisions(const std::vector<IE::Precision>& inputPrecisions) {
-        config.inPrecisions = inputPrecisions;
-        return *this;
-    }
-
-    MatcherConfigurator& withOutputPrecisions(const std::vector<IE::Precision>& outputPrecisions) {
-        config.outPrecisions = outputPrecisions;
-        return *this;
-    }
-
-    MatcherConfigurator& withBlobs(const FloatMap& blobDataMap) {
-        config.floatBlobData = blobDataMap;
-        return *this;
-    }
-
-    M equals() {
-        return M(config);
-    }
-
-private:
-    ASIConfig config;
-};
-
-class ASITestBuilder {
-    ASIConfig config;
-public:
-    ASITestBuilder() {
-        config.initBlobs = defaultBlobInit();
-    }
-
-    ASITestBuilder& withData(const InOutData& data) {
-        config.inOutData = data;
-        config.inPrecisions = {data.inOutShapes.inDims.size(), IE::Precision::FP32};
-        config.outPrecisions = {data.inOutShapes.outDims.size(), IE::Precision::FP32};
-        return *this;
-    }
-
-    ASITestBuilder& withType(const std::string& type) {
-        config.type = type;
-        return *this;
-    }
-
-    MatcherConfigurator<ConstInferMatcher> constInferResultFor();
-
-    MatcherConfigurator<ShapeInferMatcher> shapeInferResultFor();
-
-private:
-    InitBlobsFunc defaultBlobInit();
-};
-
-PRETTY_PARAM(BlobsParam, FloatMap)
-
-PRETTY_PARAM(InOutDataParam, InOutData)
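The header above defines a small fluent API for composing shape/const-infer tests. As a hedged illustration only (the "Eltwise" type, the shapes and values, and the "operation" parameter are assumptions for this sketch, not data from the commit), a test built with it would read roughly:

    // Hypothetical composition of the deleted builder API shown above.
    InOutData data;
    data.inOutShapes = testing::InOutShapes({{{1, 3}, {1, 3}}, {{1, 3}}});
    data.inData = {{1, 2, 3}, {4, 5, 6}};
    ASITestBuilder()
            .withData(data)                        // sets FP32 precisions per shape
            .withType("Eltwise")                   // layer type to look up
            .constInferResultFor()                 // -> MatcherConfigurator<ConstInferMatcher>
            .withParams({{"operation", "sum"}})    // layer params as strings
            .equals()                              // -> ConstInferMatcher
            .toData({{5, 7, 9}});                  // reference output data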
diff --git a/inference-engine/tests_deprecated/unit/shape_infer/built_in_holder_test.cpp b/inference-engine/tests_deprecated/unit/shape_infer/built_in_holder_test.cpp
deleted file mode 100644 (file)
index 7dc4fc0..0000000
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <list>
-#include <shape_infer/built-in/ie_built_in_holder.hpp>
-#include <shape_infer/built-in/ie_equal_shape_infer.hpp>
-
-#include "unit_test_utils/mocks/shape_infer/mock_ishape_infer_impl.hpp"
-
-using namespace InferenceEngine;
-using namespace ShapeInfer;
-
-class ShapeInferHolderTest : public ::testing::Test {
-protected:
-    StatusCode sts = GENERAL_ERROR;
-    ResponseDesc resp;
-    std::vector<InferenceEngine::SizeVector> outShapes;
-    std::map<std::string, std::string> params;
-    std::map<std::string, Blob::Ptr> blobs;
-
-    std::list<std::string> _expectedTypes = {
-            "Power",
-            "Convolution",
-            "Deconvolution",
-            "Pooling",
-            "LRN",
-            "Norm",
-            "SoftMax",
-            "ReLU",
-            "Clamp",
-            "Split",
-            "Slice",
-            "Concat",
-            "Eltwise",
-            "ScaleShift",
-            "PReLU",
-            "Crop",
-            "Reshape",
-            "Tile",
-            "BatchNormalization",
-            "Input",
-            "Memory",
-            "Const",
-            "Gemm"
-    };
-
-    void TearDown() override {
-    }
-
-    void SetUp() override {
-    }
-};
-
-TEST_F(ShapeInferHolderTest, canCreateHolder) {
-    ASSERT_NO_THROW(BuiltInShapeInferHolder());
-}
-
-TEST_F(ShapeInferHolderTest, DISABLED_allRegistered) {
-    auto holder = std::make_shared<BuiltInShapeInferHolder>();
-    char** types = nullptr;
-    unsigned int size = 0;
-    ASSERT_NO_THROW(sts = holder->getShapeInferTypes(types, size, &resp));
-    std::list<std::string> actualTypes;
-    for (unsigned int i = 0; i < size; i++) {
-        actualTypes.emplace_back(types[i], strlen(types[i]));
-    }
-
-    _expectedTypes.sort();
-    actualTypes.sort();
-
-    std::vector<std::string> different_words;
-    std::set_difference(actualTypes.begin(), actualTypes.end(),
-                        _expectedTypes.begin(), _expectedTypes.end(),
-                        std::back_inserter(different_words));
-    // TODO: update expectedTypes!
-    ASSERT_EQ(19, different_words.size());
-}
-
-
-TEST_F(ShapeInferHolderTest, returnNullForNotKnown) {
-    IShapeInferImpl::Ptr impl;
-
-    sts = BuiltInShapeInferHolder().getShapeInferImpl(impl, "NOT_KNOWN_TYPE", &resp);
-    ASSERT_FALSE(impl) << resp.msg;
-    ASSERT_EQ(NOT_FOUND, sts);
-}
-
-class ShapeInferNotSupportedTest
-        : public ShapeInferHolderTest, public testing::WithParamInterface<std::string> {
-};
-
-TEST_P(ShapeInferNotSupportedTest, returnNotFoundOnNotSupported) {
-    std::string type = GetParam();
-    IShapeInferImpl::Ptr impl;
-
-    sts = BuiltInShapeInferHolder().getShapeInferImpl(impl, type.c_str(), &resp);
-    ASSERT_FALSE(impl) << resp.msg;
-    ASSERT_EQ(NOT_FOUND, sts) << resp.msg;
-}
-
-// TODO: list all not supported later
-INSTANTIATE_TEST_CASE_P(
-        NotSupported, ShapeInferNotSupportedTest, ::testing::Values("NOT_SUPPORTED"));
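For reference, the lookup contract these holder tests pin down, as a minimal sketch (assuming only the BuiltInShapeInferHolder API already used above; "Reshape" is just an example of a registered type):

    IShapeInferImpl::Ptr impl;
    ResponseDesc resp;
    StatusCode sts = BuiltInShapeInferHolder().getShapeInferImpl(impl, "Reshape", &resp);
    // Registered type: sts == OK and impl is non-null.
    // Unregistered type (e.g. "NOT_KNOWN_TYPE"): sts == NOT_FOUND, impl stays null.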
diff --git a/inference-engine/tests_deprecated/unit/shape_infer/built_in_shape_infer_batch_test.cpp b/inference-engine/tests_deprecated/unit/shape_infer/built_in_shape_infer_batch_test.cpp
deleted file mode 100644 (file)
index 41e1b3a..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <shape_infer/built-in/ie_built_in_holder.hpp>
-#include <xml_net_builder.hpp>
-#include <cnn_network_impl.hpp>
-#include <ie_format_parser.h>
-#include <xml_helper.hpp>
-#include <shape_infer/ie_reshaper.hpp>
-#include "built_in_shape_infer_general_test.hpp"
-
-using namespace InferenceEngine;
-using namespace ShapeInfer;
-
-class BuiltInShapeInferImplTestBatch : public BuiltInShapeInferImplTest {};
-
-TEST_P(BuiltInShapeInferImplTestBatch, batch) {
-    auto cnnNetworkImplPtr = buildSingleLayerNetwork<3>(type, inOutShapes, &layerParams.data, layerDataName);
-    auto reshaper = std::make_shared<Reshaper>(*cnnNetworkImplPtr);
-
-    if (canInfer) {
-        sts = cnnNetworkImplPtr->setBatchSizeReshape(BATCH, &resp);
-        ASSERT_EQ((int)OK, sts) << resp.msg;
-        checkNetworkInOut(*cnnNetworkImplPtr, newInOutShapes);
-    } else {
-        sts = cnnNetworkImplPtr->setBatchSizeReshape(BATCH, &resp);
-        ASSERT_EQ(GENERAL_ERROR, sts) << resp.msg;
-    }
-}
-
-// TBD: instantiate
diff --git a/inference-engine/tests_deprecated/unit/shape_infer/built_in_shape_infer_conv_test.cpp b/inference-engine/tests_deprecated/unit/shape_infer/built_in_shape_infer_conv_test.cpp
deleted file mode 100644 (file)
index 97e8fba..0000000
+++ /dev/null
@@ -1,356 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <shape_infer/built-in/ie_built_in_holder.hpp>
-#include <xml_net_builder.hpp>
-#include <cnn_network_impl.hpp>
-#include <ie_format_parser.h>
-#include <xml_helper.hpp>
-#include <shape_infer/ie_reshaper.hpp>
-#include "built_in_shape_infer_general_test.hpp"
-
-using namespace InferenceEngine;
-using namespace ShapeInfer;
-
-class BuiltInShapeInferConvImplTest
-        : public BuiltInShapeInferTestWithParam<std::tuple<InOutShapes, kernel, stride, pad, auto_pad, out_channels, group, dilation_factor, NewInOutShapes, CanInfer, pad_end, IsTransposed>> {
-protected:
-    void SetUp() override {
-        BuiltInShapeInferCommon::SetUp();
-        auto params = GetParam();
-        inOutShapes = std::get<0>(params);
-        kernel = std::get<1>(params);
-        stride = std::get<2>(params);
-        pad = std::get<3>(params);
-        auto_pad = std::get<4>(params);
-        out_channels = std::get<5>(params);
-        group = std::get<6>(params);
-        dilation_factor = std::get<7>(params);
-        newInOutShapes = std::get<8>(params);
-        canInfer = std::get<9>(params);
-        pad_end = std::get<10>(params);
-        isTransposed = std::get<11>(params);
-        if (isTransposed) {
-            type = "Deconvolution";
-            dataName = "deconvolution_data";
-        }
-    }
-
-    std::map<std::string, std::string> getMapParams() {
-        std::map<std::string, std::string> params = {
-                {"kernel",     kernel.toSeparetedRow(",")},
-                {"strides",    stride.toSeparetedRow(",")},
-                {"pads_begin", pad.toSeparetedRow(",")},
-                {"output",     std::to_string(out_channels)},
-                {"group",      std::to_string(group)},
-                {"dilations",  dilation_factor.toSeparetedRow(",")}
-        };
-        if (!auto_pad.empty()) params["auto_pad"] = auto_pad;
-        if (!pad_end.empty()) params["pads_end"] = pad_end.toSeparetedRow(",");
-        return params;
-    }
-
-protected:
-    std::string type = "Convolution";
-    std::string dataName = "convolution_data";
-    testing::InOutShapes inOutShapes;
-    testing::InOutShapes newInOutShapes;
-    param_size kernel{};
-    param_size stride{};
-    param_size pad{};
-    param_size pad_end{};
-    param_size dilation_factor{};
-    std::string auto_pad;
-    unsigned out_channels{};
-    unsigned group{};
-    bool canInfer;
-    bool isTransposed;
-};
-
-
-TEST_P(BuiltInShapeInferConvImplTest, impl) {
-    auto impl = getShapeInferImpl(type);
-    ASSERT_NE(nullptr, impl);
-    if (!group) group = 1;
-    unsigned w_dim = out_channels * inOutShapes.inDims[0][1] / group;
-    for (auto k : kernel.dims)
-        w_dim *= k;
-    SizeVector weightsDim{w_dim};
-    blobs["weights"] = make_shared_blob(Precision::fromType<size_t>(), weightsDim);
-    ASSERT_NO_THROW(sts = impl->inferShapes(getBlobs(inOutShapes.inDims), getMapParams(), blobs, outShapes, &resp));
-    ASSERT_EQ(int(OK), sts) << resp.msg;
-    ASSERT_EQ(inOutShapes.outDims, outShapes);
-}
-
-TEST_P(BuiltInShapeInferConvImplTest, batch) {
-    auto layerParams = getMapParams();
-    auto cnnNetworkImplPtr = buildSingleLayerNetwork<4>(type, inOutShapes, &layerParams, dataName);
-    auto reshaper = std::make_shared<Reshaper>(*cnnNetworkImplPtr);
-    sts = cnnNetworkImplPtr->setBatchSizeReshape(BATCH, &resp);
-    ASSERT_EQ((int) OK, sts) << resp.msg;
-    inOutShapes.inDims[0][0] = inOutShapes.outDims[0][0] = BATCH;
-    checkNetworkInOut(*cnnNetworkImplPtr, inOutShapes);
-}
-
-TEST_P(BuiltInShapeInferConvImplTest, reshaper) {
-    auto layerParams = getMapParams();
-    auto cnnNetworkImplPtr = buildSingleLayerNetwork<4>(type, inOutShapes, &layerParams, dataName);
-    auto reshaper = std::make_shared<Reshaper>(*cnnNetworkImplPtr);
-    auto inputShapes = setInputShapes(*cnnNetworkImplPtr, newInOutShapes.inDims);
-    reshaper->run(inputShapes);
-    checkNetworkInOut(*cnnNetworkImplPtr, newInOutShapes);
-}
-
-INSTANTIATE_TEST_CASE_P(
-        BuiltInImplsConv, BuiltInShapeInferConvImplTest,
-        ::testing::Values(
-                // fixate pad
-                ::testing::make_tuple(InOutShapes({{{4, 3,  228, 228}},
-                                                   {{4, 64, 229, 115}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({2, 1}), auto_pad(""), out_channels(64), group(1), dilation_factor({0, 0}),
-                                      NewInOutShapes({{{1, 3,  228, 228}},
-                                                      {{1, 64, 229, 115}}}),
-                                      CanInfer(true), pad_end(), IsTransposed(false)),
-                // fixate pad + dilation
-                ::testing::make_tuple(InOutShapes({{{4, 3,  228, 228}},
-                                                   {{4, 64, 225, 109}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({2, 1}), auto_pad(""), out_channels(64), group(1), dilation_factor({5, 5}),
-                                      NewInOutShapes({{{1, 3,  228, 228}},
-                                                      {{1, 64, 225, 109}}}),
-                                      CanInfer(true), pad_end(), IsTransposed(false)),
-                // fixate pad + right/bottom
-                ::testing::make_tuple(InOutShapes({{{4, 3,  228, 228}},
-                                                   {{4, 64, 230, 115}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({2, 1}), auto_pad(""), out_channels(64), group(1), dilation_factor({0, 0}),
-                                      NewInOutShapes({{{1, 3,  228, 228}},
-                                                      {{1, 64, 230, 115}}}),
-                                      CanInfer(true), pad_end({3, 2}), IsTransposed(false)),
-                // valid + empty paddings
-                ::testing::make_tuple(InOutShapes({{{4, 3,  228, 228}},
-                                                   {{4, 64, 227, 113}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({0, 0}), auto_pad("valid"), out_channels(64), group(1), dilation_factor({0, 0}),
-                                      NewInOutShapes({{{1, 3,  228, 228}},
-                                                      {{1, 64, 227, 113}}}),
-                                      CanInfer(true), pad_end(), IsTransposed(false)),
-                // valid + dilation
-                ::testing::make_tuple(InOutShapes({{{4, 3,  228, 228}},
-                                                   {{4, 64, 223, 107}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({0, 0}), auto_pad("valid"), out_channels(64), group(1), dilation_factor({5, 5}),
-                                      NewInOutShapes({{{1, 3,  228, 228}},
-                                                      {{1, 64, 223, 107}}}),
-                                      CanInfer(true), pad_end({0, 0}), IsTransposed(false)),
-                // valid + fixated paddings (shouldn't affect)
-                ::testing::make_tuple(InOutShapes({{{4, 3,  228, 228}},
-                                                   {{4, 64, 227, 113}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({2, 4}), auto_pad("valid"), out_channels(64), group(1), dilation_factor({0, 0}),
-                                      NewInOutShapes({{{1, 3,  228, 228}},
-                                                      {{1, 64, 227, 113}}}),
-                                      CanInfer(true), pad_end({3, 2}), IsTransposed(false)),
-                // same_upper + empty paddings
-                ::testing::make_tuple(InOutShapes({{{4, 3,  227, 227}},
-                                                   {{4, 64, 227, 114}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({0, 0}), auto_pad("same_upper"), out_channels(64), group(1),
-                                      dilation_factor({0, 0}),
-                                      NewInOutShapes({{{1, 3,  227, 227}},
-                                                      {{1, 64, 227, 114}}}),
-                                      CanInfer(true), pad_end(), IsTransposed(false)),
-                // same_upper + dilation paddings
-                ::testing::make_tuple(InOutShapes({{{4, 3,  227, 227}},
-                                                   {{4, 64, 227, 114}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({0, 0}), auto_pad("same_upper"), out_channels(64), group(1),
-                                      dilation_factor({5, 5}),
-                                      NewInOutShapes({{{1, 3,  227, 227}},
-                                                      {{1, 64, 227, 114}}}),
-                                      CanInfer(true), pad_end({0, 0}), IsTransposed(false)),
-                // same_upper + fixated paddings (shouldn't affect)
-                ::testing::make_tuple(InOutShapes({{{4, 3,  227, 227}},
-                                                   {{4, 64, 227, 114}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({2, 4}), auto_pad("same_upper"), out_channels(64), group(1),
-                                      dilation_factor({0, 0}),
-                                      NewInOutShapes({{{1, 3,  227, 227}},
-                                                      {{1, 64, 227, 114}}}),
-                                      CanInfer(true), pad_end({0, 0}), IsTransposed(false)),
-                // same_lower + empty paddings
-                ::testing::make_tuple(InOutShapes({{{4, 3,  227, 227}},
-                                                   {{4, 64, 227, 113}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({0, 0}), auto_pad("same_lower"), out_channels(64), group(1),
-                                      dilation_factor({0, 0}),
-                                      NewInOutShapes({{{1, 3,  227, 227}},
-                                                      {{1, 64, 227, 113}}}),
-                                      CanInfer(true), pad_end(), IsTransposed(false)),
-                // same_lower + dilation
-                ::testing::make_tuple(InOutShapes({{{4, 3,  227, 227}},
-                                                   {{4, 64, 227, 113}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({0, 0}), auto_pad("same_lower"), out_channels(64), group(1),
-                                      dilation_factor({0, 0}),
-                                      NewInOutShapes({{{1, 3,  227, 227}},
-                                                      {{1, 64, 227, 113}}}),
-                                      CanInfer(true), pad_end({0, 0}), IsTransposed(false)),
-                // same_lower + fixated paddings (shouldn't affect)
-                ::testing::make_tuple(InOutShapes({{{4, 3,  227, 227}},
-                                                   {{4, 64, 227, 113}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({2, 4}), auto_pad("same_lower"), out_channels(64), group(1),
-                                      dilation_factor({0, 0}),
-                                      NewInOutShapes({{{1, 3,  227, 227}},
-                                                      {{1, 64, 227, 113}}}),
-                                      CanInfer(true), pad_end({0, 0}), IsTransposed(false)),
-                // 5D tensors
-                // fixate pad
-                ::testing::make_tuple(InOutShapes({{{4, 3, 64, 100, 120}},
-                                                   {{4, 64, 66, 101, 61}}}), kernel({4, 2, 1}), stride({2, 1, 1}),
-                                      pad({2, 1, 1}), auto_pad(""), out_channels(64), group(1), dilation_factor({0, 0, 0}),
-                                      NewInOutShapes({{{1, 3, 64, 100, 120}},
-                                                      {{1, 64, 66, 101, 61}}}),
-                                      CanInfer(true), pad_end(), IsTransposed(false)),
-                // fixate pad + right/bottom
-                ::testing::make_tuple(InOutShapes({{{4, 3, 16, 128, 128}},
-                                                   {{4, 64, 18, 130, 65}}}), kernel({4, 2, 2}), stride({2, 1, 1}),
-                                      pad({2, 1, 1}), auto_pad(""), out_channels(64), group(1), dilation_factor({0, 0, 0}),
-                                      NewInOutShapes({{{1, 3, 16, 128, 128}},
-                                                      {{1, 64, 18, 130, 65}}}),
-                                      CanInfer(true), pad_end({3, 2, 2}), IsTransposed(false)),
-                // valid + fixated paddings (shouldn't affect)
-                ::testing::make_tuple(InOutShapes({{{4, 3, 16, 128, 130}},
-                                                   {{4, 64, 15, 127, 64}}}), kernel({4, 2, 2}), stride({2, 1, 1}),
-                                      pad({2, 4, 2}), auto_pad("valid"), out_channels(64), group(1), dilation_factor({0, 0, 0}),
-                                      NewInOutShapes({{{1, 3, 16, 128, 130}},
-                                                      {{1, 64, 15, 127, 64}}}),
-                                      CanInfer(true), pad_end({3, 2, 2}), IsTransposed(false)),
-                // same_lower + empty paddings
-                ::testing::make_tuple(InOutShapes({{{4, 3, 16, 128, 130}},
-                                                   {{4, 64, 16, 128, 65}}}), kernel({4, 2, 1}), stride({2, 1, 1}),
-                                      pad({0, 0, 0}), auto_pad("same_lower"), out_channels(64), group(1),
-                                      dilation_factor({0, 0, 0}),
-                                      NewInOutShapes({{{1, 3, 16, 128, 130}},
-                                                      {{1, 64, 16, 128, 65}}}),
-                                      CanInfer(true), pad_end(), IsTransposed(false))
-        )
-);
-
-INSTANTIATE_TEST_CASE_P(
-        BuiltInImplsDeConv, BuiltInShapeInferConvImplTest,
-        ::testing::Values(
-                // fixate pad
-                ::testing::make_tuple(InOutShapes({{{4, 3,  228, 228}},
-                                                   {{4, 64, 227, 454}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({2, 1}), auto_pad(""), out_channels(64), group(1), dilation_factor({0, 0}),
-                                      NewInOutShapes({{{1, 3,  228, 228}},
-                                                      {{1, 64, 227, 454}}}),
-                                      CanInfer(true), pad_end(), IsTransposed(true)),
-                // fixate pad + dilation
-                ::testing::make_tuple(InOutShapes({{{4, 3,  228, 228}},
-                                                   {{4, 64, 231, 466}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({2, 1}), auto_pad(""), out_channels(64), group(1), dilation_factor({5, 5}),
-                                      NewInOutShapes({{{1, 3,  228, 228}},
-                                                      {{1, 64, 231, 466}}}),
-                                      CanInfer(true), pad_end(), IsTransposed(true)),
-                // fixate pad + right/bottom
-                ::testing::make_tuple(InOutShapes({{{4, 3,  228, 228}},
-                                                   {{4, 64, 226, 453}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({2, 1}), auto_pad(""), out_channels(64), group(1), dilation_factor({0, 0}),
-                                      NewInOutShapes({{{1, 3,  228, 228}},
-                                                      {{1, 64, 226, 453}}}),
-                                      CanInfer(true), pad_end({3, 2}), IsTransposed(true)),
-                // valid + empty paddings
-                ::testing::make_tuple(InOutShapes({{{4, 3,  228, 228}},
-                                                   {{4, 64, 229, 459}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({0, 0}), auto_pad("valid"), out_channels(64), group(1), dilation_factor({0, 0}),
-                                      NewInOutShapes({{{1, 3,  228, 228}},
-                                                      {{1, 64, 229, 459}}}),
-                                      CanInfer(true), pad_end({0, 0}), IsTransposed(true)),
-                // valid + dilation
-                ::testing::make_tuple(InOutShapes({{{4, 3,  228, 228}},
-                                                   {{4, 64, 233, 471}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({0, 0}), auto_pad("valid"), out_channels(64), group(1), dilation_factor({5, 5}),
-                                      NewInOutShapes({{{1, 3,  228, 228}},
-                                                      {{1, 64, 233, 471}}}),
-                                      CanInfer(true), pad_end({0, 0}), IsTransposed(true)),
-                // valid + fixated paddings (shouldn't affect)
-                ::testing::make_tuple(InOutShapes({{{4, 3,  228, 228}},
-                                                   {{4, 64, 233, 471}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({2, 4}), auto_pad("valid"), out_channels(64), group(1), dilation_factor({5, 5}),
-                                      NewInOutShapes({{{1, 3,  228, 228}},
-                                                      {{1, 64, 233, 471}}}),
-                                      CanInfer(true), pad_end({3, 2}), IsTransposed(true)),
-                // same_upper + empty paddings
-                ::testing::make_tuple(InOutShapes({{{4, 3,  227, 227}},
-                                                   {{4, 64, 227, 454}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({0, 0}), auto_pad("same_upper"), out_channels(64), group(1),
-                                      dilation_factor({0, 0}),
-                                      NewInOutShapes({{{1, 3,  227, 227}},
-                                                      {{1, 64, 227, 454}}}),
-                                      CanInfer(true), pad_end({0, 0}), IsTransposed(true)),
-                // same_upper + dilation paddings
-                ::testing::make_tuple(InOutShapes({{{4, 3,  227, 227}},
-                                                   {{4, 64, 227, 454}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({0, 0}), auto_pad("same_upper"), out_channels(64), group(1),
-                                      dilation_factor({5, 5}),
-                                      NewInOutShapes({{{1, 3,  227, 227}},
-                                                      {{1, 64, 227, 454}}}),
-                                      CanInfer(true), pad_end({0, 0}), IsTransposed(true)),
-                // same_upper + fixated paddings (shouldn't affect)
-                ::testing::make_tuple(InOutShapes({{{4, 3,  227, 227}},
-                                                   {{4, 64, 227, 454}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({2, 4}), auto_pad("same_upper"), out_channels(64), group(1),
-                                      dilation_factor({0, 0}),
-                                      NewInOutShapes({{{1, 3,  227, 227}},
-                                                      {{1, 64, 227, 454}}}),
-                                      CanInfer(true), pad_end({0, 0}), IsTransposed(true)),
-                // same_lower + empty paddings
-                ::testing::make_tuple(InOutShapes({{{4, 3,  227, 227}},
-                                                   {{4, 64, 227, 454}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({0, 0}), auto_pad("same_lower"), out_channels(64), group(1),
-                                      dilation_factor({0, 0}),
-                                      NewInOutShapes({{{1, 3,  227, 227}},
-                                                      {{1, 64, 227, 454}}}),
-                                      CanInfer(true), pad_end({0, 0}), IsTransposed(true)),
-                // same_lower + dilation
-                ::testing::make_tuple(InOutShapes({{{4, 3,  227, 227}},
-                                                   {{4, 64, 227, 454}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({0, 0}), auto_pad("same_lower"), out_channels(64), group(1),
-                                      dilation_factor({0, 0}),
-                                      NewInOutShapes({{{1, 3,  227, 227}},
-                                                      {{1, 64, 227, 454}}}),
-                                      CanInfer(true), pad_end({0, 0}), IsTransposed(true)),
-                // same_lower + fixated paddings (shouldn't affect)
-                ::testing::make_tuple(InOutShapes({{{4, 3,  227, 227}},
-                                                   {{4, 64, 227, 454}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({2, 4}), auto_pad("same_lower"), out_channels(64), group(1),
-                                      dilation_factor({0, 0}),
-                                      NewInOutShapes({{{1, 3,  227, 227}},
-                                                      {{1, 64, 227, 454}}}),
-                                      CanInfer(true), pad_end({0, 0}), IsTransposed(true)),
-                // 5D tensors
-                // fixate pad
-                ::testing::make_tuple(InOutShapes({{{4, 3, 64, 100, 120}},
-                                                   {{4, 64, 66, 101, 61}}}), kernel({4, 2, 1}), stride({2, 1, 1}),
-                                      pad({2, 1, 1}), auto_pad(""), out_channels(64), group(1), dilation_factor({0, 0, 0}),
-                                      NewInOutShapes({{{1, 3,  64, 100, 120}},
-                                                      {{1, 64, 66, 101, 61}}}),
-                                      CanInfer(true), pad_end(), IsTransposed(false)),
-                // fixate pad + right/bottom
-                ::testing::make_tuple(InOutShapes({{{4, 3, 16, 128, 130}},
-                                                   {{4, 64, 14, 126, 257}}}), kernel({4, 2, 2}), stride({2, 1, 1}),
-                                      pad({2, 1, 1}), auto_pad(""), out_channels(64), group(1), dilation_factor({0, 0, 0}),
-                                      NewInOutShapes({{{1, 3, 16, 128, 130}},
-                                                      {{1, 64, 14, 126, 257}}}),
-                                      CanInfer(true), pad_end({3, 2, 2}), IsTransposed(true)),
-                // valid + fixated paddings (shouldn't affect)
-                ::testing::make_tuple(InOutShapes({{{4, 3, 16, 128, 130}},
-                                                   {{4, 64, 15, 127, 64}}}), kernel({4, 2, 2}), stride({2, 1, 1}),
-                                      pad({2, 4, 2}), auto_pad("valid"), out_channels(64), group(1), dilation_factor({0, 0, 0}),
-                                      NewInOutShapes({{{1, 3, 16, 128, 130}},
-                                                      {{1, 64, 15, 127, 64}}}),
-                                      CanInfer(true), pad_end({3, 2, 2}), IsTransposed(false)),
-                // same_lower + empty paddings
-                ::testing::make_tuple(InOutShapes({{{4, 3, 16, 128, 130}},
-                                                   {{4, 64, 16, 128, 65}}}), kernel({4, 2, 1}), stride({2, 1, 1}),
-                                      pad({0, 0, 0}), auto_pad("same_lower"), out_channels(64), group(1),
-                                      dilation_factor({0, 0, 0}),
-                                      NewInOutShapes({{{1, 3, 16, 128, 130}},
-                                                      {{1, 64, 16, 128, 65}}}),
-                                      CanInfer(true), pad_end(), IsTransposed(false))
-        )
-);
diff --git a/inference-engine/tests_deprecated/unit/shape_infer/built_in_shape_infer_fake_test.cpp b/inference-engine/tests_deprecated/unit/shape_infer/built_in_shape_infer_fake_test.cpp
deleted file mode 100644 (file)
index b386021..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <shape_infer/built-in/ie_built_in_holder.hpp>
-#include <xml_net_builder.hpp>
-#include <cnn_network_impl.hpp>
-#include <ie_format_parser.h>
-#include <xml_helper.hpp>
-#include <shape_infer/ie_reshaper.hpp>
-#include "built_in_shape_infer_general_test.hpp"
-
-using namespace InferenceEngine;
-using namespace ShapeInfer;
-
-class BuiltInShapeInferImplFakeTest : public BuiltInShapeInferImplTest {
-};
-
-TEST_P(BuiltInShapeInferImplFakeTest, reshaper) {
-    auto cnnNetworkImplPtr = buildSingleLayerNetwork<3>(type, inOutShapes, &layerParams.data, layerDataName);
-    auto reshaper = std::make_shared<Reshaper>(*cnnNetworkImplPtr);
-    auto inputShapes = setInputShapes(*cnnNetworkImplPtr, newInOutShapes.inDims);
-
-    if (canInfer) {
-        reshaper->run(inputShapes);
-        checkNetworkInOut(*cnnNetworkImplPtr, newInOutShapes);
-    } else {
-        ASSERT_THROW(reshaper->run(inputShapes), InferenceEngine::details::InferenceEngineException);
-    }
-}
-
-//TODO: use static variables for dimensions and parameters!!
-//TODO: think about shorter instantiation
-
-INSTANTIATE_TEST_CASE_P(
-        BuiltInImplsFake2, BuiltInShapeInferImplFakeTest,
-        ::testing::Values(
-                ::testing::make_tuple(LayerType("NOT_KNOWN"),
-                                      InOutShapes({{{1, 2, 3, 4}, {1, 2}, {1, 2, 3}},
-                                                   {{2, 1},       {2, 1}, {2, 1}}}),
-                                      NewInOutShapes({{{1, 2, 3, 4}, {1, 2}, {1, 2, 3}},
-                                                      {{2, 1},       {2, 1}, {2, 1}}}),
-                                      MapParams(MapStrStr()),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("NOT_KNOWN"),
-                                      InOutShapes({{{1, 2, 3, 4}},
-                                                   {{2, 1}}}),
-                                      NewInOutShapes({{{BATCH, 2, 3, 4}},
-                                                      {{BATCH, 1}}}),
-                                      MapParams(MapStrStr()),
-                                      LayerDataName("data"),
-                                      CanInfer(false)),
-                ::testing::make_tuple(LayerType("NOT_KNOWN"),
-                                      InOutShapes({{{1, 2, 3, 4}, {1, 2}},
-                                                   {{2, 1},       {2, 1}, {2, 1}}}),
-                                      NewInOutShapes({{{1, 2, 3, 4}, {BATCH, 2}},
-                                                      {{2, 1},       {2,     1}, {2, 1}}}),
-                                      MapParams(MapStrStr()),
-                                      LayerDataName("data"),
-                                      CanInfer(false)),
-                ::testing::make_tuple(LayerType("NOT_KNOWN"),
-                                      InOutShapes({{{1, 2, 3, 4}},
-                                                   {{2, 1}, {2, 1}, {2, 1}}}),
-                                      NewInOutShapes({{{BATCH, 2, 3, 4}},
-                                                      {{BATCH, 1}, {BATCH, 1}, {BATCH, 1}}}),
-                                      MapParams(MapStrStr()),
-                                      LayerDataName("data"),
-                                      CanInfer(false)),
-                ::testing::make_tuple(LayerType("NOT_KNOWN"),
-                                      InOutShapes({{{1, 2, 3, 4}},
-                                                   {{2, 1}, {2, 1}, {2, 1}}}),
-                                      NewInOutShapes({{{1, BATCH, 3, 4}},
-                                                      {{2, 1}, {2, 1}, {2, 1}}}),
-                                      MapParams(MapStrStr()),
-                                      LayerDataName("data"),
-                                      CanInfer(false)))
-);
diff --git a/inference-engine/tests_deprecated/unit/shape_infer/built_in_shape_infer_general_test.cpp b/inference-engine/tests_deprecated/unit/shape_infer/built_in_shape_infer_general_test.cpp
deleted file mode 100644 (file)
index b409082..0000000
+++ /dev/null
@@ -1,868 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <shape_infer/built-in/ie_built_in_holder.hpp>
-#include <xml_net_builder.hpp>
-#include <cnn_network_impl.hpp>
-#include <shape_infer/ie_reshaper.hpp>
-#include <cpp/ie_cnn_net_reader.h>
-#include <test_model_path.hpp>
-#include <debug.h>
-#include "built_in_shape_infer_general_test.hpp"
-
-using namespace InferenceEngine;
-using namespace InferenceEngine::details;
-using namespace ShapeInfer;
-
-TEST_P(BuiltInShapeInferImplTest, impl) {
-    auto impl = getShapeInferImpl(type);
-    ASSERT_NE(nullptr, impl);
-    ASSERT_NO_THROW(
-            sts = impl->inferShapes(getBlobs(newInOutShapes.inDims), layerParams.data, blobs, outShapes, &resp));
-
-    if (canInfer) {
-        ASSERT_EQ(int(OK), sts) << resp.msg;
-        ASSERT_EQ(newInOutShapes.outDims, outShapes);
-    } else {
-        ASSERT_EQ(GENERAL_ERROR, sts) << resp.msg;
-    }
-}
-
-TEST_P(BuiltInShapeInferImplTest, reshaper) {
-    auto cnnNetworkImplPtr = buildSingleLayerNetwork<3>(type, inOutShapes, &layerParams.data, layerDataName);
-    auto reshaper = std::make_shared<Reshaper>(*cnnNetworkImplPtr);
-    auto inputShapes = setInputShapes(*cnnNetworkImplPtr, newInOutShapes.inDims);
-    if (canInfer) {
-        reshaper->run(inputShapes);
-        checkNetworkInOut(*cnnNetworkImplPtr, newInOutShapes);
-    } else {
-        ASSERT_THROW(reshaper->run(inputShapes), InferenceEngine::details::InferenceEngineException);
-    }
-}
-
-//TODO: use static variables for dimensions and parameters!!
-//TODO: think about shorter instantiation
-INSTANTIATE_TEST_CASE_P(
-        BuiltInEqualImpls, BuiltInShapeInferImplTest,
-        ::testing::Combine(
-                ::testing::Values(LayerType("SoftMax"), LayerType("ELU"), LayerType("TanH"), LayerType("Sigmoid"),
-                                  LayerType("Logistic"),
-                                  LayerType("Normalize"), LayerType("Copy"), LayerType("Eltwise"),
-                                  LayerType("ScaleShift"), LayerType("PowerFile"),
-                                  LayerType("GRN"), LayerType("MVN"),
-                                  LayerType("Abs"), LayerType("Acos"), LayerType("Acosh"), LayerType("Asin"),
-                                  LayerType("Asinh"), LayerType("Atan"), LayerType("Atanh"),
-                                  LayerType("Ceil"), LayerType("Cos"), LayerType("Cosh"), LayerType("Erf"), LayerType("Floor"),
-                                  LayerType("HardSigmoid"), LayerType("Log"), LayerType("Neg"), LayerType("Reciprocal"),
-                                  LayerType("Selu"), LayerType("Sign"), LayerType("Sin"), LayerType("Sinh"),
-                                  LayerType("Softplus"), LayerType("Softsign"), LayerType("Tan"), LayerType("LogSoftmax")),
-                ::testing::Values(InOutShapes({{{1, 1, 1, 1}},
-                                               {{1, 1, 1, 1}}})),
-                ::testing::Values(NewInOutShapes({{{1, 3, 228, 228}},
-                                                  {{1, 3, 228, 228}}})),
-                ::testing::Values(MapParams(MapStrStr())),
-                ::testing::Values(LayerDataName("data")),
-                ::testing::Values(CanInfer(true)))
-);
-
-INSTANTIATE_TEST_CASE_P(
-        BuiltInMultiImpls, BuiltInShapeInferImplTest,
-        ::testing::Combine(
-                ::testing::Values(LayerType("Mul"), LayerType("Eltwise"), LayerType("Add"), LayerType("Div")),
-                ::testing::Values(InOutShapes({{{1, 1, 1, 1}, {1, 1, 1, 1}},
-                                               {{1, 1, 1, 1}}})),
-                ::testing::Values(NewInOutShapes({{{1, 3, 228, 228}, {1, 3, 228, 228}},
-                                                  {{1, 3, 228, 228}}})),
-                ::testing::Values(MapParams(MapStrStr())),
-                ::testing::Values(LayerDataName("data")),
-                ::testing::Values(CanInfer(true)))
-);
-
-INSTANTIATE_TEST_CASE_P(
-        BuiltInGeneralImpls, BuiltInShapeInferImplTest,
-        ::testing::Values(
-                ::testing::make_tuple(LayerType("LRN"),
-                                      InOutShapes({{{1, 1, 1, 1}},
-                                                   {{1, 1, 1, 1}}}),
-                                      NewInOutShapes({{{1, 3, 228, 228}},
-                                                      {{1, 3, 228, 228}}}),
-                                      MapParams(MapStrStr({{"alpha",      "9.9999997e-05"},
-                                                           {"beta",       "0.75"},
-                                                           {"local-size", "5"},
-                                                           {"region",     "across"}})),
-                                      LayerDataName("norm_data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("ReLU"),
-                                      InOutShapes({{{1, 1, 1, 1}},
-                                                   {{1, 1, 1, 1}}}),
-                                      NewInOutShapes({{{1, 3, 228, 228}},
-                                                      {{1, 3, 228, 228}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"negative_slope", "0"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Power"),
-                                      InOutShapes({{{1, 1, 1, 1}},
-                                                   {{1, 1, 1, 1}}}),
-                                      NewInOutShapes({{{1, 3, 228, 228}},
-                                                      {{1, 3, 228, 228}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"shift", "1"},
-                                                                                             {"power", "1"},
-                                                                                             {"scale", "1"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Clamp"),
-                                      InOutShapes({{{1, 1, 1, 1}},
-                                                   {{1, 1, 1, 1}}}),
-                                      NewInOutShapes({{{1, 3, 228, 228}},
-                                                      {{1, 3, 228, 228}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"min", "1"},
-                                                                                             {"max", "2"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("BatchNormalization"),
-                                      InOutShapes({{{1, 1, 1, 1}},
-                                                   {{1, 1, 1, 1}}}),
-                                      NewInOutShapes({{{1, 3, 228, 228}},
-                                                      {{1, 3, 228, 228}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"epsilon", "1"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("InnerProduct"),
-                                      InOutShapes({{{1, 3, 228, 228}},
-                                                   {{1, 1000}}}),
-                                      NewInOutShapes({{{1, 3, 228, 228}},
-                                                      {{1, 1000}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"out-size", "1000"}})),
-                                      LayerDataName("fc_data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("FullyConnected"),
-                                      InOutShapes({{{1, 3}},
-                                                   {{1, 1000}}}),
-                                      NewInOutShapes({{{BATCH, 3}},
-                                                      {{BATCH, 1000}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"out-size", "1000"}})),
-                                      LayerDataName("fc_data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Permute"),
-                                      InOutShapes({{{2, 3, 4, 5}},
-                                                   {{4, 3, 5, 2}}}),
-                                      NewInOutShapes({{{10, 3, 4, 5}},
-                                                      {{4,  3, 5, 10}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"order", "2,1,3,0"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Reshape"),
-                                      InOutShapes({{{1, 34452}},
-                                                   {{2, 5742, 6}}}),
-                                      NewInOutShapes({{{2, 34458}},
-                                                      {{2, 5743, 6}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"dim", "0,-1,6"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Reshape"),
-                                      InOutShapes({{{1,   1, 300, 4}},
-                                                   {{300, 4}}}),
-                                      NewInOutShapes({{{1,   1, 500, 4}},
-                                                      {{500, 4}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"dim", "-1,4"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Flatten"),
-                                      InOutShapes({{{2, 1, 4, 5}},
-                                                   {{40}}}),
-                                      NewInOutShapes({{{4, 1, 4, 5}},
-                                                      {{80}}}),
-                                      MapParams(MapStrStr()),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("PriorBoxClustered"), // TODO 5D test
-                                      InOutShapes({{{2, 1, 4, 5}, {2, 4, 5, 6}},
-                                                   {{1, 2, 400}}}),
-                                      NewInOutShapes({{{4, 1, 5, 5}, {3, 5, 6, 3}},
-                                                      {{1, 2, 500}}}),
-                                      MapParams(MapStrStr(
-                                              std::map<std::string, std::string>{{"width",  "86.000000,13.000000,57.000000,39.000000,68.000000"},
-                                                                                 {"clip",   "0"},
-                                                                                 {"flip",   "1"},
-                                                                                 {"offset", "0.5"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("PriorBox"),
-                                      InOutShapes({{{1, 256, 1, 1}, {1, 3, 300, 300}},
-                                                   {{1, 2,   16}}}),
-                                      NewInOutShapes({{{2, 256, 1, 1}, {2, 3, 300, 300}},
-                                                      {{1, 2,   16}}}),
-                                      MapParams(MapStrStr(
-                                              std::map<std::string, std::string>{{"min_size",     "264"},
-                                                                                 {"max_size",     "315"},
-                                                                                 {"clip",         "0"},
-                                                                                 {"flip",         "1"},
-                                                                                 { "offset",       "0.5" },
-                                                                                 {"aspect_ratio", "2"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("PriorBox"),
-                                      InOutShapes({{{2, 512, 32, 32}, {2, 3, 512, 512}},
-                                                   {{1, 2,   16384}}}),
-                                      NewInOutShapes({{{2, 512, 32, 32}, {2, 3, 512, 512}},
-                                                      {{1, 2,   16384}}}),
-                                      MapParams(MapStrStr(
-                                              std::map<std::string, std::string>{{"min_size",        "35.84,52.46464"},
-                                                                                 {"max_size",        ""},
-                                                                                 {"clip",            "0"},
-                                                                                 {"step",            "16"},
-                                                                                 {"flip",            "0"},
-                                                                                 {"offset",          "0.5"},
-                                                                                 {"aspect_ratio",    "1.0,2.0,0.5"},
-                                                                                 {"scale_all_sizes", "0"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("PriorBox"),
-                                      InOutShapes({{{2, 512, 32, 32}, {2, 3, 512, 512}},
-                                                   {{1, 2,   32768}}}),
-                                      NewInOutShapes({{{2, 512, 32, 32}, {2, 3, 512, 512}},
-                                                      {{1, 2,   28672}}}),
-                                      MapParams(MapStrStr(
-                                              std::map<std::string, std::string>{{"min_size",        "35.84,52.46464"},
-                                                                                 {"max_size",        ""},
-                                                                                 {"clip",            "0"},
-                                                                                 {"step",            "16"},
-                                                                                 {"offset",          "0.5"},
-                                                                                 {"flip",            "1"},
-                                                                                 {"aspect_ratio",    "1.0,2.0,0.5"},
-                                                                                 {"scale_all_sizes", "0"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("DetectionOutput"),
-                                      InOutShapes({{{2, 1, 4,   5}, {2, 1, 4,   5}, {2, 1, 4,   5}},
-                                                   {{2, 1, 200, 7}}}),
-                                      NewInOutShapes({{{4, 1, 5,   5}, {4, 1, 5,   5}, {4, 1, 5,   5}},
-                                                      {{1, 1, 800, 7}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"keep_top_k",    "200"},
-                                                                                             {"num_classes",   "21"},
-                                                                                             {"nms_threshold", "0.44999998807907104"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Interp"),
-                                      InOutShapes({{{2, 2, 33,  65}},
-                                                   {{2, 2, 257, 513}}}),
-                                      NewInOutShapes({{{2, 2, 33,  65}},
-                                                      {{2, 2, 257, 513}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"align_corners", "1"},
-                                                                                             {"height",        "257"},
-                                                                                             {"pad_beg",       "0"},
-                                                                                             {"pad_end",       "0"},
-                                                                                             {"width",         "513"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Interp"),
-                                      InOutShapes({{{2, 2, 33, 65}},
-                                                   {{2, 2, 66, 513}}}),
-                                      NewInOutShapes({{{2, 2, 33, 65}},
-                                                      {{2, 2, 66, 513}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"align_corners", "1"},
-                                                                                             {"factor",        "2"},
-                                                                                             {"width",         "513"},
-                                                                                             {"pad_beg",       "0"},
-                                                                                             {"pad_end",       "0"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Interp"),
-                                      InOutShapes({{{2, 2, 33,  65}},
-                                                   {{2, 2, 257, 130}}}),
-                                      NewInOutShapes({{{2, 2, 33,  65}},
-                                                      {{2, 2, 257, 130}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"align_corners", "1"},
-                                                                                             {"factor",        "2"},
-                                                                                             {"height",        "257"},
-                                                                                             {"pad_beg",       "0"},
-                                                                                             {"pad_end",       "0"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("ROIPooling"),
-                                      InOutShapes({{{2,   3, 4, 5}, {150, 5}},
-                                                   {{150, 3, 6, 6}}}),
-                                      NewInOutShapes({{{4,   1, 5, 5}, {150, 5}},
-                                                      {{150, 1, 6, 6}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"pooled_h",      "6"},
-                                                                                             {"pooled_w",      "6"},
-                                                                                             {"spatial_scale", "0.062500"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Upsampling"),
-                                      InOutShapes({{{1, 3, 4, 5}},
-                                                   {{1, 3, 8, 10}}}),
-                                      NewInOutShapes({{{2, 1, 5,  5}},
-                                                      {{2, 1, 10, 10}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"scale", "2"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("PSROIPooling"),
-                                      InOutShapes({{{1, 3, 4, 5}, {150, 5}},
-                                                   {{150, 2, 6, 6}}}),
-                                      NewInOutShapes({{{2,   1, 5, 5}, {200, 5}},
-                                                      {{200, 2, 6, 6}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"output_dim", "2"},
-                                                                                             {"group_size", "6"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("SimplerNMS"),
-                                      InOutShapes({{{1,   3, 4, 5}, {1, 3, 4, 5}, {1, 3}},
-                                                   {{150, 5}}}),
-                                      NewInOutShapes({{{2,   1, 5, 5}, {2, 1, 5, 5}, {1, 3}},
-                                                      {{150, 5}}}),
-                                      MapParams(
-                                              MapStrStr(std::map<std::string, std::string>{{"post_nms_topn", "150"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Concat"),
-                                      InOutShapes({{{1, 3, 5, 5}, {1, 2, 5, 5}},
-                                                   {{1, 5, 5, 5}}}),
-                                      NewInOutShapes({{{2, 1, 5, 5}, {2, 1, 5, 5}},
-                                                      {{2, 2, 5, 5}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "1"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Crop"),
-                                      InOutShapes({{{1, 3, 5, 5}, {7, 7, 2, 3}},
-                                                   {{1, 5, 2, 3}}}),
-                                      NewInOutShapes({{{2, 1, 5, 6}, {7, 7, 2, 3}},
-                                                      {{2, 1, 2, 3}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis",   "2,3"},
-                                                                                             {"offset", "2,1"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Crop"),
-                                      InOutShapes({{{1, 3, 5, 5}},
-                                                   {{1, 5, 2, 3}}}),
-                                      NewInOutShapes({{{2, 1, 5, 6}},
-                                                      {{2, 1, 2, 1}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis",   "2,3"},
-                                                                                             {"offset", "2,1"},
-                                                                                             {"dim",    "2,1"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                // offset + dim exceeds the reshaped input, so shape inference must fail
-                ::testing::make_tuple(LayerType("Crop"),
-                                      InOutShapes({{{1, 3, 5, 5}},
-                                                   {{1, 5, 2, 3}}}),
-                                      NewInOutShapes({{{2, 1, 5, 4}},
-                                                      {{2, 1, 2, 1}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis",   "2,3"},
-                                                                                             {"offset", "3,4"},
-                                                                                             {"dim",    "2,1"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(false)),
-                ::testing::make_tuple(LayerType("Crop"),
-                                      InOutShapes({{{1, 3, 5, 5}},
-                                                   {{1, 5, 2, 3}}}),
-                                      NewInOutShapes({{{2, 1, 5, 6}},
-                                                      {{2, 1, 1, 4}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis",       "2,3"},
-                                                                                             {"crop_begin", "2,1"},
-                                                                                             {"crop_end",   "2,1"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Tile"),
-                                      InOutShapes({{{1, 3, 5, 5}},
-                                                   {{1, 9, 5, 5}}}),
-                                      NewInOutShapes({{{2, 1, 5, 6}},
-                                                      {{2, 3, 5, 6}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis",  "1"},
-                                                                                             {"tiles", "3"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Split"),
-                                      InOutShapes({{{1, 4, 5, 5}},
-                                                   {{1, 2, 5, 5}, {1, 2, 5, 5}}}),
-                                      NewInOutShapes({{{2, 4, 5, 6}},
-                                                      {{2, 2, 5, 6}, {2, 2, 5, 6}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis",      "1"},
-                                                                                             {"out_sizes", "2,2"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Slice"),
-                                      InOutShapes({{{1, 6, 5, 5}},
-                                                   {{1, 2, 5, 5}, {1, 4, 5, 5}}}),
-                                      NewInOutShapes({{{2, 6, 5, 6}},
-                                                      {{2, 2, 5, 6}, {2, 4, 5, 6}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis",      "1"},
-                                                                                             {"out_sizes", "2,4"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Proposal"),
-                                      InOutShapes({{{1,   12, 34, 62}, {1, 24, 34, 62}, {1, 6}},
-                                                   {{200, 5}}}),
-                                      NewInOutShapes({{{2,   1, 5, 5}, {2, 1, 5, 5}, {1, 6}},
-                                                      {{400, 5}}}),
-                                      MapParams(
-                                              MapStrStr(std::map<std::string, std::string>{{"post_nms_topn", "200"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("ReorgYolo"),
-                                      InOutShapes({{{1, 64,  26, 26}},
-                                                   {{1, 256, 13, 13}}}),
-                                      NewInOutShapes({{{2, 8,  6, 6}},
-                                                      {{2, 32, 3, 3}}}),
-                                      MapParams(
-                                              MapStrStr(std::map<std::string, std::string>{{"stride", "2"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("RegionYolo"),
-                                      InOutShapes({{{1, 125, 13, 13}},
-                                                   {{1, 21125}}}),
-                                      NewInOutShapes({{{20, 125, 16, 13}},
-                                                      {{20, 26000}}}),
-                                      MapParams(MapStrStr({{"axis",       "1"},
-                                                           {"end_axis",   "-1"},
-                                                           {"do_softmax", "1"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("ArgMax"),
-                                      InOutShapes({{{1, 3, 1025, 2049}},
-                                                   {{1, 3, 1025, 100}}}),
-                                      NewInOutShapes({{{20, 3, 1025, 2049}},
-                                                      {{20, 3, 1025, 100}}}),
-                                      MapParams(MapStrStr({{"out_max_val", "1"},
-                                                           {"top_k",       "100"},
-                                                           {"axis",        "-1"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("ArgMax"),
-                                      InOutShapes({{{1, 3, 1025, 2049}},
-                                                   {{1, 3, 100,  2049}}}),
-                                      NewInOutShapes({{{20, 3, 1025, 2049}},
-                                                      {{20, 3, 100,  2049}}}),
-                                      MapParams(MapStrStr({{"out_max_val", "1"},
-                                                           {"top_k",       "100"},
-                                                           {"axis",        "2"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("ArgMax"),
-                                      InOutShapes({{{1, 3}},
-                                                   {{1, 2, 100}}}),
-                                      NewInOutShapes({{{20, 3}},
-                                                      {{20, 2, 100}}}),
-                                      MapParams(MapStrStr({{"out_max_val", "1"},
-                                                           {"top_k",       "100"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("ArgMax"),
-                                      InOutShapes({{{1, 3}},
-                                                   {{1, 1, 100}}}),
-                                      NewInOutShapes({{{20, 3}},
-                                                      {{20, 1, 100}}}),
-                                      MapParams(MapStrStr({{"out_max_val", "0"},
-                                                           {"top_k",       "100"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Gemm"),
-                                      InOutShapes({{{15, 10}, {10, 20}, {15, 20}},
-                                                   {{15, 20}}}),
-                                      NewInOutShapes({{{20, 15}, {15, 25}, {20, 25}},
-                                                      {{20, 25}}}),
-                                      MapParams(MapStrStr({{"alpha",       "1"},
-                                                           {"beta",        "1"},
-                                                           {"transpose_a", "false"},
-                                                           {"transpose_b", "false"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Gemm"),
-                                      InOutShapes({{{15, 10}, {10, 20}, {15, 20}},
-                                                   {{15, 20}}}),
-                                      NewInOutShapes({{{20, 15}, {10, 25}, {20, 25}},
-                                                      {{20, 25}}}),
-                                      MapParams(MapStrStr({{"alpha",       "1"},
-                                                           {"beta",        "1"},
-                                                           {"transpose_a", "false"},
-                                                           {"transpose_b", "false"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(false)),
-                ::testing::make_tuple(LayerType("Gemm"),
-                                      InOutShapes({{{15, 10}, {10, 20}, {15, 20}},
-                                                   {{15, 20}}}),
-                                      NewInOutShapes({{{20, 15}, {15, 25}, {15, 25}},
-                                                      {{20, 25}}}),
-                                      MapParams(MapStrStr({{"alpha",       "1"},
-                                                           {"beta",        "1"},
-                                                           {"transpose_a", "false"},
-                                                           {"transpose_b", "false"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(false)),
-                ::testing::make_tuple(LayerType("Gemm"),
-                                      InOutShapes({{{15, 10}, {10, 20}},
-                                                   {{15, 20}}}),
-                                      NewInOutShapes({{{20, 15}, {15, 25}},
-                                                      {{20, 25}}}),
-                                      MapParams(MapStrStr({{"alpha",       "1"},
-                                                           {"beta",        "1"},
-                                                           {"transpose_a", "false"},
-                                                           {"transpose_b", "false"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Gemm"),
-                                      InOutShapes({{{15, 10}, {10, 20}},
-                                                   {{15, 20}}}),
-                                      NewInOutShapes({{{20, 15}, {10, 25}},
-                                                      {{20, 25}}}),
-                                      MapParams(MapStrStr({{"alpha",       "1"},
-                                                           {"beta",        "1"},
-                                                           {"transpose_a", "false"},
-                                                           {"transpose_b", "false"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(false)),
-                ::testing::make_tuple(LayerType("Gemm"),
-                                      InOutShapes({{{3, 3, 15, 10}, {3, 3, 10, 20}, {3, 3, 15, 20}},
-                                                   {{3, 3, 15, 20}}}),
-                                      NewInOutShapes({{{4, 1, 20, 15}, {4, 1, 15, 25}, {4, 1, 20, 25}},
-                                                      {{4, 1, 20, 25}}}),
-                                      MapParams(MapStrStr({{"alpha",       "1"},
-                                                           {"beta",        "1"},
-                                                           {"transpose_a", "false"},
-                                                           {"transpose_b", "false"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Gemm"),
-                                      InOutShapes({{{3, 3, 15, 10}, {3, 1, 10, 20}, {3, 1, 15, 20}},
-                                                   {{3, 3, 15, 20}}}),
-                                      NewInOutShapes({{{4, 2, 20, 15}, {4, 2, 15, 25}, {4, 1, 20, 25}},
-                                                      {{4, 2, 20, 25}}}),
-                                      MapParams(MapStrStr({{"alpha",       "1"},
-                                                           {"beta",        "1"},
-                                                           {"transpose_a", "false"},
-                                                           {"transpose_b", "false"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Pad"),
-                                      InOutShapes({{{3, 3,  15, 10}},
-                                                   {{9, 11, 25, 22}}}),
-                                      NewInOutShapes({{{4,  2,  20, 15}},
-                                                      {{10, 10, 30, 27}}}),
-                                      MapParams(MapStrStr({{"pads_begin", "1,2,3,4"},
-                                                           {"pads_end",   "5,6,7,8"},
-                                                           {"pad_mode",   "edge"},
-                                                           {"pad_value",  "1.0f"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Pad"),
-                                      InOutShapes({{{10, 10, 15, 10}},
-                                                   {{16, 18, 25, 22}}}),
-                                      NewInOutShapes({{{20, 30, 40, 50}},
-                                                      {{26, 38, 40, 50}}}),
-                                      MapParams(MapStrStr({{"pads_begin", "1,2,0,0"},
-                                                           {"pads_end",   "5,6,0,0"},
-                                                           {"pad_mode",   "reflect"},
-                                                           {"pad_value",  "1.0f"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Pad"),
-                                      InOutShapes({{{10, 10, 15, 10}},
-                                                   {{16, 18, 25, 22}}}),
-                                      NewInOutShapes({{{4,  2,  20, 15}},
-                                                      {{10, 10, 30, 27}}}),
-                                      MapParams(MapStrStr({{"pads_begin", "1,2,3,4"},
-                                                           {"pads_end",   "5,6,7,8"},
-                                                           {"pad_mode",   "reflect"},
-                                                           {"pad_value",  "1.0f"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(false))
-        )
-);
-
-// gtest's ::testing::Values() accepts at most 50 arguments, so the remaining cases go into a second instantiation
-INSTANTIATE_TEST_CASE_P(
-        BuiltInGeneralImpls2, BuiltInShapeInferImplTest,
-        ::testing::Values(
-                ::testing::make_tuple(LayerType("Gather"),
-                                      InOutShapes({{{7, 16}, {1, 25}},
-                                                   {{1, 25, 16}}}),
-                                      NewInOutShapes({{{7,  16}, {12, 25}},
-                                                      {{12, 25, 16}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "0"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Gather"),
-                                      InOutShapes({{{7, 16}, {1, 25}},
-                                                   {{7, 1, 25}}}),
-                                      NewInOutShapes({{{7, 16}, {12, 25}},
-                                                      {{7, 12, 25}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "1"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Gather"),
-                                      InOutShapes({{{7, 16}, {1, 25}},
-                                                   {{7, 1, 25}}}),
-                                      NewInOutShapes({{{7, 16}, {12, 25}},
-                                                      {{7, 12, 25}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "-1"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("CTCGreedyDecoder"),
-                                      InOutShapes({{{88, 1,  48, 1}},
-                                                   {{1,  88, 1,  1}}}),
-                                      NewInOutShapes({{{88, 2,  48, 1}},
-                                                      {{2,  88, 1,  1}}}),
-                                      MapParams(MapStrStr()),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("CTCGreedyDecoder"),
-                                      InOutShapes({{{88, 1, 71}, {88, 1}},
-                                                   {{1,  88, 1, 1}}}),
-                                      NewInOutShapes({{{88, 2, 71}, {88, 2}},
-                                                      {{2,  88, 1,  1}}}),
-                                      MapParams(MapStrStr()),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Reshape"),
-                                      InOutShapes({{{1, 2}},
-                                                   {{1, 1}}}),
-                                      NewInOutShapes({{{1, 2}},
-                                                      {{1, 1}}}),
-                                      MapParams(MapStrStr(
-                                              std::map<std::string, std::string>{{"dim", "1,1"}})),  // dim doesn't match input
-                                      LayerDataName("data"),
-                                      CanInfer(false)),
-                ::testing::make_tuple(LayerType("Flatten"),
-                                      InOutShapes({{{2, 1, 4, 5}},
-                                                   {{40}}}),
-                                      NewInOutShapes({{{4, 1, 4, 5}},
-                                                      {{80}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis",     "0"},
-                                                                                             {"end_axis", "-1"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Flatten"),
-                                      InOutShapes({{{2, 2, 4, 5}},
-                                                   {{2, 8, 5}}}),
-                                      NewInOutShapes({{{4, 2, 4, 5}},
-                                                      {{4, 8, 5}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis",     "1"},
-                                                                                             {"end_axis", "2"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Flatten"),
-                                      InOutShapes({{{2, 2, 4, 5}},
-                                                   {{2, 40}}}),
-                                      NewInOutShapes({{{4, 2, 4, 5}},
-                                                      {{4, 40}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "1"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Flatten"),
-                                      InOutShapes({{{2, 2, 4, 5}},
-                                                   {{4, 4, 5}}}),
-                                      NewInOutShapes({{{4, 2, 4, 5}},
-                                                      {{8, 4, 5}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"end_axis", "1"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Interp"),
-                                      InOutShapes({{{2, 2, 100, 16}},
-                                                   {{2, 2, 25,  4}}}),
-                                      NewInOutShapes({{{2, 2, 201, 33}},
-                                                      {{2, 2, 50,  8}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"align_corners", "1"},
-                                                                                             {"factor",        "0.25"},
-                                                                                             {"pad_beg",       "0"},
-                                                                                             {"pad_end",       "0"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Interp"),
-                                      InOutShapes({{{2, 2, 100, 16}},
-                                                   {{2, 2, 100, 16}}}),
-                                      NewInOutShapes({{{2, 2, 101, 33}},
-                                                      {{2, 2, 101, 33}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"align_corners", "1"},
-                                                                                             {"shrink_factor", "1.5"},
-                                                                                             {"zoom_factor",   "1.5"},
-                                                                                             {"pad_beg",       "0"},
-                                                                                             {"pad_end",       "0"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("ShuffleChannels"),
-                                      InOutShapes({{{1, 2, 3, 4}},
-                                                   {{1, 2, 3, 4}}}),
-                                      NewInOutShapes({{{2, 4, 4, 7}},
-                                                      {{2, 4, 4, 7}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis",  "1"},
-                                                                                             {"group", "2"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("DepthToSpace"),
-                                      InOutShapes({{{4, 2, 3}},
-                                                   {{1, 4, 6}}}),
-                                      NewInOutShapes({{{8, 3, 4}},
-                                                      {{2, 6, 8}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"block_size", "2"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("SpaceToDepth"),
-                                      InOutShapes({{{1, 4, 6}},
-                                                   {{4, 2, 3}}}),
-                                      NewInOutShapes({{{2, 6, 8}},
-                                                      {{8, 3, 4}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"block_size", "2"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("ReverseSequence"),
-                                      InOutShapes({{{3, 4, 5}, {3}},
-                                                   {{3, 4, 5}}}),
-                                      NewInOutShapes({{{4, 8, 9}, {4}},
-                                                      {{4, 8, 9}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"seq_axis",   "1"},
-                                                                                             {"batch_axis", "0"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("SparseSegmentSum"),
-                                      InOutShapes({{{5, 2, 3}, {10}, {10}},
-                                                   {{10, 2, 3}}}),
-                                      NewInOutShapes({{{20, 5, 4}, {17}, {17}},
-                                                      {{17, 5, 4}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Select"),
-                                      InOutShapes({{{2, 4, 4}, {2, 4, 4}, {2, 4, 4}},
-                                                   {{2, 4, 4}}}),
-                                      NewInOutShapes({{{2, 4, 4}, {4, 4, 4}, {4, 4, 4}},
-                                                      {{4, 4, 4}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("RegionYolo"),
-                                      InOutShapes({{{1,       125, 13, 13}},
-                                                   {{1 * 125, 13,  13}}}),
-                                      NewInOutShapes({{{20,       125, 16, 13}},
-                                                      {{20 * 125, 16,  13}}}),
-                                      MapParams(MapStrStr({{"axis",       "0"},
-                                                           {"end_axis",   "1"},
-                                                           {"do_softmax", "1"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("RegionYolo"),
-                                      InOutShapes({{{1,            125, 13, 13}},
-                                                   {{1 * 125 * 13, 13}}}),
-                                      NewInOutShapes({{{20,            125, 16, 13}},
-                                                      {{20 * 125 * 16, 13}}}),
-                                      MapParams(MapStrStr({{"axis",       "0"},
-                                                           {"end_axis",   "2"},
-                                                           {"do_softmax", "1"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("RegionYolo"),
-                                      InOutShapes({{{1, 125,                13, 13}},
-                                                   {{1, (80 + 4 + 1) * 125, 13, 13}}}),
-                                      NewInOutShapes({{{20, 125,                16, 13}},
-                                                      {{20, (80 + 4 + 1) * 3, 16, 13}}}),
-                                      MapParams(MapStrStr({{"axis",       "1"},
-                                                           {"end_axis",   "-1"},
-                                                           {"do_softmax", "0"},
-                                                           {"classes",    "80"},
-                                                           {"coords",     "4"},
-                                                           {"mask",       "6,7,8"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Upsampling"),
-                                      InOutShapes({{{1, 3, 4, 5, 6}},
-                                                   {{1, 3, 8, 10, 12}}}),
-                                      NewInOutShapes({{{2, 1, 7, 5, 5}},
-                                                      {{2, 1, 14, 10, 10}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"scale", "2"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("OneHot"),
-                                      InOutShapes({{{1, 3, 4}},
-                                                   {{1, 3, 4, 3}}}),
-                                      NewInOutShapes({{{2, 1, 7}},
-                                                      {{2, 1, 7, 3}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"depth", "3"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("OneHot"),
-                                      InOutShapes({{{1, 3, 5}},
-                                                   {{1, 11, 3, 5}}}),
-                                      NewInOutShapes({{{2, 4, 7}},
-                                                      {{2, 11, 4, 7}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"depth", "11"}, {"axis", "1"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Quantize"),
-                                      InOutShapes({{{1, 64, 10, 10}, {1, 64, 1, 1}, {1, 64, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}},
-                                                   {{1, 64, 10, 10}}}),
-                                      NewInOutShapes({{{2, 128, 10, 10}, {1, 128, 1, 1}, {1, 128, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}},
-                                                      {{2, 128, 10, 10}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"levels", "2"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Unique"),
-                                      InOutShapes({{{5}},
-                                                   {{5}, {5}}}),
-                                      NewInOutShapes({{{25}},
-                                                      {{25}, {25}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"sorted", "false"},
-                                                                                             {"return_inverse", "true"},
-                                                                                             {"return_counts", "false"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Unique"),
-                                      InOutShapes({{{5}},
-                                                   {{5}, {5}, {5}}}),
-                                      NewInOutShapes({{{25}},
-                                                      {{25}, {25}, {25}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"sorted", "false"},
-                                                                                             {"return_inverse", "true"},
-                                                                                             {"return_counts", "true"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("Scatter"),
-                                      InOutShapes({{{3, 3}, {2, 3}},
-                                                   {{3, 3}}}),
-                                      NewInOutShapes({{{4, 4}, {3, 4}},
-                                                      {{4, 4}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"axis", "0"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true)),
-                ::testing::make_tuple(LayerType("NonMaxSuppression"),
-                                      InOutShapes({{{1, 2, 4}, {1, 3, 2}},
-                                                   {{6, 3}}}),
-                                      NewInOutShapes({{{2, 5, 4}, {2, 3, 5}},
-                                                      {{30, 3}}}),
-                                      MapParams(MapStrStr(std::map<std::string, std::string>{{"center_point_box",       "0"},
-                                                                                             {"sort_result_descending", "1"}})),
-                                      LayerDataName("data"),
-                                      CanInfer(true))
-        )
-);
-
-class LayerValidatorNegativeTests : public BuiltInShapeInferImplTest {
-};
-
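-// Building a single-layer network whose parameters fail layer validation must throw.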
-TEST_P(LayerValidatorNegativeTests, reshaper) {
-    ASSERT_THROW(buildSingleLayerNetwork<3>(type, inOutShapes, &layerParams.data, layerDataName),
-                 InferenceEngine::details::InferenceEngineException);
-}
-
-// TODO: test using MR!1690
-INSTANTIATE_TEST_CASE_P(
-        Reshape, LayerValidatorNegativeTests,
-        ::testing::Combine(
-                ::testing::Values(LayerType("Reshape")),
-                ::testing::Values(InOutShapes({{{1,   1, 300, 4}},
-                                               {{300, 4}}})),
-                ::testing::Values(NewInOutShapes({{{1,   1, 500, 4}},
-                                                  {{500, 4}}})),
-                ::testing::Values(
-                        MapParams(MapStrStr(
-                                std::map<std::string, std::string>{{"dim", "0,-2,6"}})),  // dim values can't be less than -1
-                        MapParams(MapStrStr(
-                                std::map<std::string, std::string>{{"dim", "0,-1,-1"}}))),  // only a single -1 is allowed
-                ::testing::Values(LayerDataName("data")),
-                ::testing::Values(CanInfer())
-        )
-);
-
diff --git a/inference-engine/tests_deprecated/unit/shape_infer/built_in_shape_infer_general_test.hpp b/inference-engine/tests_deprecated/unit/shape_infer/built_in_shape_infer_general_test.hpp
deleted file mode 100644 (file)
index b616499..0000000
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <gtest/gtest.h>
-#include <blob_factory.hpp>
-#include <shape_infer/built-in/ie_built_in_holder.hpp>
-#include <utility>
-#include <ie_format_parser.h>
-#include <xml_helper.hpp>
-#include <single_layer_common.hpp>
-#include <tests_common.hpp>
-
-#include "common_test_utils/xml_net_builder/xml_net_builder.hpp"
-
-namespace IE = InferenceEngine;
-
-struct param_size {
-    // dimensions order: x, y, z, ...
-    std::vector<unsigned> dims;
-    param_size() {}
-    param_size(std::initializer_list<unsigned> dims) {
-        this->dims = dims;
-    }
-    bool empty() {
-        return dims.empty();
-    }
-
-    friend std::ostream &operator<<(std::ostream &os, param_size const &paramSize) {
-        auto d_size = paramSize.dims.size();
-        if (d_size > 0) {
-            os << "dims[" << std::to_string(0) << "]=" << std::to_string(paramSize.dims[0]);
-            for (size_t i = 1; i < paramSize.dims.size(); i++)
-                os << ", dims[" << std::to_string(i) << "]=" << std::to_string(paramSize.dims[i]);
-        }
-        return os;
-    }
-
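-    // Joins the dims into a separator-delimited string in reverse order,
-    // e.g. {4, 2} with "," yields "2,4".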
-    std::string toSeparetedRow(const char *separator) {
-        auto d_size = dims.size();
-        std::string res;
-        if (d_size > 0) {
-            res = std::to_string(dims[d_size - 1]);
-            for (int i = static_cast<int>(d_size) - 2; i >= 0; i--) {
-                res += separator + std::to_string(dims[i]);
-            }
-        }
-        return res;
-    }
-};
-
-PRETTY_PARAM(kernel, param_size);
-
-PRETTY_PARAM(stride, param_size);
-
-PRETTY_PARAM(pad, param_size);
-
-PRETTY_PARAM(pad_end, param_size);
-
-PRETTY_PARAM(auto_pad, std::string);
-
-PRETTY_PARAM(out_channels, unsigned);
-
-PRETTY_PARAM(group, unsigned);
-
-PRETTY_PARAM(dilation_factor, param_size);
-
-PRETTY_PARAM(pool_type, std::string);
-
-PRETTY_PARAM(exclude_pad, bool);
-
-PRETTY_PARAM(LayerType, std::string);
-
-PRETTY_PARAM(LayerDataName, std::string);
-
-PRETTY_PARAM(InOutShapes, CommonTestUtils::InOutShapes);
-
-PRETTY_PARAM(NewInOutShapes, CommonTestUtils::InOutShapes);
-
-PRETTY_PARAM(MapParams, MapStrStr);
-
-PRETTY_PARAM(CanInfer, bool);
-
-PRETTY_PARAM(IsTransposed, bool);
-
-PRETTY_PARAM(TopologyPath, std::string);
-
-PRETTY_PARAM(ModelPath, std::string);
-
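-// Batch size applied by the setBatchSize() tests.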
-static size_t BATCH = 100;
-
-class BuiltInShapeInferCommon : public TestsCommon {
-protected:
-    void SetUp() override {
-        holder = std::make_shared<IE::ShapeInfer::BuiltInShapeInferHolder>();
-    }
-
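-    // Fetches the built-in shape-infer implementation registered for the given
-    // layer type; throws if the holder reports anything other than OK.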
-    IE::IShapeInferImpl::Ptr getShapeInferImpl(const std::string &type) {
-        IE::IShapeInferImpl::Ptr impl;
-        sts = holder->getShapeInferImpl(impl, type.c_str(), &resp);
-        if (sts != IE::StatusCode::OK) THROW_IE_EXCEPTION << resp.msg;
-        return impl;
-    }
-
-protected:
-    IE::StatusCode sts = IE::StatusCode::GENERAL_ERROR;
-    IE::ResponseDesc resp;
-    std::shared_ptr<IE::IShapeInferExtension> holder;
-};
-
-template<class T>
-class BuiltInShapeInferTestWithParam : public BuiltInShapeInferCommon,
-                                       public testing::WithParamInterface<T> {
-
-protected:
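-    // Wraps each shape in an FP32 blob with the default layout for its rank.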
-    static std::vector<IE::Blob::CPtr> getBlobs(const std::vector<IE::SizeVector>& shapes) {
-        std::vector<IE::Blob::CPtr> inBlobs;
-        for (auto const& dims : shapes) {
-            IE::TensorDesc desc(IE::Precision::FP32, dims, IE::TensorDesc::getLayoutByDims(dims));
-            auto blob = make_blob_with_precision(desc);
-            inBlobs.push_back(blob);
-        }
-        return inBlobs;
-    }
-
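-    // Collects the network's current input shapes, then overwrites them with
-    // shapesToSet in the map's iteration order (sorted by input name).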
-    static IE::ICNNNetwork::InputShapes
-    setInputShapes(const IE::ICNNNetwork &cnnNetwork,
-                   const std::vector<IE::SizeVector> &shapesToSet) {
-        IE::ICNNNetwork::InputShapes inputShapes;
-        IE::InputsDataMap inputs;
-        cnnNetwork.getInputsInfo(inputs);
-        for (const auto &pair : inputs) {
-            auto info = pair.second;
-            if (info) {
-                auto data = info->getInputData();
-                if (data) {
-                    inputShapes[data->getName()] = data->getTensorDesc().getDims();
-                }
-            }
-        }
-        int i = 0;
-        for (auto &pair : inputShapes) {
-            pair.second = shapesToSet[i++];
-        }
-        return inputShapes;
-    }
-
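-    // Asserts that the network's input and output dims match the expected shapes.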
-    static void checkNetworkInOut(const IE::ICNNNetwork &network,
-                                  const CommonTestUtils::InOutShapes &inOutData) {
-        IE::InputsDataMap inputsDataMap;
-        IE::OutputsDataMap outputsDataMap;
-        network.getInputsInfo(inputsDataMap);
-        network.getOutputsInfo(outputsDataMap);
-        int i = 0;
-        for (auto pair : inputsDataMap) {
-            ASSERT_EQ(inOutData.inDims[i++], pair.second->getTensorDesc().getDims());
-        }
-        i = 0;
-        for (auto pair : outputsDataMap) {
-            ASSERT_EQ(inOutData.outDims[i++], pair.second->getDims());
-        }
-    }
-
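-    // Builds a network holding a single layer of the given type from an IR of
-    // the requested version.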
-    template<int Version = 3>
-    static IE::details::CNNNetworkImplPtr
-    buildSingleLayerNetwork(const std::string &layerType,
-                            const CommonTestUtils::InOutShapes &inOutShapes,
-                            std::map<std::string, std::string> *params,
-                            const std::string &layerDataName = "data") {
-        auto *parser = new IE::details::FormatParser(Version);
-        return buildSingleLayerNetworkCommon<Version>(parser, layerType, inOutShapes, params, layerDataName);
-    }
-
-protected:
-    std::vector<IE::SizeVector> outShapes;
-    std::map<std::string, std::string> params;
-    std::map<std::string, IE::Blob::Ptr> blobs;
-};
-
-class BuiltInShapeInferImplTest
-        : public BuiltInShapeInferTestWithParam<std::tuple<LayerType, InOutShapes, NewInOutShapes, MapParams, LayerDataName, CanInfer>> {
-protected:
-    void SetUp() override {
-        BuiltInShapeInferCommon::SetUp();
-        auto params = GetParam();
-        type = std::get<0>(params);
-        inOutShapes = std::get<1>(params);
-        newInOutShapes = std::get<2>(params);
-        layerParams = std::get<3>(params);
-        layerDataName = std::get<4>(params);
-        canInfer = std::get<5>(params);
-    }
-
-protected:
-    std::string type;
-    CommonTestUtils::InOutShapes inOutShapes;
-    CommonTestUtils::InOutShapes newInOutShapes;
-    MapStrStr layerParams;
-    std::string layerDataName;
-    bool canInfer{};
-};
-
diff --git a/inference-engine/tests_deprecated/unit/shape_infer/built_in_shape_infer_pool_test.cpp b/inference-engine/tests_deprecated/unit/shape_infer/built_in_shape_infer_pool_test.cpp
deleted file mode 100644 (file)
index f65d08d..0000000
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <shape_infer/built-in/ie_built_in_holder.hpp>
-#include <xml_net_builder.hpp>
-#include <cnn_network_impl.hpp>
-#include <ie_format_parser.h>
-#include <xml_helper.hpp>
-#include <shape_infer/ie_reshaper.hpp>
-#include "built_in_shape_infer_general_test.hpp"
-
-using namespace InferenceEngine;
-using namespace ShapeInfer;
-
-class BuiltInShapeInferPoolImplTest
-        : public BuiltInShapeInferTestWithParam<std::tuple<InOutShapes, kernel, stride, pad, pool_type, exclude_pad, auto_pad, NewInOutShapes, pad_end>> {
-protected:
-    void SetUp() override {
-        BuiltInShapeInferCommon::SetUp();
-        auto params = GetParam();
-        inOutShapes = std::get<0>(params);
-        kernel = std::get<1>(params);
-        stride = std::get<2>(params);
-        pad = std::get<3>(params);
-        pool_type = std::get<4>(params);
-        exclude_pad = std::get<5>(params);
-        auto_pad = std::get<6>(params);
-        newInOutShapes = std::get<7>(params);
-        pad_end = std::get<8>(params);
-    }
-
-    std::map<std::string, std::string> getMapParams() {
-        std::map<std::string, std::string> params = {
-                {"kernel",      kernel.toSeparetedRow(",")},
-                {"strides",     stride.toSeparetedRow(",")},
-                {"pads_begin",  pad.toSeparetedRow(",")},
-                {"pool-method", pool_type},
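-                // note: the flag is serialized inverted, exclude_pad == true becomes "false"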
-                {"exclude-pad", exclude_pad ? "false" : "true"}
-        };
-        if (!auto_pad.empty()) params["auto_pad"] = auto_pad;
-        if (!pad_end.empty()) params["pads_end"] = pad_end.toSeparetedRow(",");
-        return params;
-    }
-
-protected:
-    std::string type = "Pooling";
-    testing::InOutShapes inOutShapes;
-    testing::InOutShapes newInOutShapes;
-    param_size kernel;
-    param_size stride;
-    param_size pad;
-    std::string pool_type;
-    bool exclude_pad;
-    std::string auto_pad;
-    param_size pad_end;
-};
-
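-// Runs the built-in Pooling shape-infer implementation directly on the original shapes.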
-TEST_P(BuiltInShapeInferPoolImplTest, body) {
-    auto impl = getShapeInferImpl(type);
-    ASSERT_NE(nullptr, impl);
-    ASSERT_NO_THROW(sts = impl->inferShapes(getBlobs(inOutShapes.inDims), getMapParams(), blobs, outShapes, &resp));
-    ASSERT_EQ(int(OK), sts) << resp.msg;
-    ASSERT_EQ(inOutShapes.outDims, outShapes);
-}
-
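-// Builds a single-layer Pooling network and reshapes it to the new input dims.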
-TEST_P(BuiltInShapeInferPoolImplTest, reshaper) {
-    auto layerParams = getMapParams();
-    auto cnnNetworkImplPtr = buildSingleLayerNetwork<4>(type, inOutShapes, &layerParams, "pooling_data");
-    auto reshaper = std::make_shared<Reshaper>(*cnnNetworkImplPtr);
-    auto inputShapes = setInputShapes(*cnnNetworkImplPtr, newInOutShapes.inDims);
-    reshaper->run(inputShapes);
-    checkNetworkInOut(*cnnNetworkImplPtr, newInOutShapes);
-}
-
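-// setBatchSize() must update only the batch dimension of inputs and outputs.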
-TEST_P(BuiltInShapeInferPoolImplTest, batch) {
-    auto layerParams = getMapParams();
-    auto cnnNetworkImplPtr = buildSingleLayerNetwork<4>(type, inOutShapes, &layerParams, "pooling_data");
-    auto reshaper = std::make_shared<Reshaper>(*cnnNetworkImplPtr);
-    sts = cnnNetworkImplPtr->setBatchSize(BATCH, &resp);
-    ASSERT_EQ((int)OK, sts) << resp.msg;
-    inOutShapes.inDims[0][0] = inOutShapes.outDims[0][0] = BATCH;
-    checkNetworkInOut(*cnnNetworkImplPtr, inOutShapes);
-}
-
-INSTANTIATE_TEST_CASE_P(
-        BuiltInImpls, BuiltInShapeInferPoolImplTest,
-        ::testing::Values(
-                // fixate pad
-                ::testing::make_tuple(InOutShapes({{{4, 3, 228, 228}},
-                                                   {{4, 3, 229, 115}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({2, 1}), pool_type("max"), exclude_pad(true), auto_pad(""),
-                                      NewInOutShapes({{{1, 3, 228, 228}},
-                                                      {{1, 3, 229, 115}}}), pad_end()),
-                // fixate pad + right/bottom
-                ::testing::make_tuple(InOutShapes({{{4, 3, 228, 228}},
-                                                   {{4, 3, 229, 115}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({2, 1}), pool_type("max"), exclude_pad(true), auto_pad(""),
-                                      NewInOutShapes({{{1, 3, 228, 228}},
-                                                      {{1, 3, 229, 115}}}), pad_end({3, 2})),
-                // valid + empty paddings
-                ::testing::make_tuple(InOutShapes({{{4, 3, 228, 228}},
-                                                   {{4, 3, 227, 113}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({0, 0}), pool_type("max"), exclude_pad(true), auto_pad("valid"),
-                                      NewInOutShapes({{{1, 3, 228, 228}},
-                                                      {{1, 3, 227, 113}}}), pad_end()),
-                // valid + fixated paddings (shouldn't affect)
-                ::testing::make_tuple(InOutShapes({{{4, 3, 228, 228}},
-                                                   {{4, 3, 227, 113}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({2, 4}), pool_type("max"), exclude_pad(true), auto_pad("valid"),
-                                      NewInOutShapes({{{1, 3, 228, 228}},
-                                                      {{1, 3, 227, 113}}}), pad_end({2, 1})),
-                // same_upper + empty paddings
-                ::testing::make_tuple(InOutShapes({{{4, 3, 227, 227}},
-                                                   {{4, 3, 227, 114}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({0, 0}), pool_type("max"), exclude_pad(true), auto_pad("same_upper"),
-                                      NewInOutShapes({{{1, 3, 227, 227}},
-                                                      {{1, 3, 227, 114}}}), pad_end()),
-                // same_upper + fixated paddings (shouldn't affect)
-                ::testing::make_tuple(InOutShapes({{{4, 3, 227, 227}},
-                                                   {{4, 3, 227, 114}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({2, 4}), pool_type("max"), exclude_pad(true), auto_pad("same_upper"),
-                                      NewInOutShapes({{{1, 3, 227, 227}},
-                                                      {{1, 3, 227, 114}}}), pad_end({0, 0})),
-                // same_lower + empty paddings
-                ::testing::make_tuple(InOutShapes({{{4, 3, 227, 227}},
-                                                   {{4, 3, 227, 113}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({0, 0}), pool_type("max"), exclude_pad(true), auto_pad("same_lower"),
-                                      NewInOutShapes({{{1, 3, 227, 227}},
-                                                      {{1, 3, 227, 113}}}), pad_end({0, 0})),
-                // same_lower + fixated paddings (shouldn't affect)
-                ::testing::make_tuple(InOutShapes({{{4, 3, 227, 227}},
-                                                   {{4, 3, 227, 113}}}), kernel({4, 2}), stride({2, 1}),
-                                      pad({2, 4}), pool_type("max"), exclude_pad(true), auto_pad("same_lower"),
-                                      NewInOutShapes({{{1, 3, 227, 227}},
-                                                      {{1, 3, 227, 113}}}), pad_end({0, 0})),
-                // 5D tensors
-                // fixate pad
-                ::testing::make_tuple(InOutShapes({{{4, 3, 16, 128, 130}},
-                                                   {{4, 3, 17, 129, 66}}}), kernel({4, 2, 2}), stride({2, 1, 1}),
-                                      pad({2, 1, 1}), pool_type("max"), exclude_pad(true), auto_pad(""),
-                                      NewInOutShapes({{{1, 3, 16, 128, 130}},
-                                                      {{1, 3, 17, 129, 66}}}), pad_end()),
-                // valid + empty paddings
-                ::testing::make_tuple(InOutShapes({{{4, 3, 16, 128, 130}},
-                                                   {{4, 3, 15, 127, 64}}}), kernel({4, 2, 2}), stride({2, 1, 1}),
-                                      pad({0, 0, 0}), pool_type("max"), exclude_pad(true), auto_pad("valid"),
-                                      NewInOutShapes({{{1, 3, 16, 128, 130}},
-                                                      {{1, 3, 15, 127, 64}}}), pad_end()),
-                // same_upper + empty paddings
-                ::testing::make_tuple(InOutShapes({{{4, 3, 16, 128, 130}},
-                                                   {{4, 3, 16, 128, 65}}}), kernel({4, 2, 2}), stride({2, 1, 1}),
-                                      pad({0, 0, 0}), pool_type("max"), exclude_pad(true), auto_pad("same_upper"),
-                                      NewInOutShapes({{{1, 3, 16, 128, 130}},
-                                                      {{1, 3, 16, 128, 65}}}), pad_end())
-        )
-);
diff --git a/inference-engine/tests_deprecated/unit/shape_infer/input_controller_test.cpp b/inference-engine/tests_deprecated/unit/shape_infer/input_controller_test.cpp
deleted file mode 100644
index 75d8d4b..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <gmock/gmock-matchers.h>
-
-#include "unit_test_utils/mocks/shape_infer/mock_ishape_infer_impl.hpp"
-#include "unit_test_utils/mocks/shape_infer/ie_reshape_io_controllers.hpp"
-
-using namespace InferenceEngine;
-using namespace InferenceEngine::details;
-using namespace ShapeInfer;
-using namespace ::testing;
-
-class InputControllerTest : public ::testing::Test {
-public:
-    static const std::string TEST_NAME;
-    DataPtr notEmptyData = std::make_shared<Data>(TEST_NAME, Precision::UNSPECIFIED, Layout::C);
-    SizeVector inDims{1};
-};
-
-const std::string InputControllerTest::TEST_NAME = "TEST_NAME";
-
-TEST_F(InputControllerTest, failedToCreateWithEmptyInsData) {
-    EXPECT_THROW(InputController({}, TEST_NAME), InferenceEngineException);
-}
-
-TEST_F(InputControllerTest, failedToCreateWithNullData) {
-    EXPECT_THROW(InputController({nullptr}, TEST_NAME), InferenceEngineException);
-}
-
-TEST_F(InputControllerTest, canCreateInputController) {
-    ASSERT_NO_THROW(InputController({notEmptyData}, TEST_NAME));
-}
-
-TEST_F(InputControllerTest, canPushShapes) {
-    InputController controller({notEmptyData}, TEST_NAME);
-    ASSERT_NO_THROW(controller.setShapeByName(inDims, TEST_NAME));
-}
-
-TEST_F(InputControllerTest, DISABLED_throwOnGetWithNotEnoughShapes) {
-    InputController controller({notEmptyData, notEmptyData}, TEST_NAME);
-    controller.setShapeByName(inDims, TEST_NAME);
-    ASSERT_THROW(controller.getShapes(true), InferenceEngineException);
-}
-
-TEST_F(InputControllerTest, canGetWithNotEnoughShapes) {
-    InputController controller({notEmptyData, notEmptyData}, TEST_NAME);
-    controller.setShapeByName(inDims, TEST_NAME);
-    controller.getShapes(false);
-}
-
-TEST_F(InputControllerTest, canGetChanges) {
-    InputController controller({notEmptyData}, TEST_NAME);
-    controller.setShapeByName(inDims, TEST_NAME);
-    ASSERT_NO_THROW(controller.getShapes(true));
-}
-
-TEST_F(InputControllerTest, DISABLED_throwOnApplyWithNotEnoughShapes) {
-    InputController controller({notEmptyData, notEmptyData}, TEST_NAME);
-    controller.setShapeByName(inDims, TEST_NAME);
-    ASSERT_THROW(controller.applyChanges(), InferenceEngineException);
-}
-
-TEST_F(InputControllerTest, canApplyChanges) {
-    InputController controller({notEmptyData}, TEST_NAME);
-    controller.setShapeByName(inDims, TEST_NAME);
-    ASSERT_NO_THROW(controller.applyChanges());
-}
-
-TEST_F(InputControllerTest, canResetShapes) {
-    InputController controller({notEmptyData}, TEST_NAME);
-    controller.setShapeByName(inDims, TEST_NAME);
-    ASSERT_EQ(controller.getShapes(true)[0], inDims);
-    ASSERT_NO_THROW(controller.reset());
-    ASSERT_NE(controller.getShapes(true)[0], inDims);
-}
diff --git a/inference-engine/tests_deprecated/unit/shape_infer/input_reshape_launcher_test.cpp b/inference-engine/tests_deprecated/unit/shape_infer/input_reshape_launcher_test.cpp
deleted file mode 100644
index 837ed8b..0000000
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <gmock/gmock-matchers.h>
-
-#include "unit_test_utils/mocks/shape_infer/mock_ishape_infer_impl.hpp"
-#include "unit_test_utils/mocks/shape_infer/mock_reshaper_launcher.hpp"
-
-using namespace InferenceEngine;
-using namespace InferenceEngine::details;
-using namespace ShapeInfer;
-using namespace ::testing;
-
-class InputReshapeLauncherTest : public ::testing::Test {
-protected:
-    void SetUp() override {
-        notEmptyData = getNotEmptyData();
-        impl = std::make_shared<MockIShapeInferImpl>();
-    };
-
-public:
-    static const std::string TEST_NAME;
-    DataPtr notEmptyData;
-    MockIShapeInferImpl::Ptr impl;
-    SizeVector outDims{2};
-public:
-    DataPtr getNotEmptyData() {
-        return std::make_shared<Data>(TEST_NAME, Precision::UNSPECIFIED, Layout::C);
-    }
-
-    CNNLayerPtr createLayer(const std::string& name = TEST_NAME, const std::string& type = "Input") {
-        LayerParams params{name, type, Precision::UNSPECIFIED};
-        auto layer = std::make_shared<CNNLayer>(params);
-        if (layer == nullptr) {
-            THROW_IE_EXCEPTION << "InputReshapeLauncherTest::createLayer(). Could not create CNNLayer";
-        }
-        layer->outData = {notEmptyData};
-        notEmptyData->setDims(outDims);
-        return layer;
-    }
-};
-
-const std::string InputReshapeLauncherTest::TEST_NAME = "TEST_NAME";
-
-TEST_F(InputReshapeLauncherTest, failedToCreateWithNullLayer) {
-    const CNNLayer* layer = nullptr;
-    ASSERT_THROW(InputReshapeLauncher launcher(layer, impl), InferenceEngineException);
-}
-
-TEST_F(InputReshapeLauncherTest, failedToCreateWithEmptyOutData) {
-    CNNLayer layer({});
-    ASSERT_THROW(InputReshapeLauncher launcher(&layer, impl), InferenceEngineException);
-}
-
-TEST_F(InputReshapeLauncherTest, failedToCreateWithNullOutData) {
-    CNNLayer layer({});
-    layer.outData = {nullptr};
-    ASSERT_THROW(InputReshapeLauncher launcher(&layer, impl), InferenceEngineException);
-}
-
-TEST_F(InputReshapeLauncherTest, failedToCreateWithNotInputType) {
-    CNNLayer layer({});
-    layer.outData = {notEmptyData};
-    ASSERT_THROW(InputReshapeLauncher launcher(&layer, impl), InferenceEngineException);
-}
-
-TEST_F(InputReshapeLauncherTest, canCreateReshapeLauncher) {
-    ASSERT_NO_THROW(InputReshapeLauncher launcher(createLayer().get(), impl));
-}
-
-TEST_F(InputReshapeLauncherTest, canPushShapes) {
-    InputReshapeLauncher launcher(createLayer().get(), impl);
-    ASSERT_NO_THROW(launcher.setShapeByName(outDims, TEST_NAME));
-}
-
-TEST_F(InputReshapeLauncherTest, canPropagateWithNotEnoughShapes) {
-    InputReshapeLauncher launcher(createLayer().get(), impl);
-    launcher.reshape({});
-}
-
-TEST_F(InputReshapeLauncherTest, throwOnPropagateWithEmptyLaunchers) {
-    auto layer = createLayer();
-    layer->outData[0]->inputTo = {{{}, createLayer(TEST_NAME, TEST_NAME)}};
-    InputReshapeLauncher launcher(layer.get(), impl);
-    launcher.setShapeByName(outDims, TEST_NAME);
-    ASSERT_NO_THROW();
-    ASSERT_THROW(launcher.reshape({}), InferenceEngineException);
-}
-
-TEST_F(InputReshapeLauncherTest, throwOnPropagateWithoutProperLauncher) {
-    auto layer = createLayer();
-    layer->outData[0]->inputTo = {{{}, createLayer(TEST_NAME + "another", TEST_NAME)}};
-    InputReshapeLauncher inLauncher(layer.get(), impl);
-    inLauncher.setShapeByName(outDims, TEST_NAME);
-    auto launcher = std::make_shared<MockReshapeLauncher>();
-    EXPECT_CALL(*launcher.get(), getLayerName()).WillOnce(Return(TEST_NAME));
-    ASSERT_THROW(inLauncher.reshape({{launcher}}), InferenceEngineException);
-}
-
-TEST_F(InputReshapeLauncherTest, canPropagate) {
-    auto layer = createLayer();
-    layer->outData[0]->inputTo = {{{}, createLayer(TEST_NAME, TEST_NAME)}};
-    InputReshapeLauncher inLauncher(layer.get(), impl);
-    auto launcher = std::make_shared<MockReshapeLauncher>();
-    EXPECT_CALL(*launcher.get(), setShapeByName(outDims, TEST_NAME));
-    EXPECT_CALL(*launcher.get(), getLayerName()).WillOnce(Return(TEST_NAME));
-    inLauncher.setShapeByName(outDims, TEST_NAME);
-    inLauncher.reshape({{launcher}});
-}
-
-TEST_F(InputReshapeLauncherTest, canReset) {
-    auto layer = createLayer();
-    InputReshapeLauncher launcher(layer.get(), impl);
-    ASSERT_NO_THROW(launcher.reset());
-}
-
-TEST_F(InputReshapeLauncherTest, canApplyWithoutSettingShapes) {
-    auto layer = createLayer();
-    layer->outData.push_back(notEmptyData);
-    InputReshapeLauncher launcher(layer.get(), impl);
-    ASSERT_NO_THROW(launcher.applyChanges(layer.get()));
-}
-
-TEST_F(InputReshapeLauncherTest, canNotApplyForLayerWithAnotherName) {
-    auto layer1 = createLayer("");
-    auto layer2 = createLayer();
-    InputReshapeLauncher launcher(layer1.get(), impl);
-    launcher.setShapeByName(outDims, TEST_NAME);
-    ASSERT_THROW(launcher.applyChanges(layer2.get()), InferenceEngineException);
-}
-
-TEST_F(InputReshapeLauncherTest, canApplyChanges) {
-    auto layer = createLayer();
-    InputReshapeLauncher launcher(layer.get(), impl);
-    launcher.setShapeByName(outDims, TEST_NAME);
-    launcher.applyChanges(layer.get());
-
-    auto outData = layer->outData;
-    ASSERT_EQ(1, outData.size());
-    auto out0Data = outData[0];
-    ASSERT_NE(nullptr, out0Data);
-    ASSERT_EQ(outDims, out0Data->getDims());
-}
-
-TEST_F(InputReshapeLauncherTest, canGetShapesFromLayer) {
-    CNNLayer layer({});
-    layer.outData = {notEmptyData};
-    notEmptyData->setDims(outDims);
-    auto initializer = std::make_shared<MockReshapeLauncher::TestLauncherInitializer>();
-    InputReshapeLauncher launcher(&layer, impl, initializer);
-    auto outputController = initializer->getOutputController();
-    EXPECT_CALL(*outputController, getIRShapes()).WillOnce(Return(std::vector<SizeVector>{outDims}));
-    EXPECT_CALL(*outputController, getShapes(false)).WillOnce(Return(std::vector<SizeVector>{SizeVector()}));
-    EXPECT_CALL(*outputController, setShapeByIndex(outDims, 0));
-    EXPECT_CALL(*outputController, propagateShapes(_));
-    launcher.reshape({});
-}
diff --git a/inference-engine/tests_deprecated/unit/shape_infer/models_test.cpp b/inference-engine/tests_deprecated/unit/shape_infer/models_test.cpp
deleted file mode 100644
index 42380f7..0000000
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <cnn_network_impl.hpp>
-#include <shape_infer/ie_reshaper.hpp>
-#include "details/ie_cnn_network_tools.h"
-#include <cpp/ie_cnn_net_reader.h>
-#include <graph_tools.hpp>
-#include <test_model_path.hpp>
-#include <xml_helper.hpp>
-#include <file_utils.h>
-#include "built_in_shape_infer_general_test.hpp"
-
-using namespace InferenceEngine;
-using namespace InferenceEngine::details;
-using namespace ShapeInfer;
-
-class GeneralShapeInferModelsTests
-        : public BuiltInShapeInferTestWithParam<std::tuple<InOutShapes, ModelPath, CanInfer>> {
-protected:
-    void SetUp() override {
-        BuiltInShapeInferCommon::SetUp();
-        auto params = GetParam();
-        inOutShapes = std::get<0>(params);
-        modelsPath = std::get<1>(params);
-        canInfer = std::get<2>(params);
-    }
-
-protected:
-    testing::InOutShapes inOutShapes;
-    bool canInfer{};
-    std::string modelsPath;
-};
-
-class OriginalShapeInferModelsTests
-        : public BuiltInShapeInferTestWithParam<std::string> {
-protected:
-    void SetUp() override {
-        BuiltInShapeInferCommon::SetUp();
-        modelsPath = GetParam();
-    }
-
-    std::map<std::string, SizeVector> getShapes(const std::vector<CNNLayerPtr>& allLayers) {
-        std::map<std::string, SizeVector> shapes;
-        for (const auto& layer:allLayers) {
-            for (const auto& data:layer->outData) {
-                shapes[data->getName()] = data->getTensorDesc().getDims();
-            }
-        }
-        return shapes;
-    }
-
-    void compare(const std::vector<CNNLayerPtr>& allLayers, std::map<std::string, SizeVector>& oldShapes) {
-        for (const auto& layer:allLayers) {
-            for (const auto& data:layer->outData) {
-                ASSERT_EQ(oldShapes[data->getName()], data->getTensorDesc().getDims())
-                                            << "Shapes don't match:\n Data Name: " << data->getName() << "\n Layer Name: "
-                                            << layer->name << "\n Layer Type: " << layer->type;
-            }
-        }
-    }
-
-protected:
-    std::string modelsPath;
-};
-
-TEST_P(GeneralShapeInferModelsTests, reshape) {
-    CNNNetReader reader;
-    auto modelPath = ModelsPath() + kPathSeparator + modelsPath;
-    reader.ReadNetwork(modelPath);
-    reader.ReadWeights(FileUtils::fileNameNoExt(modelPath) + ".bin");
-    auto iCnnNetwork = reader.getNetwork();
-
-    auto reshaper = std::make_shared<Reshaper>(iCnnNetwork);
-    auto inputShapes = setInputShapes(iCnnNetwork, inOutShapes.inDims);
-
-    if (canInfer) {
-        reshaper->run(inputShapes);
-        checkNetworkInOut(iCnnNetwork, inOutShapes);
-    } else {
-        ASSERT_THROW(reshaper->run(inputShapes), InferenceEngine::details::InferenceEngineException);
-    }
-}
-
-TEST_P(OriginalShapeInferModelsTests, reshape) {
-    CNNNetReader reader;
-    auto modelPath = ModelsPath() + kPathSeparator + modelsPath;
-    reader.ReadNetwork(modelPath);
-    reader.ReadWeights(FileUtils::fileNameNoExt(modelPath) + ".bin");
-    auto network = reader.getNetwork();
-    auto allLayers = CNNNetSortTopologically(network);
-    auto oldShapes = getShapes(allLayers);
-    CNNNetwork cppNet = reader.getNetwork();
-    auto inputShapes = cppNet.getInputShapes();
-    auto reshaper = std::make_shared<Reshaper>(network);
-    try {
-        reshaper->run(inputShapes);
-    } catch (const InferenceEngineException& e) {
-        FAIL() << e.what();
-    }
-    compare(allLayers, oldShapes);
-}
-
-class SimpleBatchShapeInferModelsTests : public OriginalShapeInferModelsTests {
-};
-
-TEST_P(SimpleBatchShapeInferModelsTests, simpleBatch) {
-    const int batch = 777;
-    CNNNetReader reader;
-    auto modelPath = ModelsPath() + kPathSeparator + modelsPath;
-    reader.ReadNetwork(modelPath);
-    reader.ReadWeights(FileUtils::fileNameNoExt(modelPath) + ".bin");
-    auto network = reader.getNetwork();
-    auto allLayers = CNNNetSortTopologically(network);
-    auto oldShapes = getShapes(allLayers);
-    CNNNetwork cppNet = reader.getNetwork();
-    auto inputShapes = cppNet.getInputShapes();
-    for (auto& shape:inputShapes) {
-        auto dims = shape.second;
-        auto name = shape.first;
-        dims[0] = batch;
-        inputShapes[name] = dims;
-    }
-    auto reshaper = std::make_shared<Reshaper>(network);
-    try {
-        reshaper->run(inputShapes);
-    } catch (const InferenceEngineException& e) {
-        FAIL() << e.what();
-    }
-
-    for (auto& shape :oldShapes) {
-        shape.second[0] = batch;
-    }
-    compare(allLayers, oldShapes);
-}
-
-INSTANTIATE_TEST_CASE_P(
-        NewShapesForModels, GeneralShapeInferModelsTests,
-        ::testing::Values(
-                ::testing::make_tuple(InOutShapes({{{3, 3, 227, 227}},
-                                                   {{3, 4, 109, 109}, {3, 2, 109, 109}}}),
-                                      ModelPath("mtcnn/PNet_fp16.xml"), CanInfer(true)),
-                ::testing::make_tuple(InOutShapes({{{1, 3, 1000, 1000}},
-                                                   {{}}}),
-                                      ModelPath("alexnet/bvlc_alexnet_fp16.xml"), CanInfer(false)),
-                ::testing::make_tuple(InOutShapes({{{1, 3,  1000, 1000}},
-                                                   {{1, 21, 1000, 1000}}}),
-                                      ModelPath("fcn/fcn8s-heavy-pascal_fp16.xml"), CanInfer(true)),
-                ::testing::make_tuple(InOutShapes({{{1, 3, 1000, 1000}},
-                                                   {{}}}),
-                                      ModelPath("googlenet/bvlc_googlenet_fp16.xml"), CanInfer(false)),
-                ::testing::make_tuple(InOutShapes({{{7, 3, 300, 300}},
-                                                   {{1, 1, 700, 7}}}),
-                                      ModelPath("MobileNet-SSD/MobileNet-SSD_fp16.xml"), CanInfer(true)),
-                ::testing::make_tuple(InOutShapes({{{7, 3, 300,  300}},
-                                                   {{1, 1, 1400, 7}}}),
-                                      ModelPath("SSD_300/ssd_300_fp16.xml"), CanInfer(true)),
-                ::testing::make_tuple(InOutShapes({{{2, 3, 500, 500}},
-                                                   {{1, 1, 400, 7}}}),
-                                      ModelPath("SSD_512/ssd_512_fp16.xml"), CanInfer(true)),
-                ::testing::make_tuple(InOutShapes({{{1, 3, 24, 94}, {88, 1}},
-                                                   {{}}}),
-                                      ModelPath("lprnet/LPRNet_new_fp32.xml"),
-                                      CanInfer(false))  // can handle invalid IR without segfault
-//                ::testing::make_tuple(InOutShapes({{{1, 3,    400, 400}},
-//                                                   {{1, 1001, 2,   2}}}),
-//                                      ModelPath("GoogleNet-V3-TF/inception.xml"), CanInfer(true)) // TODO: is data axis="0" dim="1,1001" num_axes="-1" not enough for Reshape? Are more attributes needed?
-        )
-);
-
-static std::vector<std::string> advancedBatchModels =
-        {"PC-Detect_0026/PVANET_fp32.xml",
-         "Perc_ZF_fasterRCNN/Perc_ZF_fasterRCNN_fp32.xml", // TODO: try to infer batch
-         "SSD_512/ssd_512_fp16.xml",
-         "SSD_GoogleNet_v2/SSD_GoogleNet_v2_fp32.xml",
-         "ssd_weights_performance/ssd_fp32.xml",
-         "ssd_weights_performance/ssd_slow_fp32.xml",
-         "squeezenet_ssd/squeezenet_ssd_fp32.xml",
-         "SSD_VGG/VGG_ILSVRC2016_SSD_300x300_deploy_fp32.xml",
-         "squeezenet_ssd_224_akhila/squeezenet_ssd_224_akhila_fp32.xml",
-         "lprnet-48/LPRNet_48_fp32.xml",
-         "lprnet-43/LPRNet_43_fp32.xml",
-         "lprnet-dla_25k/LPRNet_DLA_25k_fp32.xml",
-         "lprnet-70/LPRNet_fp32.xml",
-         "rm_lstm4f/rm_lstm4f_fp32.xml", // Changing shape (batch) is not supported now with hiding Memory layers from users
-         "fake-pvanet/PVANET_fp32.xml",
-         "SSD_300/ssd_300_fp16.xml",
-         "chuanqi305_squeezenet_ssd/squeezenet_ssd_fp32.xml",
-         "face-detection-retail-0004/FP16/face-detection-retail-0004.xml",
-         // "icvnet1.3-ssd/ICV1.3_320_orig_hyper_6_coco_iter_400000.pb.xml", // TODO: get correct IR, and Const is not Input and can't change batch for it
-        };
-
-static std::vector<std::string> simpleBatchModels =
-        {"cyberlink/style-frozen.xml",
-         "cyberlink/refine-frozen.xml",
-         "cyberlink/style-frozen-5-304-908.xml",
-         "cyberlink/refine-frozen-3-1208-404.xml",
-         "icv-hpe/icv-hpe-pre-poc-0001-201-102.xml",
-         "shape_infer/tf_alexnet.xml",
-         "shape_infer/tf_mobilenet_batch5.xml",
-         "shape_infer/tf_mobilenet_orig.xml",
-         "fcn/fcn8s-heavy-pascal_fp16.xml",
-         "alexnet/bvlc_alexnet_fp16.xml",
-         "GoogleNet-V1-TF/inceptionV1-501-402.xml",
-         "mtcnn/PNet_fp16.xml",
-         "conv_conv_med/conv_conv_med_fp16.xml",
-         "densenet-121/DENSENET_121_32.xml",
-         "MobileNet/mobilenet_fp16.xml",
-         "googlenet/bvlc_googlenet_fp16.xml",
-         "resnet-18/ResNet-18_fp32.xml",
-         "ResNet-50/ResNet-50_fp32.xml",
-         "ResNet-101/ResNet-101_fp32.xml",
-         "ResNet-152/ResNet-152_fp32.xml",
-         "vgg/VGG_ILSVRC_16_layers_fp32.xml",
-         "vgg-16/vgg16_tf_fp32.xml",
-         "vgg-19/VGG_ILSVRC_19_layers_fp32.xml",
-         "yolo-full/yolo-full_fp32.xml",
-         "yolo-tiny-internal/yolo-tiny_fp32.xml",
-         "yolo_v2/YoloV2_fp32.xml",
-         "yolo_tiny_v1/tiny_yolo_v1_fp32.xml",
-         "pvanet-reid/PVANET_Reid_fp32.xml",
-         "tsrnet/tsrnet_fp32.xml",
-         "pvanet-reid-simplifier/reid_pva_fp32.xml",
-         "eu-speed-limit-net/speednet_eu_fp32.xml",
-         "dlia_sandbox/conv_multi_out/conv_multi_out_fp32.xml",
-         "dlia_sandbox/conv_conv_fc_med/conv_conv_fc_med_fp32.xml",
-         "dlia_sandbox/conv_pool_relu_fc_med/conv_pool_relu_fc_med_fp32.xml",
-         "dlia_sandbox/eltwise/eltwise_fp32.xml",
-         "dlia_sandbox/concat/concat_straight_order_fp32.xml",
-         "dlia_sandbox/concat/concat_revert_order_fp32.xml",
-         "SqueezeNet_v1.1/SqueezeNet_v1.1_modified_fp32.xml",
-         "SqueezeNet_v1.1/SqueezeNet_v1.1_fp32.xml",
-         "squeezenet_ssd/snssd_a_1_2_backbone_fp32.xml",
-         "dlia_sandbox/reid_stuff_test/ReID_stuff_test_fp32.xml",
-         "dlia_sandbox/multi_fc_output_test/Multi_FC_output_test_fp32.xml",
-         "dlia_sandbox/relu_test/relu_test_fp32.xml",
-         "MobileNet/mobilenet_fp32.xml",
-         "dlia_sandbox/dilated_convolution/dilated_convolution_fp32.xml",
-         "dlia_sandbox/scale_tests/scale_test_0_fp32.xml",
-         "dlia_sandbox/scale_tests/scale_test_1_fp32.xml",
-         "dlia_sandbox/scale_tests/scale_test_2_fp32.xml",
-         "dlia_sandbox/scale_tests/scale_test_3_fp32.xml",
-         "dlia_sandbox/two_fc_out_from_conv/two_fc_out_from_conv_fp32.xml",
-         "dlia_sandbox/pooling_with_pads/pooling_with_pads_fp32.xml",
-         "dlia_sandbox/fc_branching_test/fc_branching_test_fp32.xml",
-         "SqueezeNet_v1.1_pool_kernel_13/SqueezeNet_v1.1_pool_kernel_13_fp32.xml",
-         "dlia_sandbox/output_transform_with_slicing_test/output_transform_with_slicing_test_fp32.xml",
-        };
-
-INSTANTIATE_TEST_CASE_P(
-        CanCalculateOriginalShapesSimple, OriginalShapeInferModelsTests, ::testing::ValuesIn(simpleBatchModels));
-
-INSTANTIATE_TEST_CASE_P(
-        CanCalculateOriginalShapesAdvanced, OriginalShapeInferModelsTests, ::testing::ValuesIn(advancedBatchModels));
-
-INSTANTIATE_TEST_CASE_P(
-        SimpleBatch, SimpleBatchShapeInferModelsTests, ::testing::ValuesIn(simpleBatchModels));
diff --git a/inference-engine/tests_deprecated/unit/shape_infer/output_controller_test.cpp b/inference-engine/tests_deprecated/unit/shape_infer/output_controller_test.cpp
deleted file mode 100644
index c0bfc80..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <gmock/gmock-matchers.h>
-
-#include <shape_infer/ie_reshape_io_controllers.hpp>
-
-#include "unit_test_utils/mocks/shape_infer/mock_reshaper_launcher.hpp"
-#include "unit_test_utils/mocks/shape_infer/mock_ishape_infer_impl.hpp"
-
-using namespace InferenceEngine;
-using namespace InferenceEngine::details;
-using namespace ShapeInfer;
-using namespace ::testing;
-
-class OutputControllerTest : public ::testing::Test {
-public:
-
-    static const std::string TEST_NAME;
-    DataPtr notEmptyData = std::make_shared<Data>(TEST_NAME, Precision::UNSPECIFIED, Layout::C);
-    SizeVector inDims{1};
-public:
-    CNNLayerPtr createLayer(const std::string& name) {
-        LayerParams params;
-        params.name = name;
-        return std::make_shared<CNNLayer>(params);
-    }
-};
-
-const std::string OutputControllerTest::TEST_NAME = "TEST_NAME";
-
-TEST_F(OutputControllerTest, failedToCreateWithEmptyOutData) {
-    std::vector<DataPtr> inData;
-    EXPECT_THROW(OutputController({}, TEST_NAME), InferenceEngineException);
-}
-
-TEST_F(OutputControllerTest, failedToCreateWithNullOutData) {
-    EXPECT_THROW(OutputController({nullptr}, TEST_NAME), InferenceEngineException);
-}
-
-TEST_F(OutputControllerTest, canCreateOutputController) {
-    ASSERT_NO_THROW(OutputController({notEmptyData}, TEST_NAME));
-}
-
-TEST_F(OutputControllerTest, canGetChanges) {
-    OutputController controller({notEmptyData}, TEST_NAME);
-    std::vector<SizeVector> shapes;
-    ASSERT_NO_THROW(shapes = controller.getShapes(false));
-    ASSERT_EQ(1, shapes.size());
-}
-
-TEST_F(OutputControllerTest, canSetShapes) {
-    OutputController controller({notEmptyData}, TEST_NAME);
-    auto shapes = {inDims, inDims};
-    ASSERT_NO_THROW(controller.setShapes(shapes));
-    ASSERT_EQ(shapes.size(), controller.getShapes(false).size());
-}
-
-TEST_F(OutputControllerTest, noThrowOnGetWithExcessShapes) {
-    OutputController controller({notEmptyData}, TEST_NAME);
-    ASSERT_NO_THROW(controller.setShapes({inDims, inDims}));
-    ASSERT_FALSE(controller.getShapes(false).empty());
-}
-
-TEST_F(OutputControllerTest, throwOnPropagateWithNotEnoughShapes) {
-    OutputController controller({notEmptyData, notEmptyData}, TEST_NAME);
-    controller.setShapes({inDims});
-    ASSERT_THROW(controller.propagateShapes({}), InferenceEngineException);
-}
-
-TEST_F(OutputControllerTest, throwOnPropagateWithExcessShapes) {
-    OutputController controller({notEmptyData}, TEST_NAME);
-    controller.setShapes({inDims, inDims});
-    ASSERT_THROW(controller.propagateShapes({}), InferenceEngineException);
-}
-
-TEST_F(OutputControllerTest, throwOnPropagateWithEmptyLaunchers) {
-    OutputController controller({notEmptyData}, TEST_NAME);
-    notEmptyData->inputTo = {{{}, createLayer(TEST_NAME)}};
-    controller.setShapes({inDims});
-    ASSERT_THROW(controller.propagateShapes({}), InferenceEngineException);
-}
-
-TEST_F(OutputControllerTest, throwOnPropagateWithoutProperLauncher) {
-    OutputController controller({notEmptyData}, TEST_NAME);
-    notEmptyData->inputTo = {{{}, createLayer(TEST_NAME + "another")}};
-    controller.setShapes({inDims});
-    auto launcher = std::make_shared<MockReshapeLauncher>();
-    EXPECT_CALL(*launcher.get(), getLayerName()).WillOnce(Return(TEST_NAME));
-    ASSERT_THROW(controller.propagateShapes({launcher}), InferenceEngineException);
-}
-
-TEST_F(OutputControllerTest, canPropagateShapes) {
-    OutputController controller({notEmptyData}, TEST_NAME);
-    notEmptyData->inputTo = {{{}, createLayer(TEST_NAME)}};
-    controller.setShapes({inDims});
-    auto launcher = std::make_shared<MockReshapeLauncher>();
-    EXPECT_CALL(*launcher.get(), setShapeByName(inDims, TEST_NAME));
-    EXPECT_CALL(*launcher.get(), getLayerName()).WillOnce(Return(TEST_NAME));
-    controller.propagateShapes({launcher});
-}
-
-TEST_F(OutputControllerTest, throwOnApplyWithNotEnoughShapes) {
-    OutputController controller({notEmptyData, notEmptyData}, TEST_NAME);
-    controller.setShapes({inDims});
-    ASSERT_THROW(controller.applyChanges(), InferenceEngineException);
-}
-
-TEST_F(OutputControllerTest, throwOnApplyWithExcessShapes) {
-    OutputController controller({notEmptyData}, TEST_NAME);
-    auto shapes = {inDims, inDims};
-    controller.setShapes(shapes);
-    ASSERT_THROW(controller.applyChanges(), InferenceEngineException);
-}
-
-TEST_F(OutputControllerTest, canApplyChanges) {
-    OutputController controller({notEmptyData}, TEST_NAME);
-    controller.setShapes({inDims});
-    ASSERT_NO_THROW(controller.applyChanges());
-}
-
-TEST_F(OutputControllerTest, canResetShapes) {
-    OutputController controller({notEmptyData}, TEST_NAME);
-    controller.setShapes({inDims});
-    ASSERT_NO_THROW(controller.reset());
-    ASSERT_TRUE(controller.getShapes(false).begin()->empty());
-}
diff --git a/inference-engine/tests_deprecated/unit/shape_infer/reshape_launcher_test.cpp b/inference-engine/tests_deprecated/unit/shape_infer/reshape_launcher_test.cpp
deleted file mode 100644
index a28ba3c..0000000
+++ /dev/null
@@ -1,257 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <gmock/gmock-matchers.h>
-
-#include <shape_infer/ie_reshape_launcher.hpp>
-#include <blob_factory.hpp>
-
-#include "unit_test_utils/mocks/shape_infer/mock_ishape_infer_impl.hpp"
-#include "unit_test_utils/mocks/shape_infer/mock_reshaper_launcher.hpp"
-
-using namespace InferenceEngine;
-using namespace InferenceEngine::details;
-using namespace ShapeInfer;
-using namespace ::testing;
-
-class ReshapeLauncherTest : public ::testing::Test {
-protected:
-    void SetUp() override {
-        notEmptyData = getNotEmptyData();
-        impl = std::make_shared<MockIShapeInferImpl>();
-    };
-    std::vector<Blob::CPtr> getBlobs(const std::vector<SizeVector>& shapes) {
-        std::vector<Blob::CPtr> inBlobs;
-        for (auto const& dims : shapes) {
-            TensorDesc desc(Precision::FP32, dims, TensorDesc::getLayoutByDims(dims));
-            auto blob = make_blob_with_precision(desc);
-            inBlobs.push_back(blob);
-        }
-        return inBlobs;
-    }
-public:
-    StatusCode sts = GENERAL_ERROR;
-    ResponseDesc resp;
-    static const std::string TEST_NAME;
-    DataPtr notEmptyData;
-    MockIShapeInferImpl::Ptr impl;
-    SizeVector inDims{1};
-    SizeVector outDims{2};
-    std::map<std::string, std::string> changedParams{{TEST_NAME, TEST_NAME}};
-public:
-    DataPtr getNotEmptyData() {
-        return std::make_shared<Data>(TEST_NAME, Precision::FP32, Layout::C);
-    }
-};
-
-const std::string ReshapeLauncherTest::TEST_NAME = "TEST_NAME";
-
-TEST_F(ReshapeLauncherTest, failedToCreateWithNullLayer) {
-    const CNNLayer* layer = nullptr;
-    ASSERT_THROW(ReshapeLauncher launcher(layer, impl), InferenceEngineException);
-}
-
-TEST_F(ReshapeLauncherTest, failedToCreateWithNullInsData) {
-    CNNLayer layer({});
-    layer.outData = {notEmptyData};
-    ASSERT_THROW(ReshapeLauncher launcher(&layer, impl), InferenceEngineException);
-}
-
-TEST_F(ReshapeLauncherTest, failedToCreateWithExpiredInsData) {
-    CNNLayer layer({});
-    layer.outData = {notEmptyData};
-    DataWeakPtr expired = std::make_shared<Data>(TEST_NAME, Precision::UNSPECIFIED);
-    layer.insData = {expired};
-    ASSERT_THROW(ReshapeLauncher launcher(&layer, impl), InferenceEngineException);
-}
-
-TEST_F(ReshapeLauncherTest, failedToCreateWithEmptyOutData) {
-    CNNLayer layer({});
-    layer.insData = {notEmptyData};
-    ASSERT_THROW(ReshapeLauncher launcher(&layer, impl), InferenceEngineException);
-}
-
-TEST_F(ReshapeLauncherTest, failedToCreateWithNullOutData) {
-    CNNLayer layer({});
-    layer.insData = {notEmptyData};
-    layer.outData = {nullptr};
-    ASSERT_THROW(ReshapeLauncher launcher(&layer, impl), InferenceEngineException);
-}
-
-TEST_F(ReshapeLauncherTest, failedToCreateWithEmptyImpl) {
-    CNNLayer layer({});
-    layer.outData = {notEmptyData};
-    layer.insData = {notEmptyData};
-    impl = nullptr;
-    ASSERT_THROW(ReshapeLauncher launcher(&layer, impl), InferenceEngineException);
-}
-
-TEST_F(ReshapeLauncherTest, canCreateReshapeLauncher) {
-    CNNLayer layer({});
-    layer.outData = {notEmptyData};
-    layer.insData = {notEmptyData};
-    ReshapeLauncher launcher(&layer, impl);
-}
-
-TEST_F(ReshapeLauncherTest, throwOnReshapeWithNotEnoughShapes) {
-    CNNLayer layer({});
-    layer.outData = {notEmptyData};
-    layer.insData = {notEmptyData, notEmptyData};
-    ReshapeLauncher launcher(&layer, impl);
-
-    launcher.setShapeByName(inDims, TEST_NAME);
-    try {
-        launcher.reshape({});
-        FAIL() << "Reshape should have failed!";
-    } catch (...) {}
-}
-
-TEST_F(ReshapeLauncherTest, implIsCalledOnReshape) {
-    CNNLayer layer({});
-    layer.insData = {notEmptyData};
-    auto initializer = std::make_shared<MockReshapeLauncher::TestLauncherInitializer>();
-    ReshapeLauncher launcher(&layer, impl, initializer);
-    auto inputController = initializer->getInputController();
-    auto outputController = initializer->getOutputController();
-    std::vector<SizeVector> shapes{inDims};
-    auto blobs = getBlobs(shapes);
-    EXPECT_CALL(*inputController, setShapeByName(inDims, TEST_NAME));
-    EXPECT_CALL(*inputController, getBlobs(true)).WillOnce(Return(blobs));
-    EXPECT_CALL(*outputController, setShapes(_));
-    EXPECT_CALL(*outputController, propagateShapes(_));
-    EXPECT_CALL(*impl.get(), inferShapes(blobs, _, _, _, _)).WillOnce(Return(OK));
-    launcher.setShapeByName(inDims, TEST_NAME);
-    launcher.reshape({});
-}
-
-TEST_F(ReshapeLauncherTest, canApplyChanges) {
-    CNNLayer layer({});
-    layer.outData = {getNotEmptyData()};
-    layer.insData = {notEmptyData};
-    ReshapeLauncher launcher(&layer, impl);
-    launcher.setShapeByName(inDims, TEST_NAME);
-
-    EXPECT_CALL(*impl.get(), inferShapes(_, _, _, _, _)).
-            WillOnce(DoAll(
-            WithArg<3>(Invoke([&](std::vector<SizeVector>& outShape) { outShape.push_back(outDims); })), Return(OK)));
-    launcher.reshape({});
-    launcher.applyChanges(&layer);
-
-    auto insData = layer.insData;
-    auto outData = layer.outData;
-    ASSERT_EQ(1, insData.size());
-    ASSERT_EQ(1, outData.size());
-    auto ins0Data = insData[0].lock();
-    auto out0Data = outData[0];
-    ASSERT_NE(nullptr, ins0Data);
-    ASSERT_NE(nullptr, out0Data);
-    ASSERT_EQ(inDims, ins0Data->getDims());
-    ASSERT_EQ(outDims, out0Data->getDims());
-}
-
-TEST_F(ReshapeLauncherTest, throwOnApplyingWithNotEnoughOutput) {
-    CNNLayer layer({});
-    layer.outData = {notEmptyData};
-    layer.insData = {notEmptyData};
-    ReshapeLauncher launcher(&layer, impl);
-    launcher.setShapeByName(inDims, TEST_NAME);
-    EXPECT_CALL(*impl.get(), inferShapes(_, _, _, _, _)).
-            WillOnce(DoAll(
-            WithArg<3>(Invoke([&](std::vector<SizeVector>& outShape) {
-                outShape.push_back(outDims);
-                outShape.push_back(outDims);
-            })),
-            Return(OK)));
-    ASSERT_THROW(launcher.reshape({}), InferenceEngineException);
-    ASSERT_THROW(launcher.applyChanges(&layer), InferenceEngineException);
-}
-
-TEST_F(ReshapeLauncherTest, throwOnApplyingWithNotEnoughShapes) {
-    CNNLayer layer({});
-    layer.outData = {notEmptyData, notEmptyData};
-    layer.insData = {notEmptyData};
-    ReshapeLauncher launcher(&layer, impl);
-    launcher.setShapeByName(inDims, TEST_NAME);
-    EXPECT_CALL(*impl.get(), inferShapes(_, _, _, _, _)).
-            WillOnce(DoAll(
-            WithArg<3>(Invoke([&](std::vector<SizeVector>& outShape) { outShape.push_back(outDims); })),
-            Return(OK)));
-    ASSERT_THROW(launcher.reshape({}), InferenceEngineException);
-    ASSERT_THROW(launcher.applyChanges(&layer), InferenceEngineException);
-}
-
-TEST_F(ReshapeLauncherTest, canNotApplyForLayerWithAnotherName) {
-    CNNLayer layer1({});
-    layer1.outData = {notEmptyData};
-    layer1.insData = {notEmptyData};
-    CNNLayer layer2({});
-    layer2.name = TEST_NAME;
-    ReshapeLauncher launcher(&layer1, impl);
-    {  // to not fail because of empty input and output shapes
-        launcher.setShapeByName(inDims, TEST_NAME);
-        EXPECT_CALL(*impl.get(), inferShapes(_, _, _, _, _)).
-                WillOnce(DoAll(
-                WithArg<3>(Invoke([&](std::vector<SizeVector>& outShape) { outShape.push_back(outDims); })),
-                Return(OK)));
-        launcher.reshape({});
-    }
-    ASSERT_THROW(launcher.applyChanges(&layer2), InferenceEngineException);
-}
-
-TEST_F(ReshapeLauncherTest, DISABLED_canNotApplyForLayerWithAnotherParams) {
-    CNNLayer layer1({});
-    layer1.outData = {notEmptyData};
-    layer1.insData = {notEmptyData};
-    CNNLayer layer2({});
-    layer2.params = changedParams;
-    ReshapeLauncher launcher(&layer1, impl);
-    {  // to not fail because of empty input and output shapes
-        launcher.setShapeByName(inDims, TEST_NAME);
-        EXPECT_CALL(*impl.get(), inferShapes(_, _, _, _, _)).
-                WillOnce(DoAll(
-                WithArg<3>(Invoke([&](std::vector<SizeVector>& outShape) { outShape.push_back(outDims); })),
-                Return(OK)));
-        launcher.reshape({});
-    }
-    ASSERT_THROW(launcher.applyChanges(&layer2), InferenceEngineException);
-}
-
-TEST_F(ReshapeLauncherTest, canNotApplyForLayerWithEmptyInShapes) {
-    CNNLayer layer1({});
-    layer1.outData = {notEmptyData};
-    layer1.insData = {notEmptyData};
-    CNNLayer layer2({});
-    layer2.params = changedParams;
-    ReshapeLauncher launcher(&layer1, impl);
-    {  // to not fail because of inconsistent number of input/outputs
-        layer1.insData.clear();
-        layer1.outData.clear();
-    }
-    ASSERT_THROW(launcher.applyChanges(&layer2), InferenceEngineException);
-}
-
-TEST_F(ReshapeLauncherTest, canNotApplyForLayerWithEmptyOutShapes) {
-    CNNLayer layer1({});
-    layer1.outData = {notEmptyData};
-    layer1.insData = {notEmptyData};
-    CNNLayer layer2({});
-    layer2.params = changedParams;
-    ReshapeLauncher launcher(&layer1, impl);
-    {  // to not fail because of inconsistent number of input/outputs
-        launcher.setShapeByName(inDims, TEST_NAME);
-        layer1.outData.clear();
-    }
-    ASSERT_THROW(launcher.applyChanges(&layer2), InferenceEngineException);
-}
-
-TEST_F(ReshapeLauncherTest, canReset) {
-    auto initializer = std::make_shared<MockReshapeLauncher::TestLauncherInitializer>();
-    MockReshapeLauncher launcher(initializer);
-    auto inputController = initializer->getInputController();
-    auto outputController = initializer->getOutputController();
-    EXPECT_CALL(*inputController, reset()).Times(1);
-    EXPECT_CALL(*outputController, reset()).Times(1);
-    launcher.realReset();
-}
diff --git a/inference-engine/tests_deprecated/unit/shape_infer/reshaper_test.cpp b/inference-engine/tests_deprecated/unit/shape_infer/reshaper_test.cpp
deleted file mode 100644
index 46982a7..0000000
+++ /dev/null
@@ -1,218 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <gmock/gmock-matchers.h>
-
-#include <graph_tools/graph_test_base.hpp>
-#include <shape_infer/ie_reshaper.hpp>
-
-#include "unit_test_utils/mocks/mock_icnn_network.hpp"
-#include "unit_test_utils/mocks/shape_infer/mock_reshaper_launcher.hpp"
-#include "unit_test_utils/mocks/shape_infer/mock_ishape_infer_impl.hpp"
-#include "unit_test_utils/mocks/shape_infer/mock_shape_infer_extension.hpp"
-
-using namespace InferenceEngine;
-using namespace InferenceEngine::details;
-using namespace ShapeInfer;
-using namespace ::testing;
-using namespace ::GraphTest;
-
-class ReshaperTest : public GraphTestsBase {
-protected:
-    class TestLauncherCreator : public LauncherCreator {
-    public:
-        struct Mocks {
-            MockReshapeLauncher::Ptr launcher;
-            MockInputController* iController;
-            MockOutputController* oController;
-            MockIShapeInferImpl::Ptr shapeInferImpl;
-
-            Mocks(const MockReshapeLauncher::Ptr& _launcher, MockInputController* _iController,
-                  MockOutputController* _oController, const MockIShapeInferImpl::Ptr& _shapeInferImpl) :
-                    launcher(_launcher), iController(_iController), oController(_oController),
-                    shapeInferImpl(_shapeInferImpl) {}
-        };
-
-        ReshapeLauncher::Ptr
-        createNotInputLauncher(const CNNLayer* layer, const std::vector<IShapeInferExtensionPtr>& extensions) override {
-            return createLauncher(layer);
-        }
-
-        ReshapeLauncher::Ptr
-        createInputLauncher(const CNNLayer* layer, const std::vector<IShapeInferExtensionPtr>& extensions) override {
-            return createLauncher(layer);
-        }
-
-        std::vector<Mocks> getMocks() {
-            return _mocks;
-        }
-
-    private:
-        ReshapeLauncher::Ptr createLauncher(const CNNLayer* layer) {
-            auto initializer = std::make_shared<MockReshapeLauncher::TestLauncherInitializer>();
-            auto shapeInferImpl = std::make_shared<MockIShapeInferImpl>();
-            auto mockLauncher = std::make_shared<MockReshapeLauncher>(initializer, layer, shapeInferImpl);
-            _mocks.emplace_back(mockLauncher, initializer->getInputController(), initializer->getOutputController(),
-                                shapeInferImpl);
-            return mockLauncher;
-        }
-
-    private:
-        std::vector<Mocks> _mocks;
-    };
-
-    class TestEmptyLauncherCreator : public LauncherCreator {
-    public:
-        ReshapeLauncher::Ptr
-        createNotInputLauncher(const CNNLayer* layer, const std::vector<IShapeInferExtensionPtr>& extensions) override {
-            return std::make_shared<FakeReshapeLauncher>(layer, std::make_shared<MockIShapeInferImpl>());
-        }
-
-        ReshapeLauncher::Ptr
-        createInputLauncher(const CNNLayer* layer, const std::vector<IShapeInferExtensionPtr>& extensions) override {
-            return std::make_shared<InputReshapeLauncher>(layer, std::make_shared<MockIShapeInferImpl>());
-        }
-    };
-
-    void prepareInputs(InputsDataMap& inputsMap, int batchSize = 1) override {
-        GraphTestsBase::prepareInputs(inputsMap);
-        for (auto layer = lhsLayers.begin(); layer != lhsLayers.end(); layer++) {
-            if ((*layer)->insData.empty()) {
-                (*layer)->type = "Input";
-            }
-        }
-    }
-
-    void SetUp() override {
-        GraphTestsBase::SetUp();
-        impl = std::make_shared<MockIShapeInferImpl>();
-        CONNECT(0, 1);
-    };
-
-public:
-    StatusCode sts = GENERAL_ERROR;
-    ResponseDesc resp;
-    static const std::string TEST_NAME;
-    MockIShapeInferImpl::Ptr impl;
-    ReshaperPtr reshaper;
-};
-
-const std::string ReshaperTest::TEST_NAME = "TEST_NAME";
-
-TEST_F(ReshaperTest, canCreateReshaper) {
-    EXPECT_CALL(mockNet, getInputsInfo(_)).WillRepeatedly(WithArg<0>(Invoke([&](InputsDataMap& maps) {
-        prepareInputs(maps);
-    })));
-    Reshaper reshaper(mockNet);
-}
-
-TEST_F(ReshaperTest, throwOnAddNullExtension) {
-    EXPECT_CALL(mockNet, getInputsInfo(_)).WillRepeatedly(WithArg<0>(Invoke([&](InputsDataMap& maps) {
-        prepareInputs(maps);
-    })));
-    Reshaper reshaper(mockNet);
-    MockShapeInferExtension::Ptr extension;
-    ASSERT_THROW(reshaper.AddExtension(extension), InferenceEngineException);
-}
-
-TEST_F(ReshaperTest, canAddExtensionWithNotRegistered) {
-    EXPECT_CALL(mockNet, getInputsInfo(_)).WillRepeatedly(WithArg<0>(Invoke([&](InputsDataMap& maps) {
-        prepareInputs(maps);
-    })));
-    Reshaper reshaper(mockNet);
-    auto extension = std::make_shared<MockShapeInferExtension>();
-    EXPECT_CALL(*extension.get(), getShapeInferTypes(_, _, _)).WillOnce(DoAll(
-            WithArg<0>(Invoke([&](char**& type) {
-                type = new char*[1];
-                type[0] = new char[TEST_NAME.size() + 1];
-                std::copy(TEST_NAME.begin(), TEST_NAME.end(), type[0]);
-                type[0][TEST_NAME.size()] = '\0';
-            })),
-            WithArg<1>(Invoke([&](unsigned int& size) { size = 1; })),
-            Return(OK)));
-    reshaper.AddExtension(extension);
-}
-
-TEST_F(ReshaperTest, throwOnExtensionWithAlreadyRegisteredImpl) {
-    EXPECT_CALL(mockNet, getInputsInfo(_)).WillRepeatedly(WithArg<0>(Invoke([&](InputsDataMap& maps) {
-        prepareInputs(maps);
-    })));
-    Reshaper reshaper(mockNet);
-    auto extension = std::make_shared<MockShapeInferExtension>();
-    std::string conv_name = "Convolution";
-    EXPECT_CALL(*extension.get(), getShapeInferTypes(_, _, _)).WillOnce(DoAll(
-            WithArg<0>(Invoke([&](char**& type) {
-                type = new char*[2];
-                type[0] = new char[TEST_NAME.size() + 1];
-                std::copy(TEST_NAME.begin(), TEST_NAME.end(), type[0]);
-                type[0][TEST_NAME.size()] = '\0';
-                type[1] = new char[conv_name.size() + 1];
-                std::copy(conv_name.begin(), conv_name.end(), type[1]);
-                type[1][conv_name.size()] = '\0';
-            })),
-            WithArg<1>(Invoke([&](unsigned int& size) { size = 2; })),
-            Return(OK)));
-    ASSERT_THROW(reshaper.AddExtension(extension), InferenceEngineException);
-}
-
-TEST_F(ReshaperTest, canResetOnReshape) {
-    EXPECT_CALL(mockNet, getInputsInfo(_)).WillRepeatedly(WithArg<0>(Invoke([&](InputsDataMap& maps) {
-        prepareInputs(maps);
-    })));
-    auto testCreator = std::make_shared<TestLauncherCreator>();
-    Reshaper reshaper(mockNet, testCreator);
-    auto mocks = testCreator->getMocks();
-    auto inputMock = mocks[0];
-    EXPECT_CALL(*(inputMock.launcher).get(), setShapeByName(_, _));
-    for (auto it:mocks) {
-        EXPECT_CALL(*(it.launcher).get(), getLayerName()).WillRepeatedly(Return(it.launcher->realGetLayerName()));
-        EXPECT_CALL(*(it.launcher).get(), reset());
-        EXPECT_CALL(*(it.launcher).get(), reshape(_));
-        EXPECT_CALL(*(it.launcher).get(), applyChanges(_));
-    }
-
-    auto extension = std::make_shared<MockShapeInferExtension>();
-    EXPECT_CALL(*extension.get(), getShapeInferTypes(_, _, _)).WillOnce(DoAll(
-            WithArg<0>(Invoke([&](char**& type) {
-                type = new char*[1];
-                type[0] = new char[TEST_NAME.size() + 1];
-                std::copy(TEST_NAME.begin(), TEST_NAME.end(), type[0]);
-                type[0][TEST_NAME.size()] = '\0';
-            })),
-            WithArg<1>(Invoke([&](unsigned int& size) { size = 1; })),
-            Return(OK)));
-    reshaper.AddExtension(extension);
-
-    reshaper.run({{"0", {2}}});
-}
-
-TEST_F(ReshaperTest, canUpdateFakeImpl) {
-    EXPECT_CALL(mockNet, getInputsInfo(_)).WillRepeatedly(WithArg<0>(Invoke([&](InputsDataMap& maps) {
-        prepareInputs(maps);
-    })));
-    auto testCreator = std::make_shared<TestEmptyLauncherCreator>();
-    Reshaper reshaper(mockNet, testCreator);
-    auto newImpl = std::make_shared<MockIShapeInferImpl>();
-
-    const char* registered[] = {""};
-    auto extension = std::make_shared<MockShapeInferExtension>();
-    EXPECT_CALL(*extension.get(), getShapeInferTypes(_, _, _)).WillOnce(DoAll(
-            WithArg<0>(Invoke([&](char**& type) {
-                type = new char*[1];
-                type[0] = new char[1];
-                type[0][0] = '\0';
-            })),
-            WithArg<1>(Invoke([&](unsigned int& size) { size = 1; })),
-            Return(OK)));
-    EXPECT_CALL(*extension.get(), getShapeInferImpl(_, _, _)).WillOnce(DoAll(
-            WithArg<0>(Invoke([&](IShapeInferImpl::Ptr& impl) { impl = newImpl; })),
-            Return(OK)));
-    reshaper.AddExtension(extension);
-
-    EXPECT_CALL(*newImpl.get(), inferShapes(_, _, _, _, _)).
-            WillOnce(DoAll(
-            WithArg<3>(Invoke([&](std::vector<SizeVector>& outShape) { outShape.push_back({1, 2}); })), Return(OK)));
-    reshaper.run({{"0", {1, 2}}});
-}
index ebf32c7..1cb62b2 100644
@@ -35,18 +35,29 @@ function(build_with_lto)
         add_subdirectory(clDNN)
     endif()
 
-    add_subdirectory(pugixml)
+    function(ie_build_pugixml)
+        set(BUILD_TESTS_current ${BUILD_TESTS})
+        set(BUILD_TESTS OFF CACHE BOOL "Build tests" FORCE)
+        add_subdirectory(pugixml)
+        set(BUILD_TESTS ${BUILD_TESTS_current} CACHE BOOL "Build tests" FORCE)
+    endfunction()
+
+    ie_build_pugixml()
     add_subdirectory(stb_lib)
     add_subdirectory(ade)
     add_subdirectory(fluid/modules/gapi)
 
-    # developer package
-
     target_include_directories(pugixml INTERFACE "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/pugixml/src>")
 
+    set_target_properties(pugixml ade fluid stb_image
+                          PROPERTIES FOLDER thirdparty)
+
+    # developer package
+
     ie_developer_export_targets(ade fluid pugixml)
     if(TARGET pugixml_mt)
         ie_developer_export_targets(pugixml_mt)
+        set_target_properties(pugixml_mt PROPERTIES FOLDER thirdparty)
     endif()
 endfunction()
 
index ba41dd2..9c88a38 100644
@@ -67,7 +67,8 @@ enum class activation_func {
     sign,                 // val > 0: 1; val < 0: -1; val == 0: 0
     softplus,             // ln(exp(val) + 1)
     softsign,             // (val/(1+|val|))
-    swish                 // (val*sigmoid(val))
+    swish,                // (val*sigmoid(val))
+    gelu                  // (0.5*val*(1 + erf(val / sqrt(2))))
 };
 
 /// @brief activation gradient functions
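For reference, the gelu entry added above computes the Gaussian Error Linear Unit. A minimal host-side sketch of the same formula (the helper name is illustrative, not part of clDNN):

    #include <cmath>

    // GELU(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
    float gelu_reference(float val) {
        return 0.5f * val * (1.0f + std::erf(val / std::sqrt(2.0f)));
    }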
index 1fe8b67..032e098 100644
@@ -28,9 +28,14 @@ namespace cldnn {
 
 /// @brief Performs elementwise select operation on two input primitives with selector primitive (mask)
 /// @notes
-/// - both inputs have to have equal sizes in all dimensions
 /// - format of both inputs has to be the same
-/// - mask primitive input have to be in shape that allows broadcasting it to the inputs shape
+/// - if broadcast_type=="numpy", both inputs have to be broadcastable to each other in a two-way
+/// (multidirectional) sense and mask input has to be broadcastable in a one-way (unidirectional)
+/// sense to the result of this two-way (multidirectional) broadcasting of both inputs to each other.
+/// - if broadcast_type=="none", all inputs (including mask) must have the same shape.
+///
+/// If broadcast_type=="numpy", broadcasting follows numpy-style (ONNX) rules described here:
+/// https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md
 struct select : public primitive_base<select> {
     CLDNN_DECLARE_PRIMITIVE(select)
 
@@ -39,12 +44,21 @@ struct select : public primitive_base<select> {
     /// @param mask Input primitive id with values needed for select computation.
     /// @param input Input primitive id.
     /// @param input2 Second input primitive id.
+    /// @param output_padding Output data padding information.
+    /// @param broadcast_type String which determines broadcasting type:
+    /// "numpy" means that numpy-tyle (ONNX) broadcasting is allowed,
+    /// "none" means that all inputs need to have the same shape.
     select(const primitive_id& id,
            const primitive_id& mask,
            const primitive_id& input,
            const primitive_id& input2,
-           const padding& output_padding = padding())
-        : primitive_base(id, {mask, input, input2}, output_padding) {}
+           const padding& output_padding = padding(),
+           const std::string& broadcast_type = "numpy")
+        : primitive_base(id, {mask, input, input2}, output_padding),
+          broadcast_type(broadcast_type) {}
+
+    /// @brief String which determines broadcast type.
+    std::string broadcast_type;
 };
 /// @}
 /// @}
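A minimal usage sketch of the extended constructor, assuming a cldnn::topology named topology and hypothetical primitive ids:

    // With "numpy", the two data inputs are first broadcast to each other
    // (two-way), then the mask is broadcast one-way onto the common shape.
    topology.add(cldnn::select("sel", "mask", "input_a", "input_b",
                               cldnn::padding(), "numpy"));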
index 30650ee..b662f01 100644
@@ -171,6 +171,7 @@ struct format {
         os_is_y_x8_osv8_isv4_swizzled_by_4,           ///< format for weights for 1x1 MMAD convolutions
         os_is_yx_osv16_isv4,                          ///< format for weights for IMAD convolutions
         os_is_yx_osv32_isv4_swizzled_by_2,            ///< format for weights for IMAD convolutions
+        os_is_yx_osv32_isv4,                          ///< format for weights for IMAD convolutions
         os_is_yx_osv32_isv32p,                        ///< format for weights for binary convolutions
         lstm_weights_dio,                             ///< dynamic_lstm, direction,
                                                       ///< then IO (I - input size, O - 4 * hidden_size)
@@ -258,7 +259,8 @@ struct format {
                 { os_is_y_x8_osv8_isv4,                        { 1, 1, 2, 0, 0, "byxf",   "bfxy?",      {}}},
                 { os_is_y_x8_osv8_isv4_swizzled_by_4,          { 1, 1, 2, 0, 0, "byxf",   "bfxy?",      {}}},
                 { os_is_yx_osv16_isv4,                         { 1, 1, 2, 0, 0, "bfxy",   "bfxy?",      {{0, 16}, {1, 4}}}},
-                { os_is_yx_osv32_isv4_swizzled_by_2,           { 1, 1, 2, 0, 0, "bfxy",   "bfxy?",      {{0, 16}, {1, 4}}}},
+                { os_is_yx_osv32_isv4_swizzled_by_2,           { 1, 1, 2, 0, 0, "bfxy",   "bfxy?",      {{0, 32}, {1, 4}}}},
+                { os_is_yx_osv32_isv4,                         { 1, 1, 2, 0, 0, "bfxy",   "bfxy?",      {{0, 32}, {1, 4}}}},
                 { os_is_yx_osv32_isv32p,                       { 1, 1, 1, 0, 0, "bfxy",   "bfxy?",      {}}},
                 { os_is_zyx_isv16_osv16,                       { 1, 1, 3, 0, 0, "bfzyx",  "bfxyz",      {{0, 16}, {1, 16}}}},
                 { is_os_zyx_osv16_isv16,                       { 1, 1, 3, 0, 0, "fbzyx",  "bfxyz",      {{0, 16}, {1, 16}}}},
index b2aea93..9836cf6 100644
@@ -156,7 +156,8 @@ enum class ActivationFunction {
     SIGN,
     SOFTPLUS,
     SOFTSIGN,
-    SWISH
+    SWISH,
+    GELU
 };
 
 ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
index a8028ca..cb29d22 100644
@@ -95,6 +95,7 @@ WeightsTensor::WeightsChannelArray WeightsTensor::weightsChannelArray {{
     { WeightsLayout::os_is_y_x8_osv8_isv4_swizzled_by_4,          {  0,  1, -1,   2,   3, -1, -1, -1 } },
     { WeightsLayout::os_is_yx_osv16_isv4,                         {  0,  1, -1,   2,   3, -1, -1, -1 } },
     { WeightsLayout::os_is_yx_osv32_isv4_swizzled_by_2,           {  0,  1, -1,   2,   3, -1, -1, -1 } },
+    { WeightsLayout::os_is_yx_osv32_isv4,                         {  0,  1, -1,   2,   3, -1, -1, -1 } },
     { WeightsLayout::oizyx,                                       {  0,  1,  2,   3,   4, -1, -1, -1 } },
     { WeightsLayout::os_is_yx_osv32_isv32p,                       {  0,  1, -1,   2,   3, -1, -1, -1 } },
     { WeightsLayout::os_is_zyx_isv16_osv16,                       {  0,  1,  2,   3,   4, -1, -1, -1 } },
@@ -511,6 +512,7 @@ NDims WeightsTensor::GetSimpleDims(const std::vector<size_t>& d, WeightsLayout l
             newDims[3] = RoundUp(newDims[3], 16);
             break;
         case os_is_yx_osv32_isv4_swizzled_by_2:
+        case os_is_yx_osv32_isv4:
             assert(newDims.size() == 4);
             newDims[2] = RoundUp(newDims[2], 4);
             newDims[3] = RoundUp(newDims[3], 32);
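As a worked example of the rounding above (sizes illustrative): for the new os_is_yx_osv32_isv4 layout, 3 input channels are padded up to a multiple of 4 and 50 output channels up to a multiple of 32:

    #include <cstddef>

    // Mirrors the RoundUp used above: round x up to the nearest multiple of m.
    static std::size_t round_up(std::size_t x, std::size_t m) {
        return ((x + m - 1) / m) * m;
    }
    // round_up(3, 4)   == 4   -> padded input channels (isv4)
    // round_up(50, 32) == 64  -> padded output channels (osv32)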
index 05979ae..4932bbc 100644
@@ -119,6 +119,7 @@ enum WeightsLayout {
                                          // 1,5...
     os_is_yx_osv16_isv4,                 // swizzled weights for convolution using IMAD
     os_is_yx_osv32_isv4_swizzled_by_2,   //  weights for bfyx -> b_fs_yx_fsv32 convolution using IMAD with swizzled ofm (0, 2, 4..), (1, 3, 5...)
+    os_is_yx_osv32_isv4,                 //  weights for bfyx -> b_fs_yx_fsv{32,16} convolution using IMAD
     oizyx,
     os_is_yx_osv32_isv32p,  // 2 blocks: 32 packed binary in channels and 32 output channels
     os_is_osv32_isv32_swizzled_by_4,     // for weights for 1x1 IMAD convolution
index 05f590c..5ea9e20 100644
@@ -58,11 +58,18 @@ ConvolutionKernel_b_fs_yx_fsv16::AutoTuneOption ConvolutionKernel_b_fs_yx_fsv16:
 ParamsKey ConvolutionKernel_b_fs_yx_fsv16::GetSupportedKey() const {
     ParamsKey k;
     k.EnableInputDataType(Datatype::F16);
-    k.EnableOutputDataType(Datatype::F16);
-    k.EnableInputWeightsType(WeightsType::F16);
     k.EnableInputDataType(Datatype::F32);
+
+    k.EnableOutputDataType(Datatype::F16);
     k.EnableOutputDataType(Datatype::F32);
+    k.EnableOutputDataType(Datatype::INT8);
+    k.EnableOutputDataType(Datatype::UINT8);
+
+    k.EnableInputWeightsType(WeightsType::F16);
     k.EnableInputWeightsType(WeightsType::F32);
+
+    k.EnableDifferentTypes();
+
     k.EnableInputLayout(DataLayout::b_fs_yx_fsv16);
     k.EnableOutputLayout(DataLayout::b_fs_yx_fsv16);
     k.EnableTensorOffset();
@@ -141,18 +148,21 @@ bool ConvolutionKernel_b_fs_yx_fsv16::Validate(const Params& p, const optional_p
     if (input.Feature().pad.before % feature_block_size != 0 || output.Feature().pad.before % feature_block_size != 0)
         return false;
 
+    if (!params.bias.empty() && params.bias[0].GetDType() != input.GetDType())
+        return false;
+
     return true;
 }
 
 JitConstants ConvolutionKernel_b_fs_yx_fsv16::GetJitConstants(const convolution_params& params,
-                                                         const DispatchData& runInfo) const {
+                                                              const DispatchData& runInfo) const {
     auto input = params.inputs[0];
     auto output = params.output;
     auto jit = Parent::GetJitConstants(params, runInfo);
 
     auto blockWidth = runInfo.cldnnStyle.blockWidth;
     if (!params.fused_ops.empty()) {
-        auto input_dt = GetUnitType(params);
+        auto input_dt = GetActivationType(params);
         FusedOpsConfiguration conf_vec = { "_VEC",
                                            {"b", "(f_block*16)", "y", "x"},
                                            "dst",
index f23e282..d216750 100644 (file)
@@ -431,6 +431,10 @@ Datatype ConvolutionKernelBase::GetActivationType(const convolution_params& para
     if (params.quantization != QuantizationType::NONE || quantized_inputs || quantized_weights)
         return Datatype::F32;
 
+    if (params.output.GetDType() == Datatype::UINT8 ||
+        params.output.GetDType() == Datatype::INT8)
+        return Datatype::F32;
+
     return GetUnitType(params);
 }
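Returning F32 whenever the output is INT8/UINT8 keeps the fused post-op chain in float and defers the narrowing to a single final requantization step. A hedged sketch of that last conversion (the names and the affine form are illustrative, not clDNN's actual code):

#include <algorithm>
#include <cmath>
#include <cstdint>

// Illustrative float -> int8 requantization with saturation.
static int8_t requantize(float acc, float scale, float zero_point) {
    const float q = std::round(acc * scale + zero_point);
    return static_cast<int8_t>(std::max(-128.0f, std::min(127.0f, q)));
}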
 
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "convolution_kernel_mmad_bfyx_b_fs_yx_fsv32.h"
+#include "convolution_kernel_mmad_bfyx_to_b_fs_yx_fsv32.h"
 #include <vector>
 #include <utility>
 #include <string>
@@ -21,7 +21,9 @@
 
 namespace kernel_selector {
 
-ParamsKey ConvolutionKernel_mmad_bfyx_b_fs_yx_fsv32::GetSupportedKey() const {
+static const size_t sub_group_size = 16;
+
+ParamsKey ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv32::GetSupportedKey() const {
     ParamsKey k;
     k.EnableInputDataType(Datatype::INT8);
     k.EnableInputDataType(Datatype::UINT8);
@@ -31,11 +33,11 @@ ParamsKey ConvolutionKernel_mmad_bfyx_b_fs_yx_fsv32::GetSupportedKey() const {
     k.EnableInputWeightsType(WeightsType::INT8);
     k.EnableInputLayout(DataLayout::bfyx);
     k.EnableOutputLayout(DataLayout::b_fs_yx_fsv32);
+    k.EnableOutputLayout(DataLayout::b_fs_yx_fsv16);
     k.EnableTensorOffset();
     k.EnableTensorPitches();
     k.EnableDilation();
     k.EnableBiasPerFeature();
-    k.EnableBiasPerOutput();
     k.EnableNonBiasTerm();
     k.EnableBatching();
     k.EnableQuantization(QuantizationType::SYMMETRIC);
@@ -45,17 +47,19 @@ ParamsKey ConvolutionKernel_mmad_bfyx_b_fs_yx_fsv32::GetSupportedKey() const {
     k.EnableDifferentTypes();
     k.EnableDifferentInputWeightsTypes();
     k.DisableTuning();
+    k.EnableSubGroup();
+    k.EnableSubGroupShort();
     return k;
 }
 
-bool ConvolutionKernel_mmad_bfyx_b_fs_yx_fsv32::Validate(const Params &p, const optional_params &o) const {
+bool ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv32::Validate(const Params &p, const optional_params &o) const {
     if (!Parent::Validate(p, o)) {
         return false;
     }
 
     auto params = dynamic_cast<const convolution_params&>(p);
 
-    if (params.inputs[0].Feature().v != 3)
+    if (params.inputs[0].Feature().v != 3 && params.inputs[0].Feature().v != 4)
         return false;
 
     if (params.output.Feature().v % 2 != 0)
@@ -69,7 +73,7 @@ bool ConvolutionKernel_mmad_bfyx_b_fs_yx_fsv32::Validate(const Params &p, const
     return true;
 }
 
-ConvolutionKernel_mmad_bfyx_b_fs_yx_fsv32::AutoTuneOption ConvolutionKernel_mmad_bfyx_b_fs_yx_fsv32::GetAutoTuneOptions(const Params &p,
+ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv32::AutoTuneOption ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv32::GetAutoTuneOptions(const Params &p,
                                                                                                                         int autoTuneIndex) const {
     if ((autoTuneIndex >= 0) && (autoTuneIndex < static_cast<int>(autoTuneOptions.size()))) {
         return autoTuneOptions[autoTuneIndex];
@@ -103,7 +107,9 @@ ConvolutionKernel_mmad_bfyx_b_fs_yx_fsv32::AutoTuneOption ConvolutionKernel_mmad
         bytes_per_simd += 4 * 9;
         return bytes_per_simd * simd;
     };
-    static const size_t register_bytes = 128 * 32;
+    static const size_t registers_count = 128;
+    static const size_t register_byte_size = 32;
+    static const size_t register_bytes = registers_count * register_byte_size;
     static const size_t max_register_bytes = register_bytes * 3 / 4;
     static const size_t simd_size = 16;
     if (output.LogicalSize() > 49 * 1024 && estimateRegUsage(8, simd_size) <= max_register_bytes) {
@@ -115,8 +121,25 @@ ConvolutionKernel_mmad_bfyx_b_fs_yx_fsv32::AutoTuneOption ConvolutionKernel_mmad
     return option;
 }
 
-ConvolutionKernelBase::DispatchData ConvolutionKernel_mmad_bfyx_b_fs_yx_fsv32::SetDefault(const convolution_params &cp,
-                                                                                          int autoTuneIndex) const {
+static size_t get_slm_byte_size(const convolution_params &cp, size_t lws, size_t block_size) {
+    return (cp.stride.x * (lws * block_size - 1) + (cp.weights.X().v - 1) * cp.dilation.x + 1) *
+            cp.weights.Y().v * sizeof(int32_t);
+}
+
+static size_t get_lws(const convolution_params &cp, size_t blocks_count, size_t block_size, size_t max_lws) {
+    while (max_lws > 1) {
+        if (blocks_count % max_lws == 0) {
+            if (get_slm_byte_size(cp, max_lws, block_size) < cp.engineInfo.maxLocalMemSize)
+                return max_lws;
+        }
+        max_lws--;
+    }
+
+    return 1;
+}
+
+ConvolutionKernelBase::DispatchData ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv32::SetDefault(const convolution_params &cp,
+                                                                                             int autoTuneIndex) const {
     DispatchData runInfo = ConvolutionKernelBase::SetDefault(cp);
 
     auto tuneOptions = GetAutoTuneOptions(cp, autoTuneIndex);
@@ -126,49 +149,66 @@ ConvolutionKernelBase::DispatchData ConvolutionKernel_mmad_bfyx_b_fs_yx_fsv32::S
 
     runInfo.efficiency = FORCE_PRIORITY_3;
 
+    const size_t max_lws = std::max((size_t)1, cp.engineInfo.maxWorkGroupSize / sub_group_size);
     runInfo.gws0 = Align(cp.output.Feature().v, 32) / 2;
-    runInfo.gws1 = CeilDiv(cp.output.X().v, runInfo.cldnnStyle.blockWidth) * cp.output.Y().v;
-    runInfo.gws2 = cp.output.Batch().v;
+    runInfo.gws1 = CeilDiv(cp.output.X().v, runInfo.cldnnStyle.blockWidth);
+    runInfo.gws2 = cp.output.Batch().v * cp.output.Y().v;
 
-    runInfo.lws0 = 16;
-    runInfo.lws1 = 1;
+    runInfo.lws0 = sub_group_size;
+    runInfo.lws1 = get_lws(cp, runInfo.gws1, tuneOptions.blockWidth, max_lws);
     runInfo.lws2 = 1;
 
     return runInfo;
 }
 
-JitConstants ConvolutionKernel_mmad_bfyx_b_fs_yx_fsv32::GetJitConstants(const convolution_params &params,
-                                                                        const DispatchData &runInfo) const {
+JitConstants ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv32::GetJitConstants(const convolution_params &params,
+                                                                           const DispatchData &runInfo) const {
     auto jit = Parent::GetJitConstants(params, runInfo);
 
     jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", runInfo.lws0));
+    jit.AddConstant(MakeJitConstant("LWS0", runInfo.lws0));
+    jit.AddConstant(MakeJitConstant("LWS1", runInfo.lws1));
+    jit.AddConstant(MakeJitConstant("LWS2", runInfo.lws2));
     jit.AddConstant(MakeJitConstant("OSV", 32));
-    jit.AddConstant(MakeJitConstant("ISV", 32));
     jit.AddConstant(MakeJitConstant("X_BLOCK_SIZE", runInfo.cldnnStyle.blockWidth));
-    jit.AddConstant(MakeJitConstant("IFM_BLOCKS", CeilDiv(params.inputs[0].Feature().v, 32)));
     auto input = params.inputs[0];
     auto output = params.output;
     auto blockWidth = runInfo.cldnnStyle.blockWidth;
+    size_t slm_line_size = params.stride.x * (runInfo.lws1 * blockWidth - 1) + (params.weights.X().v - 1) * params.dilation.x + 1;
+    size_t slm_chunk_size = slm_line_size / runInfo.lws1;
+    size_t slm_tail = slm_line_size % runInfo.lws1;
+    size_t slm_line_aligned = slm_chunk_size * runInfo.lws1 + Align(slm_tail, sub_group_size);
+
     size_t input_line_size = std::min(params.stride.x * (blockWidth - 1) + (params.weights.X().v - 1) * params.dilation.x + 1,
                                       input.X().v + input.X().pad.Total());
 
-    jit.AddConstant(MakeJitConstant("OUTPUT_X_BLOCK_SIZE", blockWidth));
     jit.AddConstant(MakeJitConstant("INPUT_LINE_SIZE", input_line_size));
+    jit.AddConstant(MakeJitConstant("OUTPUT_X_BLOCK_SIZE", blockWidth));
+    jit.AddConstant(MakeJitConstant("GROUP_SIZE", blockWidth * runInfo.lws1));
+    jit.AddConstant(MakeJitConstant("SLM_LINE_SIZE", slm_line_aligned));
+    jit.AddConstant(MakeJitConstant("SLM_CHUNK_SIZE", slm_chunk_size));
+    jit.AddConstant(MakeJitConstant("SLM_TAIL", slm_tail));
 
     jit.Merge(MakeTypeJitConstants(GetPackedInputType(params), "PACKED_IN"));
     jit.Merge(MakeTypeJitConstants(GetPackedType(params.output.GetDType(), 2), "PACKED_OUT"));
 
     if (!params.fused_ops.empty()) {
         auto input_dt = GetActivationType(params);
-        FusedOpsConfiguration conf0 = {"_0", {"b", "(fg*32 + 2*lid+0)", "y", "(x+i)"}, "res0", input_dt, 1};
-        FusedOpsConfiguration conf1 = {"_1", {"b", "(fg*32 + 2*lid+1)", "y", "(x+i)"}, "res1", input_dt, 1};
-        jit.Merge(MakeFusedOpsJitConstants(params, {conf0, conf1}));
+        if (GetPreferredWeightsLayout(params) == WeightsLayout::os_is_yx_osv32_isv4) {
+            FusedOpsConfiguration conf0 = {"_0", {"b", "(fg*32 + lid)", "y", "(x+i)"}, "res0", input_dt, 1};
+            FusedOpsConfiguration conf1 = {"_1", {"b", "(fg*32 + lid+16)", "y", "(x+i)"}, "res1", input_dt, 1};
+            jit.Merge(MakeFusedOpsJitConstants(params, {conf0, conf1}));
+        } else {
+            FusedOpsConfiguration conf0 = {"_0", {"b", "(fg*32 + 2*lid + 0)", "y", "(x+i)"}, "res0", input_dt, 1};
+            FusedOpsConfiguration conf1 = {"_1", {"b", "(fg*32 + 2*lid + 1)", "y", "(x+i)"}, "res1", input_dt, 1};
+            jit.Merge(MakeFusedOpsJitConstants(params, {conf0, conf1}));
+        }
     }
 
     return jit;
 }
 
-KernelsData ConvolutionKernel_mmad_bfyx_b_fs_yx_fsv32::GetKernelsData(const Params &params, const optional_params &options) const {
+KernelsData ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv32::GetKernelsData(const Params &params, const optional_params &options) const {
     KernelsData kd = GetTunedKernelsDataByIndex(params, options);
     if (!kd.empty()) {
         kd[0].estimatedTime = FORCE_PRIORITY_2;
@@ -177,8 +217,8 @@ KernelsData ConvolutionKernel_mmad_bfyx_b_fs_yx_fsv32::GetKernelsData(const Para
     return kd;
 }
 
-KernelsData ConvolutionKernel_mmad_bfyx_b_fs_yx_fsv32::GetKernelsDataForAutoTune(const Params &params,
-                                                                                 const optional_params &options) const {
+KernelsData ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv32::GetKernelsDataForAutoTune(const Params &params,
+                                                                                    const optional_params &options) const {
     if (!Validate(params, options)) {
         return {};
     }
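The get_slm_byte_size/get_lws helpers introduced above size the shared-local-memory line from the stride, filter footprint and X-block count, then pick the largest LWS that divides the block count and still fits engineInfo.maxLocalMemSize. A standalone restatement with made-up parameters (all numbers below are hypothetical):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Mirrors get_slm_byte_size() from the patch, with scalar inputs.
static size_t slm_bytes(size_t stride_x, size_t filter_x, size_t filter_y,
                        size_t dilation_x, size_t lws, size_t block_size) {
    const size_t line = stride_x * (lws * block_size - 1) + (filter_x - 1) * dilation_x + 1;
    return line * filter_y * sizeof(int32_t);
}

int main() {
    // Hypothetical 3x3 filter, stride 2, dilation 1, block width 8, lws 4:
    // line = 2 * (4*8 - 1) + (3 - 1) * 1 + 1 = 65 elements -> 65 * 3 * 4 = 780 bytes.
    std::printf("%zu bytes of SLM\n", slm_bytes(2, 3, 3, 1, 4, 8));
    return 0;
}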
 
 namespace kernel_selector {
 
-class ConvolutionKernel_mmad_bfyx_b_fs_yx_fsv32 : public ConvolutionKernelBase {
+class ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv32 : public ConvolutionKernelBase {
 public:
     using Parent = ConvolutionKernelBase;
-    ConvolutionKernel_mmad_bfyx_b_fs_yx_fsv32() : ConvolutionKernelBase("convolution_gpu_mmad_bfyx_b_fs_yx_fsv32") {}
-    virtual ~ConvolutionKernel_mmad_bfyx_b_fs_yx_fsv32() {}
+    ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv32() : ConvolutionKernelBase("convolution_gpu_mmad_bfyx_to_b_fs_yx_fsv32") {}
+    virtual ~ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv32() {}
 
     KernelsData GetKernelsData(const Params& params, const optional_params& options) const override;
     KernelsData GetKernelsDataForAutoTune(const Params& params, const optional_params& options) const override;
@@ -35,8 +35,13 @@ protected:
     bool Validate(const Params& p, const optional_params& o) const override;
     JitConstants GetJitConstants(const convolution_params& params, const DispatchData& kd) const override;
     DispatchData SetDefault(const convolution_params& arg, int autoTuneIndex = -1) const override;
-    WeightsLayout GetPreferredWeightsLayout(const convolution_params &) const override {
-        return WeightsLayout::os_is_yx_osv32_isv4_swizzled_by_2;
+    WeightsLayout GetPreferredWeightsLayout(const convolution_params &p) const override {
+        if (p.output.GetDType() == Datatype::F16 || p.output.GetDType() == Datatype::F32 ||
+            p.output.GetLayout() == DataLayout::b_fs_yx_fsv16) {
+            return WeightsLayout::os_is_yx_osv32_isv4;
+        } else {
+            return WeightsLayout::os_is_yx_osv32_isv4_swizzled_by_2;
+        }
     }
     std::vector<FusedOpType> GetSupportedFusedOps() const override {
         return { FusedOpType::ELTWISE,
index 4e069df..2e0b864 100644 (file)
@@ -68,7 +68,7 @@
 #include "convolution_kernel_mmad_bfyx_to_b_fs_yx_fsv4.h"
 #include "convolution_kernel_mmad_b_fs_yx_fsv32.h"
 #include "convolution_kernel_mmad_b_fs_yx_fsv32_dw.h"
-#include "convolution_kernel_mmad_bfyx_b_fs_yx_fsv32.h"
+#include "convolution_kernel_mmad_bfyx_to_b_fs_yx_fsv32.h"
 #include "convolution_kernel_bfyx_to_bs_fs_yx_bsv16_fsv16.h"
 #include "convolution_kernel_b_fs_yx_fsv16_imad_1x1.h"
 #include "convolution_kernel_b_fs_yx_fsv16_imad_3x3.h"
@@ -155,7 +155,7 @@ convolution_kernel_selector::convolution_kernel_selector() {
     // b_fs_yx_fsv32 kernels
     Attach<ConvolutionKernel_mmad_b_fs_yx_fsv32>();
     Attach<ConvolutionKernel_mmad_b_fs_yx_fsv32_dw>();
-    Attach<ConvolutionKernel_mmad_bfyx_b_fs_yx_fsv32>();
+    Attach<ConvolutionKernel_mmad_bfyx_to_b_fs_yx_fsv32>();
     Attach<ConvolutionKernel_b_fs_yx_fsv_16_32_imad_dw>();
 }
 
index 5afb524..6457cf8 100644 (file)
@@ -23,15 +23,23 @@ ParamsKey LRNKernelAcrossChannelMultipleFeatures::GetSupportedKey() const {
     k.EnableInputDataType(Datatype::F32);
     k.EnableOutputDataType(Datatype::F16);
     k.EnableOutputDataType(Datatype::F32);
+    k.EnableOutputDataType(Datatype::INT8);
+    k.EnableOutputDataType(Datatype::UINT8);
     k.EnableInputLayout(DataLayout::bfyx);
+    k.EnableInputLayout(DataLayout::b_fs_yx_fsv4);
+    k.EnableInputLayout(DataLayout::b_fs_yx_fsv16);
     k.EnableInputLayout(DataLayout::yxfb);
     k.EnableOutputLayout(DataLayout::bfyx);
+    k.EnableOutputLayout(DataLayout::b_fs_yx_fsv4);
+    k.EnableOutputLayout(DataLayout::b_fs_yx_fsv16);
     k.EnableOutputLayout(DataLayout::yxfb);
+    k.EnableLRNMode(LRNMode::ACROSS_CHANNEL);
+    k.EnableLRNKernelDividerMode(KernelDividerMode::FIXED);
     k.EnableTensorOffset();
     k.EnableTensorPitches();
     k.EnableBatching();
-    k.EnableLRNMode(LRNMode::ACROSS_CHANNEL);
-    k.EnableLRNKernelDividerMode(KernelDividerMode::FIXED);
+    k.EnableDifferentTypes();
     return k;
 }
 
@@ -56,7 +64,8 @@ CommonDispatchData LRNKernelAcrossChannelMultipleFeatures::SetDefault(const lrn_
 
     unsigned int ofm_per_simd = GetOfmPerSimd(params);
 
-    if (input.GetLayout() == DataLayout::bfyx) {
+    if (input.GetLayout() == DataLayout::bfyx || input.GetLayout() == DataLayout::b_fs_yx_fsv4 ||
+        input.GetLayout() == DataLayout::b_fs_yx_fsv16) {
         const auto& out = params.output;
         const unsigned int alignment = out.X().v > 16 ? 32 : 16;
 
@@ -93,22 +102,30 @@ bool LRNKernelAcrossChannelMultipleFeatures::Validate(const Params& p, const opt
     return true;
 }
 
-JitConstants LRNKernelAcrossChannelMultipleFeatures::GetJitConstants(const lrn_params& params, DispatchData kd) const {
-    auto cldnnJit = LRNKernelBase::GetJitConstants(params, kd);
+JitConstants LRNKernelAcrossChannelMultipleFeatures::GetJitConstants(const lrn_params& params, const DispatchData& kd) const {
+    JitConstants jit = Parent::GetJitConstants(params, kd);
     const auto& input = params.inputs[0];
+    const auto& input_dt = params.inputs[0].GetDType();
     const auto& output = params.output;
 
     unsigned int ofm_per_simd = GetOfmPerSimd(params);
+    jit.AddConstant(MakeJitConstant("OFM_PER_SIMD", ofm_per_simd));
+
+    if ((input.GetLayout() == DataLayout::bfyx || input.GetLayout() == DataLayout::b_fs_yx_fsv4 ||
+         input.GetLayout() == DataLayout::b_fs_yx_fsv16) && output.X().v <= 16) {
+        jit.AddConstant(MakeJitConstant("FORCE_SIMD_16", 1));
+    }
 
-    cldnnJit.AddConstant(MakeJitConstant("OFM_PER_SIMD", ofm_per_simd));
-    if (input.GetLayout() == DataLayout::bfyx && output.X().v <= 16) {
-        cldnnJit.AddConstant(MakeJitConstant("FORCE_SIMD_16", 1));
+    if (!params.fused_ops.empty()) {
+        FusedOpsConfiguration conf = {"", {"batch_id", "feature_id + j", "y", "x"}, "lrn_result", input_dt, 1};
+        jit.Merge(MakeFusedOpsJitConstants(params, {conf}));
     }
-    return cldnnJit;
+
+    return jit;
 }
 
 KernelsData LRNKernelAcrossChannelMultipleFeatures::GetKernelsData(const Params& params,
                                                                    const optional_params& options) const {
     return GetCommonKernelsData(params, options, FORCE_PRIORITY_6);
 }
-}  // namespace kernel_selector
\ No newline at end of file
+}  // namespace kernel_selector
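For context, the across-channel LRN these kernels compute is the standard local response normalization; in the usual notation, with window size n (localSize), channel count N and parameters k, alpha, beta:

b^{i}_{x,y} = \frac{a^{i}_{x,y}}{\left(k + \frac{\alpha}{n} \sum_{j=\max(0,\, i-n/2)}^{\min(N-1,\, i+n/2)} \left(a^{j}_{x,y}\right)^{2}\right)^{\beta}}

The padding = (localSize - 1) / 2 term in LRNKernelBase::GetJitConstants corresponds to the half-window n/2. (Whether the alpha/n divider is folded into alpha follows the layer's KernelDividerMode; the formula above is the textbook form, not a transcription of the kernel.)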
index 3a23105..034facd 100644 (file)
 
 #pragma once
 
+#include <vector>
 #include "lrn_kernel_base.h"
 
 namespace kernel_selector {
 class LRNKernelAcrossChannelMultipleFeatures : public LRNKernelBase {
 public:
-    LRNKernelAcrossChannelMultipleFeatures() : LRNKernelBase("lrn_gpu_across_channel_multiple_features") {}
+    using Parent = LRNKernelBase;
+    LRNKernelAcrossChannelMultipleFeatures() : Parent("lrn_gpu_across_channel_multiple_features") {}
 
     KernelsData GetKernelsData(const Params& params, const optional_params& options) const override;
     ParamsKey GetSupportedKey() const override;
 
 private:
-    bool Validate(const Params& p, const optional_params& o) const override;
-    JitConstants GetJitConstants(const lrn_params& params, DispatchData kd) const override;
-    CommonDispatchData SetDefault(const lrn_params& params) const override;
+    DispatchData SetDefault(const lrn_params& params) const override;
+    std::vector<FusedOpType> GetSupportedFusedOps() const override {
+        return { FusedOpType::QUANTIZE,
+                 FusedOpType::SCALE,
+                 FusedOpType::ACTIVATION };
+    }
+    bool Validate(const Params& params, const optional_params& options) const override;
+    JitConstants GetJitConstants(const lrn_params& params, const DispatchData& kd) const override;
 };
-}  // namespace kernel_selector
\ No newline at end of file
+}  // namespace kernel_selector
index dd86f37..a551c18 100644 (file)
@@ -20,6 +20,9 @@ ParamsKey LRNKernelAcrossChannel_b8::GetSupportedKey() const {
     ParamsKey k;
     k.EnableInputDataType(Datatype::F32);
     k.EnableOutputDataType(Datatype::F32);
+    k.EnableOutputDataType(Datatype::F16);
+    k.EnableOutputDataType(Datatype::INT8);
+    k.EnableOutputDataType(Datatype::UINT8);
     k.EnableInputLayout(DataLayout::yxfb);
     k.EnableOutputLayout(DataLayout::yxfb);
     k.EnableTensorOffset();
@@ -28,6 +31,7 @@ ParamsKey LRNKernelAcrossChannel_b8::GetSupportedKey() const {
     k.EnableLRNMode(LRNMode::ACROSS_CHANNEL);
     k.EnableLRNKernelDividerMode(KernelDividerMode::FIXED);
     k.EnableSubGroup();
+    k.EnableDifferentTypes();
     return k;
 }
 
@@ -58,13 +62,30 @@ bool LRNKernelAcrossChannel_b8::Validate(const Params& p, const optional_params&
     return true;
 }
 
-JitConstants LRNKernelAcrossChannel_b8::GetJitConstants(const lrn_params& params, DispatchData kd) const {
-    auto cldnnJit = LRNKernelBase::GetJitConstants(params, kd);
-    cldnnJit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", 8));
-    return cldnnJit;
+JitConstants LRNKernelAcrossChannel_b8::GetJitConstants(const lrn_params& params, const DispatchData& kd) const {
+    JitConstants jit = Parent::GetJitConstants(params, kd);
+    const auto& input_dt = params.inputs[0].GetDType();
+
+    jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", 8));
+
+    if (!params.fused_ops.empty()) {
+        FusedOpsConfiguration conf = {
+            "",
+            {"batch_id", "feature_id", "y", "x"},
+            "lrn_result",
+            input_dt,
+            8,
+            LoadType::LT_UNALIGNED,
+            BoundaryCheck::DISABLED,
+            IndexType::TENSOR_COORD,
+            Tensor::DataChannelName::BATCH
+        };
+        jit.Merge(MakeFusedOpsJitConstants(params, {conf}));
+    }
+    return jit;
 }
 
 KernelsData LRNKernelAcrossChannel_b8::GetKernelsData(const Params& params, const optional_params& options) const {
     return GetCommonKernelsData(params, options, FORCE_PRIORITY_8);
 }
-}  // namespace kernel_selector
\ No newline at end of file
+}  // namespace kernel_selector
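The nine-argument FusedOpsConfiguration built above tells the JIT emitter how to index and load data for the fused primitives. A simplified stand-in that annotates each field as used here (this struct is a sketch for exposition, not kernel_selector's actual definition):

#include <cstddef>
#include <string>
#include <vector>

struct FusedOpsConfigurationSketch {
    std::string suffix;             // "" -> generates the plain FUSED_OPS macro
    std::vector<std::string> idx;   // index expressions: batch_id, feature_id, y, x
    std::string input_var;          // value fed to the fused chain ("lrn_result")
    int input_dt;                   // activation data type (placeholder for Datatype)
    size_t vec_size;                // 8 -> each work-item carries 8 values
    // The remaining fields in the patch select unaligned loads (LT_UNALIGNED),
    // disable boundary checks, index by tensor coordinates, and vectorize
    // along the BATCH channel.
};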
index b76a9e2..9c1e298 100644 (file)
 #pragma once
 
 #include "lrn_kernel_base.h"
+#include <vector>
 
 namespace kernel_selector {
 class LRNKernelAcrossChannel_b8 : public LRNKernelBase {
 public:
+    using Parent = LRNKernelBase;
     LRNKernelAcrossChannel_b8() : LRNKernelBase("lrn_gpu_across_channel_yxfb_b8_opt") {}
     virtual ~LRNKernelAcrossChannel_b8() {}
 
@@ -27,8 +29,13 @@ public:
     ParamsKey GetSupportedKey() const override;
 
 private:
-    bool Validate(const Params& p, const optional_params& o) const override;
-    JitConstants GetJitConstants(const lrn_params& params, DispatchData kd) const override;
-    CommonDispatchData SetDefault(const lrn_params& params) const override;
+    DispatchData SetDefault(const lrn_params& params) const override;
+    std::vector<FusedOpType> GetSupportedFusedOps() const override {
+        return { FusedOpType::QUANTIZE,
+                 FusedOpType::SCALE,
+                 FusedOpType::ACTIVATION };
+    }
+    bool Validate(const Params& params, const optional_params& options) const override;
+    JitConstants GetJitConstants(const lrn_params& params, const DispatchData& kd) const override;
 };
-}  // namespace kernel_selector
\ No newline at end of file
+}  // namespace kernel_selector
index dbadac4..693b98a 100644 (file)
@@ -22,17 +22,20 @@ ParamsKey LRNKernelAcrossChannelRef::GetSupportedKey() const {
     k.EnableInputDataType(Datatype::F32);
     k.EnableOutputDataType(Datatype::F16);
     k.EnableOutputDataType(Datatype::F32);
+    k.EnableOutputDataType(Datatype::INT8);
+    k.EnableOutputDataType(Datatype::UINT8);
     k.EnableInputLayout(DataLayout::bfyx);
-    k.EnableInputLayout(DataLayout::yxfb);
     k.EnableInputLayout(DataLayout::byxf);
+    k.EnableInputLayout(DataLayout::yxfb);
     k.EnableOutputLayout(DataLayout::bfyx);
-    k.EnableOutputLayout(DataLayout::yxfb);
     k.EnableOutputLayout(DataLayout::byxf);
+    k.EnableOutputLayout(DataLayout::yxfb);
+    k.EnableLRNMode(LRNMode::ACROSS_CHANNEL);
+    k.EnableLRNKernelDividerMode(KernelDividerMode::FIXED);
     k.EnableTensorOffset();
     k.EnableTensorPitches();
     k.EnableBatching();
-    k.EnableLRNMode(LRNMode::ACROSS_CHANNEL);
-    k.EnableLRNKernelDividerMode(KernelDividerMode::FIXED);
+    k.EnableDifferentTypes();
     return k;
 }
 
@@ -53,7 +56,20 @@ CommonDispatchData LRNKernelAcrossChannelRef::SetDefault(const lrn_params& param
     return runInfo;
 }
 
+JitConstants LRNKernelAcrossChannelRef::GetJitConstants(const lrn_params& params,
+    const LRNKernelBase::DispatchData& kd) const {
+    JitConstants jit = Parent::GetJitConstants(params, kd);
+    const auto& input_dt = params.inputs[0].GetDType();
+
+    if (!params.fused_ops.empty()) {
+        FusedOpsConfiguration conf = {"", {"batch_id", "feature_id", "y", "x"}, "lrn_result", input_dt, 1};
+        jit.Merge(MakeFusedOpsJitConstants(params, {conf}));
+    }
+
+    return jit;
+}
+
 KernelsData LRNKernelAcrossChannelRef::GetKernelsData(const Params& params, const optional_params& options) const {
     return GetCommonKernelsData(params, options, FORCE_PRIORITY_9);
 }
-}  // namespace kernel_selector
\ No newline at end of file
+}  // namespace kernel_selector
index b88d4b0..fd206c5 100644 (file)
 
 
 #pragma once
-
+#include <vector>
 #include "lrn_kernel_base.h"
 
 namespace kernel_selector {
 class LRNKernelAcrossChannelRef : public LRNKernelBase {
 public:
-    LRNKernelAcrossChannelRef() : LRNKernelBase("lrn_gpu_across_channel_ref") {}
+    using Parent = LRNKernelBase;
+
+    LRNKernelAcrossChannelRef() : Parent("lrn_gpu_across_channel_ref") {}
     virtual ~LRNKernelAcrossChannelRef() {}
 
     KernelsData GetKernelsData(const Params& params, const optional_params& options) const override;
     ParamsKey GetSupportedKey() const override;
 
-private:
-    CommonDispatchData SetDefault(const lrn_params& params) const override;
+protected:
+    DispatchData SetDefault(const lrn_params& params) const override;
+    std::vector<FusedOpType> GetSupportedFusedOps() const override {
+        return { FusedOpType::QUANTIZE,
+                 FusedOpType::SCALE,
+                 FusedOpType::ACTIVATION };
+    }
+    JitConstants GetJitConstants(const lrn_params& params, const DispatchData& kd) const override;
 };
-}  // namespace kernel_selector
\ No newline at end of file
+}  // namespace kernel_selector
index 3a5ab50..8e444f9 100644 (file)
 
 namespace kernel_selector {
 bool LRNKernelBase::Validate(const Params& p, const optional_params& o) const {
-    if (p.GetType() != KernelType::LRN || o.GetType() != KernelType::LRN) {
+    if (!common_kernel_base::Validate(p, o) || p.GetType() != KernelType::LRN || o.GetType() != KernelType::LRN) {
         return false;
     }
 
+    const lrn_params& params = static_cast<const lrn_params&>(p);
+
+    for (auto& fused_op : params.fused_ops) {
+        if (!IsFusedPrimitiveSupported(fused_op))
+            return false;
+    }
+
     return true;
 }
 
-JitConstants LRNKernelBase::GetJitConstants(const lrn_params& params, LRNKernelBase::DispatchData kd) const {
+JitConstants LRNKernelBase::GetJitConstants(const lrn_params& params, const LRNKernelBase::DispatchData& kd) const {
     JitConstants mem_consts = MakeBaseParamsJitConstants(params);
 
     const auto padding = (params.localSize - 1) / 2;
@@ -95,12 +102,23 @@ KernelsData LRNKernelBase::GetCommonKernelsData(const Params& params,
     auto cldnnJit = GetJitConstants(orgParams, runInfo);
     auto entryPoint = GetEntryPoint(kernelName, orgParams.layerID, options);
     auto jit = CreateJit(kernelName, cldnnJit, entryPoint);
+    auto fused_deps_total = GetFusedPrimitiveInputsCount(params);
 
     auto& kernel = kd.kernels[0];
-    FillCLKernelData(kernel, runInfo, params.engineInfo, kernelName, jit, entryPoint);
+    FillCLKernelData(kernel,
+                     runInfo,
+                     params.engineInfo,
+                     kernelName,
+                     jit,
+                     entryPoint,
+                     "",
+                     false,
+                     false,
+                     1,
+                     fused_deps_total);
 
     kd.estimatedTime = estimatedTime;
 
     return {kd};
 }
-}  // namespace kernel_selector
\ No newline at end of file
+}  // namespace kernel_selector
index be03147..8314e85 100644 (file)
@@ -61,8 +61,8 @@ public:
 
 protected:
     bool Validate(const Params& p, const optional_params& o) const override;
-    virtual JitConstants GetJitConstants(const lrn_params& params, DispatchData kd) const;
+    virtual JitConstants GetJitConstants(const lrn_params& params, const DispatchData& kd) const;
     virtual DispatchData SetDefault(const lrn_params& params) const;
     KernelsData GetCommonKernelsData(const Params& params, const optional_params&, float estimatedTime) const;
 };
-}  // namespace kernel_selector
\ No newline at end of file
+}  // namespace kernel_selector
index d512b39..86ccca3 100644 (file)
@@ -24,6 +24,8 @@ ParamsKey LRNKernelRef::GetSupportedKey() const {
     k.EnableInputDataType(Datatype::F32);
     k.EnableOutputDataType(Datatype::F16);
     k.EnableOutputDataType(Datatype::F32);
+    k.EnableOutputDataType(Datatype::INT8);
+    k.EnableOutputDataType(Datatype::UINT8);
     k.EnableInputLayout(DataLayout::bfyx);
     k.EnableInputLayout(DataLayout::yxfb);
     k.EnableInputLayout(DataLayout::byxf);
@@ -37,12 +39,14 @@ ParamsKey LRNKernelRef::GetSupportedKey() const {
     k.EnableLRNMode(LRNMode::ACROSS_CHANNEL);
     k.EnableLRNKernelDividerMode(KernelDividerMode::DYNAMIC);
     k.EnableLRNKernelDividerMode(KernelDividerMode::FIXED);
+    k.EnableDifferentTypes();
     return k;
 }
 
-JitConstants LRNKernelRef::GetJitConstants(const lrn_params& params, LRNKernelRef::Parent::DispatchData kd) const {
+JitConstants LRNKernelRef::GetJitConstants(const lrn_params& params, const LRNKernelRef::Parent::DispatchData& kd) const {
     const uint32_t round_norm_size = (params.localSize / 2) * 2 + 1;
     uint32_t numElement = round_norm_size * round_norm_size;
+    const auto& input_dt = params.inputs[0].GetDType();
 
     if (params.normMode == LRNMode::ACROSS_CHANNEL) {
         numElement = round_norm_size;
@@ -58,6 +62,11 @@ JitConstants LRNKernelRef::GetJitConstants(const lrn_params& params, LRNKernelRe
         MakeJitConstant("GWS_YX", 0),
     });
 
+    if (!params.fused_ops.empty()) {
+        FusedOpsConfiguration conf = {"", {"b", "f", "y", "x"}, "lrn_result", input_dt, 1};
+        jit.Merge(MakeFusedOpsJitConstants(params, {conf}));
+    }
+
     return jit;
 }
 
index 1c2c5b1..0872feb 100644 (file)
@@ -16,6 +16,7 @@
 #pragma once
 
 #include "lrn_kernel_base.h"
+#include "vector"
 
 namespace kernel_selector {
 class LRNKernelRef : public LRNKernelBase {
@@ -27,8 +28,13 @@ public:
     KernelsData GetKernelsData(const Params& params, const optional_params& options) const override;
     ParamsKey GetSupportedKey() const override;
 
-protected:
-    JitConstants GetJitConstants(const lrn_params& params, DispatchData kd) const override;
+private:
     DispatchData SetDefault(const lrn_params& params) const override;
+    std::vector<FusedOpType> GetSupportedFusedOps() const override {
+        return { FusedOpType::QUANTIZE,
+                 FusedOpType::SCALE,
+                 FusedOpType::ACTIVATION };
+    }
+    JitConstants GetJitConstants(const lrn_params& params, const DispatchData& kd) const override;
 };
 }  // namespace kernel_selector
index 5c1574b..5b2f254 100644 (file)
@@ -25,22 +25,26 @@ ParamsKey LRNKernelWithinChannelByxfOpt::GetSupportedKey() const {
     k.EnableInputDataType(Datatype::F32);
     k.EnableOutputDataType(Datatype::F16);
     k.EnableOutputDataType(Datatype::F32);
+    k.EnableOutputDataType(Datatype::INT8);
+    k.EnableOutputDataType(Datatype::UINT8);
     k.EnableInputLayout(DataLayout::byxf);
     k.EnableOutputLayout(DataLayout::byxf);
-    k.EnableTensorOffset();
-    k.EnableTensorPitches();
-    k.EnableBatching();
     k.EnableLRNMode(LRNMode::WITHIN_CHANNEL);
     k.EnableLRNKernelDividerMode(KernelDividerMode::DYNAMIC);
     k.EnableLRNKernelDividerMode(KernelDividerMode::FIXED);
+    k.EnableTensorOffset();
+    k.EnableTensorPitches();
+    k.EnableBatching();
+    k.EnableDifferentTypes();
     return k;
 }
 
 JitConstants LRNKernelWithinChannelByxfOpt::GetJitConstants(
     const lrn_params& params,
-    LRNKernelWithinChannelByxfOpt::Parent::DispatchData kd) const {
+    const LRNKernelBase::DispatchData& kd) const {
     const uint32_t round_norm_size = (params.localSize / 2) * 2 + 1;
     uint32_t numElement = round_norm_size * round_norm_size;
+    const auto& input_dt = params.inputs[0].GetDType();
 
     if (params.normMode == LRNMode::ACROSS_CHANNEL) {
         numElement = round_norm_size;
@@ -56,6 +60,11 @@ JitConstants LRNKernelWithinChannelByxfOpt::GetJitConstants(
         MakeJitConstant("GWS_YX", 0),
     });
 
+    if (!params.fused_ops.empty()) {
+        FusedOpsConfiguration conf = {"", {"b", "f + i", "y", "x"}, "lrn_result", input_dt, 1};
+        jit.Merge(MakeFusedOpsJitConstants(params, {conf}));
+    }
+
     return jit;
 }
 
index b8a916f..9cdd64f 100644 (file)
@@ -17,6 +17,7 @@
 #pragma once
 
 #include "lrn_kernel_base.h"
+#include "vector"
 
 namespace kernel_selector {
 class LRNKernelWithinChannelByxfOpt : public LRNKernelBase {
@@ -28,9 +29,14 @@ public:
     KernelsData GetKernelsData(const Params& params, const optional_params& options) const override;
     ParamsKey GetSupportedKey() const override;
 
-protected:
-    bool Validate(const Params&, const optional_params&) const override;
-    JitConstants GetJitConstants(const lrn_params& params, DispatchData kd) const override;
+private:
     DispatchData SetDefault(const lrn_params& params) const override;
+    std::vector<FusedOpType> GetSupportedFusedOps() const override {
+        return { FusedOpType::QUANTIZE,
+                 FusedOpType::SCALE,
+                 FusedOpType::ACTIVATION };
+    }
+    bool Validate(const Params& params, const optional_params& options) const override;
+    JitConstants GetJitConstants(const lrn_params& params, const DispatchData& kd) const override;
 };
 }  // namespace kernel_selector
index 54702bd..b788ced 100644 (file)
@@ -22,6 +22,8 @@ ParamsKey LRNKernelWithinChannel::GetSupportedKey() const {
     k.EnableInputDataType(Datatype::F32);
     k.EnableOutputDataType(Datatype::F16);
     k.EnableOutputDataType(Datatype::F32);
+    k.EnableOutputDataType(Datatype::INT8);
+    k.EnableOutputDataType(Datatype::UINT8);
     k.EnableInputLayout(DataLayout::bfyx);
     k.EnableInputLayout(DataLayout::yxfb);
     k.EnableOutputLayout(DataLayout::bfyx);
@@ -31,6 +33,7 @@ ParamsKey LRNKernelWithinChannel::GetSupportedKey() const {
     k.EnableBatching();
     k.EnableLRNMode(LRNMode::WITHIN_CHANNEL);
     k.EnableLRNKernelDividerMode(KernelDividerMode::FIXED);
+    k.EnableDifferentTypes();
     return k;
 }
 
@@ -48,7 +51,27 @@ CommonDispatchData LRNKernelWithinChannel::SetDefault(const lrn_params& params)
     return runInfo;
 }
 
+JitConstants LRNKernelWithinChannel::GetJitConstants(const lrn_params& params,
+                                                     const LRNKernelWithinChannel::Parent::DispatchData& kd) const {
+    JitConstants jit = Parent::GetJitConstants(params, kd);
+    const auto& input_dt = params.inputs[0].GetDType();
+
+    if (!params.fused_ops.empty()) {
+        FusedOpsConfiguration conf = {"", {"batch_id", "feature_id", "y", "x"}, "lrn_result", input_dt, 1};
+        jit.Merge(MakeFusedOpsJitConstants(params, {conf}));
+    }
+
+    return jit;
+}
+
+bool LRNKernelWithinChannel::Validate(const Params& p, const optional_params& o) const {
+    if (!LRNKernelBase::Validate(p, o)) {
+        return false;
+    }
+    return true;
+}
+
 KernelsData LRNKernelWithinChannel::GetKernelsData(const Params& params, const optional_params& options) const {
     return GetCommonKernelsData(params, options, FORCE_PRIORITY_9);
 }
-}  // namespace kernel_selector
\ No newline at end of file
+}  // namespace kernel_selector
index 3c56601..adaf9c3 100644 (file)
 #pragma once
 
 #include "lrn_kernel_base.h"
+#include "vector"
 
 namespace kernel_selector {
 class LRNKernelWithinChannel : public LRNKernelBase {
 public:
+    using Parent = LRNKernelBase;
     LRNKernelWithinChannel() : LRNKernelBase("lrn_gpu_within_channel") {}
     virtual ~LRNKernelWithinChannel() {}
 
@@ -27,6 +29,13 @@ public:
     ParamsKey GetSupportedKey() const override;
 
 private:
-    CommonDispatchData SetDefault(const lrn_params& params) const override;
+    DispatchData SetDefault(const lrn_params& params) const override;
+    std::vector<FusedOpType> GetSupportedFusedOps() const override {
+        return { FusedOpType::QUANTIZE,
+                 FusedOpType::SCALE,
+                 FusedOpType::ACTIVATION };
+    }
+    bool Validate(const Params& params, const optional_params& options) const override;
+    JitConstants GetJitConstants(const lrn_params& params, const DispatchData& kd) const override;
 };
-}  // namespace kernel_selector
\ No newline at end of file
+}  // namespace kernel_selector
index 8996722..22e95f7 100644 (file)
@@ -22,6 +22,8 @@ ParamsKey LRNKernelWithinChannelOpt::GetSupportedKey() const {
     k.EnableInputDataType(Datatype::F32);
     k.EnableOutputDataType(Datatype::F16);
     k.EnableOutputDataType(Datatype::F32);
+    k.EnableOutputDataType(Datatype::UINT8);
+    k.EnableOutputDataType(Datatype::INT8);
     k.EnableInputLayout(DataLayout::bfyx);
     k.EnableInputLayout(DataLayout::yxfb);
     k.EnableOutputLayout(DataLayout::bfyx);
@@ -31,6 +33,7 @@ ParamsKey LRNKernelWithinChannelOpt::GetSupportedKey() const {
     k.EnableBatching();
     k.EnableLRNMode(LRNMode::WITHIN_CHANNEL);
     k.EnableLRNKernelDividerMode(KernelDividerMode::FIXED);
+    k.EnableDifferentTypes();
     return k;
 }
 
@@ -50,7 +53,26 @@ CommonDispatchData LRNKernelWithinChannelOpt::SetDefault(const lrn_params& param
     return runInfo;
 }
 
+bool LRNKernelWithinChannelOpt::Validate(const Params& p, const optional_params& o) const {
+    if (!LRNKernelBase::Validate(p, o)) {
+        return false;
+    }
+    return true;
+}
+
+JitConstants LRNKernelWithinChannelOpt::GetJitConstants(const lrn_params& params, const LRNKernelWithinChannelOpt::Parent::DispatchData& kd) const {
+    const auto& input_dt = params.inputs[0].GetDType();
+    JitConstants jit = Parent::GetJitConstants(params, kd);
+
+    if (!params.fused_ops.empty()) {
+        FusedOpsConfiguration conf = {"", {"batch_id", "feature_id", "y", "x"}, "lrn_result", input_dt, 1};
+        jit.Merge(MakeFusedOpsJitConstants(params, {conf}));
+    }
+
+    return jit;
+}
+
 KernelsData LRNKernelWithinChannelOpt::GetKernelsData(const Params& params, const optional_params& options) const {
     return GetCommonKernelsData(params, options, FORCE_PRIORITY_8);
 }
-}  // namespace kernel_selector
\ No newline at end of file
+}  // namespace kernel_selector
index 31b07a6..8740055 100644 (file)
 #pragma once
 
 #include "lrn_kernel_base.h"
+#include "vector"
 
 namespace kernel_selector {
 class LRNKernelWithinChannelOpt : public LRNKernelBase {
 public:
-    LRNKernelWithinChannelOpt() : LRNKernelBase("lrn_gpu_within_channel_opt") {}
+    using Parent = LRNKernelBase;
+    LRNKernelWithinChannelOpt() : Parent("lrn_gpu_within_channel_opt") {}
     virtual ~LRNKernelWithinChannelOpt() {}
-
     KernelsData GetKernelsData(const Params& params, const optional_params& options) const override;
     ParamsKey GetSupportedKey() const override;
 
 private:
-    CommonDispatchData SetDefault(const lrn_params& params) const override;
+    DispatchData SetDefault(const lrn_params& params) const override;
+    std::vector<FusedOpType> GetSupportedFusedOps() const override {
+        return { FusedOpType::QUANTIZE,
+                 FusedOpType::SCALE,
+                 FusedOpType::ACTIVATION };
+    }
+    bool Validate(const Params& params, const optional_params& options) const override;
+    JitConstants GetJitConstants(const lrn_params& params, const DispatchData& kd) const override;
 };
-}  // namespace kernel_selector
\ No newline at end of file
+}  // namespace kernel_selector
index db80ea7..0adfb29 100644 (file)
 // limitations under the License.
 
 #include "include/include_all.cl"
-#include "include/unit_type.cl"
+#include "include/mmad.cl"
 
-#define GET_SRC(data, id) AS_TYPE(MAKE_VECTOR_TYPE(UNIT_TYPE, OUTPUT_X_BLOCK_SIZE),                             \
-                            intel_sub_group_shuffle(                                                            \
-                                AS_TYPE(MAKE_VECTOR_TYPE(UNIT_BLOCK_RW_TYPE, OUTPUT_X_BLOCK_SIZE), data),       \
-                                id))
-//#define GET_SRC(data, id) intel_sub_group_shuffle(src, id)
+#define INPUT_TYPE        INPUT0_TYPE
+#define INPUT_TYPE2       MAKE_VECTOR_TYPE(INPUT0_TYPE, 2)
+#define INPUT_TYPE4       MAKE_VECTOR_TYPE(INPUT0_TYPE, 4)
+#define INPUT_TYPE8       MAKE_VECTOR_TYPE(INPUT0_TYPE, 8)
+
+#define FILTER_TYPE8      MAKE_VECTOR_TYPE(FILTER_TYPE, 8)
+
+#define AS_INPUT_TYPE     CAT(as_, INPUT_TYPE)
+#define AS_INPUT_TYPE2    CAT(as_, INPUT_TYPE2)
+#define AS_INPUT_TYPE4    CAT(as_, INPUT_TYPE4)
+#define AS_INPUT_TYPE8    CAT(as_, INPUT_TYPE8)
+
+#define AS_FILTER_TYPE8   CAT(as_, FILTER_TYPE8)
+
+#if INPUT0_TYPE_SIZE == 2
+#   define INPUT_BLOCK_READ(ptr, offset)    AS_INPUT_TYPE(intel_sub_group_block_read_us((__global ushort*)(ptr) + (offset)))
+#   define INPUT_BLOCK_READ2(ptr, offset)   AS_INPUT_TYPE2(intel_sub_group_block_read_us2((__global ushort*)(ptr) + (offset)))
+#   define INPUT_BLOCK_READ4(ptr, offset)   AS_INPUT_TYPE4(intel_sub_group_block_read_us4((__global ushort*)(ptr) + (offset)))
+#   define INPUT_BLOCK_READ8(ptr, offset)   AS_INPUT_TYPE8(intel_sub_group_block_read_us8((__global ushort*)(ptr) + (offset)))
+#elif INPUT0_TYPE_SIZE == 4
+#   define INPUT_BLOCK_READ(ptr, offset)    AS_INPUT_TYPE(intel_sub_group_block_read((__global uint*)(ptr) + (offset)))
+#   define INPUT_BLOCK_READ2(ptr, offset)   AS_INPUT_TYPE2(intel_sub_group_block_read2((__global uint*)(ptr) + (offset)))
+#   define INPUT_BLOCK_READ4(ptr, offset)   AS_INPUT_TYPE4(intel_sub_group_block_read4((__global uint*)(ptr) + (offset)))
+#   define INPUT_BLOCK_READ8(ptr, offset)   AS_INPUT_TYPE8(intel_sub_group_block_read8((__global uint*)(ptr) + (offset)))
+#else
+#   error convolution_gpu_bfyx_f16.cl - unsupported input type.
+#endif
+
+#if FILTER_TYPE_SIZE == 2
+#   define FILTER_BLOCK_READ8(ptr, offset) AS_FILTER_TYPE8(intel_sub_group_block_read_us8((__global ushort*)(ptr) + (offset)))
+#elif FILTER_TYPE_SIZE == 4
+#   define FILTER_BLOCK_READ8(ptr, offset) AS_FILTER_TYPE8(intel_sub_group_block_read8((__global uint*)(ptr) + (offset)))
+#else
+#   error convolution_gpu_bfyx_f16.cl - unsupported filter type.
+#endif
+
+#if OUTPUT_TYPE_SIZE == 1
+#   define OUTPUT_BLOCK_WRITE(ptr, offset, val)    BLOCK_WRITE_UC_1((__global uchar*)(ptr) + (offset), as_uchar(val))
+#   define OUTPUT_BLOCK_WRITE2(ptr, offset, val)   BLOCK_WRITE_UC_2((__global uchar*)(ptr) + (offset), as_uchar2(val))
+#   define OUTPUT_BLOCK_WRITE4(ptr, offset, val)   BLOCK_WRITE_UC_4((__global uchar*)(ptr) + (offset), as_uchar4(val))
+#   define OUTPUT_BLOCK_WRITE8(ptr, offset, val)   BLOCK_WRITE_UC_8((__global uchar*)(ptr) + (offset), as_uchar8(val))
+#elif OUTPUT_TYPE_SIZE == 2
+#   define OUTPUT_BLOCK_WRITE(ptr, offset, val)    intel_sub_group_block_write_us((__global ushort*)(ptr) + (offset), as_ushort(val))
+#   define OUTPUT_BLOCK_WRITE2(ptr, offset, val)   intel_sub_group_block_write_us2((__global ushort*)(ptr) + (offset), as_ushort2(val))
+#   define OUTPUT_BLOCK_WRITE4(ptr, offset, val)   intel_sub_group_block_write_us4((__global ushort*)(ptr) + (offset), as_ushort4(val))
+#   define OUTPUT_BLOCK_WRITE8(ptr, offset, val)   intel_sub_group_block_write_us8((__global ushort*)(ptr) + (offset), as_ushort8(val))
+#elif OUTPUT_TYPE_SIZE == 4
+#   define OUTPUT_BLOCK_WRITE(ptr, offset, val)    intel_sub_group_block_write((__global uint*)(ptr) + (offset), as_uint(val))
+#   define OUTPUT_BLOCK_WRITE2(ptr, offset, val)   intel_sub_group_block_write2((__global uint*)(ptr) + (offset), as_uint2(val))
+#   define OUTPUT_BLOCK_WRITE4(ptr, offset, val)   intel_sub_group_block_write4((__global uint*)(ptr) + (offset), as_uint4(val))
+#   define OUTPUT_BLOCK_WRITE8(ptr, offset, val)   intel_sub_group_block_write8((__global uint*)(ptr) + (offset), as_uint8(val))
+#else
+#   error convolution_gpu_bfyx_f16.cl - unsupported output type.
+#endif
+
+#if INPUT0_TYPE_SIZE == 2
+#   define AS_INPUT_SRC         CAT(as_, MAKE_VECTOR_TYPE(INPUT_TYPE, OUTPUT_X_BLOCK_SIZE))
+#   define AS_US_SRC            CAT(as_, MAKE_VECTOR_TYPE(ushort, OUTPUT_X_BLOCK_SIZE))
+#   define GET_SRC(data, id)    AS_INPUT_SRC(intel_sub_group_shuffle(AS_US_SRC(data), id))
+#else
+#   define GET_SRC(data, id)    intel_sub_group_shuffle(data, id)
+#endif
 #define FEATURE_SLICE_SIZE 16
 #define FILTER_OFM_NUM_ALIGNED (((FILTER_OFM_NUM + FEATURE_SLICE_SIZE - 1) / FEATURE_SLICE_SIZE) * FEATURE_SLICE_SIZE)
 #define FILTER_IFM_NUM_ALIGNED (((FILTER_IFM_NUM + FEATURE_SLICE_SIZE - 1) / FEATURE_SLICE_SIZE) * FEATURE_SLICE_SIZE)
@@ -57,7 +114,7 @@ KERNEL(convolution_bfyx_f16)(
     const int x = (xy % X_BLOCKS) * OUTPUT_X_BLOCK_SIZE;
     const int y = (xy / X_BLOCKS);
 
-    typedef MAKE_VECTOR_TYPE(UNIT_TYPE, OUTPUT_X_BLOCK_SIZE) vec_t;
+    typedef MAKE_VECTOR_TYPE(INPUT0_TYPE, OUTPUT_X_BLOCK_SIZE) vec_t;
 
     const int input_x = x * STRIDE_SIZE_X - PADDING_SIZE_X;
     const int input_y = y * STRIDE_SIZE_Y - PADDING_SIZE_Y;
@@ -99,9 +156,9 @@ KERNEL(convolution_bfyx_f16)(
 
 #if BIAS_TERM
     uint bias_offset = f_block * FEATURE_SLICE_SIZE;
-    vec_t dst = (vec_t)(UNIT_BLOCK_READ(biases, bias_offset));
+    vec_t dst = (vec_t)(INPUT_BLOCK_READ(biases, bias_offset));
 #else
-    vec_t dst = UNIT_VAL_ZERO;
+    vec_t dst = INPUT0_VAL_ZERO;
 #endif  // BIAS_TERM
 
 #ifndef MULTIPLE_GROUPS_INPUT_PRELOAD
@@ -131,7 +188,7 @@ KERNEL(convolution_bfyx_f16)(
                 if (input_y + kh*DILATION_SIZE_Y < 0 || input_y + kh*DILATION_SIZE_Y >= INPUT0_SIZE_Y)
                     continue;
 
-                UNIT_TYPE line_cache[INPUT_LINE_SIZE];
+                INPUT_TYPE line_cache[INPUT_LINE_SIZE];
 
 #if INPUT_LEFTOVERS
                 if ((icb+1)*FEATURE_SLICE_SIZE >= FILTER_IFM_NUM)
@@ -153,10 +210,10 @@ KERNEL(convolution_bfyx_f16)(
                 {
                     int xb = 0;
                     for (; xb + 8 <= INPUT_LINE_SIZE; xb += 8) {
-                        UNIT_TYPE8 vv = UNIT_BLOCK_READ8(input, grouped_input_offset +
-                                                                icb * input_fs_pitch +
-                                                                kh * DILATION_SIZE_Y * input_y_pitch +
-                                                                xb * input_x_pitch);
+                        INPUT_TYPE8 vv = INPUT_BLOCK_READ8(input, grouped_input_offset +
+                                                                  icb * input_fs_pitch +
+                                                                  kh * DILATION_SIZE_Y * input_y_pitch +
+                                                                  xb * input_x_pitch);
 
                         line_cache[xb + 0] = vv[0];
                         line_cache[xb + 1] = vv[1];
@@ -168,10 +225,10 @@ KERNEL(convolution_bfyx_f16)(
                         line_cache[xb + 7] = vv[7];
                     }
                     for (; xb + 4 <= INPUT_LINE_SIZE; xb += 4) {
-                        UNIT_TYPE4 vv = UNIT_BLOCK_READ4(input, grouped_input_offset +
-                                                                icb * input_fs_pitch +
-                                                                kh * DILATION_SIZE_Y * input_y_pitch +
-                                                                xb * input_x_pitch);
+                        INPUT_TYPE4 vv = INPUT_BLOCK_READ4(input, grouped_input_offset +
+                                                                  icb * input_fs_pitch +
+                                                                  kh * DILATION_SIZE_Y * input_y_pitch +
+                                                                  xb * input_x_pitch);
 
                         line_cache[xb + 0] = vv[0];
                         line_cache[xb + 1] = vv[1];
@@ -179,10 +236,10 @@ KERNEL(convolution_bfyx_f16)(
                         line_cache[xb + 3] = vv[3];
                     }
                     for (; xb < INPUT_LINE_SIZE; xb++) {
-                        line_cache[xb] = UNIT_BLOCK_READ(input, grouped_input_offset +
-                                                                icb * input_fs_pitch +
-                                                                kh * DILATION_SIZE_Y * input_y_pitch +
-                                                                xb * input_x_pitch);
+                        line_cache[xb] = INPUT_BLOCK_READ(input, grouped_input_offset +
+                                                                 icb * input_fs_pitch +
+                                                                 kh * DILATION_SIZE_Y * input_y_pitch +
+                                                                 xb * input_x_pitch);
                     }
                 }
 
@@ -194,9 +251,9 @@ KERNEL(convolution_bfyx_f16)(
                         src[i] = line_cache[kw*DILATION_SIZE_X + STRIDE_SIZE_X*i];
                     }
 #if MULTIPLE_GROUPS_INPUT_PRELOAD
-                    typedef MAKE_VECTOR_TYPE(UNIT_TYPE, FILTER_IFM_NUM) ifm_vec_t;
+                    typedef MAKE_VECTOR_TYPE(FILTER_TYPE, FILTER_IFM_NUM) ifm_vec_t;
 
-                    ifm_vec_t wei0 = UNIT_VAL_ZERO;
+                    ifm_vec_t wei0 = FILTER_VAL_ZERO;
                     for (int ifm = 0; ifm < FILTER_IFM_NUM; ifm++)
                         wei0[ifm] = weights[grouped_filter_offset +
                                             ofm_in_group +
@@ -242,15 +299,15 @@ KERNEL(convolution_bfyx_f16)(
 #error Unsupported input feature size for multiple groups input preload
 #endif  // FILTER_IFM_NUM
 #else
-                    UNIT_TYPE8 wei0 = UNIT_BLOCK_READ8(weights, grouped_filter_offset +
-                                                                icb * filter_is_pitch +
-                                                                kh * filter_y_pitch +
-                                                                kw * filter_x_pitch);
-                    UNIT_TYPE8 wei1 = UNIT_BLOCK_READ8(weights, grouped_filter_offset +
-                                                                icb * filter_is_pitch +
-                                                                kh * filter_y_pitch +
-                                                                kw * filter_x_pitch +
-                                                                8 * filter_isv_pitch);
+                    FILTER_TYPE8 wei0 = FILTER_BLOCK_READ8(weights, grouped_filter_offset +
+                                                                    icb * filter_is_pitch +
+                                                                    kh * filter_y_pitch +
+                                                                    kw * filter_x_pitch);
+                    FILTER_TYPE8 wei1 = FILTER_BLOCK_READ8(weights, grouped_filter_offset +
+                                                                    icb * filter_is_pitch +
+                                                                    kh * filter_y_pitch +
+                                                                    kw * filter_x_pitch +
+                                                                    8 * filter_isv_pitch);
                     const vec_t src0  = GET_SRC(src, 0);
                     const vec_t src1  = GET_SRC(src, 1);
                     const vec_t src2  = GET_SRC(src, 2);
@@ -293,16 +350,21 @@ KERNEL(convolution_bfyx_f16)(
 #endif  // MULTIPLE_GROUPS_INPUT_PRELOAD
     dst = ACTIVATION(dst, ACTIVATION_PARAMS);
 
+    typedef MAKE_VECTOR_TYPE(OUTPUT_TYPE, OUTPUT_X_BLOCK_SIZE) out_vec_t;
+    out_vec_t res;
+
 #if OUTPUT_LEFTOVERS
     if ((f_block+1)*FEATURE_SLICE_SIZE >= OUTPUT_FEATURE_NUM) {
         for (int i = 0; i < OUTPUT_X_BLOCK_SIZE; i++) {
 #if HAS_FUSED_OPS
             FUSED_OPS_SCALAR;
-            dst[i] = FUSED_OPS_RESULT_SCALAR;
+            res[i] = FUSED_OPS_RESULT_SCALAR;
+#else
+            res[i] = TO_OUTPUT_TYPE(dst[i]);
 #endif
             if ((f_block*FEATURE_SLICE_SIZE + lid < OUTPUT_FEATURE_NUM) && (x + i) < OUTPUT_SIZE_X) {
-                output[output_offset + i * output_x_pitch + lid] = dst[i];
-                }
+                output[output_offset + i * output_x_pitch + lid] = res[i];
+            }
         }
     }
     else
@@ -311,17 +373,19 @@ KERNEL(convolution_bfyx_f16)(
         if (x + OUTPUT_X_BLOCK_SIZE <= OUTPUT_SIZE_X) {
 #if HAS_FUSED_OPS
             FUSED_OPS_VEC;
-            dst = FUSED_OPS_RESULT_VEC;
+            res = FUSED_OPS_RESULT_VEC;
+#else
+            res = dst;
 #endif
             // TODO Generalize for other block sizes
 #if OUTPUT_X_BLOCK_SIZE == 8
-            UNIT_BLOCK_WRITE8(output, output_offset, dst);
+            OUTPUT_BLOCK_WRITE8(output, output_offset, res);
 #elif OUTPUT_X_BLOCK_SIZE == 4
-            UNIT_BLOCK_WRITE4(output, output_offset, dst);
+            OUTPUT_BLOCK_WRITE4(output, output_offset, res);
 #elif OUTPUT_X_BLOCK_SIZE == 2
-            UNIT_BLOCK_WRITE2(output, output_offset, dst);
+            OUTPUT_BLOCK_WRITE2(output, output_offset, res);
 #elif OUTPUT_X_BLOCK_SIZE == 1
-            UNIT_BLOCK_WRITE(output, output_offset, dst);
+            OUTPUT_BLOCK_WRITE(output, output_offset, res);
 #else
 #   error convolution_gpu_bfyx_f16.cl: Unsupported output x block size.
 #endif
@@ -330,15 +394,45 @@ KERNEL(convolution_bfyx_f16)(
             for (int i = 0; i < x_tail; i++) {
 #if HAS_FUSED_OPS
                 FUSED_OPS_SCALAR;
-                dst[i] = FUSED_OPS_RESULT_SCALAR;
+                res[i] = FUSED_OPS_RESULT_SCALAR;
+#else
+                res[i] = TO_OUTPUT_TYPE(dst[i]);
 #endif
-                UNIT_BLOCK_WRITE(output, output_offset + i * output_x_pitch, dst[i]);
+                OUTPUT_BLOCK_WRITE(output, output_offset + i * output_x_pitch, res[i]);
             }
         }
     }
 }
 
+#undef AS_INPUT_SRC
+#undef AS_US_SRC
 #undef GET_SRC
 #undef FEATURE_SLICE_SIZE
 #undef FILTER_OFM_NUM_ALIGNED
 #undef FILTER_IFM_NUM_ALIGNED
+
+#undef INPUT_TYPE
+#undef INPUT_TYPE2
+#undef INPUT_TYPE4
+#undef INPUT_TYPE8
+
+#undef FILTER_TYPE8
+
+#undef AS_INPUT_TYPE
+#undef AS_INPUT_TYPE2
+#undef AS_INPUT_TYPE4
+#undef AS_INPUT_TYPE8
+
+#undef AS_FILTER_TYPE8
+
+#undef INPUT_BLOCK_READ
+#undef INPUT_BLOCK_READ2
+#undef INPUT_BLOCK_READ4
+#undef INPUT_BLOCK_READ8
+
+#undef FILTER_BLOCK_READ8
+
+#undef OUTPUT_BLOCK_WRITE
+#undef OUTPUT_BLOCK_WRITE2
+#undef OUTPUT_BLOCK_WRITE4
+#undef OUTPUT_BLOCK_WRITE8
index 7a98d24..25a2b36 100644 (file)
@@ -14,6 +14,7 @@
 
 #include "include/include_all.cl"
 #include "include/unit_type.cl"
+#include "include/mmad.cl"
 
 #define GET_SRC(data, id) AS_TYPE(MAKE_VECTOR_TYPE(UNIT_TYPE, X_BLOCK_SIZE),                             \
                             intel_sub_group_shuffle(                                                     \
index db9b893..0764640 100644 (file)
@@ -63,7 +63,8 @@
 #define CAN_USE_BLOCK_READ                                          \
     (STRIDE_SIZE_X * OUTPUT_BLOCK_WIDTH * UNIT_TYPE_SIZE) % 4 == 0  \
     && (INPUT0_SIZE_X_WITH_PADDING * UNIT_TYPE_SIZE) % 4 == 0       \
-    && (INPUT0_PADDING_OFFSET_SIZE_X * UNIT_TYPE_SIZE) % 4 == 0
+    && (INPUT0_PADDING_OFFSET_SIZE_X * UNIT_TYPE_SIZE) % 4 == 0     \
+    && (INPUT0_PAD_BEFORE_FEATURE_NUM * UNIT_TYPE_SIZE) % 4 == 0
 
 #define ALIGNED_IFM_NUM (((FILTER_IFM_NUM + FSV - 1) / FSV) * FSV)
 
@@ -101,6 +102,7 @@ KERNEL(convolution_gpu_bfyx_to_fs_byx_fsv32)(
 
     uint input_offset = oc * STRIDE_SIZE_X + INPUT0_PADDING_OFFSET_SIZE_X;
     input_offset += (or * STRIDE_SIZE_Y + INPUT0_PADDING_OFFSET_SIZE_Y) * INPUT0_SIZE_X_WITH_PADDING;
+    input_offset += INPUT0_PAD_BEFORE_FEATURE_NUM * INPUT0_FEATURE_PITCH;
     input_offset += b * INPUT0_BATCH_PITCH;
 
     uint weight_offset = 0;
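The added line folds the input's before-padding along the feature axis into the base offset, so block reads start at the true first feature. A scalar restatement of the offset arithmetic (parameter names are illustrative):

#include <cstddef>

// Illustrative base-offset computation for a padded bfyx input buffer.
static size_t input_base_offset(size_t out_x, size_t stride_x, size_t pad_offset_x,
                                size_t out_y, size_t stride_y, size_t pad_offset_y,
                                size_t padded_row_pitch,
                                size_t pad_before_f, size_t feature_pitch,
                                size_t batch, size_t batch_pitch) {
    size_t off = out_x * stride_x + pad_offset_x;
    off += (out_y * stride_y + pad_offset_y) * padded_row_pitch;
    off += pad_before_f * feature_pitch;  // the term this patch adds
    off += batch * batch_pitch;
    return off;
}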
diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/convolution_gpu_mmad_bfyx_b_fs_yx_fsv32.cl b/inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/convolution_gpu_mmad_bfyx_b_fs_yx_fsv32.cl
deleted file mode 100644 (file)
index 36c31da..0000000
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright (c) 2019 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "include/common.cl"
-
-#include "include/data_types.cl"
-
-#include "include/fetch.cl"
-#include "include/imad.cl"
-
-#define CEIL_DIV(x, y) (1 + ((x) - 1) / (y))
-#define AS_TYPE(type, val) CAT(as_, type)(val)
-
-#ifdef ACCUMULATOR_TYPE
-#undef ACCUMULATOR_TYPE
-#endif
-
-#ifdef TO_ACCUMULATOR_TYPE
-#undef TO_ACCUMULATOR_TYPE
-#endif
-
-#if QUANTIZATION_TERM
-#define ACCUMULATOR_TYPE int
-#define TO_ACCUMULATOR_TYPE(x) convert_int(x)
-#define ACTIVATION_TYPE float
-#define TO_ACTIVATION_TYPE(x) convert_float(x)
-
-#if OUTPUT_X_BLOCK_SIZE == 8
-    #define PACKED_TYPE_VEC MAKE_VECTOR_TYPE(PACKED_IN_TYPE, 8)
-    #define ACCUMULATOR_TYPE_VEC int8
-    #define TO_ACCUMULATOR_TYPE_VEC(x) convert_int8(x)
-    #define ACTIVATION_TYPE_VEC float8
-    #define TO_ACTIVATION_TYPE_VEC(x) convert_float8(x)
-    #define BLOCK_WRITE(ptr, val) intel_sub_group_block_write_us8((__global ushort*)(ptr), as_ushort8(val));
-#elif OUTPUT_X_BLOCK_SIZE == 4
-    #define PACKED_TYPE_VEC MAKE_VECTOR_TYPE(PACKED_IN_TYPE, 4)
-    #define ACCUMULATOR_TYPE_VEC int4
-    #define TO_ACCUMULATOR_TYPE_VEC(x) convert_int4(x)
-    #define ACTIVATION_TYPE_VEC float4
-    #define TO_ACTIVATION_TYPE_VEC(x) convert_float4(x)
-    #define BLOCK_WRITE(ptr, val) intel_sub_group_block_write_us4((__global ushort*)(ptr), as_ushort4(val));
-#else
-#error "convolution_gpu_mmad_bfyx_b_fs_yx_fsv32: Unsupported block size"
-#endif
-
-#else // QUANTIZATION_TERM
-#error "convolution_gpu_mmad_bfyx_b_fs_yx_fsv32: invalid parameters: quantization term is expected to be true"
-#endif
-
-#define AS_TYPE_N_(type, n, x) as_##type##n(x)
-#define AS_TYPE_N(type, n, x) AS_TYPE_N_(type, n, x)
-#define AS_INPUT0_TYPE_4(x) AS_TYPE_N(INPUT0_TYPE, 4, x)
-
-__attribute__((intel_reqd_sub_group_size(SUB_GROUP_SIZE)))
-__attribute__((reqd_work_group_size(SUB_GROUP_SIZE, 1, 1)))
-KERNEL(convolution_mmad_bfyx_b_fs_yx_fsv32)(
-    __global INPUT0_TYPE* input,
-    __global PACKED_OUT_TYPE* output,
-    __global FILTER_TYPE* weights,
-#if BIAS_TERM
-    __global BIAS_TYPE* biases,
-#endif
-#if ASYMMETRIC_WEIGHTS_QUANTIZATION
-    const __global WEIGHTS_ZERO_POINTS_TYPE *weights_zp,
-#endif
-#if ASYMMETRIC_DATA_QUANTIZATION
-    const __global ACTIVATIONS_ZERO_POINTS_TYPE *activations_zp,
-    const __global COMPENSATION_TYPE *compensation,
-#endif
-#if HAS_FUSED_OPS_DECLS
-    FUSED_OPS_DECLS,
-#endif
-    uint split_idx)
-{
-    const uint b = get_global_id(2);
-    const uint fg = get_group_id(0);
-    const uint x = ((uint)get_global_id(1) % CEIL_DIV(OUTPUT_SIZE_X, OUTPUT_X_BLOCK_SIZE)) * OUTPUT_X_BLOCK_SIZE;
-    const uint y = (uint)get_global_id(1) / CEIL_DIV(OUTPUT_SIZE_X, OUTPUT_X_BLOCK_SIZE);
-
-    const uint lid = get_sub_group_local_id();
-
-    const int input_x = x * STRIDE_SIZE_X - PADDING_SIZE_X;
-    const int input_y = y * STRIDE_SIZE_Y - PADDING_SIZE_Y;
-
-    ACCUMULATOR_TYPE_VEC acc[2] = { 0 }; // 2*8 packed channels * OUTPUT_X_BLOCK_SIZE
-#if ASYMMETRIC_WEIGHTS_QUANTIZATION
-    ACCUMULATOR_TYPE_VEC acc_assym_weights = 0;
-#endif
-    const uint in_split_offset = split_idx * INPUT0_FEATURE_PITCH * FILTER_IFM_NUM;
-
-    const uint input_offset = b*INPUT0_BATCH_PITCH + INPUT0_OFFSET + in_split_offset;
-
-    uint filter_idx = fg * FILTER_SIZE_X * FILTER_SIZE_Y * 4 * OSV;
-
-    int in_addr = input_offset + input_x * INPUT0_X_PITCH + input_y * INPUT0_Y_PITCH;
-#if ASYMMETRIC_WEIGHTS_QUANTIZATION
-    const char4 multiplier = (char4)(1, 1, 1, 0);
-#endif
-
-    __attribute__((opencl_unroll_hint(FILTER_SIZE_Y)))
-    for (int kh = 0; kh < FILTER_SIZE_Y ; ++kh) {
-        bool y_cross_fm = input_y + kh*DILATION_SIZE_Y < 0 || input_y + kh*DILATION_SIZE_Y >= INPUT0_SIZE_Y;
-#if !ASYMMETRIC_DATA_QUANTIZATION
-        if (y_cross_fm)
-            continue;
-#endif
-
-        PACKED_IN_TYPE line_cache[INPUT_LINE_SIZE] = {0};
-        {
-            int xb = 0;
-            for (; xb < INPUT_LINE_SIZE; xb++) {
-#if ASYMMETRIC_DATA_QUANTIZATION
-                bool x_cross_fm = input_x + xb < 0 || input_x + xb >= INPUT0_SIZE_X;
-                if (y_cross_fm || x_cross_fm) {
-                    const int azp_idx = (4*lid) % ACTIVATIONS_ZERO_POINTS_FEATURE_NUM;
-                    char4 zp = as_char4(((const __global uint*)(activations_zp + azp_idx))[0]);
-                    zp[3] = 0;
-                    line_cache[xb] = AS_PACKED_IN_TYPE(zp);
-                }
-                else
-#endif
-                {
-                    MAKE_VECTOR_TYPE(INPUT0_TYPE, 4) src = 0;
-                    src[0] = input[in_addr + 0 * INPUT0_FEATURE_PITCH
-                                           + kh * DILATION_SIZE_Y * INPUT0_Y_PITCH
-                                           + xb * INPUT0_X_PITCH];
-                    src[1] = input[in_addr + 1 * INPUT0_FEATURE_PITCH
-                                           + kh * DILATION_SIZE_Y * INPUT0_Y_PITCH
-                                           + xb * INPUT0_X_PITCH];
-                    src[2] = input[in_addr + 2 * INPUT0_FEATURE_PITCH
-                                           + kh * DILATION_SIZE_Y * INPUT0_Y_PITCH
-                                           + xb * INPUT0_X_PITCH];
-
-                    line_cache[xb] = AS_PACKED_IN_TYPE(src);
-                }
-            }
-        }
-
-        __attribute__((opencl_unroll_hint(FILTER_SIZE_X)))
-        for (uint kw = 0; kw < FILTER_SIZE_X ; ++kw) {
-            const uint f_off = filter_idx
-                             + kh * OSV * 4 * FILTER_SIZE_X
-                             + kw * OSV * 4;
-
-            int weights_data0 = as_int(intel_sub_group_block_read((const __global uint*)(weights + f_off)));
-            int weights_data1 = as_int(intel_sub_group_block_read((const __global uint*)(weights + f_off + 16*4)));
-
-            PACKED_TYPE_VEC src;
-
-            __attribute__((opencl_unroll_hint(OUTPUT_X_BLOCK_SIZE)))
-            for (int i = 0; i < OUTPUT_X_BLOCK_SIZE; i++) {
-                src[i] = line_cache[kw*DILATION_SIZE_X + STRIDE_SIZE_X*i];
-                acc[0][i] = IMAD(acc[0][i], AS_INPUT0_TYPE_4(src[i]), as_char4(weights_data0));
-                acc[1][i] = IMAD(acc[1][i], AS_INPUT0_TYPE_4(src[i]), as_char4(weights_data1));
-
-#if ASYMMETRIC_WEIGHTS_QUANTIZATION
-                acc_assym_weights[i] = IMAD(acc_assym_weights[i], AS_INPUT0_TYPE_4(src[i]), multiplier);
-#endif
-            }
-        }
-    }
-
-#if BIAS_TERM
-#if   BIAS_PER_OUTPUT
-    const uint bias_index = GET_DATA_INDEX(BIAS, b, f, y, x);
-#elif BIAS_PER_OFM
-    const uint bias_index = fg*OSV;
-#endif
-#endif
-
-#if OUTPUT_IS_FP
-    MAKE_VECTOR_TYPE(PACKED_OUT_TYPE, OUTPUT_X_BLOCK_SIZE) dst[2];
-
-    for (int i = 0; i < OUTPUT_X_BLOCK_SIZE; i++) {
-#if BIAS_TERM
-        ACTIVATION_TYPE res0 = TO_ACTIVATION_TYPE(acc[0][i]) + (ACTIVATION_TYPE)(biases[bias_index + 2*lid+0]);
-        ACTIVATION_TYPE res1 = TO_ACTIVATION_TYPE(acc[1][i]) + (ACTIVATION_TYPE)(biases[bias_index + 2*lid+1]);
-#else
-        ACTIVATION_TYPE res0 = TO_ACTIVATION_TYPE(acc[0][i]);
-        ACTIVATION_TYPE res1 = TO_ACTIVATION_TYPE(acc[1][i]);
-#endif
-
-#if ASYMMETRIC_WEIGHTS_QUANTIZATION
-        res0 -= acc_assym_weights[i] * TO_ACCUMULATOR_TYPE(weights_zp[fg * OSV + 2 * lid + 0]);
-        res1 -= acc_assym_weights[i] * TO_ACCUMULATOR_TYPE(weights_zp[fg * OSV + 2 * lid + 1]);
-#endif  // ASYMMETRIC_WEIGHTS_QUANTIZATION
-
-#if ASYMMETRIC_DATA_QUANTIZATION
-        res0 += compensation[fg*OSV + 2*lid + 0];
-        res1 += compensation[fg*OSV + 2*lid + 1];
-#endif  // ASYMMETRIC_DATA_QUANTIZATION
-
-#if HAS_FUSED_OPS
-        { FUSED_OPS_0; dst[0][i] = FUSED_OPS_RESULT_0; };
-        { FUSED_OPS_1; dst[1][i] = FUSED_OPS_RESULT_1; };
-#else
-        dst[0][i] = TO_OUTPUT_TYPE(res0);
-        dst[1][i] = TO_OUTPUT_TYPE(res1);
-#endif
-    }
-
-    const uint out_split_offset = split_idx * OUTPUT_FEATURE_PITCH * OUTPUT_FEATURE_NUM;
-    for (int i = 0; i < OUTPUT_X_BLOCK_SIZE; i++) {
-        for (int ofm = 0; ofm < 2; ofm++) {
-            const uint dst_index = OUTPUT_GET_INDEX(b, fg*OSV + ofm + 2*lid, y, x+i) + out_split_offset;
-            if (x + i < OUTPUT_SIZE_X) {
-                output[dst_index] = dst[ofm][i];
-            }
-        }
-    }
-#else  // OUTPUT_IS_FP
-    MAKE_VECTOR_TYPE(PACKED_OUT_TYPE, OUTPUT_X_BLOCK_SIZE) dst;
-
-    for (int i = 0; i < OUTPUT_X_BLOCK_SIZE; i++) {
-#if BIAS_TERM
-        ACTIVATION_TYPE res0 = TO_ACTIVATION_TYPE(acc[0][i]) + (ACTIVATION_TYPE)(biases[bias_index + 2*lid+0]);
-        ACTIVATION_TYPE res1 = TO_ACTIVATION_TYPE(acc[1][i]) + (ACTIVATION_TYPE)(biases[bias_index + 2*lid+1]);
-#else
-        ACTIVATION_TYPE res0 = TO_ACTIVATION_TYPE(acc[0][i]);
-        ACTIVATION_TYPE res1 = TO_ACTIVATION_TYPE(acc[1][i]);
-#endif
-
-#if ASYMMETRIC_WEIGHTS_QUANTIZATION
-        res0 -= acc_assym_weights[i] * TO_ACCUMULATOR_TYPE(weights_zp[fg * OSV + 2 * lid + 0]);
-        res1 -= acc_assym_weights[i] * TO_ACCUMULATOR_TYPE(weights_zp[fg * OSV + 2 * lid + 1]);
-#endif  // ASYMMETRIC_WEIGHTS_QUANTIZATION
-
-#if ASYMMETRIC_DATA_QUANTIZATION
-        res0 += compensation[fg*OSV + 2*lid + 0];
-        res1 += compensation[fg*OSV + 2*lid + 1];
-#endif  // ASYMMETRIC_DATA_QUANTIZATION
-
-        MAKE_VECTOR_TYPE(OUTPUT_TYPE, 2) pack;
-#if HAS_FUSED_OPS
-        { FUSED_OPS_0; pack[0] = FUSED_OPS_RESULT_0; };
-        { FUSED_OPS_1; pack[1] = FUSED_OPS_RESULT_1; };
-#else
-        pack[0] = TO_OUTPUT_TYPE(res0);
-        pack[1] = TO_OUTPUT_TYPE(res1);
-#endif
-        dst[i] = AS_PACKED_OUT_TYPE(pack);
-    }
-
-    const uint out_split_offset = split_idx * OUTPUT_FEATURE_PITCH * OUTPUT_FEATURE_NUM;
-    const bool full_x = OUTPUT_SIZE_X % OUTPUT_X_BLOCK_SIZE == 0 || x + OUTPUT_X_BLOCK_SIZE <= OUTPUT_SIZE_X;
-    const bool full_f = OUTPUT_FEATURE_NUM % OSV == 0 || (fg + 1) * OSV <= OUTPUT_FEATURE_NUM;
-    if (full_x && full_f) {
-        const uint dst_index = (OUTPUT_GET_INDEX(b, fg*OSV, y, x) + out_split_offset) / 2;
-        BLOCK_WRITE(output + dst_index, dst);
-    } else {
-        for (int i = 0; i < OUTPUT_X_BLOCK_SIZE; i++) {
-            const bool full_it_x = OUTPUT_SIZE_X % OUTPUT_X_BLOCK_SIZE == 0 || x + i < OUTPUT_SIZE_X;
-            const bool full_sgl_f = OUTPUT_FEATURE_NUM % OSV == 0 || fg * OSV + 2 * lid < OUTPUT_FEATURE_NUM;
-            if (full_it_x && full_sgl_f) {
-                const uint dst_index = OUTPUT_GET_INDEX(b, fg*OSV + 2*lid, y, x+i) + out_split_offset;
-                output[dst_index/2] = dst[i];
-            }
-        }
-    }
-#endif  // OUTPUT_IS_FP
-}
-#undef CEIL_DIV
-#undef PACKED_TYPE_VEC
-#undef ACCUMULATOR_TYPE_VEC
-#undef TO_ACCUMULATOR_TYPE_VEC
-#undef ACTIVATION_TYPE_VEC
-#undef TO_ACTIVATION_TYPE_VEC
-#undef MMAD
-
-#undef AS_TYPE_N_
-#undef AS_TYPE_N
-#undef AS_INPUT0_TYPE_4
diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/convolution_gpu_mmad_bfyx_to_b_fs_yx_fsv32.cl b/inference-engine/thirdparty/clDNN/kernel_selector/core/cl_kernels/convolution_gpu_mmad_bfyx_to_b_fs_yx_fsv32.cl
new file mode 100644 (file)
index 0000000..fe1c406
--- /dev/null
@@ -0,0 +1,396 @@
+// Copyright (c) 2019 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "include/common.cl"
+
+#include "include/data_types.cl"
+
+#include "include/fetch.cl"
+#include "include/imad.cl"
+#include "include/mmad.cl"
+
+#define CEIL_DIV(x, y) (1 + ((x) - 1) / (y))
+#define AS_TYPE(type, val) CAT(as_, type)(val)
+
+#define ISV 4
+
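CEIL_DIV is the usual round-up integer division; a quick check of the arithmetic:

    // CEIL_DIV(10, 8) == 1 + (10 - 1) / 8 == 1 + 1 == 2   (two x-blocks)
    // CEIL_DIV(8,  8) == 1 + (8 - 1)  / 8 == 1 + 0 == 1
    // (the form assumes x > 0: CEIL_DIV(0, y) would yield 1, not 0)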
+#ifdef ACCUMULATOR_TYPE
+#undef ACCUMULATOR_TYPE
+#endif
+
+#ifdef TO_ACCUMULATOR_TYPE
+#undef TO_ACCUMULATOR_TYPE
+#endif
+
+#if QUANTIZATION_TERM
+#define ACCUMULATOR_TYPE int
+#define TO_ACCUMULATOR_TYPE(x) convert_int(x)
+#define ACTIVATION_TYPE float
+#define TO_ACTIVATION_TYPE(x) convert_float(x)
+
+#if OUTPUT_X_BLOCK_SIZE == 8
+    #define PACKED_TYPE_VEC MAKE_VECTOR_TYPE(PACKED_IN_TYPE, 8)
+    #define ACCUMULATOR_TYPE_VEC int8
+    #define TO_ACCUMULATOR_TYPE_VEC(x) convert_int8(x)
+    #define ACTIVATION_TYPE_VEC float8
+    #define TO_ACTIVATION_TYPE_VEC(x) convert_float8(x)
+#if OUTPUT_LAYOUT_B_FS_YX_FSV32
+    #define BLOCK_WRITE(ptr, val) intel_sub_group_block_write_us8((__global ushort*)(ptr), as_ushort8(val));
+#else // OUTPUT_LAYOUT_B_FS_YX_FSV32
+    #define BLOCK_WRITE(ptr, val) BLOCK_WRITE_UC_8((__global uchar*)(ptr), as_uchar8(val))
+#endif // OUTPUT_LAYOUT_B_FS_YX_FSV32
+#elif OUTPUT_X_BLOCK_SIZE == 4
+    #define PACKED_TYPE_VEC MAKE_VECTOR_TYPE(PACKED_IN_TYPE, 4)
+    #define ACCUMULATOR_TYPE_VEC int4
+    #define TO_ACCUMULATOR_TYPE_VEC(x) convert_int4(x)
+    #define ACTIVATION_TYPE_VEC float4
+    #define TO_ACTIVATION_TYPE_VEC(x) convert_float4(x)
+#if OUTPUT_LAYOUT_B_FS_YX_FSV32
+    #define BLOCK_WRITE(ptr, val) intel_sub_group_block_write_us4((__global ushort*)(ptr), as_ushort4(val));
+#else // OUTPUT_LAYOUT_B_FS_YX_FSV32
+    #define BLOCK_WRITE(ptr, val) BLOCK_WRITE_UC_4((__global uchar*)(ptr), as_uchar4(val))
+#endif // OUTPUT_LAYOUT_B_FS_YX_FSV32
+#else
+#error "convolution_gpu_mmad_bfyx_b_fs_yx_fsv32: Unsupported block size"
+#endif
+
+#else // QUANTIZATION_TERM
+#error "convolution_gpu_mmad_bfyx_b_fs_yx_fsv32: invalid parameters: quantization term is expected to be true"
+#endif
+
+#define AS_TYPE_N_(type, n, x) as_##type##n(x)
+#define AS_TYPE_N(type, n, x) AS_TYPE_N_(type, n, x)
+#define AS_INPUT0_TYPE_4(x) AS_TYPE_N(INPUT0_TYPE, 4, x)
+
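The two-level AS_TYPE_N_ / AS_TYPE_N pair is the standard preprocessor trick to force the arguments to expand before token pasting; with INPUT0_TYPE defined as char:

    // AS_INPUT0_TYPE_4(x) -> AS_TYPE_N(char, 4, x) -> AS_TYPE_N_(char, 4, x) -> as_char4(x)
    // a single-level as_##type##n would paste the literal token as_INPUT0_TYPE4 instead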
+__attribute__((intel_reqd_sub_group_size(SUB_GROUP_SIZE)))
+__attribute__((reqd_work_group_size(LWS0, LWS1, LWS2)))
+KERNEL(convolution_mmad_bfyx_to_b_fs_yx_fsv32)(
+    __global INPUT0_TYPE* input,
+#if OUTPUT_LAYOUT_B_FS_YX_FSV32
+    __global PACKED_OUT_TYPE* output,
+#else // OUTPUT_LAYOUT_B_FS_YX_FSV32
+    __global OUTPUT_TYPE* output,
+#endif //OUTPUT_LAYOUT_B_FS_YX_FSV32
+    __global FILTER_TYPE* weights,
+#if BIAS_TERM
+    __global BIAS_TYPE* biases,
+#endif
+#if ASYMMETRIC_WEIGHTS_QUANTIZATION
+    const __global WEIGHTS_ZERO_POINTS_TYPE *weights_zp,
+#endif
+#if ASYMMETRIC_DATA_QUANTIZATION
+    const __global ACTIVATIONS_ZERO_POINTS_TYPE *activations_zp,
+    const __global COMPENSATION_TYPE *compensation,
+#endif
+#if HAS_FUSED_OPS_DECLS
+    FUSED_OPS_DECLS,
+#endif
+    uint split_idx)
+{
+    const int fg = get_group_id(0);
+    const int x = (int)get_global_id(1) * OUTPUT_X_BLOCK_SIZE;
+    const int b = (int)get_global_id(2) / OUTPUT_SIZE_Y;
+    const int y = (int)get_global_id(2) % OUTPUT_SIZE_Y;
+
+    const int lid = get_sub_group_local_id();
+    const int group_id = get_group_id(1);
+    const int sg = get_sub_group_id();
+
+    const int x_wg_start = (group_id * GROUP_SIZE) * STRIDE_SIZE_X - PADDING_SIZE_X;
+    const int input_y = y * STRIDE_SIZE_Y - PADDING_SIZE_Y;
+
+    ACCUMULATOR_TYPE_VEC acc[2] = { 0 }; // 2*16 packed channels * OUTPUT_X_BLOCK_SIZE
+#if ASYMMETRIC_WEIGHTS_QUANTIZATION
+    ACCUMULATOR_TYPE_VEC acc_assym_weights = 0;
+#endif
+    const int input_offset = b*INPUT0_BATCH_PITCH + INPUT0_OFFSET + input_y * INPUT0_Y_PITCH;
+    int filter_idx = fg * FILTER_SIZE_X * FILTER_SIZE_Y * ISV * OSV;
+#if ASYMMETRIC_WEIGHTS_QUANTIZATION
+    char4 multiplier = 0; // zero-init: the loop below only sets the first INPUT0_FEATURE_NUM lanes
+    for (int i = 0; i < INPUT0_FEATURE_NUM; i++)
+        multiplier[i] = 1;
+#endif // ASYMMETRIC_WEIGHTS_QUANTIZATION
+
+#if ASYMMETRIC_DATA_QUANTIZATION
+    char4 zp = as_char4(((const __global uint*)(activations_zp))[0]);
+#if INPUT0_FEATURE_NUM == 3
+    zp[3] = 0;
+#endif // INPUT0_FEATURE_NUM == 3
+#endif // ASYMMETRIC_DATA_QUANTIZATION
+
+    __local PACKED_IN_TYPE slm[SLM_LINE_SIZE*FILTER_SIZE_Y];
+
+    for (int kh = 0; kh < FILTER_SIZE_Y ; ++kh) {
+        __local PACKED_IN_TYPE* slm_block = slm + kh*SLM_LINE_SIZE + sg*SLM_CHUNK_SIZE;
+        bool y_cross_fm = input_y + kh*DILATION_SIZE_Y < 0 || input_y + kh*DILATION_SIZE_Y >= INPUT0_SIZE_Y;
+        if (y_cross_fm) {
+#if ASYMMETRIC_DATA_QUANTIZATION
+            for (int c = 0; c < SLM_CHUNK_SIZE; c += SUB_GROUP_SIZE) {
+                if (sg*SLM_CHUNK_SIZE + c + lid < SLM_LINE_SIZE)
+                    slm_block[c + lid] = AS_PACKED_IN_TYPE(zp);
+            }
+#if SLM_TAIL > 0
+            if (sg == LWS1 - 1) {
+                __local PACKED_IN_TYPE* slm_block_tail = slm + kh*SLM_LINE_SIZE + LWS1*SLM_CHUNK_SIZE;
+                slm_block_tail[lid] = AS_PACKED_IN_TYPE(zp);
+            }
+#endif // SLM_TAIL > 0
+#endif // ASYMMETRIC_DATA_QUANTIZATION
+            continue;
+        }
+
+        {
+            for (int c = 0; c < SLM_CHUNK_SIZE; c += SUB_GROUP_SIZE) {
+                const int x_chunk = x_wg_start + sg*SLM_CHUNK_SIZE + c;
+                bool x_cross_fm = x_chunk + lid < 0 || x_chunk + lid >= INPUT0_SIZE_X;
+
+                if (!x_cross_fm) {
+                    MAKE_VECTOR_TYPE(INPUT0_TYPE, ISV) src = 0;
+                    __attribute__((opencl_unroll_hint(INPUT0_FEATURE_NUM)))
+                    for (int i = 0; i < INPUT0_FEATURE_NUM; i++) {
+                        src[i] = input[input_offset + i * INPUT0_FEATURE_PITCH
+                                                    + kh * DILATION_SIZE_Y * INPUT0_Y_PITCH
+                                                    + (x_chunk + lid)* INPUT0_X_PITCH];
+                    }
+                    slm_block[c + lid] = AS_PACKED_IN_TYPE(src);
+                } else {
+#if ASYMMETRIC_DATA_QUANTIZATION
+                    slm_block[c + lid] = AS_PACKED_IN_TYPE(zp);
+#else  // ASYMMETRIC_DATA_QUANTIZATION
+                    slm_block[c + lid] = 0;
+#endif  // ASYMMETRIC_DATA_QUANTIZATION
+                }
+            }
+#if SLM_TAIL > 0
+            if (sg == LWS1 - 1) {
+                __local PACKED_IN_TYPE* slm_block_tail = slm + kh*SLM_LINE_SIZE + LWS1*SLM_CHUNK_SIZE;
+                const int x_chunk = x_wg_start + LWS1*SLM_CHUNK_SIZE;
+                bool x_cross_fm = x_chunk + lid >= INPUT0_SIZE_X;
+                if (!x_cross_fm) {
+                    MAKE_VECTOR_TYPE(INPUT0_TYPE, ISV) src = 0;
+                    __attribute__((opencl_unroll_hint(INPUT0_FEATURE_NUM)))
+                    for (int i = 0; i < INPUT0_FEATURE_NUM; i++) {
+                        src[i] = input[input_offset + i * INPUT0_FEATURE_PITCH
+                                                    + kh * DILATION_SIZE_Y * INPUT0_Y_PITCH
+                                                    + (x_chunk + lid)* INPUT0_X_PITCH];
+                    }
+                    slm_block_tail[lid] = AS_PACKED_IN_TYPE(src);
+                } else {
+#if ASYMMETRIC_DATA_QUANTIZATION
+                    slm_block_tail[lid] = AS_PACKED_IN_TYPE(zp);
+#else  // ASYMMETRIC_DATA_QUANTIZATION
+                    slm_block_tail[lid] = 0;
+#endif  // ASYMMETRIC_DATA_QUANTIZATION
+                }
+            }
+#endif
+        }
+    }
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+
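A note on the staging above: the work-group cooperatively copies one packed, pre-padded input line per filter row into SLM, then each sub-group reads its window back out below. Index sketch, derived from the writes:

    // slm holds FILTER_SIZE_Y lines of SLM_LINE_SIZE packed elements; input
    // column (x_wg_start + c) of filter row kh lives at slm[kh * SLM_LINE_SIZE + c].
    // Sub-group sg fills columns [sg*SLM_CHUNK_SIZE, (sg+1)*SLM_CHUNK_SIZE),
    // the last sub-group also covers the SLM_TAIL remainder, and the barrier
    // publishes the whole tile before the accumulation loops re-read it.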
+    __attribute__((opencl_unroll_hint(FILTER_SIZE_Y)))
+    for (int kh = 0; kh < FILTER_SIZE_Y ; ++kh) {
+        bool y_cross_fm = input_y + kh*DILATION_SIZE_Y < 0 || input_y + kh*DILATION_SIZE_Y >= INPUT0_SIZE_Y;
+#if !ASYMMETRIC_DATA_QUANTIZATION
+        if (y_cross_fm)
+            continue;
+#endif
+        PACKED_IN_TYPE line_cache[INPUT_LINE_SIZE];
+        for (int xb = 0; xb < INPUT_LINE_SIZE; xb++) {
+            line_cache[xb] = slm[kh*SLM_LINE_SIZE + sg*OUTPUT_X_BLOCK_SIZE*STRIDE_SIZE_X + xb];
+        }
+
+        __attribute__((opencl_unroll_hint(FILTER_SIZE_X)))
+        for (uint kw = 0; kw < FILTER_SIZE_X ; ++kw) {
+            const uint f_off = filter_idx
+                             + kh * OSV * ISV * FILTER_SIZE_X
+                             + kw * OSV * ISV;
+
+            int weights_data0 = as_int(intel_sub_group_block_read((const __global uint*)(weights + f_off)));
+            int weights_data1 = as_int(intel_sub_group_block_read((const __global uint*)(weights + f_off + SUB_GROUP_SIZE*ISV)));
+
+            PACKED_TYPE_VEC src;
+
+            __attribute__((opencl_unroll_hint(OUTPUT_X_BLOCK_SIZE)))
+            for (int i = 0; i < OUTPUT_X_BLOCK_SIZE; i++) {
+                // equivalent direct SLM read: src[i] = slm[kh*SLM_LINE_SIZE + (sg*OUTPUT_X_BLOCK_SIZE + i)*STRIDE_SIZE_X + kw*DILATION_SIZE_X];
+                src[i] = line_cache[kw*DILATION_SIZE_X + STRIDE_SIZE_X*i];
+                acc[0][i] = IMAD(acc[0][i], AS_INPUT0_TYPE_4(src[i]), as_char4(weights_data0));
+                acc[1][i] = IMAD(acc[1][i], AS_INPUT0_TYPE_4(src[i]), as_char4(weights_data1));
+
+#if ASYMMETRIC_WEIGHTS_QUANTIZATION
+                acc_assym_weights[i] = IMAD(acc_assym_weights[i], AS_INPUT0_TYPE_4(src[i]), multiplier);
+#endif
+            }
+        }
+    }
+
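IMAD here is the 4-way int8 dot-product-accumulate from include/imad.cl; for char4 a, b and int acc it computes:

    // IMAD(acc, a, b) == acc + a.s0*b.s0 + a.s1*b.s1 + a.s2*b.s2 + a.s3*b.s3
    // so each lane of acc[0]/acc[1] folds ISV (= 4) input channels per filter
    // tap, one output-channel pair per sub-group lane.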
+#if BIAS_TERM
+    const uint bias_index = fg*OSV;
+#endif
+
+#if OUTPUT_IS_FP
+    MAKE_VECTOR_TYPE(PACKED_OUT_TYPE, OUTPUT_X_BLOCK_SIZE) dst[2];
+
+    for (int i = 0; i < OUTPUT_X_BLOCK_SIZE; i++) {
+#if BIAS_TERM
+        ACTIVATION_TYPE res0 = TO_ACTIVATION_TYPE(acc[0][i]) + (ACTIVATION_TYPE)(biases[bias_index + lid]);
+        ACTIVATION_TYPE res1 = TO_ACTIVATION_TYPE(acc[1][i]) + (ACTIVATION_TYPE)(biases[bias_index + lid + SUB_GROUP_SIZE]);
+#else
+        ACTIVATION_TYPE res0 = TO_ACTIVATION_TYPE(acc[0][i]);
+        ACTIVATION_TYPE res1 = TO_ACTIVATION_TYPE(acc[1][i]);
+#endif
+
+#if ASYMMETRIC_WEIGHTS_QUANTIZATION
+        res0 -= acc_assym_weights[i] * TO_ACCUMULATOR_TYPE(weights_zp[fg * OSV + lid + 0]);
+        res1 -= acc_assym_weights[i] * TO_ACCUMULATOR_TYPE(weights_zp[fg * OSV + lid + 16]);
+#endif  // ASYMMETRIC_WEIGHTS_QUANTIZATION
+
+#if ASYMMETRIC_DATA_QUANTIZATION
+        res0 += compensation[fg*OSV + lid];
+        res1 += compensation[fg*OSV + lid + SUB_GROUP_SIZE];
+#endif  // ASYMMETRIC_DATA_QUANTIZATION
+#if HAS_FUSED_OPS
+        { FUSED_OPS_0; dst[0][i] = FUSED_OPS_RESULT_0; };
+        { FUSED_OPS_1; dst[1][i] = FUSED_OPS_RESULT_1; };
+#else
+        dst[0][i] = TO_OUTPUT_TYPE(res0);
+        dst[1][i] = TO_OUTPUT_TYPE(res1);
+#endif
+    }
+
+#if OUTPUT_LAYOUT_B_FS_YX_FSV32
+    for (int i = 0; i < OUTPUT_X_BLOCK_SIZE; i++) {
+        for (int ofm = 0; ofm < 2; ofm++) {
+            const uint dst_index = OUTPUT_GET_INDEX(b, fg*OSV + SUB_GROUP_SIZE*ofm + lid, y, x+i);
+            if (x + i < OUTPUT_SIZE_X) {
+                output[dst_index] = dst[ofm][i];
+            }
+        }
+    }
+#else // OUTPUT_LAYOUT_B_FS_YX_FSV32
+    for (int i = 0; i < OUTPUT_X_BLOCK_SIZE; i++) {
+#if OUTPUT_FEATURE_NUM > 16
+        for (int ofm = 0; ofm < 2; ofm++) {
+            const uint dst_index = OUTPUT_GET_INDEX(b, fg*OSV + SUB_GROUP_SIZE*ofm + lid, y, x+i);
+            if (x + i < OUTPUT_SIZE_X && fg*OSV + SUB_GROUP_SIZE*ofm + lid < OUTPUT_FEATURE_NUM) {
+                output[dst_index] = dst[ofm][i];
+            }
+        }
+#else // OUTPUT_FEATURE_NUM > 16
+        const uint dst_index = OUTPUT_GET_INDEX(b, fg*OSV + lid, y, x+i);
+        if (x + i < OUTPUT_SIZE_X && fg*OSV + lid < OUTPUT_FEATURE_NUM) {
+        output[dst_index] = dst[0][i]; // only slice 0 exists when OUTPUT_FEATURE_NUM <= 16 ('ofm' is out of scope here)
+        }
+#endif // OUTPUT_FEATURE_NUM > 16
+    }
+#endif // OUTPUT_LAYOUT_B_FS_YX_FSV32
+
+#else  // OUTPUT_IS_FP
+#if OUTPUT_LAYOUT_B_FS_YX_FSV32
+    MAKE_VECTOR_TYPE(PACKED_OUT_TYPE, OUTPUT_X_BLOCK_SIZE) dst;
+    #define CHANNEL0_OFFSET (2*lid+0)
+    #define CHANNEL1_OFFSET (2*lid+1)
+#else // OUTPUT_LAYOUT_B_FS_YX_FSV32
+    MAKE_VECTOR_TYPE(OUTPUT_TYPE, OUTPUT_X_BLOCK_SIZE) dst[2];
+    #define CHANNEL0_OFFSET (lid)
+    #define CHANNEL1_OFFSET (lid+16)
+#endif // OUTPUT_LAYOUT_B_FS_YX_FSV32
+
+    for (int i = 0; i < OUTPUT_X_BLOCK_SIZE; i++) {
+#if BIAS_TERM
+        ACTIVATION_TYPE res0 = TO_ACTIVATION_TYPE(acc[0][i]) + (ACTIVATION_TYPE)(biases[bias_index + CHANNEL0_OFFSET]);
+        ACTIVATION_TYPE res1 = TO_ACTIVATION_TYPE(acc[1][i]) + (ACTIVATION_TYPE)(biases[bias_index + CHANNEL1_OFFSET]);
+#else
+        ACTIVATION_TYPE res0 = TO_ACTIVATION_TYPE(acc[0][i]);
+        ACTIVATION_TYPE res1 = TO_ACTIVATION_TYPE(acc[1][i]);
+#endif
+
+#if ASYMMETRIC_WEIGHTS_QUANTIZATION
+        res0 -= acc_assym_weights[i] * TO_ACCUMULATOR_TYPE(weights_zp[fg * OSV + CHANNEL0_OFFSET]);
+        res1 -= acc_assym_weights[i] * TO_ACCUMULATOR_TYPE(weights_zp[fg * OSV + CHANNEL1_OFFSET]);
+#endif  // ASYMMETRIC_WEIGHTS_QUANTIZATION
+
+#if ASYMMETRIC_DATA_QUANTIZATION
+        res0 += compensation[fg*OSV + CHANNEL0_OFFSET];
+        res1 += compensation[fg*OSV + CHANNEL1_OFFSET];
+#endif  // ASYMMETRIC_DATA_QUANTIZATION
+
+        MAKE_VECTOR_TYPE(OUTPUT_TYPE, 2) pack;
+#if HAS_FUSED_OPS
+        { FUSED_OPS_0; pack[0] = FUSED_OPS_RESULT_0; };
+        { FUSED_OPS_1; pack[1] = FUSED_OPS_RESULT_1; };
+#else
+        pack[0] = TO_OUTPUT_TYPE(res0);
+        pack[1] = TO_OUTPUT_TYPE(res1);
+#endif
+#if OUTPUT_LAYOUT_B_FS_YX_FSV32
+        dst[i] = AS_PACKED_OUT_TYPE(pack);
+#else // OUTPUT_LAYOUT_B_FS_YX_FSV32
+        dst[0][i] = pack[0];
+        dst[1][i] = pack[1];
+#endif // OUTPUT_LAYOUT_B_FS_YX_FSV32
+    }
+
+    const bool full_x = OUTPUT_SIZE_X % OUTPUT_X_BLOCK_SIZE == 0 || x + OUTPUT_X_BLOCK_SIZE <= OUTPUT_SIZE_X;
+    const bool full_f = OUTPUT_FEATURE_NUM % OSV == 0 || (fg + 1) * OSV <= OUTPUT_FEATURE_NUM;
+#if OUTPUT_LAYOUT_B_FS_YX_FSV32
+    if (full_x && full_f) {
+        const uint dst_index = OUTPUT_GET_INDEX(b, fg*OSV, y, x) / 2;
+        BLOCK_WRITE(output + dst_index, dst);
+    } else {
+        for (int i = 0; i < OUTPUT_X_BLOCK_SIZE; i++) {
+            const bool full_it_x = OUTPUT_SIZE_X % OUTPUT_X_BLOCK_SIZE == 0 || x + i < OUTPUT_SIZE_X;
+            const bool full_sgl_f = OUTPUT_FEATURE_NUM % OSV == 0 || fg * OSV + 2 * lid < OUTPUT_FEATURE_NUM;
+            if (full_it_x && full_sgl_f) {
+                const uint dst_index = OUTPUT_GET_INDEX(b, fg*OSV + 2*lid, y, x+i);
+                output[dst_index/2] = dst[i];
+            }
+        }
+    }
+#else // OUTPUT_LAYOUT_B_FS_YX_FSV32
+    if (full_x && full_f) {
+        const uint dst_index0 = OUTPUT_GET_INDEX(b, fg*OSV, y, x);
+        const uint dst_index1 = OUTPUT_GET_INDEX(b, fg*OSV+16, y, x);
+        BLOCK_WRITE(output + dst_index0, dst[0]);
+        BLOCK_WRITE(output + dst_index1, dst[1]);
+    } else {
+        for (int ofm = 0; ofm < 2; ofm++) {
+            for (int i = 0; i < OUTPUT_X_BLOCK_SIZE; i++) {
+                const bool full_it_x = OUTPUT_SIZE_X % OUTPUT_X_BLOCK_SIZE == 0 || x + i < OUTPUT_SIZE_X;
+                const bool full_sgl_f = OUTPUT_FEATURE_NUM % OSV == 0 || fg * OSV + 16 * ofm + lid < OUTPUT_FEATURE_NUM;
+                if (full_it_x && full_sgl_f) {
+                    const uint dst_index = OUTPUT_GET_INDEX(b, fg*OSV + 16*ofm + lid, y, x+i);
+                    output[dst_index] = dst[ofm][i];
+                }
+            }
+        }
+    }
+#endif // OUTPUT_LAYOUT_B_FS_YX_FSV32
+
+#endif  // OUTPUT_IS_FP
+}
+#undef CEIL_DIV
+#undef PACKED_TYPE_VEC
+#undef ACCUMULATOR_TYPE_VEC
+#undef TO_ACCUMULATOR_TYPE_VEC
+#undef ACTIVATION_TYPE_VEC
+#undef TO_ACTIVATION_TYPE_VEC
+#undef MMAD
+
+#undef AS_TYPE_N_
+#undef AS_TYPE_N
+#undef AS_INPUT0_TYPE_4
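A note on the two store paths above: with a b_fs_yx_fsv32 non-FP output the buffer is typed as PACKED_OUT_TYPE, so one element holds two interleaved channels (2*lid, 2*lid+1) and the OUTPUT_TYPE index from OUTPUT_GET_INDEX is halved:

    // fast path: whole x-block and whole 32-channel slice in-bounds ->
    //   BLOCK_WRITE(output + OUTPUT_GET_INDEX(b, fg*OSV, y, x) / 2, dst);
    // tail path: per-pixel bounds checks, still storing packed pairs:
    //   output[OUTPUT_GET_INDEX(b, fg*OSV + 2*lid, y, x+i) / 2] = dst[i];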
index 99ac419..485d24b 100644 (file)
@@ -913,18 +913,25 @@ inline uint FUNC(get_g_os_is_yx_osv16_isv4)(uint g, uint o, uint i, uint y, uint
 }
 
 #define GET_FILTER_OS_IS_YX_OSV16_ISV4_INDEX(prefix, o, i, y, x) \
-    FUNC_CALL(get_os_is_yx_osv16_isv4)(                          \
+    FUNC_CALL(get_os_is_yx_osv_isv4)(                            \
         o, i, y, x,                                              \
         CAT(prefix, _IFM_PITCH),                                 \
         CAT(prefix, _OFM_PITCH),                                 \
-        CAT(prefix, _SIZE_X))
+        CAT(prefix, _SIZE_X), 16)
 
-inline uint FUNC(get_os_is_yx_osv16_isv4)(uint o, uint i, uint y, uint x,
-                                          uint i_size,
-                                          uint o_size,
-                                          uint x_size)
+#define GET_FILTER_OS_IS_YX_OSV32_ISV4_INDEX(prefix, o, i, y, x) \
+    FUNC_CALL(get_os_is_yx_osv_isv4)(                            \
+        o, i, y, x,                                              \
+        CAT(prefix, _IFM_PITCH),                                 \
+        CAT(prefix, _OFM_PITCH),                                 \
+        CAT(prefix, _SIZE_X), 32)
+
+inline uint FUNC(get_os_is_yx_osv_isv4)(uint o, uint i, uint y, uint x,
+                                        uint i_size,
+                                        uint o_size,
+                                        uint x_size,
+                                        uint otd)
 {
-    const uint otd = 16;
     uint out_depth_tile = o / otd;
     uint od             = o - out_depth_tile * otd;
 
@@ -976,6 +983,7 @@ inline uint FUNC(get_os_is_yx_osv32_isv4_swizzled_by_2)(uint o, uint i, uint y,
 
     return idx;
 }
+
 #define GET_DATA_FS_B_YX_FSV32_INDEX(prefix, b, f, y, x) \
     FUNC_CALL(get_fs_b_yx_fsv32_index)(                  \
         b, f, y, x,                                      \
index f307158..6e4c6ae 100644 (file)
 
 #include "include/common.cl"
 #include "include/data_types.cl"
-
+#include "include/fetch.cl"
 
 #ifdef FORCE_SIMD_16
 __attribute__((intel_reqd_sub_group_size(16)))
 #endif
-KERNEL (lrn_gpu_across_channel_multiple_features)(const __global INPUT0_TYPE* input, __global OUTPUT_TYPE* output)
+
+KERNEL (lrn_gpu_across_channel_multiple_features)(
+    const __global INPUT0_TYPE* input,
+    __global OUTPUT_TYPE* output
+#if HAS_FUSED_OPS_DECLS
+    , FUSED_OPS_DECLS
+#endif
+    )
 {
-#if   defined OUTPUT_LAYOUT_BFYX
+#if defined OUTPUT_LAYOUT_BFYX || defined OUTPUT_LAYOUT_B_FS_YX_FSV4 || defined OUTPUT_LAYOUT_B_FS_YX_FSV16
 // PERF NOTE: SIMD is over global_id(0), so within a SIMD row global_id(1) and global_id(2) do not change; we can use group_id to get SIMD1 instructions
     const uint x            = get_global_id(0);
     const uint y            = get_group_id(1);
     const uint b_f          = get_group_id(2);
     const uint batch_id     = (b_f * OFM_PER_SIMD) / INPUT0_FEATURE_NUM;
     const uint feature_id   = (b_f % (INPUT0_FEATURE_NUM / OFM_PER_SIMD)) * OFM_PER_SIMD;
-    
+
     if (x >= INPUT0_SIZE_X)
         return;
 #elif defined OUTPUT_LAYOUT_YXFB
@@ -37,42 +44,57 @@ KERNEL (lrn_gpu_across_channel_multiple_features)(const __global INPUT0_TYPE* in
     const uint y            = get_group_id(2);
     const uint feature_id   = (b_f / INPUT0_BATCH_NUM) * OFM_PER_SIMD;
     const uint batch_id     = b_f % INPUT0_BATCH_NUM;
-#endif    
+#endif
 
     uint input_id = INPUT0_OFFSET + batch_id*INPUT0_BATCH_PITCH + feature_id*INPUT0_FEATURE_PITCH + y*INPUT0_Y_PITCH + x*INPUT0_X_PITCH;
 
-    int input_offset_f = feature_id - PADDING;
+#if INPUT0_SIMPLE
     uint input_idx = input_id - PADDING*INPUT0_FEATURE_PITCH;
+    input_idx =  MULTIPLY_OFFSET(INPUT0_TYPE, input_idx);
+#endif
 
-    input_idx =  MULTIPLY_OFFSET(UNIT_TYPE, input_idx);
+    int input_offset_f = feature_id - PADDING;
 
-    UNIT_TYPE vals[OFM_PER_SIMD];
-    UNIT_TYPE results[OFM_PER_SIMD] = { UNIT_VAL_ZERO };
+    INPUT0_TYPE vals[OFM_PER_SIMD];
+    INPUT0_TYPE results[OFM_PER_SIMD] = { INPUT0_VAL_ZERO };
 
     // prefetch
-    for(uint i = 0; i < OFM_PER_SIMD; i++)
+    for(uint j = 0; j < OFM_PER_SIMD; j++)
     {
+    #if !INPUT0_SIMPLE
+        uint input_idx = INPUT0_GET_INDEX(batch_id, feature_id - PADDING + j, y, x);
+        input_idx =  MULTIPLY_OFFSET(INPUT0_TYPE, input_idx);
         bool zero = input_offset_f < 0 || input_offset_f >= INPUT0_FEATURE_NUM;
-        vals[i] = zero ? UNIT_VAL_ZERO : TO_UNIT_TYPE(ALPHA_VAL_FACTOR_DIV_BY_SIZE) * (*OFFSET_GLOBAL_PTR(UNIT_TYPE, input, input_idx));
+        vals[j] = zero ? INPUT0_VAL_ZERO : TO_INPUT0_TYPE(ALPHA_VAL_FACTOR_DIV_BY_SIZE) * (*OFFSET_GLOBAL_PTR(INPUT0_TYPE, input, input_idx));
+    #else
+        bool zero = input_offset_f < 0 || input_offset_f >= INPUT0_FEATURE_NUM;
+        vals[j] = zero ? INPUT0_VAL_ZERO : TO_INPUT0_TYPE(ALPHA_VAL_FACTOR_DIV_BY_SIZE) * (*OFFSET_GLOBAL_PTR(INPUT0_TYPE, input, input_idx));
+        input_idx += MULTIPLY_OFFSET(INPUT0_TYPE, INPUT0_FEATURE_PITCH);
+    #endif
         input_offset_f++;
-        input_idx += MULTIPLY_OFFSET(UNIT_TYPE, INPUT0_FEATURE_PITCH);
     }
 
-    for (uint i = 0; i < LOCAL_SIZE-1; i++)
+    for (uint j = 0; j < LOCAL_SIZE-1; j++)
     {
-        for(uint j = 0; j < OFM_PER_SIMD; j++)
+        for(uint i = 0; i < OFM_PER_SIMD; i++)
         {
-            results[j] = mad(vals[j], vals[j], results[j]);
+            results[i] = mad(vals[i], vals[i], results[i]);
         }
-        for(uint j = 0; j < OFM_PER_SIMD-1; j++)
+        for(uint i = 0; i < OFM_PER_SIMD-1; i++)
         {
-            vals[j] = vals[j+1];
+            vals[i] = vals[i+1];
         }
-
+    #if !INPUT0_SIMPLE
+        uint input_idx = INPUT0_GET_INDEX(batch_id, input_offset_f, y, x);
+        input_idx =  MULTIPLY_OFFSET(INPUT0_TYPE, input_idx);
         bool zero = input_offset_f < 0 || input_offset_f >= INPUT0_FEATURE_NUM;
-        vals[OFM_PER_SIMD-1] = zero ? UNIT_VAL_ZERO : TO_UNIT_TYPE(ALPHA_VAL_FACTOR_DIV_BY_SIZE) * (*OFFSET_GLOBAL_PTR(UNIT_TYPE, input, input_idx));
+        vals[OFM_PER_SIMD-1] = zero ? INPUT0_VAL_ZERO : TO_INPUT0_TYPE(ALPHA_VAL_FACTOR_DIV_BY_SIZE) * (*OFFSET_GLOBAL_PTR(INPUT0_TYPE, input, input_idx));
+    #else
+        bool zero = input_offset_f < 0 || input_offset_f >= INPUT0_FEATURE_NUM;
+        vals[OFM_PER_SIMD-1] = zero ? INPUT0_VAL_ZERO : TO_INPUT0_TYPE(ALPHA_VAL_FACTOR_DIV_BY_SIZE) * (*OFFSET_GLOBAL_PTR(INPUT0_TYPE, input, input_idx));
+        input_idx += MULTIPLY_OFFSET(INPUT0_TYPE, INPUT0_FEATURE_PITCH);
+    #endif
         input_offset_f++;
-        input_idx += MULTIPLY_OFFSET(UNIT_TYPE, INPUT0_FEATURE_PITCH);
     }
 
     for(uint j = 0; j < OFM_PER_SIMD; j++)
@@ -82,15 +104,33 @@ KERNEL (lrn_gpu_across_channel_multiple_features)(const __global INPUT0_TYPE* in
 
     for(uint j = 0; j < OFM_PER_SIMD; j++)
     {
-        results[j] = mad(results[j], TO_UNIT_TYPE(ALPHA_DIV_BY_SIZE), TO_UNIT_TYPE(K));
-        results[j] = native_powr(results[j], -TO_UNIT_TYPE(BETA));
+        results[j] = mad(results[j], TO_INPUT0_TYPE(ALPHA_DIV_BY_SIZE), TO_INPUT0_TYPE(K));
+        results[j] = native_powr(results[j], -TO_INPUT0_TYPE(BETA));
     }
 
-    uint output_idx = OUTPUT_OFFSET + batch_id*OUTPUT_BATCH_PITCH + feature_id*OUTPUT_FEATURE_PITCH + y*OUTPUT_Y_PITCH + x*OUTPUT_X_PITCH;
+    #if OUTPUT_SIMPLE
+        uint output_idx = OUTPUT_OFFSET + batch_id*OUTPUT_BATCH_PITCH + feature_id*OUTPUT_FEATURE_PITCH + y*OUTPUT_Y_PITCH + x*OUTPUT_X_PITCH;
+    #endif
+
+    INPUT0_TYPE lrn_result;
+
     for(uint j = 0; j < OFM_PER_SIMD; j++)
     {
-        output[output_idx] = ACTIVATION(results[j] * input[input_id], ACTIVATION_PARAMS);
+    #if !OUTPUT_SIMPLE
+        uint output_idx = OUTPUT_GET_INDEX(batch_id, feature_id + j, y, x);
+        input_id = INPUT0_GET_INDEX(batch_id, feature_id + j, y, x);
+    #endif
+        lrn_result = results[j] * input[input_id];
+        #if HAS_FUSED_OPS
+            FUSED_OPS;
+            OUTPUT_TYPE res = FUSED_OPS_RESULT;
+            output[output_idx] = res;
+        #else
+            output[output_idx] = ACTIVATION(lrn_result, ACTIVATION_PARAMS);
+        #endif
+    #if OUTPUT_SIMPLE
         output_idx += OUTPUT_FEATURE_PITCH;
         input_id += INPUT0_FEATURE_PITCH;
+    #endif
     }
-}
\ No newline at end of file
+}
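For reference, the across-channel LRN these kernels compute (a sketch; the host precomputes the ALPHA_* constants, presumably factored so that ALPHA_DIV_BY_SIZE * ALPHA_VAL_FACTOR_DIV_BY_SIZE^2 == alpha / LOCAL_SIZE, which keeps fp16 intermediates in range):

    // out[f] = in[f] * pow(K + ALPHA_DIV_BY_SIZE *
    //                          sum over the LOCAL_SIZE window starting at f - PADDING of
    //                          (ALPHA_VAL_FACTOR_DIV_BY_SIZE * in[j])^2,
    //                      -BETA)
    // vals[] above is a sliding window along the feature axis: each iteration
    // squares-and-accumulates, shifts the window left by one, and loads one
    // new feature, so every input is read once per OFM_PER_SIMD outputs.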
index 62776b3..3c40a5c 100644 (file)
 #include "include/common.cl"
 #include "include/data_types.cl"
 
-
-#if FP16_UNIT_USED
-    #define UNIT_CVT_FUNC(val) convert_half(val)
-#else
-    #define UNIT_CVT_FUNC(val) (val)
+KERNEL (lrn_gpu_across_channel_ref)(
+    const __global INPUT0_TYPE* input,
+    __global OUTPUT_TYPE* output
+#if HAS_FUSED_OPS_DECLS
+    , FUSED_OPS_DECLS
 #endif
-
-
-KERNEL (lrn_gpu_across_channel_ref)(const __global UNIT_TYPE* input, __global UNIT_TYPE* output)
+    )
 {
 #if   defined OUTPUT_LAYOUT_BFYX
-    const uint x            = get_global_id(0);    
+    const uint x            = get_global_id(0);
     const uint y            = get_global_id(1);
     const uint b_f          = get_global_id(2);
     const uint batch_id     = b_f / INPUT0_FEATURE_NUM;
     const uint feature_id   = b_f % INPUT0_FEATURE_NUM;
-    
+
     if (x >= INPUT0_SIZE_X)
         return;
 #else
@@ -40,11 +38,11 @@ KERNEL (lrn_gpu_across_channel_ref)(const __global UNIT_TYPE* input, __global UN
     const uint y            = (uint)get_global_id(2);
     const uint feature_id   = b_f / INPUT0_BATCH_NUM;
     const uint batch_id     = b_f % INPUT0_BATCH_NUM;
-#endif    
+#endif
 
     const uint input_id = INPUT0_OFFSET + batch_id*INPUT0_BATCH_PITCH + feature_id*INPUT0_FEATURE_PITCH + y*INPUT0_Y_PITCH + x*INPUT0_X_PITCH;
 
-    UNIT_TYPE acc = UNIT_VAL_ZERO;
+    INPUT0_TYPE acc = INPUT0_VAL_ZERO;
 
     int input_offset_f = feature_id - PADDING;
     int input_idx = (int)input_id - PADDING*INPUT0_FEATURE_PITCH;
@@ -53,17 +51,24 @@ KERNEL (lrn_gpu_across_channel_ref)(const __global UNIT_TYPE* input, __global UN
     {
         bool zero = input_offset_f < 0 || input_offset_f >= INPUT0_FEATURE_NUM;
 
-        UNIT_TYPE value = zero ? UNIT_VAL_ZERO : UNIT_CVT_FUNC(ALPHA_VAL_FACTOR_DIV_BY_SIZE) * input[input_idx];
+        INPUT0_TYPE value = zero ? INPUT0_VAL_ZERO : TO_INPUT0_TYPE(ALPHA_VAL_FACTOR_DIV_BY_SIZE) * input[input_idx];
         acc = mad(value, value, acc);
 
         input_offset_f++;
         input_idx += INPUT0_FEATURE_PITCH;
     }
-    acc = mad(acc, UNIT_CVT_FUNC(ALPHA_DIV_BY_SIZE), UNIT_CVT_FUNC(K));
-    acc = native_powr(acc, -UNIT_CVT_FUNC(BETA));
+    acc = mad(acc, TO_INPUT0_TYPE(ALPHA_DIV_BY_SIZE), TO_INPUT0_TYPE(K));
+    acc = native_powr(acc, -TO_INPUT0_TYPE(BETA));
 
     const uint output_idx = OUTPUT_OFFSET + batch_id*OUTPUT_BATCH_PITCH + feature_id*OUTPUT_FEATURE_PITCH + y*OUTPUT_Y_PITCH + x*OUTPUT_X_PITCH;
-    output[output_idx] = ACTIVATION(acc * input[input_id], ACTIVATION_PARAMS);
-}
+    INPUT0_TYPE lrn_result = acc * input[input_id];
 
-#undef UNIT_CVT_FUNC
+#if HAS_FUSED_OPS
+    FUSED_OPS;
+    OUTPUT_TYPE res = FUSED_OPS_RESULT;
+    output[output_idx] = res;
+#else
+    output[output_idx] = ACTIVATION(lrn_result, ACTIVATION_PARAMS);
+#endif
+}
index 91b4b23..32aebc2 100644 (file)
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "include/common.cl"
-#include "include/data_types.cl"
+#include "include/include_all.cl"
 
-
-#if FP16_UNIT_USED
-    #define UNIT_CVT_FUNC(val) convert_half(val)
-#else
-    #define UNIT_CVT_FUNC(val) (val)
-#endif
+#define INPUT_VECTOR_TYPE MAKE_VECTOR_TYPE(INPUT0_TYPE, 8)
+#define OUTPUT_VECTOR_TYPE MAKE_VECTOR_TYPE(OUTPUT_TYPE, 8)
 
 __attribute__((reqd_work_group_size(SUB_GROUP_SIZE, 1, 1)))
-KERNEL (lrn_gpu_yxfb_b8)(const __global UNIT_TYPE* input, __global UNIT_TYPE* output)
+KERNEL (lrn_gpu_yxfb_b8)(
+    const __global INPUT0_TYPE* input,
+    __global OUTPUT_TYPE* output
+#if HAS_FUSED_OPS_DECLS
+    , FUSED_OPS_DECLS
+#endif
+    )
 {
-    
+
     const uint batch_num_group  = (INPUT0_BATCH_NUM/SUB_GROUP_SIZE);
     const uint b_f              = get_global_id(0);
     const uint x                = (uint)get_global_id(1);
@@ -38,11 +39,11 @@ KERNEL (lrn_gpu_yxfb_b8)(const __global UNIT_TYPE* input, __global UNIT_TYPE* ou
     const uint input_id_group = input_id / SUB_GROUP_SIZE;
 
     int input_offset_f = feature_id - PADDING;
-    
+
     const uint input_feature_pitch_group  = (INPUT0_FEATURE_PITCH/SUB_GROUP_SIZE);
     int input_idx_group = (int)input_id_group - PADDING*input_feature_pitch_group;
-    
-    float8 acc = 0;
+
+    INPUT_VECTOR_TYPE acc = 0;
 
     for (int i = 0; i < LOCAL_SIZE; i++)
     {
@@ -50,19 +51,29 @@ KERNEL (lrn_gpu_yxfb_b8)(const __global UNIT_TYPE* input, __global UNIT_TYPE* ou
 
         if(!zero)
         {
-            float8 value = vload8(input_idx_group, input);
+            INPUT_VECTOR_TYPE value = vload8(input_idx_group, input);
             acc = mad(value, value, acc);
         }
 
         input_offset_f++;
         input_idx_group += input_feature_pitch_group;
     }
-    acc = mad(acc, UNIT_CVT_FUNC(ALPHA_DIV_BY_SIZE), UNIT_CVT_FUNC(K));
-    acc = native_powr(acc, -UNIT_CVT_FUNC(BETA));
+    acc = mad(acc, TO_INPUT0_TYPE(ALPHA_DIV_BY_SIZE), TO_INPUT0_TYPE(K));
+    acc = native_powr(acc, -TO_INPUT0_TYPE(BETA));
 
     const uint output_idx = OUTPUT_OFFSET + batch_id*OUTPUT_BATCH_PITCH + feature_id*OUTPUT_FEATURE_PITCH + y*OUTPUT_Y_PITCH + x*OUTPUT_X_PITCH;
     const uint output_idx_group = output_idx / SUB_GROUP_SIZE;
     float8 _in = vload8(input_id_group, input);
-    float8 res = ACTIVATION(acc * _in, ACTIVATION_PARAMS);
-    vstore8(res, output_idx_group, output);
-}
\ No newline at end of file
+    float8 lrn_result = ACTIVATION(acc * _in, ACTIVATION_PARAMS);
+
+    #if HAS_FUSED_OPS
+        FUSED_OPS;
+        OUTPUT_VECTOR_TYPE res = FUSED_OPS_RESULT;
+        vstore8(res, output_idx_group, output);
+    #else
+        vstore8(lrn_result, output_idx_group, output);
+    #endif
+}
+
+#undef INPUT_VECTOR_TYPE
+#undef OUTPUT_VECTOR_TYPE
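The b8 variant exploits the yxfb layout, where the batch dimension is innermost; with the 8 batches implied by the kernel name, standard vload8/vstore8 semantics turn the scalar indices into vector ones:

    // vload8(idx, p) reads p[8*idx] .. p[8*idx + 7], so dividing every pitch
    // and offset by SUB_GROUP_SIZE (== 8 batches here, per the b8 name) lets
    // one work-item process 8 images of the same (x, y, f) per load/store pair:
    //   INPUT_VECTOR_TYPE v = vload8(input_idx_group, input);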
index 4f7b6fe..1a0d988 100644 (file)
 #include "include/common.cl"
 #include "include/data_types.cl"
 
-
-KERNEL (lrn_gpu_within_channel)(const __global UNIT_TYPE* input, __global UNIT_TYPE* output)
+KERNEL (lrn_gpu_within_channel)(
+    const __global INPUT0_TYPE* input,
+    __global OUTPUT_TYPE* output
+#if HAS_FUSED_OPS_DECLS
+    , FUSED_OPS_DECLS
+#endif
+    )
 {
     for (uint index = get_global_id(0) ; index < INPUT0_LENGTH ; index += get_global_size(0))
     {
@@ -48,23 +53,32 @@ KERNEL (lrn_gpu_within_channel)(const __global UNIT_TYPE* input, __global UNIT_T
         wstart = max(wstart, (int)0);
         hend = min(hend, INPUT0_SIZE_Y);
         wend = min(wend, INPUT0_SIZE_X);
-        UNIT_TYPE aveval = 0;
+        INPUT0_TYPE aveval = 0;
 
-        __global const UNIT_TYPE* bottom_slice = input + first_index_in_feature;
+        __global const INPUT0_TYPE* bottom_slice = input + first_index_in_feature;
         for (int h = hstart; h < hend; ++h)
         {
             for (int w = wstart; w < wend; ++w)
             {
-                UNIT_TYPE tmp_val = bottom_slice[h*INPUT0_Y_PITCH + w*INPUT0_X_PITCH] * TO_UNIT_TYPE(ALPHA_VAL_FACTOR);
+                INPUT0_TYPE tmp_val = bottom_slice[h*INPUT0_Y_PITCH + w*INPUT0_X_PITCH] * TO_INPUT0_TYPE(ALPHA_VAL_FACTOR);
                 aveval += (tmp_val * tmp_val);
             }
         }
 
-        UNIT_TYPE acc = aveval / pool_size;
-        acc = mad(acc, TO_UNIT_TYPE(ALPHA_AFTER_FACTORED), TO_UNIT_TYPE(K));
-        acc = native_powr(acc, -TO_UNIT_TYPE(BETA));
+        INPUT0_TYPE acc = aveval / pool_size;
+        acc = mad(acc, TO_INPUT0_TYPE(ALPHA_AFTER_FACTORED), TO_INPUT0_TYPE(K));
+        acc = native_powr(acc, -TO_INPUT0_TYPE(BETA));
 
         const uint output_idx = OUTPUT_OFFSET + batch_id*OUTPUT_BATCH_PITCH + feature_id*OUTPUT_FEATURE_PITCH + y*OUTPUT_Y_PITCH + x*OUTPUT_X_PITCH;
-        output[output_idx] = ACTIVATION(acc * input[input_id], ACTIVATION_PARAMS);
+        INPUT0_TYPE lrn_result = acc * input[input_id];
+
+    #if HAS_FUSED_OPS
+        FUSED_OPS;
+        OUTPUT_TYPE res = FUSED_OPS_RESULT;
+        output[output_idx] = res;
+    #else
+        output[output_idx] = ACTIVATION(lrn_result, ACTIVATION_PARAMS);
+    #endif
+
     }
-}
\ No newline at end of file
+}
index b63de97..bdbdc6e 100644 (file)
 #include "include/common.cl"
 #include "include/data_types.cl"
 
-
-KERNEL (lrn_gpu_within_channel_opt)(const __global UNIT_TYPE* input, __global UNIT_TYPE* output)
+KERNEL (lrn_gpu_within_channel_opt)(
+    const __global INPUT0_TYPE* input,
+    __global OUTPUT_TYPE* output
+#if HAS_FUSED_OPS_DECLS
+    , FUSED_OPS_DECLS
+#endif
+    )
 {
     uint index = get_global_id(0);
 #if   defined OUTPUT_LAYOUT_YXFB
@@ -39,9 +44,9 @@ KERNEL (lrn_gpu_within_channel_opt)(const __global UNIT_TYPE* input, __global UN
     const uint first_index_in_feature = INPUT0_OFFSET + batch_id * INPUT0_BATCH_PITCH + feature_id * INPUT0_FEATURE_PITCH;
     const uint input_id = first_index_in_feature + y * INPUT0_Y_PITCH + x * INPUT0_X_PITCH;
 
-    UNIT_TYPE aveval = 0;
+    INPUT0_TYPE aveval = 0;
     uint pool_size = 0;
-    int wstart = x - PADDING; 
+    int wstart = x - PADDING;
     int hstart = y - PADDING;
 
 
@@ -52,13 +57,13 @@ KERNEL (lrn_gpu_within_channel_opt)(const __global UNIT_TYPE* input, __global UN
     {
         pool_size = LOCAL_SIZE * LOCAL_SIZE;
 
-        __global const UNIT_TYPE* bottom_slice = input + first_index_in_feature + hstart * INPUT0_Y_PITCH + wstart * INPUT0_X_PITCH;
+        __global const INPUT0_TYPE* bottom_slice = input + first_index_in_feature + hstart * INPUT0_Y_PITCH + wstart * INPUT0_X_PITCH;
         for (int h = 0; h < LOCAL_SIZE; ++h)
         {
             uint hPitch = h * INPUT0_Y_PITCH;
             for (int w = 0; w < LOCAL_SIZE; ++w)
             {
-                UNIT_TYPE tmp_val = bottom_slice[hPitch + w * INPUT0_X_PITCH] * TO_UNIT_TYPE(ALPHA_VAL_FACTOR);
+                INPUT0_TYPE tmp_val = bottom_slice[hPitch + w * INPUT0_X_PITCH] * TO_INPUT0_TYPE(ALPHA_VAL_FACTOR);
                 aveval = mad(tmp_val, tmp_val, aveval);
             }
         }
@@ -73,22 +78,30 @@ KERNEL (lrn_gpu_within_channel_opt)(const __global UNIT_TYPE* input, __global UN
         hend = min(hend, INPUT0_SIZE_Y);
         wend = min(wend, INPUT0_SIZE_X);
 
-        __global const UNIT_TYPE* bottom_slice = input + first_index_in_feature;
+        __global const INPUT0_TYPE* bottom_slice = input + first_index_in_feature;
         for (uint h = hstart; h < hend; ++h)
         {
             uint hPitch = h * INPUT0_Y_PITCH;
             for (uint w = wstart; w < wend; ++w)
             {
-                UNIT_TYPE tmp_val = bottom_slice[hPitch + w * INPUT0_X_PITCH] * TO_UNIT_TYPE(ALPHA_VAL_FACTOR);
+                INPUT0_TYPE tmp_val = bottom_slice[hPitch + w * INPUT0_X_PITCH] * TO_INPUT0_TYPE(ALPHA_VAL_FACTOR);
                 aveval = mad(tmp_val, tmp_val, aveval);
             }
         }
     }
 
-    UNIT_TYPE acc = aveval / pool_size;
-    acc = mad(acc, TO_UNIT_TYPE(ALPHA_AFTER_FACTORED), TO_UNIT_TYPE(K));
-    acc = native_powr(acc, -TO_UNIT_TYPE(BETA));
+    INPUT0_TYPE acc = aveval / pool_size;
+    acc = mad(acc, TO_INPUT0_TYPE(ALPHA_AFTER_FACTORED), TO_INPUT0_TYPE(K));
+    acc = native_powr(acc, -TO_INPUT0_TYPE(BETA));
 
     const uint output_idx = OUTPUT_OFFSET + batch_id * OUTPUT_BATCH_PITCH + feature_id * OUTPUT_FEATURE_PITCH + y * OUTPUT_Y_PITCH + x * OUTPUT_X_PITCH;
-    output[output_idx] = ACTIVATION(acc * input[input_id], ACTIVATION_PARAMS);
-}
\ No newline at end of file
+    INPUT0_TYPE lrn_result = acc * input[input_id];
+
+#if HAS_FUSED_OPS
+    FUSED_OPS;
+    OUTPUT_TYPE res = FUSED_OPS_RESULT;
+    output[output_idx] = res;
+#else
+    output[output_idx] = ACTIVATION(lrn_result, ACTIVATION_PARAMS);
+#endif
+}
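For reference, the within-channel variants normalize over a LOCAL_SIZE x LOCAL_SIZE spatial window instead of the feature axis (a sketch matching the code above):

    // out[y][x] = in[y][x] * pow(K + ALPHA_AFTER_FACTORED / pool_size *
    //                                sum over the window of (ALPHA_VAL_FACTOR * in[h][w])^2,
    //                            -BETA)
    // pool_size is the number of taps actually inside the input: LOCAL_SIZE^2
    // on the interior fast path, fewer where the window is clamped at borders.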
index 7eab50c..e66ab65 100644 (file)
 #include "include/fetch.cl"
 #include "include/data_types.cl"
 
-
-KERNEL(normalization)(__global const INPUT0_TYPE* input, __global OUTPUT_TYPE* output)
+KERNEL(normalization)(
+    __global const INPUT0_TYPE* input,
+    __global OUTPUT_TYPE* output
+#if HAS_FUSED_OPS_DECLS
+    , FUSED_OPS_DECLS
+#endif
+    )
 {
     const uint b = get_global_id(GWS_BATCH);
     const uint f = get_global_id(GWS_FEATURE);
@@ -42,7 +47,7 @@ KERNEL(normalization)(__global const INPUT0_TYPE* input, __global OUTPUT_TYPE* o
     {
         const int z_idx = (j + f - PADDING);
         bool zero = (z_idx < 0 || z_idx >= INPUT0_FEATURE_NUM);
-        UNIT_TYPE val = zero ? 0.0f : input[j_offset];
+        INPUT0_TYPE val = zero ? 0.0f : input[j_offset];
         sum += val*val;
         j_offset += INPUT0_FEATURE_PITCH;
 #ifdef DYNAMIC_KERNEL_DIVIDER 
@@ -68,7 +73,7 @@ KERNEL(normalization)(__global const INPUT0_TYPE* input, __global OUTPUT_TYPE* o
             zero = input_offset_x >= INPUT0_SIZE_X ? true : zero;
             zero = input_offset_y >= INPUT0_SIZE_Y ? true : zero;
 
-            UNIT_TYPE val = zero ? UNIT_VAL_ZERO : input[input_offset];
+            INPUT0_TYPE val = zero ? INPUT0_VAL_ZERO : input[input_offset];
             
             sum += val*val;
             input_offset += INPUT0_X_PITCH;
@@ -81,15 +86,22 @@ KERNEL(normalization)(__global const INPUT0_TYPE* input, __global OUTPUT_TYPE* o
 #endif
 
 #ifdef DYNAMIC_KERNEL_DIVIDER 
-    const UNIT_TYPE num_elementes_div = UNIT_VAL_ONE / TO_UNIT_TYPE(num_elementes);
+    const INPUT0_TYPE num_elementes_div = INPUT0_VAL_ONE / TO_INPUT0_TYPE(num_elementes);
 #else
-    const UNIT_TYPE num_elementes_div = NUM_ELEMENTS_DIV;
+    const INPUT0_TYPE num_elementes_div = NUM_ELEMENTS_DIV;
 #endif
     
-    const UNIT_TYPE base = TO_UNIT_TYPE(K) + TO_UNIT_TYPE((ACCUMULATOR_TYPE)ALPHA*sum * num_elementes_div);
-    const UNIT_TYPE normalization_factor = native_powr(base, TO_UNIT_TYPE(-BETA));
+    INPUT0_TYPE base = TO_INPUT0_TYPE(K) + TO_INPUT0_TYPE((ACCUMULATOR_TYPE)ALPHA*sum * num_elementes_div);
+    INPUT0_TYPE normalization_factor = native_powr(base, TO_INPUT0_TYPE(-BETA));
     
-    const UNIT_TYPE val = input[input_index];
-    const UNIT_TYPE normres =  val*normalization_factor;
-    output[output_index] = ACTIVATION(normres, ACTIVATION_PARAMS);
+    INPUT0_TYPE lrn_result = input[input_index] * normalization_factor;
+
+#if HAS_FUSED_OPS
+    FUSED_OPS;
+    OUTPUT_TYPE res = FUSED_OPS_RESULT;
+    output[output_index] = res;
+#else
+    output[output_index] = ACTIVATION(lrn_result, ACTIVATION_PARAMS);
+#endif
 }
index 3befa63..b47d0ad 100644 (file)
 // limitations under the License.
 */
 
-
-
 #include "include/include_all.cl"
 
-#define VECTOR_TYPE MAKE_VECTOR_TYPE(UNIT_TYPE,8)
-#define ACCUMULATOR_VECTOR_TYPE MAKE_VECTOR_TYPE(ACCUMULATOR_TYPE, 8)
+#define VECTOR_TYPE MAKE_VECTOR_TYPE(INPUT0_TYPE, 8)
+#define ACCUMULATOR_VECTOR_TYPE MAKE_VECTOR_TYPE(INPUT0_TYPE, 8)
 #define FEATURE_PER_ITEM 8
 #define FEATURE_BLOCK_NUM (OUTPUT_FEATURE_NUM / 8)
 
-KERNEL(lrn_within_channel_byxf_opt)(__global const INPUT0_TYPE* input, __global OUTPUT_TYPE* output)
+KERNEL(lrn_within_channel_byxf_opt)(
+    __global const INPUT0_TYPE* input,
+    __global OUTPUT_TYPE* output
+#if HAS_FUSED_OPS_DECLS
+    , FUSED_OPS_DECLS
+#endif
+    )
 {
     const uint b = get_global_id(GWS_BATCH);
     const uint f = (uint)get_global_id(GWS_FEATURE)*FEATURE_PER_ITEM;
@@ -56,7 +60,7 @@ KERNEL(lrn_within_channel_byxf_opt)(__global const INPUT0_TYPE* input, __global
             zero = input_offset_x >= INPUT0_SIZE_X ? true : zero;
             zero = input_offset_y >= INPUT0_SIZE_Y ? true : zero;
 
-            VECTOR_TYPE val = zero ? UNIT_VAL_ZERO : vload8(input_offset+FEATURE_BLOCK_NUM*i, input);
+            VECTOR_TYPE val = zero ? INPUT0_VAL_ZERO : vload8(input_offset+FEATURE_BLOCK_NUM*i, input);
             
             sum = mad(val,val,sum);
 #ifdef DYNAMIC_KERNEL_DIVIDER
@@ -67,19 +71,28 @@ KERNEL(lrn_within_channel_byxf_opt)(__global const INPUT0_TYPE* input, __global
     }
 
 #ifdef DYNAMIC_KERNEL_DIVIDER 
-    const UNIT_TYPE num_elementes_div = UNIT_VAL_ONE / TO_UNIT_TYPE(num_elementes);
+    const INPUT0_TYPE num_elementes_div = INPUT0_VAL_ONE / TO_INPUT0_TYPE(num_elementes);
 #else
-    const UNIT_TYPE num_elementes_div = NUM_ELEMENTS_DIV;
+    const INPUT0_TYPE num_elementes_div = NUM_ELEMENTS_DIV;
 #endif
     
-    const VECTOR_TYPE base = mad((ACCUMULATOR_TYPE)ALPHA*num_elementes_div, sum, TO_UNIT_TYPE(K));
-    const VECTOR_TYPE normalization_factor = native_powr(base, TO_UNIT_TYPE(-BETA));
+    const VECTOR_TYPE base = mad((ACCUMULATOR_TYPE)ALPHA*num_elementes_div, sum, TO_INPUT0_TYPE(K));
+    const VECTOR_TYPE normalization_factor = native_powr(base, TO_INPUT0_TYPE(-BETA));
     const VECTOR_TYPE val = vload8(input_index/FEATURE_PER_ITEM, input);
-    const VECTOR_TYPE normres = val*normalization_factor;
+    const VECTOR_TYPE normres = val*normalization_factor;
+
+    INPUT0_TYPE lrn_result;
 
     for(uint i = 0; i < FEATURE_PER_ITEM; i++)
     {
-        output[output_index+i] = ACTIVATION(normres[i], ACTIVATION_PARAMS);
+        lrn_result = normres[i];
+        #if HAS_FUSED_OPS
+            FUSED_OPS;
+            OUTPUT_TYPE res = FUSED_OPS_RESULT;
+            output[output_index+i] = res;
+        #else
+            output[output_index+i] = ACTIVATION(lrn_result, ACTIVATION_PARAMS);
+        #endif
     }
 }
 
index 59e1f5f..4867898 100644 (file)
 
 DECLARE_PACKED_ACCUMULATE(accumulate_sum_input, int, INPUT0_TYPE, FSV, INPUT_SLICE_PITCH, ITEMS_NUM, GWS, ACCUMULATE_SUM)
 
+#if SG_NUM != 1
 DECLARE_WG_PACKED_REDUCE_ADD(reduce_sum_across_sg, int, FSV, SG_NUM, REDUCE_NO_POST_OP)
+#else
+DECLARE_SG_PACKED_REDUCE_ADD(reduce_sum_inside_sg, int, FSV, REDUCE_NO_POST_OP)
+#endif
 
 __attribute__((intel_reqd_sub_group_size(SIMD)))
 __attribute__((reqd_work_group_size(LWS, 1, 1)))
@@ -113,10 +117,15 @@ KERNEL(mvn_mean_1)(const __global INPUT0_TYPE* input,
 
     const uint data_sets_offset = INPUT0_GET_INDEX(b, f, 0, 0);
 
-    __local int slm_acc[(SG_NUM - 1) * FSV];
 
     INT_PACKED_TYPE partial_sum = FUNC_CALL(accumulate_sum_input)(input, data_sets_offset, get_global_id(0));
+
+#if SG_NUM != 1
+    __local int slm_acc[(SG_NUM - 1) * FSV];
     int full_sum = FUNC_CALL(reduce_sum_across_sg)(partial_sum, slm_acc);
+#else
+    int full_sum = FUNC_CALL(reduce_sum_inside_sg)(partial_sum);
+#endif
 
     if (sgid == 0 && (sglid < FSV || SIMD == FSV)) {
         intermidiate_sum[flat_data_set_group * ITEM_GROUPS * FSV + items_group * FSV + sglid] = full_sum;
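A hypothetical sketch of what the new DECLARE_SG_PACKED_REDUCE_ADD variant generates (the real macro lives in the included headers): with a single sub-group per work-group there are no cross-sub-group partials to exchange, so the SLM buffer and its barrier go away and the reduction stays in registers, for example:

    // fold SIMD lanes down to the FSV result lanes with sub-group shuffles
    // int acc = partial;
    // for (uint off = SIMD / 2; off >= FSV; off /= 2)
    //     acc += intel_sub_group_shuffle_down(acc, acc, off);
    // return POST_OP(acc);   // REDUCE_NO_POST_OP / CALC_MEAN / CALC_INVERSE_VARIANCE

The same SG_NUM != 1 / == 1 split is applied uniformly to every reduce in the hunks below.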
@@ -128,7 +137,11 @@ KERNEL(mvn_mean_1)(const __global INPUT0_TYPE* input,
 DECLARE_PACKED_ACCUMULATE(accumulate_sum_input, int, int, FSV, INPUT_SLICE_PITCH, ITEM_GROUPS, LWS, ACCUMULATE_SUM)
 
 #define CALC_MEAN(sum) ((sum) / ITEMS_NUM)
+#if SG_NUM != 1
 DECLARE_WG_PACKED_REDUCE_ADD(reduce_mean_across_sg, MEAN_TYPE, FSV, SG_NUM, CALC_MEAN)
+#else
+DECLARE_SG_PACKED_REDUCE_ADD(reduce_mean_inside_sg, MEAN_TYPE, FSV, CALC_MEAN)
+#endif
 
 __attribute__((intel_reqd_sub_group_size(SIMD)))
 __attribute__((reqd_work_group_size(LWS, 1, 1)))
@@ -144,8 +157,13 @@ KERNEL(mvn_mean_2)(const __global int* intermidiate_sum,
     const uint data_sets_offset = flat_data_set_group * ITEM_GROUPS * FSV;
 
     INT_PACKED_TYPE complete_sum = FUNC_CALL(accumulate_sum_input)(intermidiate_sum, data_sets_offset, get_local_id(0));
+
+#if SG_NUM != 1
     __local MEAN_TYPE slm_acc[(SG_NUM - 1) * FSV];
     MEAN_TYPE mean = FUNC_CALL(reduce_mean_across_sg)(TO_MEAN_PACKED_TYPE(complete_sum), slm_acc);
+#else
+    MEAN_TYPE mean = FUNC_CALL(reduce_mean_inside_sg)(TO_MEAN_PACKED_TYPE(complete_sum));
+#endif
 
     if (sgid == 0 && (sglid < FSV || SIMD == FSV)) {
         intermidiate_mean[flat_data_set_group * FSV + sglid] = mean;
@@ -161,7 +179,11 @@ KERNEL(mvn_mean_2)(const __global int* intermidiate_sum,
 #define ACCUMULATE_SUM_SQ_DEV(curr, next, idx, mean)   ACCUMULATE_SUM_SQ(curr, TO_MEAN_TYPE(next) - intel_sub_group_shuffle(mean, idx), idx)
 DECLARE_PACKED_ACCUMULATE_EARGS(accumulate_sum_sq_dev, MEAN_TYPE, INPUT0_TYPE, FSV, INPUT_SLICE_PITCH, ITEMS_NUM, GWS, ACCUMULATE_SUM_SQ_DEV, EXTRA_ARGS_DECL, EXTRA_ARGS)
 
+#if SG_NUM != 1
 DECLARE_WG_PACKED_REDUCE_ADD(reduce_sum_across_sg, MEAN_TYPE, FSV, SG_NUM, REDUCE_NO_POST_OP)
+#else
+DECLARE_SG_PACKED_REDUCE_ADD(reduce_sum_inside_sg, MEAN_TYPE, FSV, REDUCE_NO_POST_OP)
+#endif
 
 __attribute__((intel_reqd_sub_group_size(SIMD)))
 __attribute__((reqd_work_group_size(LWS, 1, 1)))
@@ -178,11 +200,16 @@ KERNEL(mvn_var_1)(const __global INPUT0_TYPE* input,
 
     const uint data_sets_offset = INPUT0_GET_INDEX(b, f, 0, 0);
 
-    __local MEAN_TYPE slm_acc[(SG_NUM - 1) * FSV];
 
     MEAN_TYPE mean = means[flat_data_set_group * FSV + sglid];
     MEAN_PACKED_TYPE partial_sum = FUNC_CALL(accumulate_sum_sq_dev)(input, data_sets_offset, get_global_id(0), mean);
+
+#if SG_NUM != 1
+    __local MEAN_TYPE slm_acc[(SG_NUM - 1) * FSV];
     MEAN_TYPE full_sum = FUNC_CALL(reduce_sum_across_sg)(partial_sum, slm_acc);
+#else
+    MEAN_TYPE full_sum = FUNC_CALL(reduce_sum_inside_sg)(partial_sum);
+#endif
 
     if (sgid == 0 && (sglid < FSV || SIMD == FSV)) {
         intermidiate_sum[flat_data_set_group * ITEM_GROUPS * FSV + items_group * FSV + sglid] = full_sum;
@@ -194,7 +221,11 @@ KERNEL(mvn_var_1)(const __global INPUT0_TYPE* input,
 DECLARE_PACKED_ACCUMULATE(accumulate_sum, MEAN_TYPE, MEAN_TYPE, FSV, INPUT_SLICE_PITCH, ITEM_GROUPS, LWS, ACCUMULATE_SUM)
 
 #define CALC_INVERSE_VARIANCE(sum_diff_sq)   native_powr((sum_diff_sq) / ITEMS_NUM + (MEAN_TYPE)EPSILON, -0.5f)
+#if SG_NUM != 1
 DECLARE_WG_PACKED_REDUCE_ADD(reduce_var_across_sg, MEAN_TYPE, FSV, SG_NUM, CALC_INVERSE_VARIANCE)
+#else
+DECLARE_SG_PACKED_REDUCE_ADD(reduce_var_inside_sg, MEAN_TYPE, FSV, CALC_INVERSE_VARIANCE)
+#endif
 
 __attribute__((intel_reqd_sub_group_size(SIMD)))
 __attribute__((reqd_work_group_size(LWS, 1, 1)))
@@ -212,8 +243,12 @@ KERNEL(mvn_var_2)(const __global MEAN_TYPE* intermidiate_sum,
 
     MEAN_PACKED_TYPE complete_sum = FUNC_CALL(accumulate_sum)(intermidiate_sum, data_sets_offset, get_local_id(0));
 
+#if SG_NUM != 1
     __local MEAN_TYPE slm_acc[(SG_NUM - 1) * FSV];
     MEAN_TYPE inv_variance = FUNC_CALL(reduce_var_across_sg)(complete_sum, slm_acc);
+#else
+    MEAN_TYPE inv_variance = FUNC_CALL(reduce_var_inside_sg)(complete_sum);
+#endif
 
     if (sgid == 0 && (sglid < FSV || SIMD == FSV)) {
         intermidiate_ivar[flat_data_set_group * FSV + sglid] = inv_variance;
@@ -226,7 +261,11 @@ KERNEL(mvn_var_2)(const __global MEAN_TYPE* intermidiate_sum,
 DECLARE_PACKED_ACCUMULATE(accumulate_sum_input, int, INPUT0_TYPE, FSV, INPUT_SLICE_PITCH, ITEMS_NUM, LWS, ACCUMULATE_SUM)
 
 #define CALC_MEAN(sum) ((sum) / ITEMS_NUM)
+#if SG_NUM != 1
 DECLARE_WG_PACKED_REDUCE_ADD(reduce_mean, MEAN_TYPE, FSV, SG_NUM, CALC_MEAN)
+#else
+DECLARE_SG_PACKED_REDUCE_ADD(reduce_mean, MEAN_TYPE, FSV, CALC_MEAN)
+#endif
 
 // Variance:
 #define EXTRA_ARGS_DECL_IMPL    , MEAN_TYPE mean
@@ -237,7 +276,11 @@ DECLARE_WG_PACKED_REDUCE_ADD(reduce_mean, MEAN_TYPE, FSV, SG_NUM, CALC_MEAN)
 DECLARE_PACKED_ACCUMULATE_EARGS(accumulate_sum_sq_dev, MEAN_TYPE, INPUT0_TYPE, FSV, INPUT_SLICE_PITCH, ITEMS_NUM, LWS, ACCUMULATE_SUM_SQ_DEV, EXTRA_ARGS_DECL, EXTRA_ARGS)
 
 #define CALC_INVERSE_VARIANCE(sum_diff_sq)   native_powr((sum_diff_sq) / ITEMS_NUM + (MEAN_TYPE)EPSILON, -0.5f)
+#if SG_NUM != 1
 DECLARE_WG_PACKED_REDUCE_ADD(reduce_inverse_variance, MEAN_TYPE, FSV, SG_NUM, CALC_INVERSE_VARIANCE)
+#else
+DECLARE_SG_PACKED_REDUCE_ADD(reduce_inverse_variance, MEAN_TYPE, FSV, CALC_INVERSE_VARIANCE)
+#endif
 
 #define INPUT_PACKED_BLOCK_READ(ptr)   CAT(as_, INPUT_PACKED_TYPE)(CAT(BLOCK_READ_UC_, FSV)((const __global uchar*)ptr))
 
@@ -272,7 +315,7 @@ KERNEL(mvn_final)(
     const uint data_sets_offset = INPUT0_GET_INDEX(b, f, 0, 0);
     uint input_offset;
 
-#if !PRECALC_MEAN || (NORMALIZE_VARIANCE && !PRECALC_VARIANCE)
+#if (!PRECALC_MEAN || (NORMALIZE_VARIANCE && !PRECALC_VARIANCE)) && SG_NUM != 1
     __local MEAN_TYPE slm_acc[(SG_NUM - 1) * FSV];
 #endif
 
@@ -280,7 +323,11 @@ KERNEL(mvn_final)(
     MEAN_TYPE mean = means[flat_data_set_group * FSV + sglid];
 #else
     INT_PACKED_TYPE partial_sum = FUNC_CALL(accumulate_sum_input)(input, data_sets_offset, get_local_id(0));
+#   if SG_NUM != 1
     MEAN_TYPE mean = FUNC_CALL(reduce_mean)(TO_MEAN_PACKED_TYPE(partial_sum), slm_acc);
+#   else
+    MEAN_TYPE mean = FUNC_CALL(reduce_mean)(TO_MEAN_PACKED_TYPE(partial_sum));
+#   endif
 #endif
 
 #if NORMALIZE_VARIANCE
@@ -288,7 +335,11 @@ KERNEL(mvn_final)(
     MEAN_TYPE inv_variance = variances[flat_data_set_group * FSV + sglid];
 #   else
     MEAN_PACKED_TYPE partial_dev = FUNC_CALL(accumulate_sum_sq_dev)(input, data_sets_offset, get_local_id(0), mean);
+#       if SG_NUM != 1
     MEAN_TYPE inv_variance = FUNC_CALL(reduce_inverse_variance)(partial_dev, slm_acc);
+#       else
+    MEAN_TYPE inv_variance = FUNC_CALL(reduce_inverse_variance)(partial_dev);
+#       endif
 #   endif
 #else
     MEAN_TYPE inv_variance = 1;
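
For reference, the statistics computed by the kernels above reduce to mean = sum / N and inv_variance = (sum_sq_dev / N + EPSILON)^(-1/2); the final kernel then normalizes each value as (x - mean) * inv_variance. A scalar C++ sketch of the same arithmetic (eps and the element count stand in for EPSILON and ITEMS_NUM; values are illustrative):

    #include <cmath>
    #include <cstdio>
    #include <vector>

    int main() {
        const std::vector<float> x = {1.f, 2.f, 3.f, 4.f};
        const float eps = 1e-9f;                       // stands in for EPSILON
        const float n = static_cast<float>(x.size());  // stands in for ITEMS_NUM

        float sum = 0.f;
        for (float v : x) sum += v;
        const float mean = sum / n;  // CALC_MEAN

        float sum_sq_dev = 0.f;
        for (float v : x) sum_sq_dev += (v - mean) * (v - mean);
        // CALC_INVERSE_VARIANCE: native_powr(dev / N + eps, -0.5)
        const float inv_variance = std::pow(sum_sq_dev / n + eps, -0.5f);

        for (float v : x) std::printf("%f ", (v - mean) * inv_variance);
        std::printf("\n");
        return 0;
    }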
index 1b61b79..16f8eb3 100644
@@ -15,6 +15,7 @@
 #include "include/common.cl"
 
 // ==============================================================================================================================
+// DECLARE_SG_PACKED_REDUCE_ADD(Name, Type, VecSize, PostOp)
 // DECLARE_WG_PACKED_REDUCE_ADD(Name, Type, VecSize, SgNum, PostOp)
 //
 // Declares function "Name" performing work-group reduction on vector data, using addition operator:
 //  for other work-items in sub-group the result will be undefined.
 // All work-items in sub-group must enter declared function.
 //
+// DECLARE_SG_PACKED_REDUCE_ADD - declares a function with the same behaviour, but specialized for the case of a single
+// sub-group, not using local memory. It is declared as:
+//   Type Name (Type<VecSize> value)
+//
 // Template arguments:
 //   Name    - Name of function to declare.
 //   Type    - Type of values to reduce.  Can't be vector type. Examples: int, float, half.
 
 #define REDUCE_NO_POST_OP(val) (val)
 
+#define DECLARE_SG_PACKED_REDUCE_ADD(Name, Type, VecSize, PostOp)                                                       \
+    inline Type FUNC(Name) (MAKE_VECTOR_TYPE(Type, VecSize) value) {                                                    \
+        typedef MAKE_VECTOR_TYPE(Type, VecSize) packed_t;                                                               \
+                                                                                                                        \
+        Type result;                                                                                                    \
+                                                                                                                        \
+        /* [uniform] Current sub-group id */                                                                            \
+        const uint sgid = get_sub_group_id();                                                                           \
+        /* Id of work-item inside sub-group */                                                                          \
+        const uint sglid = get_sub_group_local_id();                                                                    \
+        /* [constexpr] Maximum simd/sub-group size */                                                                   \
+        const uint simd = get_max_sub_group_size();                                                                     \
+                                                                                                                        \
+        /* Accumulation inside sub-group */                                                                             \
+        packed_t acc;  /* [uniform] Accumulator variable */                                                             \
+        __attribute__((opencl_unroll_hint))                                                                             \
+        for (uint idx = 0; idx < VecSize; ++idx) {                                                                      \
+            acc[idx] = sub_group_reduce_add(value[idx]);                                                                \
+        }                                                                                                               \
+        /* Transpose the data to the correct layout */                                                                  \
+        if (sglid < VecSize || simd == VecSize) {                                                                       \
+            result = PostOp(acc[sglid]);                                                                                \
+        }                                                                                                               \
+        return result;                                                                                                  \
+    }
+
 #define DECLARE_WG_PACKED_REDUCE_ADD(Name, Type, VecSize, SgNum, PostOp)                                                \
     inline Type FUNC(Name) (MAKE_VECTOR_TYPE(Type, VecSize) value, __local Type* slm_acc) {                             \
         typedef MAKE_VECTOR_TYPE(Type, VecSize) packed_t;                                                               \
         for (uint idx = 0; idx < VecSize; ++idx) {                                                                      \
             acc[idx] = sub_group_reduce_add(value[idx]);                                                                \
         }                                                                                                               \
-        if ((SgNum) != 1) {                                                                                             \
-            /* More than one sub-group in work-group, reduce using local memory */                                      \
-            /* Store partial results into local memory from sub-groups other than first one */                          \
-            if (sgid != 0 && (sglid < VecSize || simd == VecSize)) {                                                    \
-                slm_acc[(sgid - 1) * VecSize + sglid] = acc[sglid];                                                     \
-            }                                                                                                           \
-            barrier(CLK_LOCAL_MEM_FENCE);                                                                               \
-            /* Accumulate partial results inside first sub-group */                                                     \
-            if (sgid == 0) {                                                                                            \
-                __attribute__((opencl_unroll_hint))                                                                     \
-                for (uint vi = 0; vi < VecSize; ++vi) {                                                                 \
-                    /* Accumulate single vector element using sub_group_reduce_add */                                   \
-                    /* Last work-item inside sub-group holds previous value (iteration or sub-group reduction stage) */ \
+        /* More than one sub-group in work-group, reduce using local memory */                                          \
+        /* Store partial results into local memory from sub-groups other than first one */                              \
+        if (sgid != 0 && (sglid < VecSize || simd == VecSize)) {                                                        \
+            slm_acc[(sgid - 1) * VecSize + sglid] = acc[sglid];                                                         \
+        }                                                                                                               \
+        barrier(CLK_LOCAL_MEM_FENCE);                                                                                   \
+        /* Accumulate partial results inside first sub-group */                                                         \
+        if (sgid == 0) {                                                                                                \
+            __attribute__((opencl_unroll_hint))                                                                         \
+            for (uint vi = 0; vi < VecSize; ++vi) {                                                                     \
+                /* Accumulate single vector element using sub_group_reduce_add */                                       \
+                /* Last work-item inside sub-group holds previous value (iteration or sub-group reduction stage) */     \
                                                                                                                         \
-                    Type tmp = acc[vi];                                                                                 \
-                    __attribute__((opencl_unroll_hint))                                                                 \
-                    for (uint sg = 0; sg < (SgNum) - 1; sg += (simd - 1)) {                                             \
-                        bool last_sglid = sglid == simd - 1;                                                            \
-                        bool sglid_inside_sgs = sg + simd - 1 <= (SgNum) - 1 || sg + sglid < (SgNum) - 1;               \
-                        Type tmp_in_slm = slm_acc[sg * VecSize + sglid * VecSize + vi];                                 \
-                        tmp = last_sglid ? tmp :                                                                        \
-                              sglid_inside_sgs ? tmp_in_slm                                                             \
-                              : 0;                                                                                      \
-                        tmp = sub_group_reduce_add(tmp);                                                                \
-                    }                                                                                                   \
-                    acc[vi] = tmp;                                                                                      \
-                }                                                                                                       \
-                if (sglid < VecSize || simd == VecSize) {                                                               \
-                    result = PostOp(acc[sglid]);                                                                        \
-                    slm_acc[sglid] = result;                                                                            \
+                Type tmp = acc[vi];                                                                                     \
+                __attribute__((opencl_unroll_hint))                                                                     \
+                for (uint sg = 0; sg < (SgNum) - 1; sg += (simd - 1)) {                                                 \
+                    bool last_sglid = sglid == simd - 1;                                                                \
+                    bool sglid_inside_sgs = sg + simd - 1 <= (SgNum) - 1 || sg + sglid < (SgNum) - 1;                   \
+                    Type tmp_in_slm = slm_acc[sg * VecSize + sglid * VecSize + vi];                                     \
+                    tmp = last_sglid ? tmp :                                                                            \
+                          sglid_inside_sgs ? tmp_in_slm                                                                 \
+                          : 0;                                                                                          \
+                    tmp = sub_group_reduce_add(tmp);                                                                    \
                 }                                                                                                       \
+                acc[vi] = tmp;                                                                                          \
             }                                                                                                           \
-            barrier(CLK_LOCAL_MEM_FENCE);                                                                               \
-            /* Read result in all other sub-groups */                                                                   \
-            if (sgid != 0 && (sglid < VecSize || simd == VecSize)) {                                                    \
-                result = slm_acc[sglid];                                                                                \
-            }                                                                                                           \
-        } else {                                                                                                        \
-            /* Single sub-group case, just transpose the data to correct layout */                                      \
             if (sglid < VecSize || simd == VecSize) {                                                                   \
                 result = PostOp(acc[sglid]);                                                                            \
                 slm_acc[sglid] = result;                                                                                \
             }                                                                                                           \
         }                                                                                                               \
+        barrier(CLK_LOCAL_MEM_FENCE);                                                                                   \
+        /* Read result in all other sub-groups */                                                                       \
+        if (sgid != 0 && (sglid < VecSize || simd == VecSize)) {                                                        \
+            result = slm_acc[sglid];                                                                                    \
+        }                                                                                                               \
         return result;                                                                                                  \
     }
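
As the comment block above documents, DECLARE_SG_PACKED_REDUCE_ADD covers the SG_NUM == 1 case without touching local memory: each element of the packed vector is reduced across the sub-group with sub_group_reduce_add, and the work-item with local id sglid then returns PostOp(acc[sglid]) (the result is undefined for the remaining work-items). A scalar C++ emulation of that semantics, with sub_group_reduce_add replaced by a plain sum over work-items (sizes are illustrative):

    #include <array>
    #include <cstdio>

    constexpr int SIMD     = 8;  // sub-group size (illustrative)
    constexpr int VEC_SIZE = 4;  // packed vector width, i.e. FSV (illustrative)

    int main() {
        // value[wi][v]: the per-work-item packed vector from the macro.
        int value[SIMD][VEC_SIZE];
        for (int wi = 0; wi < SIMD; ++wi)
            for (int v = 0; v < VEC_SIZE; ++v)
                value[wi][v] = wi + v;

        // Emulated sub_group_reduce_add for each vector element.
        std::array<int, VEC_SIZE> acc{};
        for (int v = 0; v < VEC_SIZE; ++v)
            for (int wi = 0; wi < SIMD; ++wi)
                acc[v] += value[wi][v];

        // "Transpose" step: work-item sglid picks up element sglid.
        for (int sglid = 0; sglid < VEC_SIZE; ++sglid)
            std::printf("result for sglid %d = %d\n", sglid, acc[sglid]);
        return 0;
    }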
index 5a42d46..c5a130d 100644
@@ -145,6 +145,8 @@ inline uint FUNC(get_output_index)(uint g, uint o, uint i, uint z, uint y, uint
     return GET_FILTER_OS_IS_YX_OSV16_ISV4_INDEX(OUTPUT, o, i, y, x);
 #elif defined OUTPUT_LAYOUT_OS_IS_YX_OSV32_ISV4_SWIZZLED_BY_2
     return GET_FILTER_OS_IS_YX_OSV32_ISV4_SWIZZLED_BY_2_INDEX(OUTPUT, o, i, y, x);
+#elif defined OUTPUT_LAYOUT_OS_IS_YX_OSV32_ISV4
+    return GET_FILTER_OS_IS_YX_OSV32_ISV4_INDEX(OUTPUT, o, i, y, x);
 #elif defined OUTPUT_LAYOUT_OS_IS_YX_ISA8_OSV8_ISV4_SWIZZLED_BY_4
     return GET_FILTER_OS_IS_YX_ISA8_OSV8_ISV4_SWIZZLED_BY_4_INDEX(OUTPUT, o, i, y, x);
 #elif defined OUTPUT_LAYOUT_OS_IS_YX_OSA4_ISA8_OSV8_ISV4_SWIZZLED_BY_4
index d0657b6..061079c 100644
@@ -15,7 +15,7 @@
 
 #include "include/include_all.cl"
 
-KERNEL(reverse_sequence_ref)(const __global UNIT_TYPE* input, const __global float* seq_lengths, __global UNIT_TYPE* output)
+KERNEL(reverse_sequence_ref)(const __global UNIT_TYPE* input, const __global INPUT1_TYPE* seq_lengths, __global UNIT_TYPE* output)
 {
     const uint batch = get_global_id(0);
     const uint feature = get_global_id(1);
@@ -29,7 +29,7 @@ KERNEL(reverse_sequence_ref)(const __global UNIT_TYPE* input, const __global flo
                              y * INPUT0_Y_PITCH +
                              x * INPUT0_X_PITCH;
 
-    const uint length = seq_lengths[dimensions[BATCH_AXIS]];
+    const uint length = (uint)seq_lengths[dimensions[BATCH_AXIS]];
     if (dimensions[SEQ_AXIS] < length)
         dimensions[SEQ_AXIS] = length - dimensions[SEQ_AXIS] - 1;
 
index 7bbe95e..e6a3470 100644
@@ -97,6 +97,11 @@ JitTerm exp(const JitTerm& arg) {
     return jit_term;
 }
 
+JitTerm erf(const JitTerm& arg) {
+    JitTerm jit_term{"(erf(" + arg.str() + "))"};
+    return jit_term;
+}
+
 JitTerm log(const JitTerm& arg) {
     JitTerm jit_term{"(log(" + arg.str() + "))"};
     return jit_term;
@@ -689,7 +694,7 @@ JitConstants MakeActivationJitConstants(ActivationFunction activation_function,
             jitConstants.AddConstant(MakeJitConstant(macro_def, "(-input)"));
             break;
         case ActivationFunction::ERF:
-            jitConstants.AddConstant(MakeJitConstant(macro_def, "erf(input)"));
+            jitConstants.AddConstant(MakeJitConstant(macro_def, erf(input).str()));
             break;
         case ActivationFunction::HARD_SIGMOID: {
             auto alpha = disable_type_conversion ? "m"_jit : to_type("m"_jit);
@@ -733,6 +738,15 @@ JitConstants MakeActivationJitConstants(ActivationFunction activation_function,
                     (input / (one + exp(neg(input)))).str()));
             break;
         }
+        case ActivationFunction::GELU: {
+            std::string type_suffix = out_dt == Datatype::F32 ? "f" : "h";
+            const JitTerm half{"0.5" + type_suffix};
+            const JitTerm mult{std::to_string(1.0f / std::sqrt(2.0f)) + type_suffix};
+            jitConstants.AddConstant(MakeJitConstant(
+                    macro_def,
+                    (half * input * (one + erf((input * mult)))).str()));
+            break;
+        }
         case ActivationFunction::NOT:
             jitConstants.AddConstant(MakeJitConstant(
                 macro_def,
@@ -1298,6 +1312,10 @@ std::string FusedOpsCodeGenerator::GetJitLoad(const FusedOpsConfiguration& conf,
                 block_read = CastToType(" intel_sub_group_block_read_us" + vs + "("
                                         + "(const __global ushort*)(" + GetInputPtrName(input_id) + " + " + index_func_call_vec + "))",
                                         input_dt, vec_size);
+            } else if (input_dt == Datatype::UINT8 || input_dt == Datatype::INT8) {
+                block_read = CastToType("BLOCK_READ_UC_" + std::to_string(vec_size) + "("
+                                        + "(const __global uchar*)(" + GetInputPtrName(input_id) + " + " + index_func_call_vec + "))",
+                                        input_dt, vec_size);
             } else {
                 throw std::runtime_error("Aligned load is not supported yet for " + toCLType(input_dt) + " data type");
             }
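
The GELU jit constant added above expands to the exact erf form, gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))), which is also what the activation unit test further below checks against. A minimal host-side C++ sanity check of the same expression:

    #include <cmath>
    #include <cstdio>

    // gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))), matching the generated macro.
    static float gelu_ref(float x) {
        return 0.5f * x * (1.0f + std::erf(x / std::sqrt(2.0f)));
    }

    int main() {
        for (float x : {-2.0f, -0.5f, 0.0f, 0.5f, 2.0f})
            std::printf("gelu(%.2f) = %f\n", x, gelu_ref(x));
        return 0;
    }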
index 8283c5c..be16f4b 100644
@@ -85,6 +85,7 @@ std::string toString(ActivationFunction activation) {
         case ActivationFunction::SOFTPLUS:                 method = "SOFTPLUS"; break;
         case ActivationFunction::SOFTSIGN:                 method = "SOFTSIGN"; break;
         case ActivationFunction::SWISH:                    method = "SWISH"; break;
+        case ActivationFunction::GELU:                     method = "GELU"; break;
         default: break;
     }
     return method;
@@ -336,6 +337,7 @@ std::string toString(WeightsLayout layout) {
         case WeightsLayout::os_is_y_x8_osv8_isv4:                        return "OS_IS_Y_X8_OSV8_ISV4";
         case WeightsLayout::os_is_yx_osv16_isv4:                         return "OS_IS_YX_OSV16_ISV4";
         case WeightsLayout::os_is_yx_osv32_isv4_swizzled_by_2:           return "OS_IS_YX_OSV32_ISV4_SWIZZLED_BY_2";
+        case WeightsLayout::os_is_yx_osv32_isv4:                         return "OS_IS_YX_OSV32_ISV4";
         case WeightsLayout::os_is_y_x8_osv8_isv4_swizzled_by_4:          return "OS_IS_Y_X8_OSV8_ISV4_SWIZZLED_BY_4";
         case WeightsLayout::os_is_yx_osv32_isv32p:                       return "OS_IS_YX_OSV32_ISV32P";
         case WeightsLayout::oizyx:                                       return "OIZYX";
index 769e74a..28555a8 100644
@@ -61,18 +61,30 @@ struct lrn_gpu : typed_primitive_gpu_impl<lrn> {
 namespace detail {
 
 attach_lrn_gpu::attach_lrn_gpu() {
-    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::f32, format::yxfb),
-                                 lrn_gpu::create);
-    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::f16, format::yxfb),
-                                 lrn_gpu::create);
-    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::f32, format::bfyx),
-                                 lrn_gpu::create);
-    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::f16, format::bfyx),
-                                 lrn_gpu::create);
-    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::f32, format::byxf),
-                                 lrn_gpu::create);
-    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::f16, format::byxf),
-                                 lrn_gpu::create);
+    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::f32, format::yxfb), lrn_gpu::create);
+    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::f16, format::yxfb), lrn_gpu::create);
+    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::u8, format::yxfb), lrn_gpu::create);
+    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::i8, format::yxfb), lrn_gpu::create);
+
+    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::f32, format::bfyx), lrn_gpu::create);
+    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::f16, format::bfyx), lrn_gpu::create);
+    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::u8, format::bfyx), lrn_gpu::create);
+    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::i8, format::bfyx), lrn_gpu::create);
+
+    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::f32, format::byxf), lrn_gpu::create);
+    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::f16, format::byxf), lrn_gpu::create);
+    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::u8, format::byxf), lrn_gpu::create);
+    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::i8, format::byxf), lrn_gpu::create);
+
+    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::f32, format::b_fs_yx_fsv4), lrn_gpu::create);
+    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::f16, format::b_fs_yx_fsv4), lrn_gpu::create);
+    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::u8, format::b_fs_yx_fsv4), lrn_gpu::create);
+    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::i8, format::b_fs_yx_fsv4), lrn_gpu::create);
+
+    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::f32, format::b_fs_yx_fsv16), lrn_gpu::create);
+    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::f16, format::b_fs_yx_fsv16), lrn_gpu::create);
+    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::u8, format::b_fs_yx_fsv16), lrn_gpu::create);
+    implementation_map<lrn>::add(std::make_tuple(engine_types::ocl, data_types::i8, format::b_fs_yx_fsv16), lrn_gpu::create);
 }
 
 }  // namespace detail
index ae38bdd..a869aa3 100644
@@ -97,6 +97,11 @@ attach_quantize_gpu::attach_quantize_gpu() {
     implementation_map<quantize>::add(std::make_tuple(engine_types::ocl, data_types::u8, format::byxf_af32), val_fw);
     implementation_map<quantize>::add(std::make_tuple(engine_types::ocl, data_types::i8, format::byxf_af32), val_fw);
 
+    implementation_map<quantize>::add(std::make_tuple(engine_types::ocl, data_types::f32, format::byxf), val_fw);
+    implementation_map<quantize>::add(std::make_tuple(engine_types::ocl, data_types::f16, format::byxf), val_fw);
+    implementation_map<quantize>::add(std::make_tuple(engine_types::ocl, data_types::u8, format::byxf), val_fw);
+    implementation_map<quantize>::add(std::make_tuple(engine_types::ocl, data_types::i8, format::byxf), val_fw);
+
     implementation_map<quantize>::add(std::make_tuple(engine_types::ocl, data_types::f32, format::b_fs_yx_fsv4), val_fw);
     implementation_map<quantize>::add(std::make_tuple(engine_types::ocl, data_types::f16, format::b_fs_yx_fsv4), val_fw);
     implementation_map<quantize>::add(std::make_tuple(engine_types::ocl, data_types::u8, format::b_fs_yx_fsv4), val_fw);
index 5bd635e..66d1ce4 100644
@@ -377,6 +377,8 @@ void prepare_primitive_fusing::fuse_simple_primitives(program_impl &p) {
 
             should_fuse |= input_data.is_type<gemm>() && gemm_supports_fusings(input_data.as<gemm>());
 
+            should_fuse |= input_data.is_type<lrn>();
+
             should_fuse |= input_data.is_type<pooling>() &&
                 (input_data.get_dependency(0).get_output_layout().data_type == data_types::i8 ||
                  input_data.get_dependency(0).get_output_layout().data_type == data_types::u8) &&
@@ -410,6 +412,8 @@ void prepare_primitive_fusing::fuse_simple_primitives(program_impl &p) {
 
             should_fuse |= input_data.is_type<gemm>() && gemm_supports_fusings(input_data.as<gemm>());
 
+            should_fuse |= input_data.is_type<lrn>();
+
             should_fuse |= input_data.is_type<pooling>() &&
                 (input_data.get_dependency(0).get_output_layout().data_type == data_types::i8 ||
                  input_data.get_dependency(0).get_output_layout().data_type == data_types::u8) &&
@@ -450,6 +454,9 @@ void prepare_primitive_fusing::fuse_simple_primitives(program_impl &p) {
             should_fuse |= input_data.is_type<convolution>() && conv_supports_fusings(input_data.as<convolution>()) &&
                            quantize_node.get_scale_shift_opt() &&
                            ((out_layout.data_type == data_types::f32 || out_layout.data_type == data_types::f16)  ||
+                            input_data.get_output_layout().format == format::b_fs_yx_fsv16 ||
+                            (_lo.should_select_b_fs_yx_fsv16_layout(input_data.as<convolution>(), input_data.get_dependency(1).get_output_layout()) &&
+                             !is_grouped_conv(input_data.as<convolution>())) ||
                            // Avoid fusing to b_fs_yx_fsv16 (and similar) kernels
                            ((input_data.get_dependency(0).get_output_layout().data_type == data_types::u8 ||
                            input_data.get_dependency(0).get_output_layout().data_type == data_types::i8) &&
@@ -467,6 +474,9 @@ void prepare_primitive_fusing::fuse_simple_primitives(program_impl &p) {
                            quantize_node.get_scale_shift_opt() &&
                            (out_layout.data_type == data_types::u8 || out_layout.data_type == data_types::i8);
 
+            should_fuse |= input_data.is_type<lrn>() &&
+                           quantize_node.get_scale_shift_opt();
+
             should_fuse |= input_data.is_type<gemm>() && gemm_supports_fusings(input_data.as<gemm>()) &&
                            quantize_node.get_scale_shift_opt() &&
                            (out_layout.data_type == data_types::u8 || out_layout.data_type == data_types::i8);
@@ -661,7 +671,7 @@ void prepare_conv_eltw_fusing::fuse_conv_eltwise(program_impl& p, program_node*
     // only single ADD operation is currently supported
     // TODO: enable more
     eltwise& eltw = const_cast<eltwise&>(*eltw_node->get_primitive());
-    if (eltw.mode != eltwise_mode::sum)
+    if (eltw.mode != eltwise_mode::sum || !eltw.coefficients.empty())
         return;
 
     int eltw_fused_input_idx;   // <-- this input gets fused with eltwise
index 969885c..7d7f7ef 100644
@@ -83,7 +83,8 @@ void remove_redundant_reorders::run(program_impl& p) {
                 continue;
 
             auto output_padded = static_cast<bool>(output_layout.data_padding);
-            auto can_omit_padding = output_layout.format == format::b_fs_yx_fsv16 && input.get_output_layout().format == format::bfyx;
+            auto can_omit_padding = (output_layout.format == format::b_fs_yx_fsv16 || output_layout.format == format::b_fs_yx_fsv32) &&
+                                    input.get_output_layout().format == format::bfyx;
 
             if (output_padded && !can_omit_padding) {
                 if (input.get_users().size() != 1)
index 4fe44eb..f4e5157 100644
@@ -162,6 +162,8 @@ inline std::string fmt_to_str(format fmt) {
             return "os_is_yx_osv16_isv4";
         case format::os_is_yx_osv32_isv4_swizzled_by_2:
             return "os_is_yx_osv32_isv4_swizzled_by_2";
+        case format::os_is_yx_osv32_isv4:
+            return "os_is_yx_osv32_isv4";
         case format::os_is_y_x8_osv8_isv4:
             return "os_is_y_x8_osv8_isv4";
         case format::os_is_yx_osv32_isv32p:
index 0d9d3b9..a10f926 100644
@@ -269,6 +269,8 @@ kernel_selector::weights_layout to_weights_layout(format f) {
             return kernel_selector::weights_layout::os_is_yx_osv16_isv4;
         case format::os_is_yx_osv32_isv4_swizzled_by_2:
             return kernel_selector::weights_layout::os_is_yx_osv32_isv4_swizzled_by_2;
+        case format::os_is_yx_osv32_isv4:
+            return kernel_selector::weights_layout::os_is_yx_osv32_isv4;
         case format::os_is_yx_osv32_isv32p:
             return kernel_selector::weights_layout::os_is_yx_osv32_isv32p;
         case format::os_is_yx_isv16_osv16:
@@ -387,6 +389,8 @@ cldnn::format::type from_weights_layout(kernel_selector::weights_layout l) {
             return cldnn::format::os_is_y_x8_osv8_isv4;
         case kernel_selector::weights_layout::os_is_yx_osv32_isv4_swizzled_by_2:
             return format::os_is_yx_osv32_isv4_swizzled_by_2;
+        case kernel_selector::weights_layout::os_is_yx_osv32_isv4:
+            return format::os_is_yx_osv32_isv4;
         case kernel_selector::weights_layout::os_is_y_x8_osv8_isv4_swizzled_by_4:
             return cldnn::format::os_is_y_x8_osv8_isv4_swizzled_by_4;
         case kernel_selector::weights_layout::os_is_yx_osv32_isv32p:
@@ -616,6 +620,8 @@ kernel_selector::activation_function get_kernel_selector_activation_param(activa
             return kernel_selector::activation_function::HARD_SIGMOID;
         case cldnn::activation_func::swish:
             return kernel_selector::activation_function::SWISH;
+        case cldnn::activation_func::gelu:
+            return kernel_selector::activation_function::GELU;
         default:
             throw std::runtime_error("Unknown activation function");
             break;
index bbf7825..bf9ce7f 100644
@@ -196,11 +196,9 @@ bool layout_optimizer::can_fuse_reorder(program_node& prev, program_node& next,
     if (next.is_type<convolution>() &&
         fmt_prev == format::bfyx &&
         ((fmt_next == format::fs_b_yx_fsv32 && next.as<convolution>().get_primitive()->groups == 1) ||
-        (fmt_next == format::b_fs_yx_fsv32 && prev_output_layout.size.feature[0] == 3) ||
+        (fmt_next == format::b_fs_yx_fsv32 && (prev_output_layout.size.feature[0] == 3 || prev_output_layout.size.feature[0] == 4)) ||
         (fmt_next == format::b_fs_yx_fsv16 && next_output_layout.size.feature[0] >= 16 &&
-        prev_output_layout.size.feature[0] == 3 &&
-        (next_output_layout.data_type != data_types::i8 && next_output_layout.data_type != data_types::u8)) ||
-         (fmt_next == format::b_fs_yx_fsv16 && next_output_layout.size.feature[0] >= 16 && prev_output_layout.size.feature[0] == 3) ||
+        (prev_output_layout.size.feature[0] == 3 || (prev_output_layout.size.feature[0] == 4 && (prev_dt == data_types::u8 || prev_dt == data_types::i8)))) ||
         (fmt_next == format::bs_fs_yx_bsv16_fsv16 && next_output_layout.size.feature[0] % 16 == 0 && prev_output_layout.size.feature[0] == 3)))
         return true;
 
@@ -349,6 +347,8 @@ bool layout_optimizer::convolution_b_fs_yx_fsv16_opt(layout const &input_layout,
     if (i8_dt_case) {
         auto ks_x = weights_layout.size.spatial[0];
         auto ks_y = weights_layout.size.spatial[1];
+
+        // Check for depthwise convolution
         if (input_layout.size.spatial[2] == 1 &&
             input_layout.size.batch[0] < 16 &&
             ((ks_x == 7 && ks_y == 7) || (ks_x == 3 && ks_y == 3) || (ks_x == 1 && ks_y == 1) || (ks_x == 5 && ks_y == 5)) &&
@@ -356,7 +356,13 @@ bool layout_optimizer::convolution_b_fs_yx_fsv16_opt(layout const &input_layout,
             ((conv->groups == 1 && conv->split() == 1) ||
              conv->groups == static_cast<uint32_t>(input_layout.size.feature[0]) ||
              conv->split() == static_cast<int32_t>(input_layout.size.feature[0])) &&
-            conv->dilation == tensor{ 1 })
+             conv->dilation == tensor{ 1 })
+            return true;
+        // Check for grouped convolution
+        else if (input_layout.size.spatial[2] == 1 && input_layout.size.batch[0] < 16 &&
+                 weights_layout.size.batch[0] >= 16 &&
+                ((input_layout.size.feature[0] / conv->groups) % 4 == 0) &&
+                ((conv->dilation.spatial[0] + 1) * (ks_x - 1)) < 16)
             return true;
     }
     // A set of rules that define when b_fs_yx_fsv16 mem format can be used for fp16/fp32 case
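
The new grouped-convolution branch above gates the b_fs_yx_fsv16 pick on a 2D input (spatial z == 1), batch below 16, at least 16 output channels, input features per group divisible by 4, and a dilated kernel extent below 16. A standalone C++ sketch of that predicate (the struct and field names are illustrative stand-ins, not the cldnn API):

    #include <cstdio>

    // Illustrative stand-ins for the layout/convolution fields used above.
    struct ConvParams {
        int spatial_z;     // input_layout.size.spatial[2]
        int batch;         // input_layout.size.batch[0]
        int out_features;  // weights_layout.size.batch[0]
        int in_features;   // input_layout.size.feature[0]
        int groups;        // conv->groups
        int dilation_x;    // conv->dilation.spatial[0]
        int kernel_x;      // ks_x
    };

    static bool grouped_conv_fsv16_ok(const ConvParams& p) {
        return p.spatial_z == 1 && p.batch < 16 &&
               p.out_features >= 16 &&
               (p.in_features / p.groups) % 4 == 0 &&
               ((p.dilation_x + 1) * (p.kernel_x - 1)) < 16;
    }

    int main() {
        const ConvParams p{1, 1, 32, 16, 4, 1, 3};
        std::printf("b_fs_yx_fsv16 eligible: %d\n", grouped_conv_fsv16_ok(p));
        return 0;
    }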
index dba1fb3..8d5d0e8 100644
@@ -29,7 +29,17 @@ primitive_type_id lrn::type_id() {
 layout lrn_inst::calc_output_layout(lrn_node const& node) {
     assert(static_cast<bool>(node.get_primitive()->output_data_type) == false &&
            "Output data type forcing is not supported for lrn_node!");
-    return node.input().get_non_padded_output_layout();
+    auto input_layout = node.input().get_output_layout();
+    auto output_type = input_layout.data_type;
+
+    if (node.has_fused_primitives()) {
+        output_type = node.get_fused_output_layout().data_type;
+    }
+
+    auto result = node.input().get_non_padded_output_layout();
+    result.data_type = output_type;
+
+    return result;
 }
 
 std::string lrn_inst::to_string(lrn_node const& node) {
index df2aca4..3d94213 100644
@@ -30,7 +30,14 @@ primitive_type_id select::type_id() {
 layout select_inst::calc_output_layout(select_node const& node) {
     assert(static_cast<bool>(node.get_primitive()->output_data_type) == false &&
            "Output data type forcing is not supported for select_node!");
-    return node.input(1).get_non_padded_output_layout();
+
+    auto output_layout = node.input(1).get_non_padded_output_layout();
+
+    if (node.get_primitive()->broadcast_type == "numpy") {
+        output_layout.size = tensor::max(node.input(1).get_output_layout().size, node.input(2).get_output_layout().size);
+    }
+
+    return output_layout;
 }
 
 std::string select_inst::to_string(select_node const& node) {
@@ -53,51 +60,64 @@ std::string select_inst::to_string(select_node const& node) {
 select_inst::typed_primitive_inst(network_impl& network, select_node const& node) : parent(network, node) {
     auto& deps = node.get_dependencies();
 
-    for (size_t i = 1; i < deps.size() - 1; i++) {
-        auto batch1 = deps[i]->get_output_layout().size.batch[0];
-        auto batch2 = deps[i + 1]->get_output_layout().size.batch[0];
-        CLDNN_ERROR_NOT_EQUAL(node.id(), "Batch size input", batch1, "Batch size next input", batch2, "");
-
-        auto feature1 = deps[i]->get_output_layout().size.feature[0];
-        auto feature2 = deps[i + 1]->get_output_layout().size.feature[0];
-        CLDNN_ERROR_NOT_EQUAL(node.id(), "Feature size input", feature1, "Feature size next input", feature2, "");
-
-        auto spatial1 = deps[i]->get_output_layout().size.spatial[0];
-        auto spatial2 = deps[i + 1]->get_output_layout().size.spatial[0];
-        CLDNN_ERROR_NOT_EQUAL(node.id(), "Spatial size input", spatial1, "Spatial size next input", spatial2, "");
-
-        auto format1 = deps[i]->get_output_layout().format;
-        auto format2 = deps[i + 1]->get_output_layout().format;
-        CLDNN_ERROR_NOT_EQUAL(node.id(), "Format input", format1, "Format next input", format2, "");
+    CLDNN_ERROR_LESS_THAN(node.id(),
+                                "Number of inputs",
+                                deps.size(),
+                                "Expected number of inputs",
+                                3,
+                                "");
+
+    CLDNN_ERROR_NOT_EQUAL(node.id(),
+                                "Mask format",
+                                deps[0]->get_output_layout().format,
+                                "Positive input format",
+                                deps[1]->get_output_layout().format,
+                                "");
+
+    if (node.get_primitive()->broadcast_type == "none") {
+        CLDNN_ERROR_LAYOUT_MISMATCH(node.id(),
+                                "Positive input layout",
+                                deps[1]->get_output_layout(),
+                                "Negative input layout",
+                                deps[2]->get_output_layout(),
+                                "");
+
+        CLDNN_ERROR_NOT_EQUAL(node.id(),
+                                "Mask size",
+                                deps[0]->get_output_layout().size,
+                                "Positive input format",
+                                deps[1]->get_output_layout().size,
+                                "");
+    } else if (node.get_primitive()->broadcast_type == "numpy") {
+        CLDNN_ERROR_NOT_EQUAL(node.id(),
+                                "Positive input format",
+                                deps[1]->get_output_layout().format,
+                                "Negative input format",
+                                deps[2]->get_output_layout().format,
+                                "");
+
+        CLDNN_ERROR_DATA_TYPES_MISMATCH(node.id(),
+                                "Positive input data type",
+                                deps[1]->get_output_layout().data_type,
+                                "Negative input data type",
+                                deps[2]->get_output_layout().data_type,
+                                "");
+
+        cldnn::tensor output_tensor = tensor::max(deps[1]->get_output_layout().size, deps[2]->get_output_layout().size);
+        auto max_dim_count = output_tensor.raw.size();
+
+        for (size_t i = 0; i < deps.size(); i++) {
+            for (size_t d = 0; d < max_dim_count; d++) {
+                auto current_dim = deps[i]->get_output_layout().size.raw[d];
+
+                CLDNN_ERROR_BOOL(node.id(),
+                                    "Sizes equal or broadcast is possible",
+                                    !(current_dim == output_tensor.raw[d] || current_dim == 1),
+                                    "Invalid input shapes");
+            }
+        }
+    } else {
+        CLDNN_ERROR_MESSAGE(node.id(), "Unsupported broadcast_type: " + node.get_primitive()->broadcast_type);
     }
-
-    // For mask added special validations (it can differ from inputs in size)
-    auto batch1 = deps[0]->get_output_layout().size.batch[0];
-    auto batch2 = deps[1]->get_output_layout().size.batch[0];
-    if (batch1 != batch2 && batch1 != 1)
-        CLDNN_ERROR_MESSAGE(node.id(), "Incorrect mask batch size with respect to inputs batch size");
-
-    auto feature1 = deps[0]->get_output_layout().size.feature[0];
-    auto feature2 = deps[1]->get_output_layout().size.feature[0];
-    if (feature1 != feature2 && batch1 != 1)
-        CLDNN_ERROR_MESSAGE(node.id(), "Incorrect mask feature size with respect to inputs feature size");
-
-    auto spatial01 = deps[0]->get_output_layout().size.spatial[0];
-    auto spatial02 = deps[1]->get_output_layout().size.spatial[0];
-    if (spatial01 != spatial02 && spatial01 != 1)
-        CLDNN_ERROR_MESSAGE(node.id(), "Incorrect mask spatial size with respect to inputs spatial size");
-
-    auto spatial11 = deps[0]->get_output_layout().size.spatial[1];
-    auto spatial12 = deps[1]->get_output_layout().size.spatial[1];
-    if (spatial11 != spatial12 && spatial11 != 1)
-        CLDNN_ERROR_MESSAGE(node.id(), "Incorrect mask spatial size with respect to inputs spatial size");
-
-    auto format1 = deps[0]->get_output_layout().format;
-    auto format2 = deps[1]->get_output_layout().format;
-    CLDNN_ERROR_NOT_EQUAL(node.id(), "Format input", format1, "Format next input", format2, "");
-
-    auto data_type1 = deps[1]->get_output_layout().data_type;
-    auto data_type2 = deps[2]->get_output_layout().data_type;
-    CLDNN_ERROR_DATA_TYPES_MISMATCH(node.id(), "Data type input 1", data_type1, "Data type input 2", data_type2, "");
 }
 }  // namespace cldnn
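
The "numpy" branch above implements the usual broadcasting rule: the output extent of every raw dimension is the maximum over the two data inputs, and each input (the mask included, via the loop over all dependencies) must match that extent or be 1 in that dimension. A small C++ sketch of the check for the two data inputs (plain arrays instead of cldnn::tensor):

    #include <algorithm>
    #include <array>
    #include <cstdio>

    constexpr size_t DIMS = 4;  // e.g. b, f, y, x (illustrative)

    static bool numpy_broadcast_ok(const std::array<int, DIMS>& a,
                                   const std::array<int, DIMS>& b) {
        for (size_t d = 0; d < DIMS; ++d) {
            const int out = std::max(a[d], b[d]);  // mirrors tensor::max
            if ((a[d] != out && a[d] != 1) || (b[d] != out && b[d] != 1))
                return false;
        }
        return true;
    }

    int main() {
        std::printf("%d\n", numpy_broadcast_ok({2, 16, 1, 8}, {2, 16, 4, 8}));  // 1: dim y broadcasts
        std::printf("%d\n", numpy_broadcast_ok({2, 16, 3, 8}, {2, 16, 4, 8}));  // 0: 3 vs 4 cannot broadcast
        return 0;
    }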
index 3bb158b..a293a86 100644
@@ -699,7 +699,8 @@ TEST(activation_f32_fw_gpu, basic_yxfb_all_functions)
         activation_func::tan,
         activation_func::negative,
         activation_func::abs,
-        activation_func::swish
+        activation_func::swish,
+        activation_func::gelu
     };
 
     activation_additional_params params = { 0.5f, 2.5f };
@@ -816,6 +817,10 @@ TEST(activation_f32_fw_gpu, basic_yxfb_all_functions)
                 case activation_func::swish:
                     EXPECT_FLOAT_EQ((float)input_ptr[i] / (1.f + std::exp((float)(-input_ptr[i]))), output_ptr[i]);
                     break;
+                case activation_func::gelu:
+                    EXPECT_NEAR(0.5f * (float)input_ptr[i] * (1.f + std::erf((float)(input_ptr[i]) / std::sqrt(2.0f))),
+                                output_ptr[i], 1e-5f);
+                    break;
                 default:
                     break;
                 }
index ecd134e..f623585 100644
@@ -4133,7 +4133,7 @@ TEST(convolution_f32_fw_gpu, byte_activation) {
     //
     //  Bias:
     //  1 -8
-    engine_configuration eng_conf(false, false, false, "", "", true, "", "/home/vparamuz/tmp/cldnn/sources/");
+    auto eng_conf = get_test_engine();
     engine engine{ eng_conf };
     auto input = memory::allocate(engine, { data_types::i8, format::bfyx,{ 1, 1, 5, 4 } });
 
@@ -5115,14 +5115,15 @@ using TestParamType_convolution_depthwise_gpu = ::testing::tuple<int,   // 0 - I
         bool>; // 6 - With bias
 
 using TestParamType_grouped_convolution_gpu = ::testing::tuple<  int,    // 0 - Input X size
-        int,   // 1 - Input Y size
-        int,   // 2 - Input features
-        int,   // 3 - Output features
-        int,   // 4 - Kernel sizeX
-        int,   // 5 - Kernel sizeY
-        int,   // 6 - Groups number
-        int,   // 7 - Stride
-        int>;  // 8 - Batch
+        int,        // 1 - Input Y size
+        int,        // 2 - Input features
+        int,        // 3 - Output features
+        int,        // 4 - Kernel sizeX
+        int,        // 5 - Kernel sizeY
+        int,        // 6 - Groups number
+        int,        // 7 - Stride
+        int,        // 8 - Batch
+        format>;    // 9 - Input data format
 
 struct convolution_gpu : public ::testing::TestWithParam<TestParamType_convolution_gpu>
 {
@@ -5204,7 +5205,8 @@ struct convolution_grouped_gpu : public ::testing::TestWithParam<TestParamType_g
                            std::to_string(testing::get<5>(param_info.param)) + "y" +
                "_groups" + std::to_string(testing::get<6>(param_info.param)) +
                "_stride" + std::to_string(testing::get<7>(param_info.param)) +
-               "_batch"  + std::to_string(testing::get<8>(param_info.param));
+               "_batch"  + std::to_string(testing::get<8>(param_info.param)) +
+               "_format" + std::to_string(testing::get<9>(param_info.param));
     }
 };
 
@@ -6236,7 +6238,7 @@ TEST(convolution_gpu, bfyx_iyxo_5x5_fp16)
         EXPECT_EQ(1, 1);
         return;
     }
-    
+
     const int batch_num = 1;
     const int output_f = 4;
 
@@ -6309,7 +6311,7 @@ TEST(convolution_gpu, bfyx_iyxo_5x5_fp16)
     }
     else
     {
-        
+
         // Calculate reference values without bias
         for (auto bi = 0; bi < batch_num; ++bi)
         {
@@ -6324,8 +6326,8 @@ TEST(convolution_gpu, bfyx_iyxo_5x5_fp16)
                     output_padding, output_padding);
             }
         }
-        
-        
+
+
         auto conv_fsv = convolution("conv_fsv", "input", { "weights_fsv" },
             { 1, 1, stride, stride }, { 0, 0, input_offset, input_offset });
         conv_fsv.output_padding = padding({ 0, 0, output_padding, output_padding }, 0.f);
@@ -6345,7 +6347,7 @@ TEST(convolution_gpu, bfyx_iyxo_5x5_fp16)
 
     auto out_mem = network.get_output("conv_fsv").get_memory();
     auto out_ptr = out_mem.pointer<FLOAT16>();
-    
+
     for (int bi = 0; bi < batch_num; ++bi)
         for (int fi = 0; fi < output_f; ++fi)
             for (int yi = 0; yi < output_y; ++yi)
@@ -6363,7 +6365,7 @@ TEST(convolution_gpu, bfyx_iyxo_5x5_fp16)
                         std::cout << "At b = " << bi << ", fi = " << fi << ", xi = " << xi << ", yi = " << yi << std::endl;
                     }
                 }
-                
+
 }
 
 INSTANTIATE_TEST_CASE_P(convolution_gpu_block,
@@ -7632,21 +7634,31 @@ INSTANTIATE_TEST_CASE_P(convolution_depthwise_gpu_bfyx,
                         ),
                         convolution_depthwise_gpu::PrintToStringParamName);
 
-INSTANTIATE_TEST_CASE_P(convolution_grouped_b_fs_yx_fsv4,
+INSTANTIATE_TEST_CASE_P(convolution_grouped_fsv4_fsv16,
                         convolution_grouped_gpu,
                         ::testing::Values(
                             // Input X size, Input Y size, Input features, Output features, Kernel size X, Kernel size
-                            // Y, Groups number, Stride, Output padding, Batch
-                            TestParamType_grouped_convolution_gpu(4, 4, 16, 16, 3, 3, 4, 1, 1),
-                            TestParamType_grouped_convolution_gpu(4, 4, 8, 4, 2, 2, 2, 1, 4),
-                            TestParamType_grouped_convolution_gpu(8, 8, 16, 16, 4, 4, 4, 1, 1),
-                            TestParamType_grouped_convolution_gpu(17, 17, 32, 96, 3, 3, 2, 2, 2),
-                            TestParamType_grouped_convolution_gpu(16, 16, 8, 48, 2, 2, 2, 2, 1),
-                            TestParamType_grouped_convolution_gpu(3, 3, 48, 96, 2, 2, 2, 8, 1),
-                            TestParamType_grouped_convolution_gpu(6, 6, 8, 26, 3, 3, 2, 4, 1)),
+                            // Y, Groups number, Stride, Batch, Input data format
+                            // Format: b_fs_yx_fsv4
+                            TestParamType_grouped_convolution_gpu(4, 4, 16, 16, 3, 3, 4, 1, 1, format::b_fs_yx_fsv4),
+                            TestParamType_grouped_convolution_gpu(4, 4, 8, 4, 2, 2, 2, 1, 4, format::b_fs_yx_fsv4),
+                            TestParamType_grouped_convolution_gpu(8, 8, 16, 16, 4, 4, 4, 1, 1, format::b_fs_yx_fsv4),
+                            TestParamType_grouped_convolution_gpu(17, 17, 32, 96, 3, 3, 2, 2, 2, format::b_fs_yx_fsv4),
+                            TestParamType_grouped_convolution_gpu(16, 16, 8, 48, 2, 2, 2, 2, 1, format::b_fs_yx_fsv4),
+                            TestParamType_grouped_convolution_gpu(3, 3, 48, 96, 2, 2, 2, 8, 1, format::b_fs_yx_fsv4),
+                            TestParamType_grouped_convolution_gpu(6, 6, 8, 26, 3, 3, 2, 4, 1, format::b_fs_yx_fsv4),
+                            // Format: b_fs_yx_fsv16
+                            TestParamType_grouped_convolution_gpu(4, 4, 16, 16, 3, 3, 4, 1, 1, format::b_fs_yx_fsv16),
+                            TestParamType_grouped_convolution_gpu(4, 4, 8, 4, 2, 2, 2, 1, 4, format::b_fs_yx_fsv16),
+                            TestParamType_grouped_convolution_gpu(8, 8, 16, 16, 4, 4, 4, 1, 1, format::b_fs_yx_fsv16),
+                            TestParamType_grouped_convolution_gpu(17, 17, 32, 96, 3, 3, 2, 2, 2, format::b_fs_yx_fsv16),
+                            TestParamType_grouped_convolution_gpu(16, 16, 8, 48, 2, 2, 2, 2, 1, format::b_fs_yx_fsv16),
+                            TestParamType_grouped_convolution_gpu(3, 3, 48, 96, 2, 2, 2, 8, 1, format::b_fs_yx_fsv16),
+                            TestParamType_grouped_convolution_gpu(6, 6, 8, 26, 3, 3, 2, 4, 1, format::b_fs_yx_fsv16)
+                        ),
                         convolution_grouped_gpu::PrintToStringParamName);
 
-TEST_P(convolution_grouped_gpu, grouped_b_fs_yx_fsv4) {
+TEST_P(convolution_grouped_gpu, base) {
     const auto& engine = get_test_engine();
 
     const int input_x = testing::get<0>(GetParam()),
@@ -7661,6 +7673,7 @@ TEST_P(convolution_grouped_gpu, grouped_b_fs_yx_fsv4) {
               output_padding = 0,
               input_offset_y = (filter_x - 1) / 2,
               input_offset_x = (filter_y - 1) / 2;
+    auto input_data_format = testing::get<9>(GetParam());
 
     auto input_size = tensor(batch(batch_num), feature(input_f), spatial(input_x, input_y));
     auto input_rnd = generate_random_4d<uint8_t>(batch_num, input_f, input_y, input_x, 0, 255);
@@ -7709,7 +7722,7 @@ TEST_P(convolution_grouped_gpu, grouped_b_fs_yx_fsv4) {
 
     topology topology(input_layout("input", input.get_layout()),
                       data("weights", weights),
-                      reorder("input_fsv", "input", {data_types::u8, format::b_fs_yx_fsv4, input_size}),
+                      reorder("input_fsv", "input", {data_types::u8, input_data_format, input_size}),
                       convolution("conv",
                                   "input_fsv",
                                   {"weights"},
@@ -7721,7 +7734,7 @@ TEST_P(convolution_grouped_gpu, grouped_b_fs_yx_fsv4) {
 
     build_options options;
     options.set_option(build_option::optimize_data(true));
-    implementation_desc conv_impl = {format::b_fs_yx_fsv4, "fused_conv_eltwise_gpu_imad"};
+    implementation_desc conv_impl = {input_data_format, "fused_conv_eltwise_gpu_imad"};
     options.set_option(build_option::force_implementations({{"conv", conv_impl}}));
 
     network network(engine, topology, options);
@@ -7732,7 +7745,7 @@ TEST_P(convolution_grouped_gpu, grouped_b_fs_yx_fsv4) {
     auto out_ptr = out_mem.pointer<float>();
     auto out_lay = out_mem.get_layout();
 
-    ASSERT_EQ(out_mem.get_layout().format, format::b_fs_yx_fsv4);
+    ASSERT_EQ(out_mem.get_layout().format, input_data_format);
     ASSERT_EQ(out_lay.size.batch[0], expected_result.size());
     ASSERT_EQ(out_lay.size.feature[0], expected_result[0].size());
     ASSERT_EQ(out_lay.size.spatial[1], expected_result[0][0].size());
@@ -7759,22 +7772,23 @@ template <typename InputT, typename WeightsT, typename OutputT>
 class convolution_test_base {
 public:
     virtual topology build_topology(const cldnn::engine& engine) {
-        auto input_lay = layout(input_type(), input_format(), input_size());
+        auto input_lay = layout(input_type(), format::bfyx, input_size());
         auto wei_lay = layout(weights_type(), format::bfyx, weights_size());
 
         auto wei_mem = memory::allocate(engine, wei_lay);
         auto weights_flat = flatten_4d(format::bfyx, _weights);
         set_values(wei_mem, weights_flat);
-
+        layout reordered_layout = layout{input_type(), input_format(), input_size(), padding_size()};
         auto topo = topology();
         topo.add(input_layout("input", input_lay));
-        std::string input_id = "input";
+        topo.add(reorder("input_reorder", "input", reordered_layout));
+        std::string input_id = "input_reorder";
         if (has_input_zp()) {
             auto input_zp_lay = layout(input_type(), format::bfyx, tensor(feature(input_features())));
             auto input_zp_mem = memory::allocate(engine, input_zp_lay);
             set_values(input_zp_mem, _input_zp);
             topo.add(data("input_zp", input_zp_mem));
-            topo.add(eltwise("input_asymm", { "input", "input_zp" }, eltwise_mode::sub));
+            topo.add(eltwise("input_asymm", { "input_reorder", "input_zp" }, eltwise_mode::sub));
             input_id = "input_asymm";
         }
         topo.add(data("weights", wei_mem));
@@ -7836,7 +7850,7 @@ public:
 
         auto net = network(prog, 0);
 
-        auto input_lay = layout(input_type(), input_format(), input_size());
+        auto input_lay = layout(input_type(), format::bfyx, input_size());
         auto input_mem = memory::allocate(engine, input_lay);
         std::vector<InputT> input_flat(input_lay.get_linear_size(), static_cast<InputT>(0));
         for (size_t bi = 0; bi < batch_num(); ++bi)
@@ -7858,6 +7872,7 @@ public:
         std::stringstream description;
         for (auto i : net.get_primitives_info()) {
             if (i.original_id == "conv") {
+                std::cout << i.kernel_id << std::endl;
                 description << "  kernel: " << i.kernel_id << std::endl;
             }
         }
@@ -7921,6 +7936,10 @@ public:
         _weights_zp = std::move(weights_zp);
     }
 
+    void set_padded_input(bool padded_input) {
+        _padded_input = padded_input;
+    }
+
 protected:
     VVVVF<InputT> _input;
     VVVVF<WeightsT> _weights;
@@ -7931,6 +7950,7 @@ protected:
     int _stride_x, _stride_y;
     int _offset_x, _offset_y;
     int _dilation_x, _dilation_y;
+    bool _padded_input;
 
     size_t batch_num() const { return _input.size(); }
     size_t input_features() const { return _input[0].size(); }
@@ -7945,6 +7965,7 @@ protected:
     bool has_bias() { return _bias.size() > 0; }
     bool has_input_zp() { return _input_zp.size() > 0; }
     bool has_weights_zp() { return _weights_zp.size() > 0; }
+    bool need_padded_input() { return _padded_input; }
 
     data_types input_type() const { return type_to_data_type<InputT>::value; }
     format input_format() const { return _input_fmt; }
@@ -7962,6 +7983,15 @@ protected:
                       TensorValue(filter_x()),
                       TensorValue(filter_y()));
     }
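+    // When padded input is requested, returns symmetric spatial padding of half the filter
+    // size in x and y; otherwise returns an empty padding.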
+    padding padding_size() const {
+        if (_padded_input) {
+            return padding{
+                      tensor(0, 0, TensorValue(filter_x() / 2), TensorValue(filter_y() / 2)).sizes(),
+                      tensor(0, 0, TensorValue(filter_x() / 2), TensorValue(filter_y() / 2)).sizes()};
+        } else {
+            return padding{};
+        }
+    }
 
     data_types output_type() const { return type_to_data_type<OutputT>::value; }
 };
@@ -7981,6 +8011,7 @@ struct convolution_random_test_all_params {
     format::type input_format;
     bool asymmetric_weights;
     bool asymmetric_data;
+    bool need_padded_input;
 };
 
 template <typename InputT, typename WeightsT, typename OutputT>
@@ -8037,6 +8068,7 @@ public:
         this->set_dilation(params.dilation_xy[0], params.dilation_xy[1]);
         this->set_weights_zp(std::move(weights_zp_data));
         this->set_input_zp(std::move(input_zp_data));
+        this->set_padded_input(params.need_padded_input);
     }
 
     void run_random(const convolution_random_test_all_params& params) {
@@ -8064,6 +8096,7 @@ static std::string to_string_convolution_all_params(const testing::TestParamInfo
     format::type iType = params.input_format;  // input format
     bool asymm_weights = params.asymmetric_weights;
     bool asymm_input = params.asymmetric_data;
+    bool padded_input = params.need_padded_input;
     // Wrapper for negative values: e.g. "-1" would generate an invalid gtest param string
     auto to_string_neg = [](int val) {
         if (val >= 0)
@@ -8079,7 +8112,8 @@ static std::string to_string_convolution_all_params(const testing::TestParamInfo
         "_ofs" + to_string_neg(Offset[0]) + 'x' + to_string_neg(Offset[1]) +
         "_d" + std::to_string(Dilation[0]) + 'x' + std::to_string(Dilation[1]) +
         "_g" + std::to_string(groups) +
-        (Bias ? "_bias" : "") + (asymm_weights ? "_wzp" : "") + (asymm_input ? "_izp" : "");
+        (Bias ? "_bias" : "") + (asymm_weights ? "_wzp" : "") + (asymm_input ? "_izp" : "") +
+        (padded_input ? "_in_pad" : "");
 }
 
 template <typename InputT, typename WeightsT, typename OutputT>
@@ -8141,66 +8175,72 @@ using convolution_scale_random_test_s8s8f32 = convolution_scale_random_test<int8
 using convolution_scale_random_test_u8s8f32 = convolution_scale_random_test<uint8_t, int8_t, float>;
 
 struct params_generator : std::vector<convolution_random_test_all_params> {
-    params_generator& smoke_test_params(format::type input_format, bool asymm_weights = false, bool asymm_data = false) {
+    params_generator& smoke_test_params(format::type input_format, bool asymm_weights = false, bool asymm_data = false, bool padded_input = false) {
         std::vector<size_t> batches = { 1, 2 };
         for (auto b : batches) {
-            // 7x7
+            // first conv
+            push_back(convolution_random_test_all_params{
+                b, 3, 32, { 28, 28 }, { 7, 7 }, { 2, 2 }, { -3, -3 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data, padded_input });
+            push_back(convolution_random_test_all_params{
+                b, 3, 64, { 1024, 10 }, { 5, 5 }, { 2, 2 }, { -2, -2 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data, padded_input });
+            push_back(convolution_random_test_all_params{
+                b, 3, 15, { 10, 10 }, { 5, 5 }, { 1, 1 }, { -2, -2 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data, padded_input });
             push_back(convolution_random_test_all_params{
-                b, 3, 32, { 28, 28 }, { 7, 7 }, { 2, 2 }, { -3, -3 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data });
+                b, 4, 18, { 10, 10 }, { 5, 5 }, { 1, 1 }, { -2, -2 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data, padded_input });
             // 3x3
             push_back(convolution_random_test_all_params{
-                b, 32, 48, { 14, 14 }, { 3, 3 }, { 1, 1 }, { -1, -1 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data });
+                b, 32, 48, { 14, 14 }, { 3, 3 }, { 1, 1 }, { -1, -1 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data, padded_input });
             push_back(convolution_random_test_all_params{
-                b, 32, 48, { 14, 14 }, { 3, 3 }, { 2, 2 }, { -1, -1 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data });
+                b, 32, 48, { 14, 14 }, { 3, 3 }, { 2, 2 }, { -1, -1 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data, padded_input });
             // 1x1
             push_back(convolution_random_test_all_params{
-                b, 32, 48, { 28, 28 }, { 1, 1 }, { 1, 1 }, { 0, 0 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data });
+                b, 32, 48, { 28, 28 }, { 1, 1 }, { 1, 1 }, { 0, 0 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data, padded_input });
             push_back(convolution_random_test_all_params{
-                b, 32, 48, { 28, 28 }, { 1, 1 }, { 2, 2 }, { 0, 0 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data });
+                b, 32, 48, { 28, 28 }, { 1, 1 }, { 2, 2 }, { 0, 0 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data, padded_input });
             // 5x5
             push_back(convolution_random_test_all_params{
-                b, 32, 48, { 28, 28 }, { 5, 5 }, { 1, 1 }, { -2, -2 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data });
+                b, 32, 48, { 28, 28 }, { 5, 5 }, { 1, 1 }, { -2, -2 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data, padded_input });
             push_back(convolution_random_test_all_params{
-                b, 32, 48, { 28, 28 }, { 5, 5 }, { 2, 2 }, { -2, -2 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data });
+                b, 32, 48, { 28, 28 }, { 5, 5 }, { 2, 2 }, { -2, -2 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data, padded_input });
             // depthwise
             push_back(convolution_random_test_all_params{
-                b, 64, 64, { 19, 19 }, { 3, 3 }, { 1, 1 }, { -1, -1 }, { 1, 1 }, true, 64, input_format, asymm_weights, asymm_data });
+                b, 64, 64, { 19, 19 }, { 3, 3 }, { 1, 1 }, { -1, -1 }, { 1, 1 }, true, 64, input_format, asymm_weights, asymm_data, padded_input });
             push_back(convolution_random_test_all_params{
-                b, 64, 64, { 19, 19 }, { 3, 3 }, { 2, 2 }, { -1, -1 }, { 1, 1 }, true, 64, input_format, asymm_weights, asymm_data });
+                b, 64, 64, { 19, 19 }, { 3, 3 }, { 2, 2 }, { -1, -1 }, { 1, 1 }, true, 64, input_format, asymm_weights, asymm_data, padded_input });
             // dilation
             push_back(convolution_random_test_all_params{
-                b, 32, 24, { 19, 19 }, { 3, 3 }, { 1, 1 }, { -1, -1 }, { 2, 2 }, true, 1, input_format, asymm_weights, asymm_data });
+                b, 32, 24, { 19, 19 }, { 3, 3 }, { 1, 1 }, { -1, -1 }, { 2, 2 }, true, 1, input_format, asymm_weights, asymm_data, padded_input });
             push_back(convolution_random_test_all_params{
-                b, 32, 24, { 19, 19 }, { 3, 3 }, { 2, 2 }, { -1, -1 }, { 2, 2 }, true, 1, input_format, asymm_weights, asymm_data });
+                b, 32, 24, { 19, 19 }, { 3, 3 }, { 2, 2 }, { -1, -1 }, { 2, 2 }, true, 1, input_format, asymm_weights, asymm_data, padded_input });
         }
         return *this;
     }
 
-    params_generator& extra_test_params(format::type input_format, bool asymm_weights = false, bool asymm_data = false) {
+    params_generator& extra_test_params(format::type input_format, bool asymm_weights = false, bool asymm_data = false, bool padded_input = false) {
         std::vector<size_t> batches = { 1, 2 };
         for (auto b : batches) {
             // 1x1
             push_back(convolution_random_test_all_params{
-                b, 23, 41, { 19, 19 }, { 1, 1 }, { 1, 1 }, { 0, 0 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data });
+                b, 23, 41, { 19, 19 }, { 1, 1 }, { 1, 1 }, { 0, 0 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data, padded_input });
             push_back(convolution_random_test_all_params{
-                b, 23, 41, { 19, 19 }, { 1, 1 }, { 2, 2 }, { 0, 0 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data });
+                b, 23, 41, { 19, 19 }, { 1, 1 }, { 2, 2 }, { 0, 0 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data, padded_input });
             // 3x3
             push_back(convolution_random_test_all_params{
-                b, 16, 28, { 14, 14 }, { 3, 3 }, { 1, 1 }, { -1, -1 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data });
+                b, 16, 28, { 14, 14 }, { 3, 3 }, { 1, 1 }, { -1, -1 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data, padded_input });
             push_back(convolution_random_test_all_params{
-                b, 23, 41, { 19, 17 }, { 3, 3 }, { 1, 1 }, { -1, -1 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data });
+                b, 23, 41, { 19, 17 }, { 3, 3 }, { 1, 1 }, { -1, -1 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data, padded_input });
             // 5x5
             push_back(convolution_random_test_all_params{
-                b, 16, 28, { 14, 14 }, { 5, 5 }, { 1, 1 }, { -2, -2 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data });
+                b, 16, 28, { 14, 14 }, { 5, 5 }, { 1, 1 }, { -2, -2 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data, padded_input });
             push_back(convolution_random_test_all_params{
-                b, 23, 41, { 19, 17 }, { 5, 5 }, { 1, 1 }, { -2, -2 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data });
+                b, 23, 41, { 19, 17 }, { 5, 5 }, { 1, 1 }, { -2, -2 }, { 1, 1 }, true, 1, input_format, asymm_weights, asymm_data, padded_input });
         }
         return *this;
     }
 
-    params_generator& all_test_params(format::type input_format, bool asymm_weights = false, bool asymm_data = false) {
-        return smoke_test_params(input_format, asymm_weights, asymm_data)
-            .extra_test_params(input_format, asymm_weights, asymm_data);
+    params_generator& all_test_params(format::type input_format, bool asymm_weights = false, bool asymm_data = false, bool padded_input = false) {
+        return smoke_test_params(input_format, asymm_weights, asymm_data, padded_input)
+            .extra_test_params(input_format, asymm_weights, asymm_data, padded_input);
     }
 
     params_generator& add(convolution_random_test_all_params params) {
@@ -8230,6 +8270,8 @@ INSTANTIATE_TEST_CASE_P(
         .smoke_test_params(format::b_fs_yx_fsv32, true, true)
         .smoke_test_params(format::b_fs_yx_fsv32, false, true)
         .smoke_test_params(format::b_fs_yx_fsv32, true, false)
+        .smoke_test_params(format::b_fs_yx_fsv32, false, false, true)
+        .smoke_test_params(format::b_fs_yx_fsv16, false, false, true)
         .smoke_test_params(format::b_fs_yx_fsv16)
     ),
     to_string_convolution_all_params
@@ -8275,7 +8317,7 @@ INSTANTIATE_TEST_CASE_P(
         .all_test_params(format::b_fs_yx_fsv32, true, false)
         .all_test_params(format::b_fs_yx_fsv16)
         .add(convolution_random_test_all_params{
-            1, 89, 3, { 1, 1 }, { 3, 3 }, { 1, 1 }, { -1, -1 }, { 1, 1 }, true, 1, format::b_fs_yx_fsv4, false, false })
+            1, 89, 3, { 1, 1 }, { 3, 3 }, { 1, 1 }, { -1, -1 }, { 1, 1 }, true, 1, format::b_fs_yx_fsv4, false, false, false })
     ),
     to_string_convolution_all_params
 );
index 3ee79aa..a90428e 100644 (file)
@@ -371,6 +371,7 @@ public:
 #define CASE_CONV_U8S8_5 {1, 16, 5, 5}, {1, 32, 5, 5}, {1, 1, 1, 1}, tensor{1}, tensor{0}, tensor{1}, 1, data_types::u8, format::bfyx, data_types::i8, format::bfyx, data_types::f32, format::bfyx
 #define CASE_CONV_U8S8_6 {1, 17, 4, 5}, {1, 17, 4, 5}, {1, 1, 1, 1}, tensor{1}, tensor{0}, tensor{1}, 17, data_types::u8, format::bfyx, data_types::i8, format::goiyx, data_types::f32, format::bfyx
 #define CASE_CONV_U8S8_7 {1, 64, 7, 7}, {1, 32, 7, 7}, {1, 1, 3, 3}, tensor{1}, tensor{0, 0, -1, -1, 0, 0}, tensor{1}, 1, data_types::u8, format::bfyx, data_types::i8, format::bfyx, data_types::f32, format::bfyx
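+// 3 input feature maps with a 3x3 filter and -1 input offset (a first-convolution-like shape)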
+#define CASE_CONV_U8S8_8 {1, 3, 4, 5}, {1, 32, 4, 5}, {1, 1, 3, 3}, tensor{1}, tensor{0, 0, -1, -1, 0, 0}, tensor{1}, 1, data_types::u8, format::bfyx, data_types::i8, format::bfyx, data_types::f32, format::bfyx
 
 #define CASE_CONV_S8S8_1 {1, 15, 4, 5}, {1, 30, 2, 3}, {1, 1, 3, 3}, tensor{1}, tensor{0}, tensor{1}, 1, data_types::i8, format::bfyx, data_types::i8, format::bfyx, data_types::f32, format::bfyx
 #define CASE_CONV_S8S8_2 {1, 15, 5, 5}, {1, 30, 3, 3}, {1, 1, 3, 3}, tensor{1}, tensor{0}, tensor{1}, 1, data_types::i8, format::bfyx, data_types::i8, format::bfyx, data_types::f32, format::bfyx
@@ -379,6 +380,7 @@ public:
 #define CASE_CONV_S8S8_5 {1, 16, 5, 5}, {1, 32, 5, 5}, {1, 1, 1, 1}, tensor{1}, tensor{0}, tensor{1}, 1, data_types::i8, format::bfyx, data_types::i8, format::bfyx, data_types::f32, format::bfyx
 #define CASE_CONV_S8S8_6 {1, 17, 4, 5}, {1, 17, 4, 5}, {1, 1, 1, 1}, tensor{1}, tensor{0}, tensor{1}, 17, data_types::i8, format::bfyx, data_types::i8, format::goiyx, data_types::f32, format::bfyx
 #define CASE_CONV_S8S8_7  {1, 64, 7, 7}, {1, 32, 7, 7}, {1, 1, 3, 3}, tensor{1}, tensor{0, 0, -1, -1, 0, 0}, tensor{1}, 1, data_types::i8, format::bfyx, data_types::i8, format::bfyx, data_types::f32, format::bfyx
+#define CASE_CONV_S8S8_8 {1, 3, 4, 5}, {1, 32, 4, 5}, {1, 1, 3, 3}, tensor{1}, tensor{0, 0, -1, -1, 0, 0}, tensor{1}, 1, data_types::i8, format::bfyx, data_types::i8, format::bfyx, data_types::f32, format::bfyx
 
 #define CASE_CONV3D_U8S8_1 {1, 15, 5, 4, 5}, {1, 30, 3, 2, 3}, {1, 1, 3, 3, 3}, tensor{1}, tensor{0}, tensor{1}, 1, data_types::u8, format::bfzyx, data_types::i8, format::bfzyx, data_types::f32, format::bfzyx
 #define CASE_CONV3D_U8S8_2 {1, 15, 5, 5, 5}, {1, 30, 3, 3, 3}, {1, 1, 3, 3, 3}, tensor{1}, tensor{0}, tensor{1}, 1, data_types::u8, format::bfzyx, data_types::i8, format::bfzyx, data_types::f32, format::bfzyx
@@ -418,7 +420,30 @@ public:
 /* ---------------------------------------- FP32 convolution cases ------------------------------------- */
 /* ----------------------------------------------------------------------------------------------------- */
 /* ------------- NOTE: Some tests are disabled until all FP kernels support fusing -------------------- */
-class conv_fp32_activation : public WeightsPrimitiveFusingTest {};
+class ConvFusingTest : public WeightsPrimitiveFusingTest {
+public:
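+    // Runs the fused and non-fused networks on the same input, compares their outputs,
+    // and prints the kernel selected for the "conv_prim" convolution to aid debugging.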
+    void execute(bc_test_params& p) {
+        auto input_prim = get_mem(get_input_layout(p));
+        network network_not_fused(this->engine, this->topology_non_fused, bo_not_fused);
+        network network_fused(this->engine, this->topology_fused, bo_fused);
+        network_fused.set_input_data("input", input_prim);
+        network_not_fused.set_input_data("input", input_prim);
+
+        compare(network_not_fused, network_fused, p);
+        auto find_conv = [](primitive_info& p) -> bool {
+            if (p.original_id == "conv_prim")
+                return true;
+            return false;
+        };
+
+        auto pi_fused = network_fused.get_primitives_info();
+        auto info_fused = std::find_if(pi_fused.begin(), pi_fused.end(), find_conv);
+        if (info_fused != pi_fused.end())
+            std::cout << "kernel: " << info_fused->kernel_id << std::endl;
+    }
+};
+
+class conv_fp32_activation : public ConvFusingTest {};
 TEST_P(conv_fp32_activation, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
@@ -445,7 +470,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_fp32_activation, ::testing::ValuesIn(s
 }), );
 
 
-class conv_fp32_scale : public WeightsPrimitiveFusingTest {};
+class conv_fp32_scale : public ConvFusingTest {};
 TEST_P(conv_fp32_scale, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
@@ -476,7 +501,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_fp32_scale,
                                              bc_test_params{CASE_CONV_FP16_10, 2, 3},
                                              }), );
 
-class conv_fp32_prelu_eltwise : public WeightsPrimitiveFusingTest {};
+class conv_fp32_prelu_eltwise : public ConvFusingTest {};
 TEST_P(conv_fp32_prelu_eltwise, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
@@ -548,7 +573,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_fp32_prelu_eltwise,
                                              bc_test_params{CASE_CONV_FP16_4, 2, 4},
                                              }), );
 
-class conv_fp32_eltwise_b_fs_zyx_fsv16 : public WeightsPrimitiveFusingTest {};
+class conv_fp32_eltwise_b_fs_zyx_fsv16 : public ConvFusingTest {};
 
 TEST_P(conv_fp32_eltwise_b_fs_zyx_fsv16, vector_ops) {
     auto p = GetParam();
@@ -568,7 +593,7 @@ TEST_P(conv_fp32_eltwise_b_fs_zyx_fsv16, vector_ops) {
     execute(p);
 }
 
-class conv_fp32_swish : public WeightsPrimitiveFusingTest {};
+class conv_fp32_swish : public ConvFusingTest {};
 TEST_P(conv_fp32_swish, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
@@ -638,8 +663,8 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_fp32_eltwise_b_fs_zyx_fsv16,
                                 bc_test_params{CASE_CONV_FP16_12, 2, 3},
                         }), );
 
-class conv_fp32_quantize_u8 : public WeightsPrimitiveFusingTest {};
-TEST_P(conv_fp32_quantize_u8, DISABLED_basic) {
+class conv_fp32_quantize_u8 : public ConvFusingTest {};
+TEST_P(conv_fp32_quantize_u8, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
                  data("weights", get_mem(get_weights_layout(p))),
@@ -659,11 +684,13 @@ TEST_P(conv_fp32_quantize_u8, DISABLED_basic) {
 
 INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_fp32_quantize_u8,
                         ::testing::ValuesIn(std::vector<bc_test_params>{
-                                bc_test_params{CASE_CONV_FP32_1, 2, 3},
+                                // For now only b_fs_yx_fsv16 supports this case
+                                bc_test_params{CASE_CONV_FP32_2, 2, 3},
+                                bc_test_params{CASE_CONV_FP32_3, 2, 3},
                         }), );
 
-class conv_fp32_scale_quantize_i8 : public WeightsPrimitiveFusingTest {};
-TEST_P(conv_fp32_scale_quantize_i8, DISABLED_basic) {
+class conv_fp32_scale_quantize_i8 : public ConvFusingTest {};
+TEST_P(conv_fp32_scale_quantize_i8, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
                  data("weights", get_mem(get_weights_layout(p))),
@@ -687,11 +714,13 @@ TEST_P(conv_fp32_scale_quantize_i8, DISABLED_basic) {
 
 INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_fp32_scale_quantize_i8,
                         ::testing::ValuesIn(std::vector<bc_test_params>{
-                                bc_test_params{CASE_CONV_FP32_1, 2, 4},
+                                // For now only b_fs_yx_fsv16 supports this case
+                                bc_test_params{CASE_CONV_FP32_2, 2, 4},
+                                bc_test_params{CASE_CONV_FP32_3, 2, 4},
                         }), );
 
-class conv_fp32_scale_activation_quantize_i8 : public WeightsPrimitiveFusingTest {};
-TEST_P(conv_fp32_scale_activation_quantize_i8, DISABLED_basic) {
+class conv_fp32_scale_activation_quantize_i8 : public ConvFusingTest {};
+TEST_P(conv_fp32_scale_activation_quantize_i8, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
                  data("weights", get_mem(get_weights_layout(p))),
@@ -714,11 +743,13 @@ TEST_P(conv_fp32_scale_activation_quantize_i8, DISABLED_basic) {
 
 INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_fp32_scale_activation_quantize_i8,
                         ::testing::ValuesIn(std::vector<bc_test_params>{
-                                bc_test_params{CASE_CONV_FP32_1, 2, 5},
+                                // For now only b_fs_yx_fsv16 supports this case
+                                bc_test_params{CASE_CONV_FP32_2, 2, 5},
+                                bc_test_params{CASE_CONV_FP32_3, 2, 5},
                         }), );
 
-class conv_fp32_scale_activation_quantize_i8_eltwise_fp32 : public WeightsPrimitiveFusingTest {};
-TEST_P(conv_fp32_scale_activation_quantize_i8_eltwise_fp32, DISABLED_basic) {
+class conv_fp32_scale_activation_quantize_i8_eltwise_fp32 : public ConvFusingTest {};
+TEST_P(conv_fp32_scale_activation_quantize_i8_eltwise_fp32, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
                  data("weights", get_mem(get_weights_layout(p))),
@@ -742,11 +773,13 @@ TEST_P(conv_fp32_scale_activation_quantize_i8_eltwise_fp32, DISABLED_basic) {
 
 INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_fp32_scale_activation_quantize_i8_eltwise_fp32,
                         ::testing::ValuesIn(std::vector<bc_test_params>{
-                                bc_test_params{CASE_CONV_FP32_1, 2, 6},
+                                // For now only b_fs_yx_fsv16 supports this case
+                                bc_test_params{CASE_CONV_FP32_2, 2, 6},
+                                bc_test_params{CASE_CONV_FP32_3, 2, 6},
                         }), );
 
-class conv_fp32_scale_activation_quantize_i8_activation : public WeightsPrimitiveFusingTest {};
-TEST_P(conv_fp32_scale_activation_quantize_i8_activation, DISABLED_basic) {
+class conv_fp32_scale_activation_quantize_i8_activation : public ConvFusingTest {};
+TEST_P(conv_fp32_scale_activation_quantize_i8_activation, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
                  data("weights", get_mem(get_weights_layout(p))),
@@ -770,12 +803,13 @@ TEST_P(conv_fp32_scale_activation_quantize_i8_activation, DISABLED_basic) {
 
 INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_fp32_scale_activation_quantize_i8_activation,
                         ::testing::ValuesIn(std::vector<bc_test_params>{
-                                bc_test_params{CASE_CONV_FP32_1, 2, 6},
+                                bc_test_params{CASE_CONV_FP32_2, 2, 6},
+                                bc_test_params{CASE_CONV_FP32_3, 2, 6},
                         }), );
 
 
-class conv_fp32_scale_activation_quantize_i8_eltwise_fp32_quantize_i8 : public WeightsPrimitiveFusingTest {};
-TEST_P(conv_fp32_scale_activation_quantize_i8_eltwise_fp32_quantize_i8, DISABLED_basic) {
+class conv_fp32_scale_activation_quantize_i8_eltwise_fp32_quantize_i8 : public ConvFusingTest {};
+TEST_P(conv_fp32_scale_activation_quantize_i8_eltwise_fp32_quantize_i8, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
                  data("weights", get_mem(get_weights_layout(p))),
@@ -804,15 +838,45 @@ TEST_P(conv_fp32_scale_activation_quantize_i8_eltwise_fp32_quantize_i8, DISABLED
 
 INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_fp32_scale_activation_quantize_i8_eltwise_fp32_quantize_i8,
                         ::testing::ValuesIn(std::vector<bc_test_params>{
-                                bc_test_params{CASE_CONV_FP32_1, 2, 7},
+                                bc_test_params{CASE_CONV_FP32_2, 2, 7},
+                                bc_test_params{CASE_CONV_FP32_3, 2, 7},
                         }), );
 
+class conv_fp32_activation_eltwise_in_u8_fp32 : public WeightsPrimitiveFusingTest {};
+TEST_P(conv_fp32_activation_eltwise_in_u8_fp32, basic) {
+    auto p = GetParam();
+    create_topologies(input_layout("input", get_input_layout(p)),
+                 data("weights", get_mem(get_weights_layout(p))),
+                 data("bias", get_mem(get_bias_layout(p))),
+                 data("eltwise_data", get_mem(layout{ data_types::i8, p.input_format, p.out_shape })),
+                 convolution("conv_prim", "input", { "weights" }, { "bias" }, p.groups, p.stride, p.pad, p.dilation),
+                 activation("activation", "conv_prim", activation_func::relu_negative_slope),
+                 eltwise("sum", { "activation", "eltwise_data" }, eltwise_mode::sum, data_types::f32),
+                 reorder("reorder_bfyx", "sum", p.default_format, data_types::f32)
+    );
+    tolerance = 1.f;
+    execute(p);
+}
+
+INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_fp32_activation_eltwise_in_u8_fp32,
+                        ::testing::ValuesIn(std::vector<bc_test_params>{
+                                // bc_test_params{CASE_CONV_FP32_1, 2, 4}, - eltwise fusing not supported
+                                bc_test_params{CASE_CONV_FP32_2, 2, 4},
+                                bc_test_params{CASE_CONV_FP32_3, 2, 4},
+                                bc_test_params{CASE_CONV_FP32_4, 2, 4},
+                                // bc_test_params{CASE_CONV_FP32_5, 2, 4}, - eltwise fusing not supported
+                                bc_test_params{CASE_CONV_FP32_6, 2, 4},
+                                bc_test_params{CASE_CONV_FP32_7, 2, 4},
+                                // bc_test_params{CASE_CONV_FP32_8, 2, 4}, - unknown bug
+                                bc_test_params{CASE_CONV_FP32_9, 2, 4},
+                                bc_test_params{CASE_CONV_FP32_10, 2, 4},
+                        }), );
 
 /* ----------------------------------------------------------------------------------------------------- */
 /* -------------------------------------- binary convolution cases ------------------------------------- */
 /* ----------------------------------------------------------------------------------------------------- */
 
-class conv_bin_activation : public WeightsPrimitiveFusingTest {};
+class conv_bin_activation : public ConvFusingTest {};
 TEST_P(conv_bin_activation, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
@@ -831,7 +895,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_bin_activation,
                             bc_test_params{CASE_BIN_CONV1, 2, 3},
                                             }), );
 
-class conv_bin_scale_activation : public WeightsPrimitiveFusingTest {};
+class conv_bin_scale_activation : public ConvFusingTest {};
 TEST_P(conv_bin_scale_activation, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
@@ -852,7 +916,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_bin_scale_activation,
                             bc_test_params{CASE_BIN_CONV2, 2, 4},
                                             }), );
 
-class conv_bin_quantize_bin : public WeightsPrimitiveFusingTest {};
+class conv_bin_quantize_bin : public ConvFusingTest {};
 TEST_P(conv_bin_quantize_bin, channel_wise_quantize) {
     auto p = GetParam();
     auto in_thresh = get_mem(get_per_channel_layout(p), min_random, max_random);
@@ -893,7 +957,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_bin_quantize_bin,
                             bc_test_params{CASE_BIN_CONV2, 2, 3},
                                             }), );
 
-class conv_bin_scale_conv_dw : public WeightsPrimitiveFusingTest {};
+class conv_bin_scale_conv_dw : public ConvFusingTest {};
 TEST_P(conv_bin_scale_conv_dw, dw_kernel_3x3_stride2) {
     auto p = GetParam();
     auto dw_tensor = cldnn::tensor(group(p.out_shape.feature[0]), batch(1), feature(1), spatial(3, 3));
@@ -938,7 +1002,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_bin_scale_conv_dw,
                             bc_test_params{CASE_BIN_CONV3, 3, 4},
                                             }), );
 
-class conv_bin_scale_conv_dw_prelu : public WeightsPrimitiveFusingTest {};
+class conv_bin_scale_conv_dw_prelu : public ConvFusingTest {};
 TEST_P(conv_bin_scale_conv_dw_prelu, dw_kernel_3x3_stride2) {
     auto p = GetParam();
     auto dw_tensor = cldnn::tensor(group(p.out_shape.feature[0]), batch(1), feature(1), spatial(3, 3));
@@ -993,7 +1057,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_bin_scale_conv_dw_prelu,
 /* ----------------------------------------------------------------------------------------------------- */
 /* ---------------------------------------- INT8 convolution cases ------------------------------------- */
 /* ----------------------------------------------------------------------------------------------------- */
-class conv_int8_scale : public WeightsPrimitiveFusingTest {};
+class conv_int8_scale : public ConvFusingTest {};
 TEST_P(conv_int8_scale, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
@@ -1030,7 +1094,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_int8_scale,
                                 bc_test_params{CASE_CONV3D_S8S8_4, 2, 3},
                         }), );
 
-class conv_int8_scale_shift_swish : public WeightsPrimitiveFusingTest {};
+class conv_int8_scale_shift_swish : public ConvFusingTest {};
 TEST_P(conv_int8_scale_shift_swish, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
@@ -1072,7 +1136,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_int8_scale_shift_swish,
                         }), );
 
 
-class conv_int8_byxf_af32 : public WeightsPrimitiveFusingTest {};
+class conv_int8_byxf_af32 : public ConvFusingTest {};
 TEST_P(conv_int8_byxf_af32, per_channel_coeffs) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
@@ -1123,7 +1187,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_int8_byxf_af32,
                                 bc_test_params{CASE_CONV_S8S8_6, 2, 3},
                         }), );
 
-class conv_int8_prelu_eltwise : public WeightsPrimitiveFusingTest {};
+class conv_int8_prelu_eltwise : public ConvFusingTest {};
 TEST_P(conv_int8_prelu_eltwise, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
@@ -1173,11 +1237,13 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_int8_prelu_eltwise,
                                 bc_test_params{CASE_CONV_U8S8_3, 2, 4},
                                 bc_test_params{CASE_CONV_U8S8_4, 2, 4},
                                 bc_test_params{CASE_CONV_U8S8_7, 2, 4},
+                                bc_test_params{CASE_CONV_U8S8_8, 2, 4},
                                 bc_test_params{CASE_CONV_S8S8_1, 2, 4},
                                 bc_test_params{CASE_CONV_S8S8_2, 2, 4},
                                 bc_test_params{CASE_CONV_S8S8_3, 2, 4},
                                 bc_test_params{CASE_CONV_S8S8_4, 2, 4},
                                 bc_test_params{CASE_CONV_S8S8_7, 2, 4},
+                                bc_test_params{CASE_CONV_S8S8_8, 2, 4},
 
                                 bc_test_params{CASE_CONV3D_U8S8_1, 2, 4},
                                 bc_test_params{CASE_CONV3D_U8S8_2, 2, 4},
@@ -1189,7 +1255,147 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_int8_prelu_eltwise,
                                 bc_test_params{CASE_CONV3D_S8S8_4, 2, 4},
                         }), );
 
-class conv_int8_quantize_u8 : public WeightsPrimitiveFusingTest {};
+class conv_int8_activation_eltwise_quantize : public ConvFusingTest {};
+TEST_P(conv_int8_activation_eltwise_quantize, fsv16) {
+    auto p = GetParam();
+    create_topologies(input_layout("input", get_input_layout(p)),
+                 data("weights", get_mem(get_weights_layout(p))),
+                 data("bias", get_mem(get_bias_layout(p))),
+                 data("eltwise_data", get_mem(get_output_layout(p))),
+                 data("in_lo", get_mem(get_per_channel_layout(p), min_random, 0)),
+                 data("in_hi", get_mem(get_per_channel_layout(p), 1, max_random)),
+                 data("out_lo", get_mem(get_single_element_layout(p), -127)),
+                 data("out_hi", get_mem(get_single_element_layout(p), 127)),
+                 convolution("conv_prim", "input", { "weights" }, { "bias" }, p.groups, p.stride, p.pad, p.dilation),
+                 activation("activation", "conv_prim", activation_func::negative),
+                 eltwise("eltwise", "activation", "eltwise_data", eltwise_mode::sum),
+                 quantize("quantize", "eltwise", "in_lo", "in_hi", "out_lo", "out_hi", 255, data_types::i8),
+                 reorder("reorder_bfyx", "quantize", p.default_format, data_types::f32)
+    );
+
+    if (p.default_format.dimension() == 4) {
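+        // Empty kernel name: only the layout is forced here; the engine still picks the kernel.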
+        implementation_desc conv_impl = { format::b_fs_yx_fsv16, "" };
+        bo_fused.set_option(build_option::force_implementations({ {"conv_prim", conv_impl} }));
+    } else {
+        // TODO Add 5D int8 optimized convolution implementations
+        return;
+    }
+
+    tolerance = 1.f;
+    execute(p);
+}
+
+TEST_P(conv_int8_activation_eltwise_quantize, fsv32) {
+    auto p = GetParam();
+    create_topologies(input_layout("input", get_input_layout(p)),
+                 data("weights", get_mem(get_weights_layout(p))),
+                 data("bias", get_mem(get_bias_layout(p))),
+                 data("eltwise_data", get_mem(get_output_layout(p))),
+                 data("in_lo", get_mem(get_per_channel_layout(p), min_random, 0)),
+                 data("in_hi", get_mem(get_per_channel_layout(p), 1, max_random)),
+                 data("out_lo", get_mem(get_single_element_layout(p), -127)),
+                 data("out_hi", get_mem(get_single_element_layout(p), 127)),
+                 convolution("conv_prim", "input", { "weights" }, { "bias" }, p.groups, p.stride, p.pad, p.dilation),
+                 activation("activation", "conv_prim", activation_func::negative),
+                 eltwise("eltwise", "activation", "eltwise_data", eltwise_mode::sum),
+                 quantize("quantize", "eltwise", "in_lo", "in_hi", "out_lo", "out_hi", 255, data_types::i8),
+                 reorder("reorder_bfyx", "quantize", p.default_format, data_types::f32)
+    );
+
+    if (p.default_format.dimension() == 4) {
+        implementation_desc conv_impl = { format::b_fs_yx_fsv32, "" };
+        bo_fused.set_option(build_option::force_implementations({ {"conv_prim", conv_impl} }));
+    } else {
+        // TODO Add 5D int8 optimized convolution implementations
+        return;
+    }
+
+    tolerance = 1.f;
+    execute(p);
+}
+
+INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_int8_activation_eltwise_quantize,
+                        ::testing::ValuesIn(std::vector<bc_test_params>{
+                                bc_test_params{CASE_CONV_U8S8_1, 2, 5},
+                                bc_test_params{CASE_CONV_U8S8_2, 2, 5},
+                                bc_test_params{CASE_CONV_U8S8_3, 2, 5},
+                                bc_test_params{CASE_CONV_U8S8_4, 2, 5},
+                                bc_test_params{CASE_CONV_U8S8_7, 2, 5},
+                                bc_test_params{CASE_CONV_U8S8_8, 2, 5},
+                                bc_test_params{CASE_CONV_S8S8_1, 2, 5},
+                                bc_test_params{CASE_CONV_S8S8_2, 2, 5},
+                                bc_test_params{CASE_CONV_S8S8_3, 2, 5},
+                                bc_test_params{CASE_CONV_S8S8_4, 2, 5},
+                                bc_test_params{CASE_CONV_S8S8_7, 2, 5},
+                                bc_test_params{CASE_CONV_S8S8_8, 2, 5},
+                        }), );
+
+class conv_int8_activation_eltwise : public ConvFusingTest {};
+TEST_P(conv_int8_activation_eltwise, fsv16) {
+    auto p = GetParam();
+    create_topologies(input_layout("input", get_input_layout(p)),
+                 data("weights", get_mem(get_weights_layout(p))),
+                 data("bias", get_mem(get_bias_layout(p))),
+                 data("eltwise_data", get_mem(get_output_layout(p))),
+                 convolution("conv_prim", "input", { "weights" }, { "bias" }, p.groups, p.stride, p.pad, p.dilation),
+                 activation("activation", "conv_prim", activation_func::negative),
+                 eltwise("eltwise", "activation", "eltwise_data", eltwise_mode::sum),
+                 reorder("reorder_bfyx", "eltwise", p.default_format, data_types::f32)
+    );
+
+    if (p.default_format.dimension() == 4) {
+        implementation_desc conv_impl = { format::b_fs_yx_fsv16, "" };
+        bo_fused.set_option(build_option::force_implementations({ {"conv_prim", conv_impl} }));
+    } else {
+        // TODO Add 5D int8 optimized convolution implementations
+        return;
+    }
+
+    tolerance = 1e-5f;
+    execute(p);
+}
+
+TEST_P(conv_int8_activation_eltwise, fsv32) {
+    auto p = GetParam();
+    create_topologies(input_layout("input", get_input_layout(p)),
+                 data("weights", get_mem(get_weights_layout(p))),
+                 data("bias", get_mem(get_bias_layout(p))),
+                 data("eltwise_data", get_mem(get_output_layout(p))),
+                 convolution("conv_prim", "input", { "weights" }, { "bias" }, p.groups, p.stride, p.pad, p.dilation),
+                 activation("activation", "conv_prim", activation_func::negative),
+                 eltwise("eltwise", "activation", "eltwise_data", eltwise_mode::sum),
+                 reorder("reorder_bfyx", "eltwise", p.default_format, data_types::f32)
+    );
+
+    if (p.default_format.dimension() == 4) {
+        implementation_desc conv_impl = { format::b_fs_yx_fsv32, "" };
+        bo_fused.set_option(build_option::force_implementations({ {"conv_prim", conv_impl} }));
+    } else {
+        // TODO Add 5D int8 optimized convolution implementations
+        return;
+    }
+
+    tolerance = 1e-5f;
+    execute(p);
+}
+
+INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_int8_activation_eltwise,
+                        ::testing::ValuesIn(std::vector<bc_test_params>{
+                                bc_test_params{CASE_CONV_U8S8_1, 2, 4},
+                                bc_test_params{CASE_CONV_U8S8_2, 2, 4},
+                                bc_test_params{CASE_CONV_U8S8_3, 2, 4},
+                                bc_test_params{CASE_CONV_U8S8_4, 2, 4},
+                                bc_test_params{CASE_CONV_U8S8_7, 2, 4},
+                                bc_test_params{CASE_CONV_U8S8_8, 2, 4},
+                                bc_test_params{CASE_CONV_S8S8_1, 2, 4},
+                                bc_test_params{CASE_CONV_S8S8_2, 2, 4},
+                                bc_test_params{CASE_CONV_S8S8_3, 2, 4},
+                                bc_test_params{CASE_CONV_S8S8_4, 2, 4},
+                                bc_test_params{CASE_CONV_S8S8_7, 2, 4},
+                                bc_test_params{CASE_CONV_S8S8_8, 2, 4},
+                        }), );
+
+class conv_int8_quantize_u8 : public ConvFusingTest {};
 TEST_P(conv_int8_quantize_u8, per_channel) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
@@ -1236,6 +1442,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_int8_quantize_u8,
                                 bc_test_params{CASE_CONV_S8S8_2, 2, 3},
                                 bc_test_params{CASE_CONV_S8S8_3, 2, 3},
                                 bc_test_params{CASE_CONV_S8S8_4, 2, 3},
+                                bc_test_params{CASE_CONV_S8S8_8, 2, 3},
 
                                 bc_test_params{CASE_CONV3D_U8S8_1, 2, 3},
                                 bc_test_params{CASE_CONV3D_U8S8_2, 2, 3},
@@ -1247,7 +1454,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_int8_quantize_u8,
                                 bc_test_params{CASE_CONV3D_S8S8_4, 2, 3},
                         }), );
 
-class conv_int8_scale_quantize_i8 : public WeightsPrimitiveFusingTest {};
+class conv_int8_scale_quantize_i8 : public ConvFusingTest {};
 TEST_P(conv_int8_scale_quantize_i8, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
@@ -1291,7 +1498,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_int8_scale_quantize_i8,
                                 bc_test_params{CASE_CONV3D_S8S8_4, 2, 4},
                         }), );
 
-class conv_int8_scale_activation_quantize_i8 : public WeightsPrimitiveFusingTest {};
+class conv_int8_scale_activation_quantize_i8 : public ConvFusingTest {};
 TEST_P(conv_int8_scale_activation_quantize_i8, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
@@ -1334,7 +1541,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_int8_scale_activation_quantize_i8,
                                 bc_test_params{CASE_CONV3D_S8S8_4, 2, 5},
                         }), );
 
-class conv_int8_scale_activation_quantize_i8_eltwise_fp32 : public WeightsPrimitiveFusingTest {};
+class conv_int8_scale_activation_quantize_i8_eltwise_fp32 : public ConvFusingTest {};
 TEST_P(conv_int8_scale_activation_quantize_i8_eltwise_fp32, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
@@ -1378,7 +1585,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_int8_scale_activation_quantize_i8_eltw
                                 bc_test_params{CASE_CONV3D_S8S8_4, 2, 6},
                         }), );
 
-class conv_int8_scale_activation_quantize_i8_activation : public WeightsPrimitiveFusingTest {};
+class conv_int8_scale_activation_quantize_i8_activation : public ConvFusingTest {};
 TEST_P(conv_int8_scale_activation_quantize_i8_activation, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
@@ -1423,7 +1630,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_int8_scale_activation_quantize_i8_acti
                         }), );
 
 
-class conv_int8_scale_activation_quantize_i8_eltwise_fp32_quantize_i8 : public WeightsPrimitiveFusingTest {};
+class conv_int8_scale_activation_quantize_i8_eltwise_fp32_quantize_i8 : public ConvFusingTest {};
 TEST_P(conv_int8_scale_activation_quantize_i8_eltwise_fp32_quantize_i8, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
@@ -1472,7 +1679,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_int8_scale_activation_quantize_i8_eltw
                                 bc_test_params{CASE_CONV3D_S8S8_4, 2, 7},
                         }), );
 
-class conv_int8_scale_prelu_quantize_i8_eltwise_fp32_quantize_i8_vec : public WeightsPrimitiveFusingTest {};
+class conv_int8_scale_prelu_quantize_i8_eltwise_fp32_quantize_i8_vec : public ConvFusingTest {};
 TEST_P(conv_int8_scale_prelu_quantize_i8_eltwise_fp32_quantize_i8_vec, vector_ops) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
@@ -1545,7 +1752,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_int8_scale_prelu_quantize_i8_eltwise_f
                                 bc_test_params{CASE_CONV_S8S8_5, 2, 7},
                         }), );
 
-class conv_int8_asymmetric_weights : public WeightsPrimitiveFusingTest {};
+class conv_int8_asymmetric_weights : public ConvFusingTest {};
 TEST_P(conv_int8_asymmetric_weights, basic) {
     auto p = GetParam();
     auto weights_format = (p.weights_format == format::goiyx) ? format::bfyx : format::bfzyx;
@@ -1612,7 +1819,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_int8_asymmetric_weights,
                                 bc_test_params{CASE_CONV3D_S8S8_4, 2, 2},
                         }), );
 
-class conv_int8_asymmetric_data : public WeightsPrimitiveFusingTest {};
+class conv_int8_asymmetric_data : public ConvFusingTest {};
 TEST_P(conv_int8_asymmetric_data, basic) {
     auto p = GetParam();
     auto weights_format = (p.weights_format == format::goiyx) ? format::bfyx : format::bfzyx;
@@ -1679,7 +1886,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_int8_asymmetric_data,
                                 bc_test_params{CASE_CONV3D_S8S8_4, 2, 3},
                         }), );
 
-class conv_int8_asymmetric_data_and_weights : public WeightsPrimitiveFusingTest {};
+class conv_int8_asymmetric_data_and_weights : public ConvFusingTest {};
 TEST_P(conv_int8_asymmetric_data_and_weights, basic) {
     auto p = GetParam();
     auto weights_format = (p.weights_format == format::goiyx) ? format::bfyx : format::bfzyx;
@@ -1751,7 +1958,8 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, conv_int8_asymmetric_data_and_weights,
 /* ----------------------------------------------------------------------------------------------------- */
 /* ---------------------------------------- FC cases --------------------------------------------------- */
 /* ----------------------------------------------------------------------------------------------------- */
-class fc_fp32_activation : public WeightsPrimitiveFusingTest {};
+class FCFusingTest : public WeightsPrimitiveFusingTest {};
+class fc_fp32_activation : public FCFusingTest {};
 TEST_P(fc_fp32_activation, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
@@ -1772,7 +1980,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, fc_fp32_activation, ::testing::ValuesIn(std
                                                                             bc_test_params{ CASE_FC_FP32_3, 2, 3 },
 }), );
 
-class fc_int8_scale : public WeightsPrimitiveFusingTest {};
+class fc_int8_scale : public FCFusingTest {};
 TEST_P(fc_int8_scale, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
@@ -1795,7 +2003,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, fc_int8_scale,
                         bc_test_params{ CASE_FC_U8S8_3, 2, 3 },
                         }), );
 
-class fc_int8_quantize_u8 : public WeightsPrimitiveFusingTest {};
+class fc_int8_quantize_u8 : public FCFusingTest {};
 TEST_P(fc_int8_quantize_u8, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
@@ -1821,7 +2029,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu_fc, fc_int8_quantize_u8,
         bc_test_params{CASE_FC_U8S8_3, 2, 3},
         }), );
 
-class fc_int8_scale_quantize_i8 : public WeightsPrimitiveFusingTest {};
+class fc_int8_scale_quantize_i8 : public FCFusingTest {};
 TEST_P(fc_int8_scale_quantize_i8, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
@@ -1850,7 +2058,7 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, fc_int8_scale_quantize_i8,
 
 
 
-class fc_int8_scale_activation_quantize_i8 : public WeightsPrimitiveFusingTest {};
+class fc_int8_scale_activation_quantize_i8 : public FCFusingTest {};
 TEST_P(fc_int8_scale_activation_quantize_i8, basic) {
     auto p = GetParam();
     create_topologies(input_layout("input", get_input_layout(p)),
@@ -2256,7 +2464,6 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, mvn_scale_activation_quantize_i8_eltwise_fp
         mvn_test_params{ CASE_MVN_3D_U8_2, 2, 7 },
 }), );
 
-
 /* ----------------------------------------------------------------------------------------------------- */
 /* --------------------------------------- Pooling cases ----------------------------------------------- */
 /* ----------------------------------------------------------------------------------------------------- */
@@ -2367,3 +2574,271 @@ INSTANTIATE_TEST_CASE_P(fusings_gpu, pooling_scale,
                         pooling_test_params{ CASE_POOLING_I8_2, 2, 3 },
                         pooling_test_params{ CASE_POOLING_I8_3, 2, 3 },
 }), );
+
+/* ----------------------------------------------------------------------------------------------------- */
+/* ---------------------------------------- LRN cases -------------------------------------------------- */
+/* ----------------------------------------------------------------------------------------------------- */
+struct lrn_test_params {
+    tensor in_shape;
+    data_types data_type;
+    format input_format;
+    data_types default_type;
+    format default_format;
+    size_t expected_fused_primitives;
+    size_t expected_not_fused_primitives;
+    lrn_norm_region lrn_type;
+    std::string kernel_name;
+};
+
+#define CASE_LRN_FP32_1 {2, 16, 4, 4}, data_types::f32, format::bfyx, data_types::f32, format::bfyx
+#define CASE_LRN_FP32_2 {8, 16, 4, 4}, data_types::f32, format::yxfb, data_types::f32, format::yxfb
+#define CASE_LRN_FP32_3 {2, 16, 4, 4}, data_types::f32, format::byxf, data_types::f32, format::byxf
+#define CASE_LRN_FP32_4 {2, 16, 4, 4}, data_types::f32, format::b_fs_yx_fsv4, data_types::f32, format::bfyx
+#define CASE_LRN_FP32_5 {2, 16, 4, 4}, data_types::f32, format::b_fs_yx_fsv16, data_types::f32, format::bfyx
+
+#define CASE_LRN_FP32_TO_FP16_1 {2, 16, 5, 5}, data_types::f32, format::bfyx, data_types::f16, format::bfyx
+#define CASE_LRN_FP32_TO_FP16_2 {2, 16, 5, 5}, data_types::f32, format::byxf, data_types::f16, format::byxf
+#define CASE_LRN_FP32_TO_FP16_3 {8, 16, 4, 4}, data_types::f32, format::yxfb, data_types::f16, format::byxf
+#define CASE_LRN_FP32_TO_FP16_4 {2, 16, 4, 4}, data_types::f32, format::b_fs_yx_fsv4, data_types::f16, format::bfyx
+#define CASE_LRN_FP32_TO_FP16_5 {2, 16, 4, 4}, data_types::f32, format::b_fs_yx_fsv16, data_types::f16, format::bfyx
+
+#define CASE_LRN_FP16_1 {2, 16, 4, 4}, data_types::f16, format::bfyx, data_types::f16, format::bfyx
+#define CASE_LRN_FP16_2 {8, 16, 4, 4}, data_types::f16, format::yxfb, data_types::f16, format::yxfb
+#define CASE_LRN_FP16_3 {2, 16, 4, 4}, data_types::f16, format::byxf, data_types::f16, format::byxf
+#define CASE_LRN_FP16_4 {2, 16, 4, 4}, data_types::f16, format::b_fs_yx_fsv4, data_types::f16, format::bfyx
+#define CASE_LRN_FP16_5 {2, 16, 4, 4}, data_types::f16, format::b_fs_yx_fsv16, data_types::f16, format::bfyx
+
+class LrnFusingTest : public ::BaseFusingTest<lrn_test_params> {
+public:
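+    // Builds the fused network with the LRN implementation forced to the kernel/format from the
+    // test params, verifies both networks report their primitives, then compares against the
+    // non-fused reference.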
+    void execute(lrn_test_params& p) {
+        auto input_prim = get_mem(get_input_layout(p));
+
+        build_options options;
+        implementation_desc lrn_impl = {p.input_format, p.kernel_name};
+        options.set_option(build_option::optimize_data(true));
+        options.set_option(build_option::force_implementations({{"lrn_norm", lrn_impl}}));
+        network network_fused(this->engine, this->topology_fused, options);
+        network network_not_fused(this->engine, this->topology_non_fused, this->bo_not_fused);
+
+        network_fused.set_input_data("input", input_prim);
+        network_not_fused.set_input_data("input", input_prim);
+
+        ASSERT_FALSE(network_fused.get_primitives_info().empty());
+        ASSERT_FALSE(network_not_fused.get_primitives_info().empty());
+
+        auto find_lrn = [&](primitive_info& p) -> bool {
+            if (p.original_id == "lrn_norm" || p.original_id == "reorder")
+                return true;
+            return false;
+        };
+
+        auto pi_fused = network_fused.get_primitives_info();
+        auto pi_not_fused = network_not_fused.get_primitives_info();
+        auto info_fused = std::find_if(pi_fused.begin(), pi_fused.end(), find_lrn);
+        auto info_not_fused = std::find_if(pi_not_fused.begin(), pi_not_fused.end(), find_lrn);
+
+        ASSERT_TRUE(info_fused != pi_fused.end());
+        ASSERT_TRUE(info_not_fused != pi_not_fused.end());
+
+        compare(network_not_fused, network_fused, p);
+    }
+
+    layout get_input_layout(lrn_test_params& p) { return layout{p.data_type, p.input_format, p.in_shape}; }
+
+    layout get_per_channel_layout(lrn_test_params& p) {
+        return layout{p.default_type, p.default_format, tensor{1, p.in_shape.feature[0], 1, 1}};
+    }
+};
+
+class lrn_fp32_quantize_u8_scale_activation : public LrnFusingTest {};
+TEST_P(lrn_fp32_quantize_u8_scale_activation, basic) {
+    auto p = GetParam();
+
+    uint32_t size = 5;
+    float k = 1.0f;
+    float alpha = (float)9.9e-05;
+    float beta = 0.75;
+
+    create_topologies(input_layout("input", get_input_layout(p)),
+                      data("in_lo", get_mem(get_single_element_layout(p), min_random, 0)),
+                      data("in_hi", get_mem(get_single_element_layout(p), 1, max_random)),
+                      data("out_lo", get_mem(get_single_element_layout(p), 0)),
+                      data("out_hi", get_mem(get_single_element_layout(p), 255)),
+                      data("scale_data", get_mem(get_single_element_layout(p), 1.0f / 255)),
+                      lrn("lrn_norm", "input", size, k, alpha, beta, p.lrn_type),
+                      quantize("quantize", "lrn_norm", "in_lo", "in_hi", "out_lo", "out_hi", 256, data_types::u8),
+                      scale("scale", "quantize", "scale_data"),
+                      activation("activation", "scale", activation_func::exp),
+                      reorder("reorder", "activation", p.default_format, data_types::f32));
+
+    tolerance = 1.0f;
+    execute(p);
+}
+
+TEST_P(lrn_fp32_quantize_u8_scale_activation, per_channel) {
+    auto p = GetParam();
+
+    uint32_t size = 5;
+    float k = 1.0f;
+    float alpha = (float)9.9e-05;
+    float beta = 0.75;
+
+    create_topologies(input_layout("input", get_input_layout(p)),
+                      data("in_lo", get_mem(get_per_channel_layout(p), min_random, 0)),
+                      data("in_hi", get_mem(get_per_channel_layout(p), 1, max_random)),
+                      data("out_lo", get_mem(get_single_element_layout(p), 0)),
+                      data("out_hi", get_mem(get_single_element_layout(p), 255)),
+                      data("scale_data", get_mem(get_per_channel_layout(p), 1.0f / 255)),
+                      lrn("lrn_norm", "input", size, k, alpha, beta, p.lrn_type),
+                      quantize("quantize", "lrn_norm", "in_lo", "in_hi", "out_lo", "out_hi", 256, data_types::u8),
+                      scale("scale", "quantize", "scale_data"),
+                      activation("activation", "scale", activation_func::exp),
+                      reorder("reorder", "activation", p.default_format, data_types::f32));
+
+    tolerance = 1.0f;
+    execute(p);
+}
+
+INSTANTIATE_TEST_CASE_P(fusings_gpu,
+                        lrn_fp32_quantize_u8_scale_activation,
+                        ::testing::ValuesIn(std::vector<lrn_test_params>{
+                            // InputDataType = FP32   OutputDataType = FP32
+                            lrn_test_params{CASE_LRN_FP32_1, 2, 5, lrn_norm_region_across_channel, "lrn_ref"},
+                            lrn_test_params{CASE_LRN_FP32_1, 2, 5, lrn_norm_region_within_channel, "lrn_gpu_within_channel_opt"},
+                            lrn_test_params{CASE_LRN_FP32_1, 2, 5, lrn_norm_region_within_channel, "lrn_gpu_within_channel"},
+                            lrn_test_params{CASE_LRN_FP32_1, 2, 5, lrn_norm_region_across_channel, "lrn_gpu_across_channel_ref"},
+                            lrn_test_params{CASE_LRN_FP32_1, 2, 5, lrn_norm_region_across_channel, "lrn_gpu_across_channel_multiple_features"},
+                            lrn_test_params{CASE_LRN_FP32_2, 2, 5, lrn_norm_region_across_channel, "lrn_gpu_across_channel_yxfb_b8_opt"},
+                            lrn_test_params{CASE_LRN_FP32_3, 2, 5, lrn_norm_region_within_channel, "lrn_within_channel_byxf_opt"},
+                            lrn_test_params{CASE_LRN_FP32_4, 2, 5, lrn_norm_region_across_channel, "lrn_gpu_across_channel_multiple_features"},
+                            lrn_test_params{CASE_LRN_FP32_5, 2, 5, lrn_norm_region_across_channel, "lrn_gpu_across_channel_multiple_features"},
+
+                            // InputDataType = FP32   OutputDataType = FP16
+                            lrn_test_params{CASE_LRN_FP32_TO_FP16_1, 2, 5, lrn_norm_region_across_channel, "lrn_ref"},
+                            lrn_test_params{CASE_LRN_FP32_TO_FP16_1, 2, 5, lrn_norm_region_across_channel, "lrn_gpu_across_channel_multiple_features"},
+                            lrn_test_params{CASE_LRN_FP32_TO_FP16_1, 2, 5, lrn_norm_region_across_channel, "lrn_gpu_across_channel_ref"},
+                            lrn_test_params{CASE_LRN_FP32_TO_FP16_1, 2, 5, lrn_norm_region_within_channel, "lrn_gpu_within_channel_opt"},
+                            lrn_test_params{CASE_LRN_FP32_TO_FP16_1, 2, 5, lrn_norm_region_within_channel, "lrn_gpu_within_channel"},
+                            lrn_test_params{CASE_LRN_FP32_TO_FP16_3, 2, 5, lrn_norm_region_across_channel, "lrn_gpu_across_channel_yxfb_b8_opt"},
+                            lrn_test_params{CASE_LRN_FP32_TO_FP16_4, 2, 5, lrn_norm_region_across_channel, "lrn_gpu_across_channel_multiple_features"},
+                            lrn_test_params{CASE_LRN_FP32_TO_FP16_5, 2, 5, lrn_norm_region_across_channel, "lrn_gpu_across_channel_multiple_features"},
+
+                        }), );
+
+class lrn_fp32_quantize_i8_scale_activation : public LrnFusingTest {};
+TEST_P(lrn_fp32_quantize_i8_scale_activation, basic) {
+    auto p = GetParam();
+
+    uint32_t size = 5;
+    float k = 1.0f;
+    float alpha = 9.9e-05f;
+    float beta = 0.75f;
+
+    create_topologies(input_layout("input", get_input_layout(p)),
+                      data("in_lo", get_mem(get_single_element_layout(p), min_random, 0)),
+                      data("in_hi", get_mem(get_single_element_layout(p), 1, max_random)),
+                      data("out_lo", get_mem(get_single_element_layout(p), -127)),
+                      data("out_hi", get_mem(get_single_element_layout(p),  127)),
+                      data("scale_data", get_mem(get_single_element_layout(p), 1.0f / 255)),
+                      lrn("lrn_norm", "input", size, k, alpha, beta, p.lrn_type),
+                      scale("scale", "lrn_norm", "scale_data"),
+                      activation("activation", "scale", activation_func::exp),
+                      quantize("quantize", "activation", "in_lo", "in_hi", "out_lo", "out_hi", 256, data_types::i8),
+                      reorder("reorder", "quantize", p.default_format, data_types::f32));
+
+    tolerance = 1.0f;
+    execute(p);
+}
+
+INSTANTIATE_TEST_CASE_P(fusings_gpu,
+                        lrn_fp32_quantize_i8_scale_activation,
+                        ::testing::ValuesIn(std::vector<lrn_test_params>{
+                            // InputDataType = FP32   OutputDataType = INT8
+                            lrn_test_params{CASE_LRN_FP32_1, 2, 5, lrn_norm_region_within_channel, "lrn_gpu_within_channel_opt"},
+                            lrn_test_params{CASE_LRN_FP32_1, 2, 5, lrn_norm_region_within_channel, "lrn_gpu_within_channel"},
+                            lrn_test_params{CASE_LRN_FP32_1, 2, 5, lrn_norm_region_across_channel, "lrn_ref"},
+                            lrn_test_params{CASE_LRN_FP32_1, 2, 5, lrn_norm_region_across_channel, "lrn_gpu_across_channel_multiple_features"},
+                            lrn_test_params{CASE_LRN_FP32_1, 2, 5, lrn_norm_region_across_channel, "lrn_gpu_across_channel_ref"},
+                            lrn_test_params{CASE_LRN_FP32_2, 2, 5, lrn_norm_region_across_channel, "lrn_gpu_across_channel_yxfb_b8_opt"},
+                            lrn_test_params{CASE_LRN_FP32_3, 2, 5, lrn_norm_region_within_channel, "lrn_within_channel_byxf_opt"},
+                            lrn_test_params{CASE_LRN_FP32_4, 2, 5, lrn_norm_region_across_channel, "lrn_gpu_across_channel_multiple_features"},
+                            lrn_test_params{CASE_LRN_FP32_5, 2, 5, lrn_norm_region_across_channel, "lrn_gpu_across_channel_multiple_features"},
+
+                            // InputDataType = FP16   OutputDataType = INT8/UINT8 cannot be tested for now, because the quantize
+                            // primitive does not support FP16 input while fusing (prepare_quantization.cpp:114 -> prepare_primitive_fusing.cpp:474)
+                        }), );
+
+class lrn_fp32_scale_activation_quantize_u8 : public LrnFusingTest {};
+TEST_P(lrn_fp32_scale_activation_quantize_u8, basic) {
+    auto p = GetParam();
+
+    uint32_t size = 5;
+    float k = 1.0f;
+    float alpha = 9.9e-05f;
+    float beta = 0.75f;
+
+    create_topologies(input_layout("input", get_input_layout(p)),
+                      data("in_lo", get_mem(get_single_element_layout(p), min_random, 0)),
+                      data("in_hi", get_mem(get_single_element_layout(p), 1, max_random)),
+                      data("out_lo", get_mem(get_single_element_layout(p), 0)),
+                      data("out_hi", get_mem(get_single_element_layout(p), 255)),
+                      data("scale_data", get_mem(get_single_element_layout(p), 1.0f / 255)),
+                      lrn("lrn_norm", "input", size, k, alpha, beta, p.lrn_type),
+                      scale("scale", "lrn_norm", "scale_data"),
+                      activation("activation", "scale", activation_func::exp),
+                      quantize("quantize", "activation", "in_lo", "in_hi", "out_lo", "out_hi", 256, data_types::u8),
+                      reorder("reorder", "quantize", p.default_format, data_types::f32));
+
+    tolerance = 1.0f;
+    execute(p);
+}
+
+INSTANTIATE_TEST_CASE_P(fusings_gpu,
+                        lrn_fp32_scale_activation_quantize_u8,
+                        ::testing::ValuesIn(std::vector<lrn_test_params>{
+                            // InputDataType = FP32   OutputDataType = UINT8
+                            lrn_test_params{CASE_LRN_FP32_1, 2, 5, lrn_norm_region_across_channel, "lrn_gpu_across_channel_ref"},
+                            lrn_test_params{CASE_LRN_FP32_1, 2, 5, lrn_norm_region_within_channel, "lrn_gpu_within_channel_opt"},
+                            lrn_test_params{CASE_LRN_FP32_1, 2, 5, lrn_norm_region_within_channel, "lrn_gpu_within_channel"},
+                            lrn_test_params{CASE_LRN_FP32_1, 2, 5, lrn_norm_region_across_channel, "lrn_ref"},
+                            lrn_test_params{CASE_LRN_FP32_1, 2, 5, lrn_norm_region_across_channel, "lrn_gpu_across_channel_multiple_features"},
+                            lrn_test_params{CASE_LRN_FP32_2, 2, 5, lrn_norm_region_across_channel, "lrn_gpu_across_channel_yxfb_b8_opt"},
+                            lrn_test_params{CASE_LRN_FP32_3, 2, 5, lrn_norm_region_within_channel, "lrn_within_channel_byxf_opt"},
+                            lrn_test_params{CASE_LRN_FP32_4, 2, 5, lrn_norm_region_across_channel, "lrn_gpu_across_channel_multiple_features"},
+                            lrn_test_params{CASE_LRN_FP32_5, 2, 5, lrn_norm_region_across_channel, "lrn_gpu_across_channel_multiple_features"},
+                        }), );
+
+class lrn_fp16_scale_activation : public LrnFusingTest {};
+TEST_P(lrn_fp16_scale_activation, basic) {
+    auto p = GetParam();
+
+    uint32_t size = 5;
+    float k = 1.0f;
+    float alpha = 9.9e-05f;
+    float beta = 0.75f;
+
+    create_topologies(input_layout("input", get_input_layout(p)),
+                      data("scale_data", get_mem(get_single_element_layout(p), 1.0f / 255)),
+                      lrn("lrn_norm", "input", size, k, alpha, beta, p.lrn_type),
+                      scale("scale", "lrn_norm", "scale_data"),
+                      activation("activation", "scale", activation_func::exp),
+                      reorder("reorder", "activation", p.default_format, data_types::f32));
+
+    tolerance = 1e-05f;
+    execute(p);
+}
+
+INSTANTIATE_TEST_CASE_P(fusings_gpu,
+                        lrn_fp16_scale_activation,
+                        ::testing::ValuesIn(std::vector<lrn_test_params>{
+                            // InputDataType = FP16   OutputDataType = FP16
+                            lrn_test_params{CASE_LRN_FP16_1, 2, 4, lrn_norm_region_within_channel, "lrn_gpu_within_channel_opt"},
+                            lrn_test_params{CASE_LRN_FP16_1, 2, 4, lrn_norm_region_within_channel, "lrn_gpu_within_channel"},
+                            lrn_test_params{CASE_LRN_FP16_1, 2, 4, lrn_norm_region_across_channel, "lrn_ref"},
+                            lrn_test_params{CASE_LRN_FP16_1, 2, 4, lrn_norm_region_across_channel, "lrn_gpu_across_channel_multiple_features"},
+                            lrn_test_params{CASE_LRN_FP16_1, 2, 4, lrn_norm_region_across_channel, "lrn_gpu_across_channel_ref"},
+                            lrn_test_params{CASE_LRN_FP16_3, 2, 4, lrn_norm_region_within_channel, "lrn_within_channel_byxf_opt"},
+                            lrn_test_params{CASE_LRN_FP16_4, 2, 4, lrn_norm_region_across_channel, "lrn_gpu_across_channel_multiple_features"},
+                            lrn_test_params{CASE_LRN_FP16_5, 2, 4, lrn_norm_region_across_channel, "lrn_gpu_across_channel_multiple_features"},
+                        }), );
+
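For reference, the across-channel LRN exercised by all of the fixtures above is, in its common AlexNet/Caffe-style form (conventions differ between frameworks on whether alpha is pre-divided by the window size n, so treat this as the usual formulation rather than the exact clDNN kernel contract):

    b_{x,y}^{c} = a_{x,y}^{c} \Big/ \Big( k + \frac{\alpha}{n} \sum_{c' = \max(0,\, c - n/2)}^{\min(C-1,\, c + n/2)} \big( a_{x,y}^{c'} \big)^{2} \Big)^{\beta}

with n = size = 5, k = 1.0, alpha = 9.9e-05 and beta = 0.75 in the tests above; the within-channel variants sum over an n-by-n spatial window at a fixed channel c instead.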
index ddb5edf..f941eb8 100644 (file)
@@ -344,6 +344,48 @@ TEST(reverese_sequence_gpu_test, fp16_d2_2_ba1_sa0) {
     }
 }
 
+TEST(reverese_sequence_gpu_test, fp16x2_d2_2_ba1_sa0) {
+    engine engine;
+
+    auto input = memory::allocate(engine, { data_types::f16, format::bfyx, { 2, 2, 1, 1 } });
+    auto seq_lengths = memory::allocate(engine, { data_types::f16, format::bfyx, { 2, 1, 1, 1 } });
+    int32_t batch_axis = 1;
+    int32_t seq_axis = 0;
+
+    set_values(input, {
+            FLOAT16(0.0f), FLOAT16(1.0f), FLOAT16(2.0f), FLOAT16(3.0f)
+        });
+
+    set_values(seq_lengths, {
+            FLOAT16(1.0f), FLOAT16(2.0f)
+        });
+
+    topology topology;
+    topology.add(input_layout("input", input.get_layout()));
+    topology.add(input_layout("seq_lengths", seq_lengths.get_layout()));
+    topology.add(
+        reverse_sequence("reverse_sequence", "input", "seq_lengths", seq_axis, batch_axis)
+    );
+
+    network network(engine, topology);
+
+    network.set_input_data("input", input);
+    network.set_input_data("seq_lengths", seq_lengths);
+
+    auto outputs = network.execute();
+
+    auto output = outputs.at("reverse_sequence").get_memory();
+    auto output_ptr = output.pointer<uint16_t>();
+
+    std::vector<float> expected_results = {
+            0.0f, 3.0f, 2.0f, 1.0f
+    };
+
+    for (size_t i = 0; i < expected_results.size(); ++i) {
+        EXPECT_EQ(expected_results[i], float16_to_float32(output_ptr[i]));
+    }
+}
+
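The comparison above goes through float16_to_float32 on raw uint16_t storage. As a rough model of such a helper (a minimal sketch handling only zeros and normal numbers; subnormals, infinities and NaNs need extra branches, and the actual helper in the test utilities may differ):

    #include <cstdint>
    #include <cstring>

    float fp16_to_fp32_sketch(uint16_t h) {
        uint32_t sign = static_cast<uint32_t>(h & 0x8000u) << 16;
        if ((h & 0x7FFFu) == 0)                // +/-0: the exponent rebias below would mangle it
            return sign ? -0.0f : 0.0f;
        uint32_t exp = (h >> 10) & 0x1Fu;      // 5-bit exponent, bias 15
        uint32_t man = h & 0x3FFu;             // 10-bit mantissa
        uint32_t bits = sign | ((exp - 15 + 127) << 23) | (man << 13);  // rebias to 8 bits / 127
        float f;
        std::memcpy(&f, &bits, sizeof(f));     // bit-level reinterpretation, no conversion
        return f;
    }

This is enough for the small integral values used here (0.0 through 3.0).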
 TEST(reverese_sequence_gpu_test, fp16_d3_3_3_ba0_sa1) {
     engine engine;
 
index e43a3d2..24a2155 100644 (file)
@@ -136,6 +136,1044 @@ TEST(select_gpu_f32, select_basic_negative) {
     }
 }
 
+TEST(select_gpu_f32, select_basic_bfyx_2x2x2x2_bcast_mask_2x2x1x2) {
+    const auto& engine = get_test_engine();
+
+    auto input1 = memory::allocate(engine, { data_types::f32, format::bfyx, { 2, 2, 2, 2 } });
+    auto input2 = memory::allocate(engine, { data_types::f32, format::bfyx, { 2, 2, 2, 2 } });
+    auto mask = memory::allocate(engine, { data_types::f32, format::bfyx, { 2, 2, 1, 2 } });
+
+    topology topology;
+    topology.add(input_layout("input1", input1.get_layout()));
+    topology.add(input_layout("input2", input2.get_layout()));
+    topology.add(input_layout("mask", mask.get_layout()));
+    topology.add(cldnn::select("select", "mask", "input1", "input2"));
+
+    set_values(input1, {
+        1.f,  0.f,
+        5.f,  1.5f,
+
+        2.f,  0.f,
+        6.f,  5.2f,
+
+        3.f,  0.5f,
+        7.f,  12.f,
+
+        4.f,  -0.5f,
+        8.f,  8.f
+    });
+
+    set_values(input2, {
+        0.5f,  2.5f,
+        1.5f,  3.f,
+
+        5.f,   7.f,
+        2.f,   4.f,
+
+        15.f,  17.f,
+        8.f,   10.f,
+
+        -2.f,  6.5f,
+        -0.5f, -2.5f
+    });
+
+    set_values(mask, {
+        0.f,
+        0.f,
+
+        1.f,
+        1.f,
+
+        0.f,
+        1.f,
+
+        1.f,
+        0.f,
+    });
+
+    network network(engine, topology);
+
+    network.set_input_data("input1", input1);
+    network.set_input_data("input2", input2);
+    network.set_input_data("mask", mask);
+    auto outputs = network.execute();
+
+    auto output = outputs.at("select").get_memory();
+
+    float answers[16] = {
+        0.5f,  2.5f,
+        1.5f,  3.f,
+
+        2.f,   0.f,
+        6.f,   5.2f,
+
+        15.f,  17.f,
+        7.f,   12.f,
+
+        4.f,   -0.5f,
+        -0.5f, -2.5f
+    };
+
+    auto output_ptr = output.pointer<float>();
+
+    for (int i = 0; i < 16; i++)
+    {
+        EXPECT_TRUE(are_equal(answers[i], output_ptr[i]));
+    }
+}
+
+TEST(select_gpu_f32, select_basic_bfyx_2x2x2x2_bcast_mask_1x1x1x1) {
+    const auto& engine = get_test_engine();
+
+    auto input1 = memory::allocate(engine, { data_types::f32, format::bfyx, { 2, 2, 2, 2 } });
+    auto input2 = memory::allocate(engine, { data_types::f32, format::bfyx, { 2, 2, 2, 2 } });
+    auto mask = memory::allocate(engine, { data_types::f32, format::bfyx, { 1, 1, 1, 1 } });
+
+    topology topology;
+    topology.add(input_layout("input1", input1.get_layout()));
+    topology.add(input_layout("input2", input2.get_layout()));
+    topology.add(input_layout("mask", mask.get_layout()));
+    topology.add(cldnn::select("select", "mask", "input1", "input2"));
+
+    set_values(input1, {
+        1.f,  0.f,
+        5.f,  1.5f,
+
+        2.f,  0.f,
+        6.f,  5.2f,
+
+        3.f,  0.5f,
+        7.f,  12.f,
+
+        4.f,  -0.5f,
+        8.f,  8.f
+    });
+
+    set_values(input2, {
+        0.5f,  2.5f,
+        1.5f,  3.f,
+
+        5.f,   7.f,
+        2.f,   4.f,
+
+        15.f,  17.f,
+        8.f,   10.f,
+
+        -2.f,  6.5f,
+        -0.5f, -2.5f
+    });
+
+    set_values(mask, {
+        0.f
+    });
+
+    network network(engine, topology);
+
+    network.set_input_data("input1", input1);
+    network.set_input_data("input2", input2);
+    network.set_input_data("mask", mask);
+    auto outputs = network.execute();
+
+    auto output = outputs.at("select").get_memory();
+
+    float answers[16] = {
+        0.5f,  2.5f,
+        1.5f,  3.f,
+
+        5.f,   7.f,
+        2.f,   4.f,
+
+        15.f,  17.f,
+        8.f,   10.f,
+
+        -2.f,  6.5f,
+        -0.5f, -2.5f
+    };
+
+    auto output_ptr = output.pointer<float>();
+
+    for (int i = 0; i < 16; i++)
+    {
+        EXPECT_TRUE(are_equal(answers[i], output_ptr[i]));
+    }
+}
+
+TEST(select_gpu_f32, select_basic_comma_byxf_2x2x2x2_bcast_mask_2x2x2x1) {
+    const auto& engine = get_test_engine();
+
+    auto input1 = memory::allocate(engine, { data_types::f32, format::byxf, { 2, 2, 2, 2 } });
+    auto input2 = memory::allocate(engine, { data_types::f32, format::byxf, { 2, 2, 2, 2 } });
+    auto mask = memory::allocate(engine, { data_types::f32, format::byxf, { 2, 2, 2, 1 } });
+
+    topology topology;
+    topology.add(input_layout("input1", input1.get_layout()));
+    topology.add(input_layout("input2", input2.get_layout()));
+    topology.add(input_layout("mask", mask.get_layout()));
+    topology.add(cldnn::select("select", "mask", "input1", "input2"));
+
+    set_values(input1, {
+        1.f,   0.f,
+        5.f,   1.5f,
+
+        2.f,   0.f,
+        6.f,   5.2f,
+
+        3.f,   0.5f,
+        7.f,   12.f,
+
+        4.f,   -0.5f,
+        8.f,   8.f
+    });
+
+    set_values(input2, {
+        0.5f,  2.5f,
+        1.5f,  3.f,
+
+        5.f,   7.f,
+        2.f,   4.f,
+
+        15.f,  17.f,
+        8.f,   10.f,
+
+        -2.f,  6.5f,
+        -0.5f, -2.5f
+    });
+
+    set_values(mask, {
+        0.1f,  0.0f,
+        0.5f,  0.0f,
+
+        -0.f,  -0.1f,
+        -0.f,  -0.5f,
+    });
+
+    network network(engine, topology);
+
+    network.set_input_data("input1", input1);
+    network.set_input_data("input2", input2);
+    network.set_input_data("mask", mask);
+    auto outputs = network.execute();
+
+    auto output = outputs.at("select").get_memory();
+
+    float answers[16] = {
+        1.f,  2.5f,
+        5.f,  3.f,
+
+        2.f,  7.f,
+        6.f,  4.f,
+
+        15.f, 0.5f,
+        8.f,  12.f,
+
+        -2.f, -0.5f,
+        -0.5f, 8.f
+    };
+
+    auto output_ptr = output.pointer<float>();
+
+    for (int i = 0; i < 16; i++)
+    {
+        EXPECT_TRUE(are_equal(answers[i], output_ptr[i]));
+    }
+}
+
+TEST(select_gpu_f32, select_basic_bfyx_2x2x2x2_bcast_in2_2x2x1x2) {
+    const auto& engine = get_test_engine();
+
+    auto input1 = memory::allocate(engine, { data_types::f32, format::bfyx, { 2, 2, 2, 2 } });
+    auto input2 = memory::allocate(engine, { data_types::f32, format::bfyx, { 2, 2, 1, 2 } });
+    auto mask = memory::allocate(engine, { data_types::f32, format::bfyx, { 2, 2, 2, 2 } });
+
+    topology topology;
+    topology.add(input_layout("input1", input1.get_layout()));
+    topology.add(input_layout("input2", input2.get_layout()));
+    topology.add(input_layout("mask", mask.get_layout()));
+    topology.add(cldnn::select("select", "mask", "input1", "input2"));
+
+    set_values(input1, {
+        1.f,  0.f,
+        5.f,  1.5f,
+
+        2.f,  0.f,
+        6.f,  5.2f,
+
+        3.f,  0.5f,
+        7.f,  12.f,
+
+        4.f,  -0.5f,
+        8.f,  8.f
+    });
+
+    set_values(input2, {
+        0.5f,
+        1.5f,
+
+        5.f,
+        2.f,
+
+        15.f,
+        8.f,
+
+        -2.f,
+        -0.5f,
+    });
+
+    set_values(mask, {
+        0.f,  0.f,
+        0.f,  0.f,
+
+        1.f,  1.f,
+        1.f,  1.f,
+
+        0.f,  1.f,
+        0.f,  1.f,
+
+        1.f,  0.f,
+        1.f,  0.f
+    });
+
+    network network(engine, topology);
+
+    network.set_input_data("input1", input1);
+    network.set_input_data("input2", input2);
+    network.set_input_data("mask", mask);
+    auto outputs = network.execute();
+
+    auto output = outputs.at("select").get_memory();
+
+    float answers[16] = {
+        0.5f,  0.5f,
+        1.5f,  1.5f,
+
+        2.f,   0.f,
+        6.f,   5.2f,
+
+        15.f,  0.5f,
+        8.f,   12.f,
+
+        4.f,   -2.f,
+        8.f,   -0.5f
+    };
+
+    auto output_ptr = output.pointer<float>();
+
+    for (int i = 0; i < 16; i++)
+    {
+        EXPECT_TRUE(are_equal(answers[i], output_ptr[i]));
+    }
+}
+
+TEST(select_gpu_f32, select_basic_bfyx_2x2x2x2_bcast_in1_2x2x2x1_bcast_in2_2x2x1x2) {
+    const auto& engine = get_test_engine();
+
+    auto input1 = memory::allocate(engine, { data_types::f32, format::bfyx, { 2, 2, 2, 1 } });
+    auto input2 = memory::allocate(engine, { data_types::f32, format::bfyx, { 2, 2, 1, 2 } });
+    auto mask = memory::allocate(engine, { data_types::f32, format::bfyx, { 2, 2, 2, 2 } });
+
+    topology topology;
+    topology.add(input_layout("input1", input1.get_layout()));
+    topology.add(input_layout("input2", input2.get_layout()));
+    topology.add(input_layout("mask", mask.get_layout()));
+    topology.add(cldnn::select("select", "mask", "input1", "input2"));
+
+    set_values(input1, {
+        1.f,  0.f,
+
+        2.f,  0.f,
+
+        3.f,  0.5f,
+
+        4.f,  -0.5f,
+    });
+
+    set_values(input2, {
+        0.5f,
+        1.5f,
+
+        5.f,
+        2.f,
+
+        15.f,
+        8.f,
+
+        -2.f,
+        -0.5f,
+    });
+
+    set_values(mask, {
+        0.f,  0.f,
+        0.f,  0.f,
+
+        1.f,  1.f,
+        1.f,  1.f,
+
+        0.f,  1.f,
+        0.f,  1.f,
+
+        1.f,  0.f,
+        1.f,  0.f
+    });
+
+    network network(engine, topology);
+
+    network.set_input_data("input1", input1);
+    network.set_input_data("input2", input2);
+    network.set_input_data("mask", mask);
+    auto outputs = network.execute();
+
+    auto output = outputs.at("select").get_memory();
+
+    float answers[16] = {
+        0.5f,  0.5f,
+        1.5f,  1.5f,
+
+        2.f,   0.f,
+        2.f,   0.f,
+
+        15.f,  0.5f,
+        8.f,   0.5f,
+
+        4.f,   -2.f,
+        4.f,   -0.5f
+    };
+
+    auto output_ptr = output.pointer<float>();
+
+    for (int i = 0; i < 16; i++)
+    {
+        EXPECT_TRUE(are_equal(answers[i], output_ptr[i]));
+    }
+}
+
+TEST(select_gpu_f32, select_basic_bfyx_2x2x2x2_bcast_mask_2x1x2x2_in1_1x2x2x2_in2_2x2x1x2) {
+    const auto& engine = get_test_engine();
+
+    auto input1 = memory::allocate(engine, { data_types::f32, format::bfyx, { 1, 2, 2, 2 } });
+    auto input2 = memory::allocate(engine, { data_types::f32, format::bfyx, { 2, 2, 1, 2 } });
+    auto mask = memory::allocate(engine, { data_types::f32, format::bfyx, { 2, 1, 2, 2 } });
+
+    topology topology;
+    topology.add(input_layout("input1", input1.get_layout()));
+    topology.add(input_layout("input2", input2.get_layout()));
+    topology.add(input_layout("mask", mask.get_layout()));
+    topology.add(cldnn::select("select", "mask", "input1", "input2"));
+
+    set_values(input1, {
+        1.f,  0.f,
+        5.f,  1.5f,
+
+        2.f,  0.f,
+        6.f,  5.2f
+    });
+
+    set_values(input2, {
+        0.5f,
+        1.5f,
+
+        5.f,
+        2.f,
+
+        15.f,
+        8.f,
+
+        -2.f,
+        -0.5f,
+    });
+
+    set_values(mask, {
+        1.f,  0.f,
+        1.f,  0.f,
+
+        0.f,  1.f,
+        0.f,  1.f,
+    });
+
+    network network(engine, topology);
+
+    network.set_input_data("input1", input1);
+    network.set_input_data("input2", input2);
+    network.set_input_data("mask", mask);
+    auto outputs = network.execute();
+
+    auto output = outputs.at("select").get_memory();
+
+    float answers[16] = {
+        1.f,   0.5f,
+        5.f,   1.5f,
+
+        2.f,   5.f,
+        6.f,   2.f,
+
+        15.f,  0.f,
+        8.f,   1.5f,
+
+        -2.f,  0.f,
+        -0.5f, 5.2f
+    };
+
+    auto output_ptr = output.pointer<float>();
+
+    for (int i = 0; i < 16; i++)
+    {
+        EXPECT_TRUE(are_equal(answers[i], output_ptr[i]));
+    }
+}
+
+TEST(select_gpu_f32, select_basic_comma_byxf_2x2x2x2_bcast_mask_2x1x2x2_in1_2x2x2x1_in2_2x2x1x2) {
+    const auto& engine = get_test_engine();
+
+    auto input1 = memory::allocate(engine, { data_types::f32, format::byxf, { 2, 2, 2, 1 } });
+    auto input2 = memory::allocate(engine, { data_types::f32, format::byxf, { 2, 2, 1, 2 } });
+    auto mask = memory::allocate(engine, { data_types::f32, format::byxf, { 2, 1, 2, 2 } });
+
+    topology topology;
+    topology.add(input_layout("input1", input1.get_layout()));
+    topology.add(input_layout("input2", input2.get_layout()));
+    topology.add(input_layout("mask", mask.get_layout()));
+    topology.add(cldnn::select("select", "mask", "input1", "input2"));
+
+    set_values(input1, {
+        1.f,  0.f,
+        5.f,  1.5f,
+
+        3.f,  0.5f,
+        7.f,  12.f,
+    });
+
+    set_values(input2, {
+        0.5f,  2.5f,
+
+        5.f,   7.f,
+
+        15.f,  17.f,
+
+        -2.f,  6.5f,
+    });
+
+    set_values(mask, {
+        0.f,
+        0.f,
+
+        0.1f,
+        0.5f,
+
+        -0.f,
+        -0.5f,
+
+        -0.7f,
+        -0.f
+    });
+
+    network network(engine, topology);
+
+    network.set_input_data("input1", input1);
+    network.set_input_data("input2", input2);
+    network.set_input_data("mask", mask);
+    auto outputs = network.execute();
+
+    auto output = outputs.at("select").get_memory();
+
+    float answers[16] = {
+        0.5f,  2.5f,
+        0.5f,  2.5f,
+
+        1.f,   0.f,
+        5.f,   1.5f,
+
+        15.f,  17.f,
+        7.f,   12.f,
+
+        3.f,   0.5f,
+        -2.f,  6.5f
+    };
+
+    auto output_ptr = output.pointer<float>();
+
+    for (int i = 0; i < 16; i++)
+    {
+        EXPECT_TRUE(are_equal(answers[i], output_ptr[i]));
+    }
+}
+
+TEST(select_gpu_f32, select_basic_bfyx_2x2x2x2_bcast_in2_1x1x1x1) {
+    const auto& engine = get_test_engine();
+
+    auto input1 = memory::allocate(engine, { data_types::f32, format::bfyx, { 2, 2, 2, 2 } });
+    auto input2 = memory::allocate(engine, { data_types::f32, format::bfyx, { 1, 1, 1, 1 } });
+    auto mask = memory::allocate(engine, { data_types::f32, format::bfyx, { 2, 2, 2, 2 } });
+
+    topology topology;
+    topology.add(input_layout("input1", input1.get_layout()));
+    topology.add(input_layout("input2", input2.get_layout()));
+    topology.add(input_layout("mask", mask.get_layout()));
+    topology.add(cldnn::select("select", "mask", "input1", "input2"));
+
+    set_values(input1, {
+        1.f,  0.f,
+        5.f,  1.5f,
+
+        2.f,  0.f,
+        6.f,  5.2f,
+
+        3.f,  0.5f,
+        7.f,  12.f,
+
+        4.f,  -0.5f,
+        8.f,  8.f
+    });
+
+    set_values(input2, {
+        1.f
+    });
+
+    set_values(mask, {
+        0.f,  0.f,
+        0.f,  0.f,
+
+        1.f,  1.f,
+        1.f,  1.f,
+
+        0.f,  1.f,
+        0.f,  1.f,
+
+        1.f,  0.f,
+        1.f,  0.f
+    });
+
+    network network(engine, topology);
+
+    network.set_input_data("input1", input1);
+    network.set_input_data("input2", input2);
+    network.set_input_data("mask", mask);
+    auto outputs = network.execute();
+
+    auto output = outputs.at("select").get_memory();
+
+    float answers[16] = {
+        1.f,   1.f,
+        1.f,   1.f,
+
+        2.f,   0.f,
+        6.f,   5.2f,
+
+        1.f,   0.5f,
+        1.f,   12.f,
+
+        4.f,   1.f,
+        8.f,   1.f
+    };
+
+    auto output_ptr = output.pointer<float>();
+
+    for (int i = 0; i < 16; i++)
+    {
+        EXPECT_TRUE(are_equal(answers[i], output_ptr[i]));
+    }
+}
+
+TEST(select_gpu_f32, select_basic_comma_byxf_2x2x2x2_bcast_in2_2x2x2x1) {
+    const auto& engine = get_test_engine();
+
+    auto input1 = memory::allocate(engine, { data_types::f32, format::byxf, { 2, 2, 2, 2 } });
+    auto input2 = memory::allocate(engine, { data_types::f32, format::byxf, { 2, 2, 2, 1 } });
+    auto mask = memory::allocate(engine, { data_types::f32, format::byxf, { 2, 2, 2, 2 } });
+
+    topology topology;
+    topology.add(input_layout("input1", input1.get_layout()));
+    topology.add(input_layout("input2", input2.get_layout()));
+    topology.add(input_layout("mask", mask.get_layout()));
+    topology.add(cldnn::select("select", "mask", "input1", "input2"));
+
+    set_values(input1, {
+        1.f,   0.f,
+        5.f,   1.5f,
+
+        2.f,   0.f,
+        6.f,   5.2f,
+
+        3.f,   0.5f,
+        7.f,   12.f,
+
+        4.f,   -0.5f,
+        8.f,   8.f
+    });
+
+    set_values(input2, {
+        0.5f,  2.5f,
+        1.5f,  3.f,
+
+        15.f,  17.f,
+        8.f,   10.f,
+    });
+
+    set_values(mask, {
+        0.1f,  0.3f,
+        0.5f,  0.7f,
+
+        0.f,   0.f,
+        0.f,   0.f,
+
+        -0.f,  -0.1f,
+        -0.f,  -0.5f,
+
+        -0.7f, -0.f,
+        -1.5f, -0.f
+    });
+
+    network network(engine, topology);
+
+    network.set_input_data("input1", input1);
+    network.set_input_data("input2", input2);
+    network.set_input_data("mask", mask);
+    auto outputs = network.execute();
+
+    auto output = outputs.at("select").get_memory();
+
+    float answers[16] = {
+        1.f,  0.f,
+        5.f,  1.5f,
+
+        0.5f, 2.5f,
+        1.5f, 3.f,
+
+        15.f, 0.5f,
+        8.f,  12.f,
+
+        4.f,  17.0f,
+        8.f,  10.0f
+    };
+
+    auto output_ptr = output.pointer<float>();
+
+    for (int i = 0; i < 16; i++)
+    {
+        EXPECT_TRUE(are_equal(answers[i], output_ptr[i]));
+    }
+}
+
+TEST(select_gpu_f32, select_basic_bfyx_2x2x2x2_bcast_in1_2x2x1x2) {
+    const auto& engine = get_test_engine();
+
+    auto input1 = memory::allocate(engine, { data_types::f32, format::bfyx, { 2, 2, 1, 2 } });
+    auto input2 = memory::allocate(engine, { data_types::f32, format::bfyx, { 2, 2, 2, 2 } });
+    auto mask = memory::allocate(engine, { data_types::f32, format::bfyx, { 2, 2, 2, 2 } });
+
+    topology topology;
+    topology.add(input_layout("input1", input1.get_layout()));
+    topology.add(input_layout("input2", input2.get_layout()));
+    topology.add(input_layout("mask", mask.get_layout()));
+    topology.add(cldnn::select("select", "mask", "input1", "input2"));
+
+    set_values(input1, {
+        1.f,
+        5.f,
+
+        2.f,
+        6.f,
+
+        3.f,
+        7.f,
+
+        4.f,
+        8.f,
+    });
+
+    set_values(input2, {
+        0.5f, 2.5f,
+        1.5f, 1.f,
+
+        5.f,  7.f,
+        2.f,  4.f,
+
+        15.f, 17.f,
+        8.f,  10.f,
+
+        -2.f,  6.5f,
+        -0.5f, -2.5f
+    });
+
+    set_values(mask, {
+        0.f,  0.f,
+        0.f,  0.f,
+
+        1.f,  1.f,
+        1.f,  1.f,
+
+        0.f,  1.f,
+        0.f,  1.f,
+
+        1.f,  0.f,
+        1.f,  0.f
+    });
+
+    network network(engine, topology);
+
+    network.set_input_data("input1", input1);
+    network.set_input_data("input2", input2);
+    network.set_input_data("mask", mask);
+    auto outputs = network.execute();
+
+    auto output = outputs.at("select").get_memory();
+
+    float answers[16] = {
+        0.5f,  2.5f,
+        1.5f,  1.f,
+
+        2.f,   2.f,
+        6.f,   6.f,
+
+        15.f,  3.f,
+        8.f,   7.f,
+
+        4.f,   6.5f,
+        8.f,   -2.5f
+    };
+
+    auto output_ptr = output.pointer<float>();
+
+    for (int i = 0; i < 16; i++)
+    {
+        EXPECT_TRUE(are_equal(answers[i], output_ptr[i]));
+    }
+}
+
+TEST(select_gpu_f32, select_basic_bfyx_2x2x2x2_bcast_in1_1x1x1x1) {
+    const auto& engine = get_test_engine();
+
+    auto input1 = memory::allocate(engine, { data_types::f32, format::bfyx, { 1, 1, 1, 1 } });
+    auto input2 = memory::allocate(engine, { data_types::f32, format::bfyx, { 2, 2, 2, 2 } });
+    auto mask = memory::allocate(engine, { data_types::f32, format::bfyx, { 2, 2, 2, 2 } });
+
+    topology topology;
+    topology.add(input_layout("input1", input1.get_layout()));
+    topology.add(input_layout("input2", input2.get_layout()));
+    topology.add(input_layout("mask", mask.get_layout()));
+    topology.add(cldnn::select("select", "mask", "input1", "input2"));
+
+    set_values(input1, {
+        1.f
+    });
+
+    set_values(input2, {
+        0.5f, 2.5f,
+        1.5f, 1.f,
+
+        5.f,  7.f,
+        2.f,  4.f,
+
+        15.f, 17.f,
+        8.f,  10.f,
+
+        -2.f,  6.5f,
+        -0.5f, -2.5f
+    });
+
+    set_values(mask, {
+        0.f,  0.f,
+        0.f,  0.f,
+
+        1.f,  1.f,
+        1.f,  1.f,
+
+        0.f,  1.f,
+        0.f,  1.f,
+
+        1.f,  0.f,
+        1.f,  0.f
+    });
+
+    network network(engine, topology);
+
+    network.set_input_data("input1", input1);
+    network.set_input_data("input2", input2);
+    network.set_input_data("mask", mask);
+    auto outputs = network.execute();
+
+    auto output = outputs.at("select").get_memory();
+
+    float answers[16] = {
+        0.5f,  2.5f,
+        1.5f,  1.f,
+
+        1.f,   1.f,
+        1.f,   1.f,
+
+        15.f,  1.f,
+        8.f,   1.f,
+
+        1.f,   6.5f,
+        1.f,   -2.5f
+    };
+
+    auto output_ptr = output.pointer<float>();
+
+    for (int i = 0; i < 16; i++)
+    {
+        EXPECT_TRUE(are_equal(answers[i], output_ptr[i]));
+    }
+}
+
+TEST(select_gpu_f32, select_basic_comma_byxf_2x2x2x2_bcast_in1_2x2x2x1) {
+    const auto& engine = get_test_engine();
+
+    auto input1 = memory::allocate(engine, { data_types::f32, format::byxf, { 2, 2, 2, 1 } });
+    auto input2 = memory::allocate(engine, { data_types::f32, format::byxf, { 2, 2, 2, 2 } });
+    auto mask = memory::allocate(engine, { data_types::f32, format::byxf, { 2, 2, 2, 2 } });
+
+    topology topology;
+    topology.add(input_layout("input1", input1.get_layout()));
+    topology.add(input_layout("input2", input2.get_layout()));
+    topology.add(input_layout("mask", mask.get_layout()));
+    topology.add(cldnn::select("select", "mask", "input1", "input2"));
+
+    set_values(input1, {
+        1.f,  0.f,
+        5.f,  1.5f,
+
+        3.f,  0.5f,
+        7.f,  12.f,
+    });
+
+    set_values(input2, {
+        0.5f,  2.5f,
+        1.5f,  3.f,
+
+        5.f,   7.f,
+        2.f,   4.f,
+
+        15.f,  17.f,
+        8.f,   10.f,
+
+        -2.f,  6.5f,
+        -0.5f, -2.5f
+    });
+
+    set_values(mask, {
+        0.f,   0.f,
+        0.f,   0.f,
+
+        0.1f,  0.3f,
+        0.5f,  0.7f,
+
+        -0.f,  -0.1f,
+        -0.f,  -0.5f,
+
+        -0.7f, -0.f,
+        -1.5f, -0.f
+    });
+
+    network network(engine, topology);
+
+    network.set_input_data("input1", input1);
+    network.set_input_data("input2", input2);
+    network.set_input_data("mask", mask);
+    auto outputs = network.execute();
+
+    auto output = outputs.at("select").get_memory();
+
+    float answers[16] = {
+        0.5f, 2.5f,
+        1.5f, 3.f,
+
+        1.f,  0.f,
+        5.f,  1.5f,
+
+        15.f, 0.5f,
+        8.f,  12.f,
+
+        3.f,  6.5f,
+        7.f,  -2.5f
+    };
+
+    auto output_ptr = output.pointer<float>();
+
+    for (int i = 0; i < 16; i++)
+    {
+        EXPECT_TRUE(are_equal(answers[i], output_ptr[i]));
+    }
+}
+
+TEST(select_gpu_f32, select_basic_comma_byxf_2x2x2x2_bcast_mask_2x1x2x2_in1_2x2x2x1) {
+    const auto& engine = get_test_engine();
+
+    auto input1 = memory::allocate(engine, { data_types::f32, format::byxf, { 2, 2, 2, 1 } });
+    auto input2 = memory::allocate(engine, { data_types::f32, format::byxf, { 2, 2, 2, 2 } });
+    auto mask = memory::allocate(engine, { data_types::f32, format::byxf, { 2, 1, 2, 2 } });
+
+    topology topology;
+    topology.add(input_layout("input1", input1.get_layout()));
+    topology.add(input_layout("input2", input2.get_layout()));
+    topology.add(input_layout("mask", mask.get_layout()));
+    topology.add(cldnn::select("select", "mask", "input1", "input2"));
+
+    set_values(input1, {
+        1.f,  0.f,
+        5.f,  1.5f,
+
+        3.f,  0.5f,
+        7.f,  12.f,
+    });
+
+    set_values(input2, {
+        0.5f,  2.5f,
+        1.5f,  3.f,
+
+        5.f,   7.f,
+        2.f,   4.f,
+
+        15.f,  17.f,
+        8.f,   10.f,
+
+        -2.f,  6.5f,
+        -0.5f, -2.5f
+    });
+
+    set_values(mask, {
+        0.f,
+        0.f,
+
+        0.1f,
+        0.5f,
+
+        -0.f,
+        -0.5f,
+
+        -0.7f,
+        -0.f
+    });
+
+    network network(engine, topology);
+
+    network.set_input_data("input1", input1);
+    network.set_input_data("input2", input2);
+    network.set_input_data("mask", mask);
+    auto outputs = network.execute();
+
+    auto output = outputs.at("select").get_memory();
+
+    float answers[16] = {
+        0.5f,  2.5f,
+        1.5f,  3.f,
+
+        1.f,   0.f,
+        5.f,   1.5f,
+
+        15.f,  17.f,
+        7.f,   12.f,
+
+        3.f,   0.5f,
+        -0.5f, -2.5f
+    };
+
+    auto output_ptr = output.pointer<float>();
+
+    for (int i = 0; i < 16; i++)
+    {
+        EXPECT_TRUE(are_equal(answers[i], output_ptr[i]));
+    }
+}
+
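Every broadcast case above follows the same numpy-style rule: a dimension of extent 1 in the mask or in either input is replicated along that axis up to the output shape. A scalar reference for that rule (a sketch encoding the behaviour the expected outputs assume: size-1 dimensions clamp to index 0, and a mask element counts as true iff it compares unequal to zero, so -0.0f selects input2; this is not the clDNN kernel itself):

    #include <array>
    #include <cstddef>
    #include <vector>

    using Shape = std::array<std::size_t, 4>;  // b, f and the two spatial extents, in the order of the tensor literals above

    // Linear offset into a buffer; "% s[d]" clamps size-1 dims to index 0.
    static std::size_t at(const Shape& s, std::size_t b, std::size_t f,
                          std::size_t y, std::size_t x) {
        return (((b % s[0]) * s[1] + f % s[1]) * s[2] + y % s[2]) * s[3] + x % s[3];
    }

    std::vector<float> select_ref(const Shape& out,
                                  const std::vector<float>& mask, const Shape& ms,
                                  const std::vector<float>& in1, const Shape& s1,
                                  const std::vector<float>& in2, const Shape& s2) {
        std::vector<float> res(out[0] * out[1] * out[2] * out[3]);
        for (std::size_t b = 0; b < out[0]; ++b)
            for (std::size_t f = 0; f < out[1]; ++f)
                for (std::size_t y = 0; y < out[2]; ++y)
                    for (std::size_t x = 0; x < out[3]; ++x)
                        res[at(out, b, f, y, x)] = mask[at(ms, b, f, y, x)] != 0.0f
                            ? in1[at(s1, b, f, y, x)]
                            : in2[at(s2, b, f, y, x)];
        return res;
    }

The byxf tests differ only in physical memory order; the logical broadcasting rule is the same.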
 TEST(select_gpu_f32, select_basic_comma) {
     const auto& engine = get_test_engine();
 
index df6e347..e45f624 100644 (file)
@@ -175,10 +175,8 @@ struct IEUnit {
     explicit IEUnit(const cv::gapi::ie::detail::ParamDesc &pp)
         : params(pp) {
 
-        IE::CNNNetReader reader;
-        reader.ReadNetwork(params.model_path);
-        reader.ReadWeights(params.weights_path);
-        net = reader.getNetwork();
+        IE::Core ie;
+        net = ie.ReadNetwork(params.model_path, params.weights_path);
         inputs = net.getInputsInfo();
         outputs = net.getOutputsInfo();
 
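The same migration recurs in the hunks below: the deprecated CNNNetReader / PluginDispatcher pair collapses into a single IE::Core object. A minimal end-to-end sketch of the new flow (file names are placeholders):

    #include <inference_engine.hpp>

    int main() {
        namespace IE = InferenceEngine;
        IE::Core ie;                                           // replaces both CNNNetReader and PluginDispatcher
        auto net = ie.ReadNetwork("model.xml", "model.bin");   // was ReadNetwork + ReadWeights + getNetwork
        auto exec_net = ie.LoadNetwork(net, "CPU");            // was getPluginByDevice("CPU").LoadNetwork(net, {})
        auto request = exec_net.CreateInferRequest();
        request.Infer();                                       // inputs left at their defaults for brevity
        return 0;
    }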
index d81285f..b9538d7 100644 (file)
@@ -94,19 +94,16 @@ TEST(TestAgeGenderIE, InferBasicTensor)
 
     IE::Blob::Ptr ie_age, ie_gender;
     {
-        IE::CNNNetReader reader;
-        reader.ReadNetwork(topology_path);
-        reader.ReadWeights(weights_path);
-        auto net = reader.getNetwork();
+        IE::Core ie;
+        auto net = ie.ReadNetwork(topology_path, weights_path);
 
         const auto &iedims = net.getInputsInfo().begin()->second->getTensorDesc().getDims();
               auto  cvdims = cv::gapi::ie::util::to_ocv(iedims);
         in_mat.create(cvdims, CV_32F);
         cv::randu(in_mat, -1, 1);
 
-        auto plugin = IE::PluginDispatcher().getPluginByDevice("CPU");
-        auto plugin_net = plugin.LoadNetwork(net, {});
-        auto infer_request = plugin_net.CreateInferRequest();
+        auto execNet = ie.LoadNetwork(net, "CPU");
+        auto infer_request = execNet.CreateInferRequest();
 
         infer_request.SetBlob("data", cv::gapi::ie::util::to_ie(in_mat));
         infer_request.Infer();
@@ -153,17 +150,14 @@ TEST(TestAgeGenderIE, InferBasicImage)
     namespace IE = InferenceEngine;
     IE::Blob::Ptr ie_age, ie_gender;
     {
-        IE::CNNNetReader reader;
-        reader.ReadNetwork(topology_path);
-        reader.ReadWeights(weights_path);
-        auto net = reader.getNetwork();
+        IE::Core ie;
+        auto net = ie.ReadNetwork(topology_path, weights_path);
         auto &ii = net.getInputsInfo().at("data");
         ii->setPrecision(IE::Precision::U8);
         ii->setLayout(IE::Layout::NHWC);
         ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
 
-        auto plugin = IE::PluginDispatcher().getPluginByDevice("CPU");
-        auto plugin_net = plugin.LoadNetwork(net, {});
+        auto plugin_net = ie.LoadNetwork(net, "CPU");
         auto infer_request = plugin_net.CreateInferRequest();
 
         infer_request.SetBlob("data", cv::gapi::ie::util::to_ie(in_mat));
@@ -216,17 +210,14 @@ TEST(TestAgeGenderIE, InferROIList)
     namespace IE = InferenceEngine;
     std::vector<cv::Mat> ie_age, ie_gender;
     {
-        IE::CNNNetReader reader;
-        reader.ReadNetwork(topology_path);
-        reader.ReadWeights(weights_path);
-        auto net = reader.getNetwork();
+        IE::Core ie;
+        auto net = ie.ReadNetwork(topology_path, weights_path);
         auto &ii = net.getInputsInfo().at("data");
         ii->setPrecision(IE::Precision::U8);
         ii->setLayout(IE::Layout::NHWC);
         ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
 
-        auto plugin = IE::PluginDispatcher().getPluginByDevice("CPU");
-        auto plugin_net = plugin.LoadNetwork(net, {});
+        auto plugin_net = ie.LoadNetwork(net, "CPU");
         auto infer_request = plugin_net.CreateInferRequest();
         auto frame_blob = cv::gapi::ie::util::to_ie(in_mat);
 
index 7ccba0c..1aeea9c 100644 (file)
@@ -69,6 +69,8 @@ const char MKLDNN_API *mkldnn_prim_kind2str(mkldnn_primitive_kind_t v);
 const char MKLDNN_API *mkldnn_alg_kind2str(mkldnn_alg_kind_t v);
 const char MKLDNN_API *mkldnn_rnn_direction2str(mkldnn_rnn_direction_t v);
 
+mkldnn_memory_format_t MKLDNN_API mkldnn_str2fmt(const char *str);
+
 #ifdef __cplusplus
 }
 #endif
index 4c2395f..71e2c79 100644 (file)
@@ -20,6 +20,7 @@
 
 #include "mkldnn_debug.h"
 #include "mkldnn_types.h"
+#include <cstring>
 
 const char *mkldnn_status2str(mkldnn_status_t v) {
     if (v == mkldnn_success) return "success";
@@ -55,6 +56,134 @@ const char *mkldnn_rmode2str(mkldnn_round_mode_t v) {
     return "unknown rmode";
 }
 
+mkldnn_memory_format_t mkldnn_str2fmt(const char *str) {
+#define CASE(_fmt) do { \
+    if (!strcmp(#_fmt, str) \
+            || !strcmp("mkldnn_" #_fmt, str)) \
+        return mkldnn_ ## _fmt; \
+} while (0)
+    CASE(x);
+    CASE(nc);
+    CASE(ncw);
+    CASE(nwc);
+    CASE(nCw4c);
+    CASE(nCw8c);
+    CASE(nCw16c);
+    CASE(nchw);
+    CASE(nhwc);
+    CASE(chwn);
+    CASE(nChw4c);
+    CASE(nChw8c);
+    CASE(nChw16c);
+    CASE(oi);
+    CASE(io);
+    CASE(oiw);
+    CASE(wio);
+    CASE(OIw16i16o);
+    CASE(OIw16o16i);
+    CASE(Oiw16o);
+    CASE(Owi16o);
+    CASE(OIw8i16o2i);
+    CASE(OIw4i16o4i);
+    CASE(OIw4i16o4i_s8s8);
+    CASE(oihw);
+    CASE(ihwo);
+    CASE(hwio);
+    CASE(iohw);
+    CASE(hwio_s8s8);
+    CASE(dhwio);
+    CASE(OIhw8i8o);
+    CASE(OIhw16i16o);
+    CASE(OIhw8i16o2i);
+    CASE(IOhw8i16o2i);
+    CASE(OIdhw8i16o2i);
+    CASE(OIhw4i16o4i);
+    CASE(OIhw4i16o4i_s8s8);
+    CASE(OIdhw4i16o4i);
+    CASE(OIdhw4i16o4i_s8s8);
+    CASE(OIhw8o16i2o);
+    CASE(IOhw8o16i2o);
+    CASE(OIhw8o8i);
+    CASE(OIhw16o16i);
+    CASE(IOhw16o16i);
+    CASE(Oihw16o);
+    CASE(Ohwi8o);
+    CASE(Ohwi16o);
+    CASE(goiw);
+    CASE(goihw);
+    CASE(hwigo);
+    CASE(giohw);
+    CASE(hwigo_s8s8);
+    CASE(dhwigo_s8s8);
+    CASE(dhwigo);
+    CASE(dhwio_s8s8);
+    CASE(goiw);
+    CASE(gOIw16i16o);
+    CASE(gOIw16o16i);
+    CASE(gOiw16o);
+    CASE(gOwi16o);
+    CASE(gOIw8i16o2i);
+    CASE(gOIw4i16o4i);
+    CASE(gOIw4i16o4i_s8s8);
+    CASE(Goiw16g);
+    CASE(Goiw16g_s8s8);
+    CASE(gOIhw8i8o);
+    CASE(gOIhw16i16o);
+    CASE(gOIhw8i16o2i);
+    CASE(gIOhw8i16o2i);
+    CASE(gOIdhw8i16o2i);
+    CASE(gOIhw2i8o4i);
+    CASE(gOIhw2i8o4i_s8s8);
+    CASE(gOIhw4i16o4i);
+    CASE(gOIhw4i16o4i_s8s8);
+    CASE(gOIdhw4i16o4i);
+    CASE(gOIdhw4i16o4i_s8s8);
+    CASE(gOIhw8o16i2o);
+    CASE(gIOhw8o16i2o);
+    CASE(gOIhw4o4i);
+    CASE(gOIhw4o4i_s8s8);
+    CASE(gOIhw8o8i);
+    CASE(gOIhw16o16i);
+    CASE(gIOhw16o16i);
+    CASE(gOihw16o);
+    CASE(gOhwi8o);
+    CASE(gOhwi16o);
+    CASE(Goihw8g);
+    CASE(Goihw8g_s8s8);
+    CASE(Goihw16g);
+    CASE(Goihw16g_s8s8);
+    CASE(Goidhw8g);
+    CASE(Goidhw8g_s8s8);
+    CASE(Goidhw16g);
+    CASE(Goidhw16g_s8s8);
+    CASE(oIhw8i);
+    CASE(oIhw16i);
+    CASE(ncdhw);
+    CASE(ndhwc);
+    CASE(oidhw);
+    CASE(goidhw);
+    CASE(nCdhw4c);
+    CASE(nCdhw8c);
+    CASE(nCdhw16c);
+    CASE(OIdhw16i16o);
+    CASE(gOIdhw16i16o);
+    CASE(OIdhw16o16i);
+    CASE(gOIdhw16o16i);
+    CASE(Oidhw16o);
+    CASE(Odhwi16o);
+    CASE(gOidhw16o);
+    CASE(gOdhwi16o);
+    CASE(ntc);
+    CASE(tnc);
+    CASE(ldsnc);
+    CASE(ldigo);
+    CASE(ldgoi);
+    CASE(ldgo);
+#undef CASE
+    assert(!"unknown memory format");
+    return mkldnn_format_undef;
+}
+
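The CASE macro above relies on stringification (#_fmt) and token pasting (mkldnn_ ## _fmt), so each entry accepts the format name with or without the mkldnn_ prefix. For example, CASE(nchw); expands to:

    do {
        if (!strcmp("nchw", str) || !strcmp("mkldnn_nchw", str))
            return mkldnn_nchw;
    } while (0);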
 const char *mkldnn_fmt2str(mkldnn_memory_format_t v) {
     if (v == mkldnn_format_undef) return "undef";
     if (v == mkldnn_any) return "any";
index 4c0ab56..a1ebc17 100644 (file)
@@ -132,52 +132,76 @@ void jit_uni_depthwise_injector_f32<isa>::assign_regs() {
 
 template <cpu_isa_t isa>
 void jit_uni_depthwise_injector_f32<isa>::scale_shift_compute_vector(const Vmm &vmm_src,
-        const Xbyak::Reg64& p_weights, const Xbyak::Reg64& p_bias) {
+        const Xbyak::Reg64& p_weights, const Xbyak::Reg64& p_bias, bool is_broadcast) {
     if (isa == sse42) {
-        h->movups(vmm_mask, h->ptr[p_weights]);
+        if (is_broadcast)
+            h->uni_vbroadcastss(vmm_mask, h->ptr[p_weights]);
+        else
+            h->movups(vmm_mask, h->ptr[p_weights]);
         h->mulps(vmm_src, vmm_mask);
-        h->movups(vmm_mask, h->ptr[p_bias]);
+        if (is_broadcast)
+            h->uni_vbroadcastss(vmm_mask, h->ptr[p_bias]);
+        else
+            h->movups(vmm_mask, h->ptr[p_bias]);
         h->addps(vmm_src, vmm_mask);
     } else {
-        h->uni_vmulps(vmm_src, vmm_src, h->ptr[p_weights]);
-        h->uni_vaddps(vmm_src, vmm_src, h->ptr[p_bias]);
+        if (is_broadcast) {
+            h->uni_vbroadcastss(vmm_mask, h->ptr[p_weights]);
+            h->uni_vmulps(vmm_src, vmm_src, vmm_mask);
+            h->uni_vbroadcastss(vmm_mask, h->ptr[p_bias]);
+            h->uni_vaddps(vmm_src, vmm_src, vmm_mask);
+        } else {
+            h->uni_vmulps(vmm_src, vmm_src, h->ptr[p_weights]);
+            h->uni_vaddps(vmm_src, vmm_src, h->ptr[p_bias]);
+        }
     };
 }
 
 template <cpu_isa_t isa>
 void jit_uni_depthwise_injector_f32<isa>::prelu_compute_vector(const Vmm &vmm_src,
-        const Xbyak::Reg64& p_weights, const Xbyak::Reg64& p_bias) {
+        const Xbyak::Reg64& p_weights, const Xbyak::Reg64& p_bias, bool is_broadcast) {
     const unsigned char _cmp_gt_os = 6;
     const unsigned char _cmp_lt_os = 1;
 
     if (isa == sse42) {
         h->pxor(vmm_mask, vmm_mask);
         h->cmpps(vmm_mask, vmm_src, _cmp_gt_os);
-        h->movups(vmm_aux0, h->ptr[p_weights]);
+        if (is_broadcast)
+            h->uni_vbroadcastss(vmm_aux0, h->ptr[p_weights]);
+        else
+            h->movups(vmm_aux0, h->ptr[p_weights]);
         h->mulps(vmm_aux0, vmm_src);
         h->blendvps(vmm_src, vmm_aux0);
     } else if (isa == avx2) {
+        if (is_broadcast) {
+            h->uni_vbroadcastss(vmm_mask, h->ptr[p_weights]);
+            h->vmulps(vmm_aux0, vmm_src, vmm_mask);
+        } else
+            h->vmulps(vmm_aux0, vmm_src, h->ptr[p_weights]);
         h->vxorps(vmm_mask, vmm_mask, vmm_mask);
         h->vcmpgtps(vmm_mask, vmm_src, vmm_mask);
-        h->vmulps(vmm_aux0, vmm_src, h->ptr[p_weights]);
         h->vblendvps(vmm_src, vmm_aux0, vmm_src, vmm_mask);
     } else if (isa == avx512_common) {
         h->vxorpd(vmm_mask, vmm_mask, vmm_mask);
         h->vmovups(vmm_aux0, vmm_src);
         h->vcmpps(k_mask, vmm_src, vmm_mask, _cmp_lt_os);
-        h->vmulps(vmm_src | k_mask, vmm_aux0, h->ptr[p_weights]);
+        if (is_broadcast) {
+            h->uni_vbroadcastss(vmm_mask, h->ptr[p_weights]);
+            h->vmulps(vmm_src | k_mask, vmm_aux0, vmm_mask);
+        } else
+            h->vmulps(vmm_src | k_mask, vmm_aux0, h->ptr[p_weights]);
     }
 }
 
 template <cpu_isa_t isa>
 void jit_uni_depthwise_injector_f32<isa>::compute_body(size_t start_idx, size_t end_idx,
-        const Xbyak::Reg64& p_weights, const Xbyak::Reg64& p_bias) {
+        const Xbyak::Reg64& p_weights, const Xbyak::Reg64& p_bias, bool is_broadcast) {
     for (size_t idx = start_idx; idx < end_idx; idx++) {
         switch (depthwise_alg) {
             case alg_kind::depthwise_scale_shift:
-                scale_shift_compute_vector(Vmm(idx), p_weights, p_bias); break;
+                scale_shift_compute_vector(Vmm(idx), p_weights, p_bias, is_broadcast); break;
             case alg_kind::depthwise_prelu:
-                prelu_compute_vector(Vmm(idx), p_weights, p_bias); break;
+                prelu_compute_vector(Vmm(idx), p_weights, p_bias, is_broadcast); break;
             default: assert(!"unsupported depthwise algorithm");
         }
     }
@@ -185,11 +209,11 @@ void jit_uni_depthwise_injector_f32<isa>::compute_body(size_t start_idx, size_t
 
 template <cpu_isa_t isa>
 void jit_uni_depthwise_injector_f32<isa>::compute_vector_range(int start_idx, int end_idx,
-        const Xbyak::Reg64& p_weights, const Xbyak::Reg64& p_bias) {
+        const Xbyak::Reg64& p_weights, const Xbyak::Reg64& p_bias, bool is_broadcast) {
     injector_preamble(start_idx, end_idx);
-    compute_body(start_idx_tail, end_idx, p_weights, p_bias);
+    compute_body(start_idx_tail, end_idx, p_weights, p_bias, is_broadcast);
     injector_preamble_tail(start_idx, end_idx);
-    compute_body(start_idx, start_idx_tail, p_weights, p_bias);
+    compute_body(start_idx, start_idx_tail, p_weights, p_bias, is_broadcast);
     injector_postamble();
 }
 
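The new is_broadcast flag swaps the full-width vector read of ptr[p_weights] for uni_vbroadcastss, which replicates a single float across every SIMD lane. In scalar terms, the scale/shift path behaves like this simplified model (a sketch of the emitted semantics, not the JIT code itself):

    // One SIMD vector of width W: per-lane weights vs. a single shared weight.
    void scale_shift_ref(float* src, const float* w, const float* b,
                         int W, bool is_broadcast) {
        for (int i = 0; i < W; ++i) {
            const float wi = is_broadcast ? w[0] : w[i];   // uni_vbroadcastss vs. vector load
            const float bi = is_broadcast ? b[0] : b[i];
            src[i] = src[i] * wi + bi;                     // mulps + addps
        }
    }

The quantization injector changes further down add the same broadcast branch to its crop, input-scale and output-scale loads.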
index 40a5704..1ab4ed2 100644 (file)
@@ -41,7 +41,7 @@ struct jit_uni_depthwise_injector_f32 {
         assert(utils::one_of(depthwise_alg, alg_kind::depthwise_scale_shift, alg_kind::depthwise_prelu));
     }
 
-    void compute_vector_range(int start_idx, int end_idx, const Xbyak::Reg64& p_weights, const Xbyak::Reg64& p_bias);
+    void compute_vector_range(int start_idx, int end_idx, const Xbyak::Reg64& p_weights, const Xbyak::Reg64& p_bias, bool is_broadcast = false);
 
 private:
     jit_generator* h;
@@ -67,14 +67,14 @@ private:
 
     int aux_vecs_count(alg_kind_t elt_alg);
 
-    void compute_body(size_t start_idx, size_t end_idx, const Xbyak::Reg64& p_weights, const Xbyak::Reg64& p_bias);
+    void compute_body(size_t start_idx, size_t end_idx, const Xbyak::Reg64& p_weights, const Xbyak::Reg64& p_bias, bool is_broadcast = false);
     void injector_preamble(size_t start_idx, size_t end_idx);
     void injector_preamble_tail(size_t start_idx, size_t end_idx);
     void injector_postamble();
     void assign_regs();
 
-    void scale_shift_compute_vector(const Vmm &vmm_src, const Xbyak::Reg64& p_weights, const Xbyak::Reg64& p_bias);
-    void prelu_compute_vector(const Vmm &vmm_src, const Xbyak::Reg64& p_weights, const Xbyak::Reg64& p_bias);
+    void scale_shift_compute_vector(const Vmm &vmm_src, const Xbyak::Reg64& p_weights, const Xbyak::Reg64& p_bias, bool is_broadcast = false);
+    void prelu_compute_vector(const Vmm &vmm_src, const Xbyak::Reg64& p_weights, const Xbyak::Reg64& p_bias, bool is_broadcast = false);
 };
 
 struct jit_uni_depthwise_kernel_f32;
index efabd97..27ec971 100644 (file)
@@ -42,7 +42,7 @@ void jit_uni_quantization_injector_f32<isa>::init_crop_ptrs(const Xbyak::Operand
 }
 
 template <cpu_isa_t isa>
-void jit_uni_quantization_injector_f32<isa>::compute_crop(int start_idx, int end_idx, int offset, bool is_scalar) {
+void jit_uni_quantization_injector_f32<isa>::compute_crop(int start_idx, int end_idx, int offset, bool is_scalar, bool is_broadcast) {
     if (is_scalar) {
         if (post_op_.quantization.crop_low_data->count_ == 1)
             h->movss(xmm_d_weights_, h->ptr[reg_d_weights_]);
@@ -55,6 +55,8 @@ void jit_uni_quantization_injector_f32<isa>::compute_crop(int start_idx, int end
             h->uni_vbroadcastss(vmm_d_weights_, h->ptr[reg_d_weights_]);
         else if (post_op_.quantization.crop_low_data->has_default_values())
             h->uni_vpxor(vmm_d_weights_, vmm_d_weights_, vmm_d_weights_);
+        else if (is_broadcast)
+            h->uni_vbroadcastss(vmm_d_weights_, h->ptr[reg_d_weights_ + offset]);
         else
             h->uni_vmovups(vmm_d_weights_, h->ptr[reg_d_weights_ + offset]);
     }
@@ -78,6 +80,8 @@ void jit_uni_quantization_injector_f32<isa>::compute_crop(int start_idx, int end
             h->uni_vbroadcastss(vmm_d_bias_, h->ptr[reg_d_bias_]);
         else if (post_op_.quantization.crop_high_data->has_default_values())
             h->uni_vpxor(vmm_d_bias_, vmm_d_bias_, vmm_d_bias_);
+        else if (is_broadcast)
+            h->uni_vbroadcastss(vmm_d_bias_, h->ptr[reg_d_bias_ + offset]);
         else
             h->uni_vmovups(vmm_d_bias_, h->ptr[reg_d_bias_ + offset]);
     }
@@ -104,7 +108,7 @@ void jit_uni_quantization_injector_f32<isa>::init_input_scale_shift_ptrs(const X
 }
 
 template <cpu_isa_t isa>
-void jit_uni_quantization_injector_f32<isa>::compute_input_scale_shift(int start_idx, int end_idx, int offset, bool do_rounding, bool is_scalar) {
+void jit_uni_quantization_injector_f32<isa>::compute_input_scale_shift(int start_idx, int end_idx, int offset, bool do_rounding, bool is_scalar, bool is_broadcast) {
     if (is_scalar) {
         if (post_op_.quantization.input_scale_data->count_ == 1)
             h->movss(xmm_d_weights_, h->ptr[reg_d_weights_]);
@@ -113,6 +117,8 @@ void jit_uni_quantization_injector_f32<isa>::compute_input_scale_shift(int start
     } else {
         if (post_op_.quantization.input_scale_data->count_ == 1)
             h->uni_vbroadcastss(vmm_d_weights_, h->ptr[reg_d_weights_]);
+        else if (is_broadcast)
+            h->uni_vbroadcastss(vmm_d_weights_, h->ptr[reg_d_weights_ + offset]);
         else
             h->uni_vmovups(vmm_d_weights_, h->ptr[reg_d_weights_ + offset]);
     }
@@ -137,6 +143,8 @@ void jit_uni_quantization_injector_f32<isa>::compute_input_scale_shift(int start
             h->uni_vbroadcastss(vmm_d_bias_, h->ptr[reg_d_bias_]);
         else if (post_op_.quantization.input_shift_data->has_default_values())
             h->uni_vpxor(vmm_d_bias_, vmm_d_bias_, vmm_d_bias_);
+        else if (is_broadcast)
+            h->uni_vbroadcastss(vmm_d_bias_, h->ptr[reg_d_bias_ + offset]);
         else
             h->uni_vmovups(vmm_d_bias_, h->ptr[reg_d_bias_ + offset]);
     }
@@ -169,7 +177,7 @@ void jit_uni_quantization_injector_f32<isa>::init_output_scale_shift_ptrs(const
 }
 
 template <cpu_isa_t isa>
-void jit_uni_quantization_injector_f32<isa>::compute_output_scale_shift(int start_idx, int end_idx, int offset, bool is_scalar) {
+void jit_uni_quantization_injector_f32<isa>::compute_output_scale_shift(int start_idx, int end_idx, int offset, bool is_scalar, bool is_broadcast) {
     if (!do_dequantization)
         return;
 
@@ -181,6 +189,8 @@ void jit_uni_quantization_injector_f32<isa>::compute_output_scale_shift(int star
     } else {
         if (post_op_.quantization.output_scale_data->count_ == 1)
             h->uni_vbroadcastss(vmm_d_weights_, h->ptr[reg_d_weights_]);
+        else if (is_broadcast)
+            h->uni_vbroadcastss(vmm_d_weights_, h->ptr[reg_d_weights_ + offset]);
         else
             h->uni_vmovups(vmm_d_weights_, h->ptr[reg_d_weights_ + offset]);
     }
@@ -205,6 +215,8 @@ void jit_uni_quantization_injector_f32<isa>::compute_output_scale_shift(int star
             h->uni_vbroadcastss(vmm_d_bias_, h->ptr[reg_d_bias_]);
         else if (post_op_.quantization.output_shift_data->has_default_values())
             h->uni_vpxor(vmm_d_bias_, vmm_d_bias_, vmm_d_bias_);
+        else if (is_broadcast)
+            h->uni_vbroadcastss(vmm_d_bias_, h->ptr[reg_d_bias_ + offset]);
         else
             h->uni_vmovups(vmm_d_bias_, h->ptr[reg_d_bias_ + offset]);
     }
index 497af46..58d12bb 100644 (file)
@@ -55,9 +55,9 @@ struct jit_uni_quantization_injector_f32 {
     void init_input_scale_shift_ptrs(const Xbyak::Operand& ch_off);
     void init_output_scale_shift_ptrs(const Xbyak::Operand& ch_off);
 
-    void compute_crop(int start_idx, int end_idx, int offset, bool is_scalar = false);
-    void compute_input_scale_shift(int start_idx, int end_idx, int offset, bool do_rounding, bool is_scalar = false);
-    void compute_output_scale_shift(int start_idx, int end_idx, int offset, bool is_scalar = false);
+    void compute_crop(int start_idx, int end_idx, int offset, bool is_scalar = false, bool is_broadcast = false);
+    void compute_input_scale_shift(int start_idx, int end_idx, int offset, bool do_rounding, bool is_scalar = false, bool is_broadcast = false);
+    void compute_output_scale_shift(int start_idx, int end_idx, int offset, bool is_scalar = false, bool is_broadcast = false);
 
 private:
     jit_generator* h;
index 4af47e6..6f53b54 100644 (file)
@@ -70,129 +70,5 @@ const char *fmt2str(mkldnn_memory_format_t fmt) {
 }
 
 mkldnn_memory_format_t str2fmt(const char *str) {
-#define CASE(_fmt) do { \
-    if (!strcmp(STRINGIFY(_fmt), str) \
-            || !strcmp("mkldnn_" STRINGIFY(_fmt), str)) \
-        return CONCAT2(mkldnn_, _fmt); \
-} while (0)
-    CASE(x);
-    CASE(nc);
-    CASE(ncw);
-    CASE(nwc);
-    CASE(nCw4c);
-    CASE(nCw8c);
-    CASE(nCw16c);
-    CASE(nchw);
-    CASE(nhwc);
-    CASE(chwn);
-    CASE(nChw4c);
-    CASE(nChw8c);
-    CASE(nChw16c);
-    CASE(oi);
-    CASE(io);
-    CASE(oiw);
-    CASE(wio);
-    CASE(OIw16i16o);
-    CASE(OIw16o16i);
-    CASE(Oiw16o);
-    CASE(Owi16o);
-    CASE(OIw8i16o2i);
-    CASE(OIw4i16o4i);
-    CASE(OIw4i16o4i_s8s8);
-    CASE(oihw);
-    CASE(ihwo);
-    CASE(hwio);
-    CASE(iohw);
-    CASE(hwio_s8s8);
-    CASE(dhwio);
-    CASE(OIhw8i8o);
-    CASE(OIhw16i16o);
-    CASE(OIhw8i16o2i);
-    CASE(IOhw8i16o2i);
-    CASE(OIdhw8i16o2i);
-    CASE(OIhw4i16o4i);
-    CASE(OIhw4i16o4i_s8s8);
-    CASE(OIdhw4i16o4i);
-    CASE(OIdhw4i16o4i_s8s8);
-    CASE(OIhw8o16i2o);
-    CASE(IOhw8o16i2o);
-    CASE(OIhw8o8i);
-    CASE(OIhw16o16i);
-    CASE(IOhw16o16i);
-    CASE(Oihw16o);
-    CASE(Ohwi8o);
-    CASE(Ohwi16o);
-    CASE(goiw);
-    CASE(goihw);
-    CASE(hwigo);
-    CASE(giohw);
-    CASE(hwigo_s8s8);
-    CASE(dhwigo_s8s8);
-    CASE(dhwigo);
-    CASE(dhwio_s8s8);
-    CASE(goiw);
-    CASE(gOIw16i16o);
-    CASE(gOIw16o16i);
-    CASE(gOiw16o);
-    CASE(gOwi16o);
-    CASE(gOIw8i16o2i);
-    CASE(gOIw4i16o4i);
-    CASE(gOIw4i16o4i_s8s8);
-    CASE(Goiw16g);
-    CASE(Goiw16g_s8s8);
-    CASE(gOIhw8i8o);
-    CASE(gOIhw16i16o);
-    CASE(gOIhw8i16o2i);
-    CASE(gIOhw8i16o2i);
-    CASE(gOIdhw8i16o2i);
-    CASE(gOIhw2i8o4i);
-    CASE(gOIhw2i8o4i_s8s8);
-    CASE(gOIhw4i16o4i);
-    CASE(gOIhw4i16o4i_s8s8);
-    CASE(gOIdhw4i16o4i);
-    CASE(gOIdhw4i16o4i_s8s8);
-    CASE(gOIhw8o16i2o);
-    CASE(gIOhw8o16i2o);
-    CASE(gOIhw4o4i);
-    CASE(gOIhw4o4i_s8s8);
-    CASE(gOIhw8o8i);
-    CASE(gOIhw16o16i);
-    CASE(gIOhw16o16i);
-    CASE(gOihw16o);
-    CASE(gOhwi8o);
-    CASE(gOhwi16o);
-    CASE(Goihw8g);
-    CASE(Goihw8g_s8s8);
-    CASE(Goihw16g);
-    CASE(Goihw16g_s8s8);
-    CASE(Goidhw8g);
-    CASE(Goidhw8g_s8s8);
-    CASE(Goidhw16g);
-    CASE(Goidhw16g_s8s8);
-    CASE(oIhw8i);
-    CASE(oIhw16i);
-    CASE(ncdhw);
-    CASE(ndhwc);
-    CASE(oidhw);
-    CASE(goidhw);
-    CASE(nCdhw4c);
-    CASE(nCdhw8c);
-    CASE(nCdhw16c);
-    CASE(OIdhw16i16o);
-    CASE(gOIdhw16i16o);
-    CASE(OIdhw16o16i);
-    CASE(gOIdhw16o16i);
-    CASE(Oidhw16o);
-    CASE(Odhwi16o);
-    CASE(gOidhw16o);
-    CASE(gOdhwi16o);
-    CASE(ntc);
-    CASE(tnc);
-    CASE(ldsnc);
-    CASE(ldigo);
-    CASE(ldgoi);
-    CASE(ldgo);
-#undef CASE
-    assert(!"unknown memory format");
-    return mkldnn_format_undef;
+    return mkldnn_str2fmt(str);
 }
index 6e022c6..778bac7 100644 (file)
@@ -13,6 +13,7 @@
 #include <algorithm>
 #include <memory>
 #include <string>
+#include <cstring>
 #include <ncCommPrivate.h>
 #include <mvnc.h>
 #include <ncPrivateTypes.h>
@@ -157,6 +158,36 @@ class NoDueOnFirstCall : public IDevice {
     }
 };
 
+class CustomUniqueLock {
+public:
+    explicit CustomUniqueLock(pthread_mutex_t* mutex)
+        :m_mutex(mutex) {
+        if(m_mutex == nullptr) {
+            throw std::runtime_error("mutex should not be null");
+        }
+
+        int rc = pthread_mutex_lock(m_mutex);
+        if (rc != 0) {
+            throw std::runtime_error(std::string("failed to lock mutex. rc: ") + strerror(rc));
+        }
+    }
+
+    ~CustomUniqueLock() {
+        int rc = pthread_mutex_unlock(m_mutex);
+        if (rc != 0) {
+            mvLog(MVLOG_ERROR, "failed to unlock mutex. rc: %s", strerror(rc));
+        }
+    }
+
+    CustomUniqueLock(const CustomUniqueLock&) = delete;
+    CustomUniqueLock(CustomUniqueLock&&) = delete;
+    CustomUniqueLock& operator=(const CustomUniqueLock&) = delete;
+    CustomUniqueLock& operator=(CustomUniqueLock&&) = delete;
+
+private:
+    pthread_mutex_t* m_mutex = nullptr;
+};
+
 static void * WD_OPAQUE_MAGIC = reinterpret_cast<void*>(0xdeadbeaf);
 
 struct wd_context_opaque {
@@ -223,12 +254,11 @@ public:
         mvLog(MVLOG_INFO, "watchdog terminated\n");
         try
         {
-            lockRoutineMutex();
+            CustomUniqueLock lock {&routineLock};
             for (auto &item : watchedDevices) {
                 *std::get<1>(item) = true;
                 mvLog(MVLOG_WARN, "[%p] device, stop watching due to watchdog termination\n", std::get<2>(item));
             }
-            unlockRoutineMutex();
         } catch (const std::exception & ex) {
             mvLog(MVLOG_ERROR, "error %s", ex.what());
         } catch (...) {
@@ -258,7 +288,7 @@ public:
 
 public:
     void *register_device(std::shared_ptr<IDevice> device) {
-        lockRoutineMutex();
+        CustomUniqueLock lock {&routineLock};
         std::unique_ptr<wd_context_opaque> ctx (new wd_context_opaque);
 
         // rare case of exact pointer address collision
@@ -296,7 +326,6 @@ public:
 
         ctx->actual = std::get<0>(watchedDevices.back()).get();
 
-        unlockRoutineMutex();
         return ctx.release();
     }
 
@@ -310,28 +339,30 @@ public:
         if (ptr == nullptr) {
             return false;
         }
-        lockRoutineMutex();
 
-        // thread already removed
-        if (ptr->destroyed) {
-            delete ptr;
-            unlockRoutineMutex();
-            return true;
-        }
+        bool bFound = false;
+        {
+            CustomUniqueLock lock {&routineLock};
 
-        auto idx = std::find_if(std::begin(watchedDevices),
-                                std::end(watchedDevices),
-                                [ptr](const wd_context_as_tuple &item) {
-                                    return std::get<0>(item)->getHandle() == ptr->actual->getHandle();
-                                });
-        bool bFound = idx != std::end(watchedDevices);
-        if(bFound) {
-            watchedDevices.erase(idx);
-            delete ptr;
+            // thread already removed
+            if (ptr->destroyed) {
+                delete ptr;
+                return true;
+            }
+
+            auto idx = std::find_if(std::begin(watchedDevices),
+                                    std::end(watchedDevices),
+                                    [ptr](const wd_context_as_tuple &item) {
+                                        return std::get<0>(item)->getHandle() == ptr->actual->getHandle();
+                                    });
+            bFound = idx != std::end(watchedDevices);
+            if(bFound) {
+                watchedDevices.erase(idx);
+                delete ptr;
+            }
         }
 
        // wake up the thread since we might have selected the removed device as the next one to be pinged, and there are no more devices available
-        unlockRoutineMutex();
         int rc = pthread_cond_broadcast(&wakeUpPingThread);
         if (rc != 0) {
             mvLog(MVLOG_WARN, "failed to unblock threads blocked on the \"wakeUpPingThread\". rc=%d", rc);
@@ -341,19 +372,7 @@ public:
     }
 
  private:
-    void lockRoutineMutex() {
-        int rc = pthread_mutex_lock(&routineLock);
-        if (rc != 0) {
-            throw std::runtime_error("failed to lock \"routineLock\" mutex. rc: " + std::to_string(rc));
-        }
-    }
 
-    void unlockRoutineMutex() {
-        int rc = pthread_mutex_unlock(&routineLock);
-        if (rc != 0) {
-            throw std::runtime_error("failed to unlock \"routineLock\" mutex. rc: " + std::to_string(rc));
-        }
-    }
 
     void watchdog_routine() noexcept {
         try {
@@ -361,7 +380,7 @@ public:
 
             milliseconds sleepInterval;
             struct timespec timeToWait = {0, 0};
-            lockRoutineMutex();
+            CustomUniqueLock lock {&routineLock};
 
             do {
                 for (auto deviceIt = watchedDevices.begin(); deviceIt != watchedDevices.end(); ) {
@@ -430,7 +449,6 @@ public:
             mvLog(MVLOG_ERROR, "unknown error");
         }
 
-        unlockRoutineMutex();
         mvLog(MVLOG_INFO, "thread ended\n");
     }
 };
@@ -539,9 +557,9 @@ WD_API wd_error_t watchdog_unregister_device(wd_context *ctx) {
 
         return WD_ERRNO;
     } catch (const std::exception & ex) {
-        mvLog(MVLOG_ERROR, "error %s", ex.what());
+        mvLog(MVLOG_WARN, "error %s", ex.what());
     } catch (...) {
-        mvLog(MVLOG_ERROR, "unknown error");
+        mvLog(MVLOG_WARN, "unknown error");
     }
 
     return WD_FAIL;
diff --git a/inference-engine/thirdparty/movidius/vpualHost.patch b/inference-engine/thirdparty/movidius/vpualHost.patch
deleted file mode 100644 (file)
index 48946b3..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-diff --git a/host/source/FLIC/NNFlicPlg/NNFlicPlg.cpp b/host/source/FLIC/NNFlicPlg/NNFlicPlg.cpp
-index 8c61ec1..9808319 100644
---- a/host/source/FLIC/NNFlicPlg/NNFlicPlg.cpp
-+++ b/host/source/FLIC/NNFlicPlg/NNFlicPlg.cpp
-@@ -187,7 +187,7 @@ uint32_t* NNFlicPlg::GetBlobVersion()
-     //Dispatch Command
-     VpualDispatch (&cmd, &rep);
--    uint32_t* tempBlob;
-+    uint32_t* tempBlob = nullptr;
-     rep.deserialize(tempBlob, sizeof(* tempBlob));
-diff --git a/host/source/FLIC/top/include/MemAllocator.h b/host/source/FLIC/top/include/MemAllocator.h
-index 8402e18..be81be9 100644
---- a/host/source/FLIC/top/include/MemAllocator.h
-+++ b/host/source/FLIC/top/include/MemAllocator.h
-@@ -52,9 +52,9 @@ class HeapAllocator : public IAllocator {
- //Stock objects
- // extern uint8_t       RgnBuff[];
- // extern uint8_t       RgnBuffCMX[];
--extern RgnAllocator  RgnAlloc;
--extern RgnAllocator  RgnAllocCMX;
--extern HeapAllocator HeapAlloc;
-+//extern RgnAllocator  RgnAlloc;
-+//extern RgnAllocator  RgnAllocCMX;
-+//extern HeapAllocator HeapAlloc;
- // TODO Default pool size:
- #define DEF_POOL_SZ (8*800*480*3/2)
-diff --git a/host/source/FLIC/top/source/Allocator.cpp b/host/source/FLIC/top/source/Allocator.cpp
-index 3672b0b..ecd4b2d 100644
---- a/host/source/FLIC/top/source/Allocator.cpp
-+++ b/host/source/FLIC/top/source/Allocator.cpp
-@@ -66,6 +66,6 @@ HeapAllocator::HeapAllocator(uint32_t alignment) : IAllocator("HeapAllocator") {
- // uint8_t       RgnBuff[DEF_POOL_SZ] ALIGNED(32) SECTION(DEF_POOL_SECT);
- // uint8_t       RgnBuffCMX[CMX_POOL_SZ] ALIGNED(32) SECTION(CMX_POOL_SECT);
--RgnAllocator  RgnAlloc;
--RgnAllocator  RgnAllocCMX;
--HeapAllocator HeapAlloc;
-+//RgnAllocator  RgnAlloc;
-+//RgnAllocator  RgnAllocCMX;
-+//HeapAllocator HeapAlloc;
-diff --git a/host/source/SIPP/include/sipp_messages.h b/host/source/SIPP/include/sipp_messages.h
-index 1581564..bd847c8 100644
---- a/host/source/SIPP/include/sipp_messages.h
-+++ b/host/source/SIPP/include/sipp_messages.h
-@@ -46,7 +46,7 @@ public:
-     // Move constructors.
-     SippPipeline(SippPipeline&&) noexcept {}
--    SippPipeline& operator=(SippPipeline&&) noexcept {}
-+    //SippPipeline& operator=(SippPipeline&&) noexcept {}
-     uint32_t getPipelineId() const {return pipelineID;}
- };
-@@ -94,7 +94,7 @@ public:
-     // Move Constructors.
-     SippFilter(SippFilter&&) noexcept {}
--    SippFilter& operator=(SippFilter&&) noexcept {}
-+    //SippFilter& operator=(SippFilter&&) noexcept {}
-     uint32_t getFilterId() const {return filterID;}
-     int getFilter() const {return filter;}
-diff --git a/host/source/XLink/pc/XLinkPlatform.c b/host/source/XLink/pc/XLinkPlatform.c
-index ea54b1f..a6bae87 100644
---- a/host/source/XLink/pc/XLinkPlatform.c
-+++ b/host/source/XLink/pc/XLinkPlatform.c
-@@ -313,6 +313,9 @@ static int vsc_usb_open(const char* devPathRead __attribute__((unused)),
-         return -1;
-     }
-     libusb_unref_device(dev);
-+
-+    libusb_detach_kernel_driver(h, 0);
-+
-     libusb_rc = libusb_claim_interface(h, 0);
-     if(libusb_rc < 0)
-     {
diff --git a/inference-engine/thirdparty/movidius/vpualHost_clone_repo.sh b/inference-engine/thirdparty/movidius/vpualHost_clone_repo.sh
deleted file mode 100644 (file)
index 060f93d..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-git clone git@github.com:movidius/vpualHost.git
-cd vpualHost
-git checkout $1
-git apply ../vpualHost.patch
index 55247b6..e5fa636 100644 (file)
@@ -39,8 +39,8 @@ target_link_libraries(${TARGET_NAME} PRIVATE
 )
 
 set_target_properties(${TARGET_NAME} PROPERTIES
-    COMPILE_PDB_NAME
-    ${TARGET_NAME}
+    COMPILE_PDB_NAME ${TARGET_NAME}
+    FOLDER tools
 )
 
 add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
index 40aa69e..0fda2b3 100644 (file)
 #include "precision_utils.h"
 
 InferenceEngine::CNNNetwork readNetwork(const std::string &xmlFileName) {
-    std::string binFileName = fileNameNoExt(xmlFileName) + ".bin";
-
-    IE_SUPPRESS_DEPRECATED_START
-    InferenceEngine::CNNNetReader reader;
-    reader.ReadNetwork(xmlFileName);
-    reader.ReadWeights(binFileName);
-
-    return reader.getNetwork();
-    IE_SUPPRESS_DEPRECATED_END
+    return InferenceEngine::Core().ReadNetwork(xmlFileName);
 }
 
 bool isFP16(InferenceEngine::Precision precision) {
index 072db1b..b8861da 100644 (file)
@@ -40,8 +40,8 @@ target_link_libraries(${TARGET_NAME} PRIVATE
 add_dependencies(${TARGET_NAME} myriadPlugin)
 
 set_target_properties(${TARGET_NAME} PROPERTIES
-    COMPILE_PDB_NAME
-    ${TARGET_NAME}
+    COMPILE_PDB_NAME ${TARGET_NAME}
+    FOLDER tools
 )
 
 add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
index bb8ba29..c098e04 100644 (file)
@@ -45,7 +45,8 @@ function(add_perfcheck_target TARGET_NAME PLUGIN_NAME)
         ${PLUGIN_NAME} ${ARGN})
 
     set_target_properties(${TARGET_NAME} PROPERTIES
-        COMPILE_PDB_NAME ${TARGET_NAME})
+        COMPILE_PDB_NAME ${TARGET_NAME}
+        FOLDER tools)
 
     add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
 endfunction()
index 4da0589..e3cf4e5 100644 (file)
@@ -309,7 +309,7 @@ std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perfMap;
 int process(const std::string& modelFileName, const std::string& inputsDir,
             std::string& file_config_cl, int nBatch, int num_networks) {
     InferenceEngine::ResponseDesc resp;
-
+    InferenceEngine::Core ie;
     niter /= nBatch;
     num_requests = num_requests * num_networks;
 
@@ -326,22 +326,15 @@ int process(const std::string& modelFileName, const std::string& inputsDir,
     }
 #endif
 
-    InferenceEngine::PluginDispatcher disp;
-    InferenceEngine::InferenceEnginePluginPtr plugin(
-        disp.getPluginByName(std::string("myriadPlugin") + IE_BUILD_POSTFIX));
-
+#ifdef USE_KMB_PLUGIN
+    std::string deviceName = "KMB";
+#else
+    std::string deviceName = "MYRIAD";
+#endif
+    const auto pluginVersion = ie.GetVersions(deviceName);
     std::cout << "InferenceEngine: " << std::endl;
-
-    const InferenceEngine::Version *pluginVersion = nullptr;
-    plugin->GetVersion(pluginVersion);
     std::cout << pluginVersion << std::endl << std::endl;
 
-    InferenceEngine::CNNNetReader netReader;
-    netReader.ReadNetwork(modelFileName);
-
-    std::string binFileName = fileNameNoExt(modelFileName) + ".bin";
-    netReader.ReadWeights(binFileName);
-
     std::ifstream file(file_config_cl);
     if (!file.is_open()) {
         file_config_cl.clear();
@@ -360,7 +353,7 @@ int process(const std::string& modelFileName, const std::string& inputsDir,
         return 1;
     }
 
-    InferenceEngine::CNNNetwork cnnNetwork = netReader.getNetwork();
+    InferenceEngine::CNNNetwork cnnNetwork = ie.ReadNetwork(modelFileName);
 
     if (nBatch != 1) {
         std::cout << "Setting batch to : "<< nBatch << "\n";
@@ -397,7 +390,7 @@ int process(const std::string& modelFileName, const std::string& inputsDir,
         else
             printf("Load network... \n");
         fflush(stdout);
-        IECALL(plugin->LoadNetwork(exeNetwork[n], cnnNetwork, networkConfig, &resp));
+        exeNetwork[n] = ie.LoadNetwork(cnnNetwork, deviceName, networkConfig);
     }
 
     std::vector<InferenceEngine::IInferRequest::Ptr> request(num_requests);
index 970ec59..5b5d46b 100644 (file)
@@ -40,7 +40,7 @@ target_link_libraries(${TARGET_NAME} PRIVATE
     gflags
 )
 
-set_target_properties(${TARGET_NAME} PROPERTIES COMPILE_PDB_NAME ${TARGET_NAME})
+set_target_properties(${TARGET_NAME} PROPERTIES COMPILE_PDB_NAME ${TARGET_NAME}
+                                                FOLDER tools)
 
 add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
-
index 9700c51..bbf677e 100644 (file)
@@ -202,7 +202,7 @@ ignore-mixin-members=yes
 # (useful for modules/projects where namespaces are manipulated during runtime
 # and thus existing member attributes cannot be deduced by static analysis. It
 # supports qualified module names, as well as Unix pattern matching.
-ignored-modules=flask_sqlalchemy,app.extensions.flask_sqlalchemy
+ignored-modules=flask_sqlalchemy,app.extensions.flask_sqlalchemy,distutils
 
 # List of class names for which member attributes should not be checked (useful
 # for classes with dynamically set attributes). This supports the use of
index 9a111d9..f48d7f8 100644 (file)
@@ -61,6 +61,22 @@ Model Optimizer requires:
     pip3 install -r requirements.txt
     </pre>
 
+4. [OPTIONAL] If you use Windows OS, you most likely have the pure Python version of the `protobuf` library. It is known to be rather slow,
+   and you can use a boosted version of the library by building the .egg file (Python package format) yourself,
+   using the instructions below (section 'How to boost Caffe model loading') for the target OS and Python, or install it
+   with the pre-built .egg (it is built for Python 3.4, 3.5, 3.6, 3.7):
+    <pre>
+        python3 -m easy_install protobuf-3.6.1-py3.6-win-amd64.egg
+    </pre>
+
+   It overrides the protobuf Python package installed by the previous command.
+
+   Set the environment variable to enable the boosted protobuf implementation:
+    <pre>
+        set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp
+    </pre>
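+
+   To confirm which implementation is picked up, you can query the `api_implementation` module of the installed
+   `protobuf` package (an internal but commonly used check); it should print `cpp`:
+    <pre>
+        python3 -c "from google.protobuf.internal import api_implementation; print(api_implementation.Type())"
+    </pre>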
+
+
 ## Command-Line Interface (CLI)
 
 The following short examples are framework-dependent. Please read the complete help
@@ -143,6 +159,9 @@ The whole workflow and more documentation on the structure of IR are documented
 of Inference Engine. Note that sections about running Model Optimizer refer to the old version
 of the tool and can not be applied to the current version of Model Optimizer.
 
+
+## Setting up the development environment
+
 ### How to run unit-tests
 
 1. Run tests with:
@@ -150,5 +169,30 @@ of the tool and can not be applied to the current version of Model Optimizer.
     python -m unittest discover -p "*_test.py" [-s PATH_TO_DIR]
 </pre>
 
----
-\* Other names and brands may be claimed as the property of others.
+### How to capture unit-tests coverage
+
+1. Run tests with:
+<pre>
+    coverage run -m unittest discover -p "*_test.py" [-s PATH_TO_DIR]
+</pre>
+
+2. Build html report:
+<pre>
+    coverage html
+</pre>
+
+### How to run code linting
+
+1. Run the following command:
+<pre>
+    pylint mo/ mo.py
+</pre>
+
+### How to check dependencies in requirements files
+
+1. Run the following command:
+<pre>
+    safety check -r requirements_file
+</pre>
+
+> **NOTE**: Here, <code>requirements_file</code> is one of the following: <code>requirements.txt</code>, <code>requirements_caffe.txt</code>, <code>requirements_tf.txt</code>, <code>requirements_mxnet.txt</code>, <code>requirements_dev.txt</code>.
index 984e158..d0797b3 100644 (file)
@@ -258,6 +258,7 @@ extensions/front/onnx/gru_ext.py
 extensions/front/onnx/hard_sigmoid_ext.py
 extensions/front/onnx/image_scaler_ext.py
 extensions/front/onnx/instance_normalization_ext.py
+extensions/front/onnx/logsoftmaxONNX_to_logsoftmax.py
 extensions/front/onnx/lp_normalization_ext.py
 extensions/front/onnx/lrn_ext.py
 extensions/front/onnx/lstm_ext.py
@@ -305,6 +306,7 @@ extensions/front/onnx/topkrois_ext.py
 extensions/front/onnx/transpose_ext.py
 extensions/front/onnx/unsqueeze_ext.py
 extensions/front/onnx/upsample_ext.py
+extensions/front/onnx/where_ext.py
 extensions/front/output_cut.py
 extensions/front/override_batch.py
 extensions/front/Pack.py
@@ -515,6 +517,7 @@ extensions/middle/L2NormToNorm.py
 extensions/middle/LayoutChangeForConstantShapePaths.py
 extensions/middle/LeakyReluPattern.py
 extensions/middle/LSTMRNNSequenceToTensorIterator.py
+extensions/middle/MarkSubgraphsWithCorrectLayout.py
 extensions/middle/MinimumMiddleReplacer.py
 extensions/middle/MulAddToSS.py
 extensions/middle/MulFakeQuantizeFuse.py
index 6377ca1..5147742 100644 (file)
@@ -148,7 +148,7 @@ class OpVersioning(BackReplacementPattern):
         "ExperimentalDetectronPriorGridGenerator",
     ]))
 
-    # Several ops were added to opset1 by mistake, now they are marked as blonging to opset2
+    # Several ops were added to opset1 by mistake, now they are marked as belonging to opset2
     opset_2_legacy_ops = set(map(lambda s: s.lower(), [
         "MVN",
         "ReorgYolo",
@@ -160,9 +160,10 @@ class OpVersioning(BackReplacementPattern):
             node_type = node.soft_get('type').lower()
             name = node.soft_get('name', node.id)
 
-            if node.soft_get('version', None) == 'opset1' and node_type not in self.opset_1_types:
-                raise Error('Node {} has `version` attribute set to `opset1`, but it is a reserved word, '
-                            'please use another'.format(name))
+            if node.soft_get('version', None) == 'opset1' and node_type not in self.opset_1_types \
+                                and node_type not in self.opset_2_legacy_ops:
+                raise Error('Node {} has `version` attribute set to `{}`, but it is a reserved word, '
+                            'please use another'.format(name, node.version))
 
             if not node.has_valid('version'):
                 if node_type in self.opset_1_types:
index 82c9ed0..d673b83 100644 (file)
@@ -16,6 +16,7 @@
 import logging as log
 
 from mo.back.replacement import BackReplacementPattern
+from mo.front.common.partial_infer.utils import int64_array
 from mo.graph.graph import Graph
 from mo.middle.passes.eliminate import remove_op_node_with_data_node
 
@@ -54,25 +55,54 @@ class RemoveLastSoftMaxPattern(BackReplacementPattern):
 class RemoveLastLogSoftMaxPattern(BackReplacementPattern):
     enabled = True
     graph_condition = [lambda graph: graph.graph['fw'] == 'kaldi' and graph.graph['cmd_params'].remove_output_softmax]
+    force_clean_up = True
 
     @staticmethod
     def pattern():
         return dict(
             nodes=[
-                ('softmax_node', dict(op='SoftMax')),
-                ('softmax_data', dict(kind='data')),
-                ('log_node', dict(op='Log')),
-                ('log_data', dict(kind='data')),
-                ('op_output', dict(op='Result'))
+                ('input_data', {'kind': 'data'}),
+                ('sub_node', {'kind': 'op', 'op': 'Sub'}),
+                ('reduce_max_node', {'kind': 'op', 'op': 'ReduceMax'}),
+                ('reduce_max_node_data', {'kind': 'data'}),
+                ('sub_node_data', {'kind': 'data'}),
+                ('exp', {'kind': 'op', 'op': 'Exp'}),
+                ('exp_data', {'kind': 'data'}),
+                ('reduce_sum_node', {'kind': 'op', 'op': 'ReduceSum'}),
+                ('reduce_sum_node_data', {'kind': 'data'}),
+                ('reduce_sum_axis', {'kind': 'op', 'op': 'Const'}),
+                ('reduce_sum_axis_data', {'kind': 'data'}),
+                ('log', {'kind': 'op', 'op': 'Log'}),
+                ('log_data', {'kind': 'data'}),
+                ('last_sub', {'kind': 'op', 'op': 'Sub'}),
+                ('last_sub_data', {'kind': 'data'}),
+                ('op_output', {'kind': 'op', 'op': 'Result'}),
             ],
             edges=[
-                ('softmax_node', 'softmax_data'),
-                ('softmax_data', 'log_node'),
-                ('log_node', 'log_data'),
-                ('log_data', 'op_output')
+                ('input_data', 'sub_node', {'in': 0}),
+                ('input_data', 'reduce_max_node', {'in': 0}),
+                ('reduce_max_node', 'reduce_max_node_data'),
+                ('reduce_max_node_data', 'sub_node', {'in': 1}),
+                ('sub_node', 'sub_node_data'),
+                ('sub_node_data', 'exp', {'out': 0, 'in': 0}),
+                ('exp', 'exp_data'),
+                ('exp_data', 'reduce_sum_node', {'in': 0}),
+                ('reduce_sum_node', 'reduce_sum_node_data'),
+                ('reduce_sum_axis', 'reduce_sum_axis_data'),
+                ('reduce_sum_axis_data', 'reduce_sum_node', {'in': 1}),
+                ('reduce_sum_node_data', 'log'),
+                ('log', 'log_data'),
+                ('log_data', 'last_sub', {'in': 1}),
+                ('last_sub', 'last_sub_data'),
+                ('sub_node_data', 'last_sub', {'out': 0, 'in': 0}),
+                ('last_sub_data', 'op_output'),
             ]
         )
 
+    expected_number_of_outputs = {
+        'reduce_max_node': 1, 'reduce_sum_node': 1, 'exp': 1, 'log': 1, 'sub_node': 2, 'last_sub': 1
+    }
+
     @staticmethod
     def replace_pattern(graph: Graph, match: dict):
         """
@@ -80,6 +110,20 @@ class RemoveLastLogSoftMaxPattern(BackReplacementPattern):
         :param graph: graph to operate on
         :param match: dictionary with matched nodes
         """
-        if len(match['softmax_data'].out_nodes()) == 1 and len(match['log_data'].out_nodes()) == 1:
-            remove_op_node_with_data_node(graph, match['log_node'])
-            remove_op_node_with_data_node(graph, match['softmax_node'])
+        reduce_max_node = match['reduce_max_node']
+        second_input_of_reduce_max = reduce_max_node.in_port(1).get_connection().get_source().node
+        if not second_input_of_reduce_max.has_valid('value') or len(second_input_of_reduce_max.value) != 1:
+            return
+
+        reduce_sum_node = match['reduce_sum_node']
+        second_input_of_reduce_sum = reduce_sum_node.in_port(1).get_connection().get_source().node
+        if not second_input_of_reduce_sum.has_valid('value') or len(second_input_of_reduce_sum.value) != 1:
+            return
+        if second_input_of_reduce_max.value[0] != second_input_of_reduce_sum.value[0]:
+            return
+
+        for name, number in RemoveLastLogSoftMaxPattern.expected_number_of_outputs.items():
+            if len(match[name].out_port(0).get_destinations()) != number:
+                return
+
+        match['op_output'].in_port(0).get_connection().set_source(match['sub_node'].in_port(0).get_source())
index 6d6a83a..3ff31c2 100644 (file)
@@ -17,6 +17,8 @@
 import unittest
 
 from extensions.back.remove_last_softmax_pattern import RemoveLastSoftMaxPattern, RemoveLastLogSoftMaxPattern
+from mo.front.common.partial_infer.utils import int64_array
+from mo.utils.ir_engine.compare_graphs import compare_graphs
 from mo.utils.unittest.graph import build_graph
 
 
@@ -45,6 +47,71 @@ class KaldiRemoveLastSoftMaxTest(unittest.TestCase):
         },
     }
 
+    nodes_for_logsoftmax = {
+        'input': {'kind': 'op', 'op': 'Parameter'},
+        'input_data': {'kind': 'data'},
+        'sub': {'kind': 'op', 'op': 'Sub'},
+        'reduce_max_node': {'kind': 'op', 'op': 'ReduceMax'},
+        'reduce_max_node_data': {'kind': 'data'},
+        'reduce_max_axis': {
+            'kind': 'op',
+            'op': 'Const',
+            'type': 'Const',
+            'value': int64_array([1]),
+            'shape': int64_array([1]),
+        },
+        'reduce_max_axis_data': {
+            'kind': 'data',
+            'value': int64_array([1]),
+            'shape': int64_array([1]),
+        },
+        'sub_data': {'kind': 'data'},
+        'exp': {'kind': 'op', 'op': 'Exp'},
+        'exp_data': {'kind': 'data'},
+        'reduce_sum_node': {'kind': 'op', 'op': 'ReduceSum'},
+        'reduce_sum_node_data': {'kind': 'data'},
+        'reduce_sum_axis': {
+            'kind': 'op',
+            'op': 'Const',
+            'type': 'Const',
+            'value': int64_array([1]),
+            'shape': int64_array([1]),
+        },
+        'reduce_sum_axis_data': {
+            'kind': 'data',
+            'value': int64_array([1]),
+            'shape': int64_array([1]),
+        },
+        'log': {'kind': 'op', 'op': 'Log'},
+        'log_data': {'kind': 'data'},
+        'last_sub': {'kind': 'op', 'op': 'Sub'},
+        'last_sub_data': {'kind': 'data'},
+        'op_output': {'kind': 'op', 'op': 'Result'},
+    }
+
+    edges_for_logsoftmax = [
+        ('input', 'input_data'),
+        ('input_data', 'sub', {'in': 0}),
+        ('input_data', 'reduce_max_node', {'in': 0}),
+        ('reduce_max_node', 'reduce_max_node_data'),
+        ('reduce_max_node_data', 'sub', {'in': 1}),
+        ('reduce_max_axis', 'reduce_max_axis_data'),
+        ('reduce_max_axis_data', 'reduce_max_node', {'in': 1}),
+        ('sub', 'sub_data'),
+        ('sub_data', 'exp', {'out': 0, 'in': 0}),
+        ('exp', 'exp_data'),
+        ('exp_data', 'reduce_sum_node', {'in': 0}),
+        ('reduce_sum_node', 'reduce_sum_node_data'),
+        ('reduce_sum_axis', 'reduce_sum_axis_data'),
+        ('reduce_sum_axis_data', 'reduce_sum_node', {'in': 1}),
+        ('reduce_sum_node_data', 'log'),
+        ('log', 'log_data'),
+        ('log_data', 'last_sub', {'in': 1}),
+        ('last_sub', 'last_sub_data'),
+        ('sub_data', 'last_sub', {'out': 0, 'in': 0}),
+        ('last_sub_data', 'op_output'),
+    ]
+
     def test_remove_last_SoftMax(self):
         graph = build_graph(self.nodes, [
             ('input_node', 'softmax_node'),
@@ -55,16 +122,20 @@ class KaldiRemoveLastSoftMaxTest(unittest.TestCase):
         self.assertNotIn('softmax_node', graph.node)
 
     def test_remove_last_LogSoftMax(self):
-        graph = build_graph(self.nodes, [
-            ('input_node', 'softmax_node'),
-            ('softmax_node', 'output_node'),
-            ('output_node', 'log_node'),
-            ('log_node', 'log_data'),
-            ('log_data', 'op_output')
-        ], nodes_with_edges_only=True)
+        graph = build_graph(nodes_attrs=self.nodes_for_logsoftmax, edges=self.edges_for_logsoftmax)
         RemoveLastLogSoftMaxPattern().find_and_replace_pattern(graph)
-        self.assertNotIn('softmax_node', graph.node)
-        self.assertNotIn('log_node', graph.node)
+        graph.clean_up()
+
+        ref_graph_nodes_attributes = {
+            'input': {'kind': 'op', 'op': 'Parameter'},
+            'input_data': {'kind': 'data'},
+            'op_output': {'kind': 'op', 'op': 'Result'},
+        }
+
+        ref_graph_edges = [('input', 'input_data'), ('input_data', 'op_output')]
+        ref_graph = build_graph(ref_graph_nodes_attributes, ref_graph_edges)
+        (flag, resp) = compare_graphs(graph, ref_graph, 'op_output')
+        self.assertTrue(flag, resp)
 
     def test_do_not_remove_not_last_SoftMax(self):
         graph = build_graph(self.nodes, [
index c5dcd99..e199ef4 100644 (file)
  limitations under the License.
 """
 from extensions.ops.Log import LogOp
+from extensions.ops.ReduceOps import ReduceMax, ReduceSum
+from extensions.ops.elementwise import Sub
+from extensions.ops.exp import ExpOp
+from mo.front.common.partial_infer.utils import int64_array
 from mo.front.common.replacement import FrontReplacementOp
+from mo.front.tf.graph_utils import create_op_with_const_inputs
 from mo.graph.graph import Graph, Node, rename_nodes
-from mo.ops.softmax import Softmax
+from mo.ops.const import Const
 
 
 class LogSoftmaxFrontReplacer(FrontReplacementOp):
     """
-    Replace LogSoftmax operation with Softmax -> Log.
+    Replace LogSoftmax operation with ReduceMax + Sub + Exp + ReduceSum + Log + Sub.
+
+    More precisely, this transformation implements the following formulas for calculating LogSoftmax:
+
+        shifted_data = input_data - ReduceMax(input_data, axis),              (1)
+        output = shifted_data - Log(ReduceSum(Exp(shifted_data), axis)).
+
+    These formulas are used to calculate LogSoftmax in the implementations of TensorFlow (see
+    https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/softmax_op_functor.h),
+    Kaldi (see https://github.com/kaldi-asr/kaldi/blob/master/src/cudamatrix/cu-kernels.cu),
+    and MxNet (see https://github.com/apache/incubator-mxnet/blob/master/src/operator/nn/softmax-inl.h).
+
+    ONNX implements LogSoftmax according to the formulas
+
+        flatten_data = Flatten(input_data, axis),                              (1')
+        shifted_data = flatten_data - ReduceMax(flatten_data, 1),
+        z = shifted_data - Log(ReduceSum(Exp(shifted_data), 1)),
+        output = Reshape(z, input_data.shape)
+
+    (see https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/core/codegen/mti/math/logsoftmax.cc,
+     https://github.com/microsoft/onnxruntime-tvm/blob/master/topi/include/topi/nn/softmax.h)
+
+    Formally speaking, formula (1) is equivalent to the formula
+        output = Log(SoftMax(input_data, axis))                               (2)
+
+    But LogSoftmax is calculated according to formula (1) for better numerical stability.
     """
     op = "LogSoftmax"
     enabled = True
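The numerical-stability point in the docstring above can be sanity-checked with a small NumPy sketch (an illustration only, not part of the patch): formula (1) matches the naive Log(SoftMax(...)) of formula (2) where the latter is finite, but stays finite for large inputs where the naive form overflows.

    import numpy as np

    x = np.array([[1000.0, 1001.0, 1002.0]])  # large logits overflow a naive softmax
    axis = -1

    # formula (2), computed naively: exp(1000) overflows to inf, so the result is nan
    naive = np.log(np.exp(x) / np.sum(np.exp(x), axis=axis, keepdims=True))

    # formula (1): shift by ReduceMax before exponentiation, which keeps exp() in range
    shifted = x - np.max(x, axis=axis, keepdims=True)
    stable = shifted - np.log(np.sum(np.exp(shifted), axis=axis, keepdims=True))

    print(naive)   # [[nan nan nan]]
    print(stable)  # [[-2.40760596 -1.40760596 -0.40760596]]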
@@ -30,11 +60,35 @@ class LogSoftmaxFrontReplacer(FrontReplacementOp):
         node_name = node.soft_get('name', node.id)
         assert node.has_valid('axis'), 'The node "{}" does not have mandatory attribute "axis"'.format(node_name)
 
-        log = LogOp(graph, {}).create_node()
-        softmax = Softmax(graph, {'axis': node.axis, 'name': node_name + '/Softmax'}).create_node()
-        rename_nodes([(node, node_name + '/delete'), (log, node_name)])
+        # Create the ReduceMax -> Sub -> Exp block
+        first_sub_node = Sub(graph, {'name': node_name + '/Sub_/first_'}).create_node()
+        reduce_max_node = create_op_with_const_inputs(graph,
+                                                      ReduceMax,
+                                                      {1: int64_array([node.axis])},
+                                                      op_attrs={'name': node_name + '/ReduceMax_', 'keep_dims': True})
+        reduce_max_node.out_port(0).connect(first_sub_node.in_port(1))
+
+        # Create the Exp -> ReduceSum -> Log block
+        exp_node = ExpOp(graph, {'name': node_name + '/Exp_'}).create_node()
+        reduce_sum_node = create_op_with_const_inputs(graph,
+                                                      ReduceSum,
+                                                      {1: int64_array([node.axis])},
+                                                      op_attrs={'name': node_name + '/ReduceSum_', 'keep_dims': True})
+        log_node = LogOp(graph, {'name': node_name + '/Log_'}).create_node()
+
+        first_sub_node.out_port(0).connect(exp_node.in_port(0))
+        exp_node.out_port(0).connect(reduce_sum_node.in_port(0))
+        reduce_sum_node.out_port(0).connect(log_node.in_port(0))
+
+        # Create the last Sub node
+        second_sub_node = Sub(graph, {}).create_node()
+        rename_nodes([(node, node_name + '/delete'), (second_sub_node, node_name)])
+        log_node.out_port(0).connect(second_sub_node.in_port(1))
+        first_sub_node.out_port(0).connect(second_sub_node.in_port(0))
+
+        # Correct the input edges
+        source = node.in_port(0).get_source()
+        first_sub_node.in_port(0).connect(source)
+        reduce_max_node.in_port(0).connect(source)
 
-        # Connect nodes: input -> Softmax -> Log
-        node.in_port(0).get_connection().set_destination(softmax.in_port(0))
-        log.in_port(0).get_connection().set_source(softmax.out_port(0))
-        return [log.id]
+        return [second_sub_node.id]
index 18bea40..5c5050c 100644 (file)
 
 import unittest
 
+from generator import generator, generate
+
 from extensions.front.LogSoftmax import LogSoftmaxFrontReplacer
+from mo.front.common.partial_infer.utils import int64_array
 from mo.utils.ir_engine.compare_graphs import compare_graphs
-from mo.utils.unittest.graph import build_graph, regular_op, result, connect
+from mo.utils.unittest.graph import build_graph
 
-nodes = {
-    **regular_op('input', {'type': 'Parameter'}),
-    **regular_op('logsoftmax', {'type': None, 'op': 'LogSoftmax', 'axis': -2, 'name': 'my_logsoftmax'}),
-    **result('output'),
+graph_node_attributes = {
+    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
+    'logsoftmax': {'type': None, 'kind': 'op', 'op': 'LogSoftmax', 'axis': -1},
+    'output': {'kind': 'op', 'type': 'Result', 'op': 'Result'},
 }
-edges = [
-    ('input', 'logsoftmax'),
+
+
+graph_edges = [
+    ('placeholder', 'logsoftmax'),
     ('logsoftmax', 'output'),
 ]
 
 
-class LogSoftmaxReplacerTest(unittest.TestCase):
-    def test_1(self):
-        graph = build_graph(nodes, edges)
-
-        graph_ref = build_graph({
-            **regular_op('input', {'type': 'Parameter'}),
-            **regular_op('log', {'op': 'Log', 'type': 'Log'}),
-            **regular_op('softmax', {'op': 'SoftMax', 'type': 'SoftMax', 'axis': -2}),
-            **result('output'),
-        },
-            [
-                ('input', 'softmax'),
-                ('softmax', 'log'),
-                ('log', 'output'),
-            ])
-
-        graph.graph['layout'] = 'NCHW'
-        graph.stage = 'front'
+graph_ref_node_attributes = {
+    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
+    'exp': {'type': 'Exp', 'kind': 'op', 'op': 'Exp'},
+    'reduce_sum':  {'type': 'ReduceSum', 'kind': 'op', 'op': 'ReduceSum', 'keep_dims': True},
+    'reduce_max':  {'type': 'ReduceMax', 'kind': 'op', 'op': 'ReduceMax', 'keep_dims': True},
+    'log': {'type': 'Log', 'kind': 'op', 'op': 'Log'},
+    'second_sub': {'type': 'Subtract', 'kind': 'op', 'op': 'Sub'},
+    'reduce_sum_axis': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': None, 'shape': int64_array([1])},
+    'reduce_max_axis': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': None, 'shape': int64_array([1])},
+    'first_sub': {'type': 'Subtract', 'kind': 'op', 'op': 'Sub'},
+    'output': {'kind': 'op', 'type': 'Result', 'op': 'Result'},
+}
 
-        LogSoftmaxFrontReplacer().find_and_replace_pattern(graph)
 
-        (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
-        self.assertTrue(flag, resp)
-        self.assertTrue(graph.get_op_nodes(op='Log')[0].name == 'my_logsoftmax')
-
-    def test_2(self):
-        graph = build_graph(nodes, edges)
-
-        graph_ref = build_graph({
-            **regular_op('input', {'type': 'Parameter'}),
-            **regular_op('log', {'op': 'Log', 'type': 'Log'}),
-            **regular_op('softmax', {'op': 'SoftMax', 'type': 'SoftMax', 'axis': -2}),
-            **result('output'),
-        },
-            [
-                ('input', 'softmax'),
-                ('softmax', 'log'),
-                ('log', 'output'),
-            ])
-
-        graph.graph['layout'] = 'NHWC'
-        graph.stage = 'front'
+graph_ref_edges = [
+    ('placeholder', 'reduce_max', {'in': 0, 'out': 0}),
+    ('placeholder', 'first_sub', {'in': 0, 'out': 0}),
+    ('reduce_max', 'first_sub', {'in': 1}),
+    ('reduce_max_axis', 'reduce_max', {'in': 1}),
+    ('first_sub', 'exp', {'in': 0, 'out': 0}),
+    ('first_sub', 'second_sub', {'in': 0, 'out': 0}),
+    ('exp', 'reduce_sum', {'in': 0}),
+    ('reduce_sum_axis', 'reduce_sum', {'in': 1}),
+    ('reduce_sum', 'log'),
+    ('log', 'second_sub', {'in': 1}),
+    ('second_sub', 'output'),
+]
 
-        LogSoftmaxFrontReplacer().find_and_replace_pattern(graph)
 
-        (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
+@generator
+class LogSoftmaxReplacerTest(unittest.TestCase):
+    @generate(*[(-1, 'NCHW'), (-1, 'NHWC'), (0, 'NHWC'),
+                (0, 'NCHW'), (2, 'NCHW'), (2, 'NHWC'),
+                (-2, 'NHWC'), (-2, 'NCHW')])
+    def test_logsoftmax_replacer(self, axis, layout):
+        graph = build_graph(nodes_attrs=graph_node_attributes, edges=graph_edges)
+        graph_ref = build_graph(nodes_attrs=graph_ref_node_attributes,
+                                edges=graph_ref_edges,
+                                update_attributes={
+                                    'reduce_max_axis': {'value': int64_array([axis])},
+                                    'reduce_sum_axis': {'value': int64_array([axis])},
+                                })
+        graph.graph['layout'] = layout
+        graph.stage = 'front'
+        LogSoftmaxFrontReplacer().find_and_replace_pattern(graph)
+        (flag, resp) = compare_graphs(graph, graph_ref, 'output')
         self.assertTrue(flag, resp)
-        self.assertTrue(graph.get_op_nodes(op='Log')[0].name == 'my_logsoftmax')
+
index 05c88fa..d4ebcdb 100644 (file)
@@ -36,6 +36,10 @@ class FlattenONNXToReshape(FrontReplacementSubgraph):
     """
     enabled = True
 
+    def run_before(self):
+        from extensions.front.LogSoftmax import LogSoftmaxFrontReplacer
+        return [LogSoftmaxFrontReplacer]
+
     def pattern(self):
         return dict(nodes=[('flatten', dict(op='FlattenONNX'))],
                     edges=[])
diff --git a/model-optimizer/extensions/front/onnx/logsoftmaxONNX_to_logsoftmax.py b/model-optimizer/extensions/front/onnx/logsoftmaxONNX_to_logsoftmax.py
new file mode 100644 (file)
index 0000000..2438786
--- /dev/null
@@ -0,0 +1,55 @@
+"""
+ Copyright (C) 2018-2020 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+from mo.front.common.replacement import FrontReplacementOp
+from mo.graph.graph import Graph, Node, rename_nodes
+from mo.ops.flatten import FlattenONNX
+from mo.ops.reshape import Reshape
+from mo.ops.shape import Shape
+from mo.ops.softmax import LogSoftmax
+
+
+class LogSoftmaxONNXFrontReplacer(FrontReplacementOp):
+    """
+    Replace LogSoftmaxONNX operation with a FlattenONNX -> LogSoftmax -> Reshape subgraph
+    """
+    op = "LogSoftmaxONNX"
+    enabled = True
+
+    def run_before(self):
+        from extensions.front.onnx.flattenONNX_to_reshape import FlattenONNXToReshape
+        return [FlattenONNXToReshape]
+
+    def replace_op(self, graph: Graph, node: Node):
+        node_name = node.soft_get('name', node.id)
+        assert node.has_valid('axis'), 'The node "{}" does not have mandatory attribute "axis"'.format(node_name)
+
+        flatten_node = FlattenONNX(graph, {'name': node_name + '/FlattenONNX_', 'axis': node.axis}).create_node()
+        shape_node = Shape(graph, {'name': node_name + '/ShapeOf_'}).create_node()
+        logsoftmax_node = LogSoftmax(graph, {'name': node_name + '/LogSoftmax_', 'axis': 1}).create_node()
+        reshape_node = Reshape(graph, {}).create_node()
+
+        rename_nodes([(node, node_name + '/delete'), (reshape_node, node_name)])
+
+        shape_node.out_port(0).connect(reshape_node.in_port(1))
+        logsoftmax_node.out_port(0).connect(reshape_node.in_port(0))
+        flatten_node.out_port(0).connect(logsoftmax_node.in_port(0))
+
+        source = node.in_port(0).get_source()
+
+        flatten_node.in_port(0).connect(source)
+        shape_node.in_port(0).connect(source)
+
+        return [reshape_node.id]
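For reference, the FlattenONNX -> LogSoftmax -> Reshape subgraph built above follows formula (1') from the earlier docstring; a NumPy mirror of it (a sketch with a hypothetical helper name, for illustration only):

    import numpy as np

    def onnx_logsoftmax(data, axis):
        shape = data.shape
        # FlattenONNX: dims before `axis` become the first dim, the rest collapse into the second
        flat = data.reshape(int(np.prod(shape[:axis], dtype=np.int64)), -1)
        # stable LogSoftmax over axis 1 of the flattened tensor
        shifted = flat - flat.max(axis=1, keepdims=True)
        z = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
        return z.reshape(shape)  # Reshape back to the original shape

    x = np.random.rand(2, 3, 4).astype(np.float32)
    print(onnx_logsoftmax(x, axis=1).shape)  # (2, 3, 4)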
index f517f68..beb27c4 100644 (file)
@@ -16,7 +16,7 @@
 
 from mo.front.extractor import FrontExtractorOp
 from mo.front.onnx.extractors.utils import onnx_attr
-from mo.ops.softmax import LogSoftmax, Softmax
+from mo.ops.softmax import LogSoftmaxONNX, Softmax
 
 
 class SoftmaxExtractor(FrontExtractorOp):
@@ -37,5 +37,5 @@ class LogSoftmaxExtractor(FrontExtractorOp):
     @classmethod
     def extract(cls, node):
         axis = onnx_attr(node, 'axis', 'i', default=1)
-        LogSoftmax.update_node_stat(node, {'axis': axis})
+        LogSoftmaxONNX.update_node_stat(node, {'axis': axis})
         return cls.enabled
diff --git a/model-optimizer/extensions/front/onnx/where_ext.py b/model-optimizer/extensions/front/onnx/where_ext.py
new file mode 100644 (file)
index 0000000..70ab322
--- /dev/null
@@ -0,0 +1,28 @@
+"""
+ Copyright (C) 2020 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+from extensions.ops.select import Select
+from mo.front.extractor import FrontExtractorOp
+
+
+class WhereExtractor(FrontExtractorOp):
+    op = 'Where'
+    enabled = True
+
+    @classmethod
+    def extract(cls, node):
+        Select.update_node_stat(node, {})
+        return cls.enabled
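The extractor above maps ONNX Where directly onto the existing Select operation: both pick elements from the second or third input depending on a boolean condition, element-wise, just like np.where (illustration only):

    import numpy as np

    cond = np.array([True, False, True])
    x = np.array([1, 2, 3])
    y = np.array([10, 20, 30])
    # ONNX Where(cond, x, y) and Select(cond, x, y) both behave like np.where
    print(np.where(cond, x, y))  # [ 1 20  3]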
index 0ac4436..9344f8c 100644 (file)
@@ -14,7 +14,7 @@
  limitations under the License.
 """
 from extensions.ops.activation_ops import Abs, Elu, Erf, Exp, ReLU, LeakyReLU, LogicalNot, ReLU6, Sigmoid, \
-    Sin, Sinh, Cos, Cosh, Tan, Tanh
+    Sin, Sinh, Cos, Cosh, Tan, Tanh, Ceiling
 from mo.front.extractor import FrontExtractorOp
 
 
@@ -170,3 +170,13 @@ class TanhFrontExtractor(FrontExtractorOp):
     def extract(cls, node):
         Tanh.update_node_stat(node)
         return cls.enabled
+
+
+class CeilExtractor(FrontExtractorOp):
+    op = 'Ceil'
+    enabled = True
+
+    @classmethod
+    def extract(cls, node):
+        Ceiling.update_node_stat(node)
+        return cls.enabled
index b44d2b4..3e1d831 100644 (file)
@@ -210,7 +210,7 @@ class ConvertGroupedStridedSlice(MiddleReplacementPattern):
         k = 0
 
         # Don't permute reshape if channels were squeezed
-        dont_permute = False
+        dont_permute = graph.graph['layout'] == 'NCHW'
         if graph.graph['layout'] == 'NHWC' and ss_node['shrink_axis_mask'][-1] == 1:
             dont_permute = True
 
index 441d50d..6eaf6d7 100644 (file)
@@ -622,7 +622,7 @@ class AddReshapeAfterStridedSliceTests(unittest.TestCase):
                                           'shrink_axis_mask': [0, 0, 1, 0],
                                           'new_axis_mask': np.array([0, 0, 0, 0])},
                              'sslice_1_data': {'shape': np.array([1, 227, 54])},
-                             })
+                             }, nodes_with_edges_only=True)
         graph.graph['layout'] = 'NHWC'
 
         graph_ref = build_graph(nodes_attributes,
@@ -644,13 +644,11 @@ class AddReshapeAfterStridedSliceTests(unittest.TestCase):
                                               'new_axis_mask': np.array([0, 0, 0, 0])},
                                  'sslice_1_data': {'shape': np.array([1, 227, 1, 54])},
                                  'sslice_1/Squeeze_shrink_data': {'shape': np.array([1, 227, 54])}
-                                 })
+                                 }, nodes_with_edges_only=True)
 
         ConvertGroupedStridedSlice().add_squeeze_for_shrink(graph, Node(graph, 'sslice_1'))
 
         (flag, resp) = compare_graphs(graph, graph_ref, 'sslice_1_data', check_op_attrs=True)
-        graph.clear()
-        graph_ref.clear()
         self.assertTrue(flag, resp)
 
     def test_ss_1_shrink(self):
@@ -672,7 +670,7 @@ class AddReshapeAfterStridedSliceTests(unittest.TestCase):
                                           'shrink_axis_mask': [0, 0, 1, 0],
                                           'new_axis_mask': np.array([0, 0, 0, 0])},
                              'sslice_2_data': {'shape': np.array([1, 227, 54])}
-                             })
+                             }, nodes_with_edges_only=True)
         graph.graph['layout'] = 'NHWC'
 
         graph_ref = build_graph(nodes_attributes,
@@ -697,13 +695,11 @@ class AddReshapeAfterStridedSliceTests(unittest.TestCase):
                                  'sslice_2_data': {'shape': np.array([1, 227, 1, 54])},
                                  'sslice_2/squeeze_const': {'value': np.array([2])},
                                  'sslice_2/Squeeze_shrink_data': {'shape': np.array([1, 227, 54])},
-                                 })
+                                 }, nodes_with_edges_only=True)
 
         ConvertGroupedStridedSlice().add_squeeze_for_shrink(graph, Node(graph, 'sslice_2'))
 
         (flag, resp) = compare_graphs(graph, graph_ref, 'op_output', check_op_attrs=True)
-        graph.clear()
-        graph_ref.clear()
         self.assertTrue(flag, resp)
 
     def test_ss_2_shrink(self):
@@ -725,7 +721,7 @@ class AddReshapeAfterStridedSliceTests(unittest.TestCase):
                                           'shrink_axis_mask': np.array([0, 1, 0, 1]),
                                           'new_axis_mask': np.array([0, 0, 0, 0])},
                              'sslice_2_data': {'shape': np.array([1, 227])}
-                             })
+                             }, nodes_with_edges_only=True)
         graph.graph['layout'] = 'NHWC'
 
         graph_ref = build_graph(nodes_attributes,
@@ -750,13 +746,11 @@ class AddReshapeAfterStridedSliceTests(unittest.TestCase):
                                  'sslice_2_data': {'shape': np.array([1, 1, 227, 1])},
                                  'sslice_2/squeeze_const': {'value': np.array([1, 3])},
                                  'sslice_2/Squeeze_shrink_data': {'shape': np.array([1, 227])},
-                                 })
+                                 }, nodes_with_edges_only=True)
 
         ConvertGroupedStridedSlice().add_squeeze_for_shrink(graph, Node(graph, 'sslice_2'))
 
         (flag, resp) = compare_graphs(graph, graph_ref, 'op_output', check_op_attrs=True)
-        graph.clear()
-        graph_ref.clear()
         self.assertTrue(flag, resp)
 
     def test_ss_1_new(self):
@@ -775,7 +769,7 @@ class AddReshapeAfterStridedSliceTests(unittest.TestCase):
                                           'shrink_axis_mask': np.array([0, 0, 0, 0, 0]),
                                           'new_axis_mask': np.array([0, 1, 0, 0, 0])},
                              'sslice_2_data': {'shape': np.array([1, 1, 227, 227, 54])}
-                             })
+                             }, nodes_with_edges_only=True)
         graph.graph['layout'] = 'NHWC'
 
         graph_ref = build_graph(nodes_attributes,
@@ -798,14 +792,12 @@ class AddReshapeAfterStridedSliceTests(unittest.TestCase):
                                  'sslice_2_data': {'shape': np.array([1, 227, 227, 54])},
                                  'sslice_2/unsqueeze_const': {'value': int64_array([1])},
                                  'sslice_2/Unsqueeze_new_data': {'shape': np.array([1, 1, 227, 227, 54])},
-                                 })
+                                 }, nodes_with_edges_only=True)
 
         pattern = ConvertGroupedStridedSlice()
         pattern.add_unsqueeze_for_new(graph, Node(graph, 'sslice_2'))
 
         (flag, resp) = compare_graphs(graph, graph_ref, 'sslice_2_data', check_op_attrs=True)
-        graph.clear()
-        graph_ref.clear()
         self.assertTrue(flag, resp)
 
     def test_ss_shrink_new(self):
@@ -827,7 +819,7 @@ class AddReshapeAfterStridedSliceTests(unittest.TestCase):
                                           'shrink_axis_mask': np.array([0, 0, 0, 1, 0]),
                                           'new_axis_mask': np.array([0, 1, 0, 0, 0])},
                              'sslice_2_data': {'shape': np.array([1, 1, 227, 54])}
-                             })
+                             }, nodes_with_edges_only=True)
         graph.graph['layout'] = 'NHWC'
 
         graph_ref = build_graph(nodes_attributes,
@@ -858,15 +850,13 @@ class AddReshapeAfterStridedSliceTests(unittest.TestCase):
                                  'sslice_2/Unsqueeze_new_data': {'shape': np.array([1, 1, 227, 1, 54])},
                                  'sslice_2/squeeze_const': {'value': np.array([3])},
                                  'sslice_2/Squeeze_shrink_data': {'shape': np.array([1, 1, 227, 54])},
-                                 })
+                                 }, nodes_with_edges_only=True)
 
         pattern = ConvertGroupedStridedSlice()
         pattern.add_squeeze_for_shrink(graph, Node(graph, 'sslice_2'))
         pattern.add_unsqueeze_for_new(graph, Node(graph, 'sslice_2'))
 
         (flag, resp) = compare_graphs(graph, graph_ref, 'op_output', check_op_attrs=True)
-        graph.clear()
-        graph_ref.clear()
         self.assertTrue(flag, resp)
 
     # test case for strided slice that only shrinks dimension
@@ -886,7 +876,7 @@ class AddReshapeAfterStridedSliceTests(unittest.TestCase):
                                  [slice(0, 1, 1), slice(0, 227, 1), slice(0, 1, 1), slice(0, 54, 1)]),
                                  'shrink_axis_mask': np.array([0, 0, 1, 0])},
                              'sslice_2_data': {'shape': np.array([1, 227, 54])}
-                             })
+                             }, nodes_with_edges_only=True)
         graph.graph['layout'] = 'NHWC'
 
         graph_ref = graph.copy()
@@ -894,8 +884,6 @@ class AddReshapeAfterStridedSliceTests(unittest.TestCase):
         ConvertGroupedStridedSlice().find_and_replace_pattern(graph)
 
         (flag, resp) = compare_graphs(graph, graph_ref, 'sslice_2_data', check_op_attrs=True)
-        graph.clear()
-        graph_ref.clear()
         self.assertTrue(flag, resp)
 
     def test_ss_shrink_only_short(self):
@@ -914,7 +902,7 @@ class AddReshapeAfterStridedSliceTests(unittest.TestCase):
                                  [slice(0, 1, 1), slice(0, 227, 1), slice(0, 1, 1), slice(0, 54, 1)]),
                                  'shrink_axis_mask': np.array([0, 0, 1])},
                              'sslice_2_data': {'shape': np.array([1, 227, 54])}
-                             })
+                             }, nodes_with_edges_only=True)
         graph.graph['layout'] = 'NHWC'
 
         graph_ref = graph.copy()
@@ -922,8 +910,6 @@ class AddReshapeAfterStridedSliceTests(unittest.TestCase):
         ConvertGroupedStridedSlice().find_and_replace_pattern(graph)
 
         (flag, resp) = compare_graphs(graph, graph_ref, 'sslice_2_data', check_op_attrs=True)
-        graph.clear()
-        graph_ref.clear()
         self.assertTrue(flag, resp)
 
     def test_ss_shrink_only_long(self):
@@ -942,7 +928,7 @@ class AddReshapeAfterStridedSliceTests(unittest.TestCase):
                                  [slice(0, 1, 1), slice(0, 227, 1), slice(0, 1, 1), slice(0, 54, 1)]),
                                  'shrink_axis_mask': np.array([0, 0, 1, 0, 0])},
                              'sslice_2_data': {'shape': np.array([1, 227, 54])}
-                             })
+                             }, nodes_with_edges_only=True)
         graph.graph['layout'] = 'NHWC'
 
         graph_ref = graph.copy()
@@ -950,8 +936,6 @@ class AddReshapeAfterStridedSliceTests(unittest.TestCase):
         ConvertGroupedStridedSlice().find_and_replace_pattern(graph)
 
         (flag, resp) = compare_graphs(graph, graph_ref, 'sslice_2_data', check_op_attrs=True)
-        graph.clear()
-        graph_ref.clear()
         self.assertTrue(flag, resp)
 
     # test case with 2 strided slices with the same parameters but different outputs
@@ -987,7 +971,7 @@ class AddReshapeAfterStridedSliceTests(unittest.TestCase):
                              'sslice_3_data': {'shape': np.array([1, 227, 227, 27])},
 
                              'concat_1_data': {'shape': np.array([1, 227, 227, 54])},
-                             })
+                             }, nodes_with_edges_only=True)
         graph.graph['layout'] = 'NHWC'
 
         graph_ref = build_graph(nodes_attributes,
@@ -1013,7 +997,7 @@ class AddReshapeAfterStridedSliceTests(unittest.TestCase):
                                  'split_1_data': {'shape': np.array([1, 227, 227, 27])},
                                  'split_2_data': {'shape': np.array([1, 227, 227, 27])},
                                  'concat_1_data': {'shape': np.array([1, 227, 227, 54])},
-                                 })
+                                 }, nodes_with_edges_only=True)
 
         ConvertGroupedStridedSlice().find_and_replace_pattern(graph)
 
index e9b7870..9174ca3 100644 (file)
@@ -24,9 +24,9 @@ from mo.ops.op import PermuteAttrs
 
 class InsertLayoutPropagationTranspose(MiddleReplacementPattern):
     """
-    The transformation inserts Transpose layers before/after Reshape layers that change the interpretation of data, for
-    example, from 3D to 4D or from 4D to 3D. These Transpose layers basically convert layout from N(D)HWC to NC(D)HW and
-    in the reverse order.
+    The transformation inserts Transpose layers before/after operations that change the interpretation of data, for
+    example, Reshape from 3D to 4D or from 4D to 3D. These Transpose layers basically convert the layout from N(D)HWC to
+    NC(D)HW and back.
     """
     enabled = True
     force_clean_up = True  # need to run clean up after the transformation to update shapes
@@ -38,6 +38,36 @@ class InsertLayoutPropagationTranspose(MiddleReplacementPattern):
     def run_before(self):
         return []
 
+    @staticmethod
+    def is_nchw_to_nhwc_transpose_needed(node: Node):
+        """
+        The function checks whether it is necessary to insert a Transpose from NCHW to NHWC before the node.
+        The transpose is needed when all the following conditions are met:
+         1. The node has the 'reinterp_shape' attribute set
+         2. The node is *not* marked as getting its input in the correct layout (this implicitly implies that the input is on port 0)
+         3. The input shape rank is not less than 4
+        :param node: node to check
+        :return: result of the check
+        """
+        return node.has_and_set('reinterp_shape') and \
+               not is_input_data_in_correct_layout(node, 0) and \
+               len(node.in_port(0).data.get_shape()) >= 4
+
+    @staticmethod
+    def is_nhwc_to_nchw_transpose_needed(node: Node):
+        """
+        The function checks whether it is necessary to insert a Transpose from NHWC to NCHW after the node.
+        The transpose is needed when all the following conditions are met:
+         1. The node has the 'reinterp_shape' attribute set
+         2. The node is *not* marked as generating its output in the correct layout (this implicitly implies that the output port is 0)
+         3. The output shape rank is not less than 4
+        :param node: node to check
+        :return: result of the check
+        """
+        return node.has_and_set('reinterp_shape') and \
+               not is_output_data_in_correct_layout(node, 0) and \
+               len(node.out_port(0).data.get_shape()) >= 4
+
     def find_and_replace_pattern(self, graph: Graph):
         if graph.graph['layout'] != 'NHWC':
             # we check it here because this transformation is called explicitly from the pipeline
@@ -49,7 +79,7 @@ class InsertLayoutPropagationTranspose(MiddleReplacementPattern):
             assert 0 in reinterp_shape_node.in_nodes(), 'Node {} does not have 0 input. \n{}'.format(
                 reinterp_shape_node_id, graph.dump_graph_for_graphviz())
             input_shape = reinterp_shape_node.in_node(0).shape
-            if not is_input_data_in_correct_layout(reinterp_shape_node, 0) and len(input_shape) >= 4:
+            if self.is_nchw_to_nhwc_transpose_needed(reinterp_shape_node):
                 order_const = Const(graph, {'value': PermuteAttrs().get_nchw_to_nhwc_permutation(len(input_shape)).perm
                                             }).create_node()
                 permute_node = Transpose(graph,
@@ -76,7 +106,7 @@ class InsertLayoutPropagationTranspose(MiddleReplacementPattern):
             assert 0 in reinterp_shape_node.out_nodes(), 'Node {} does not have 0 output. \n{}'.format(
                 reinterp_shape_node_id, graph.dump_graph_for_graphviz())
             output_shape = reinterp_shape_node.out_node(0).shape
-            if not is_output_data_in_correct_layout(reinterp_shape_node, 0) and len(output_shape) >= 4:
+            if self.is_nhwc_to_nchw_transpose_needed(reinterp_shape_node):
                 order_const = Const(graph, {
                     'value': PermuteAttrs().get_nhwc_to_nchw_permutation(len(output_shape)).perm}).create_node()
                 permute_node = Transpose(graph, {'name': reinterp_shape_node.id + '/Transpose'}).create_node()
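
The permutation orders produced by `get_nchw_to_nhwc_permutation` / `get_nhwc_to_nchw_permutation` can be sketched for an arbitrary rank as follows (hypothetical helper names; a simplification of the `PermuteAttrs` machinery):

```python
import numpy as np

def nhwc_to_nchw_perm(rank):
    # move the trailing channel axis to position 1: N(D)HWC -> NC(D)HW
    return np.array([0, rank - 1] + list(range(1, rank - 1)), dtype=np.int64)

def nchw_to_nhwc_perm(rank):
    # inverse permutation: NC(D)HW -> N(D)HWC
    return np.array([0] + list(range(2, rank)) + [1], dtype=np.int64)

print(nhwc_to_nchw_perm(4))                         # [0 3 1 2]
print(nchw_to_nhwc_perm(5))                         # [0 2 3 4 1]
x = np.zeros((1, 3, 8, 8))                          # NCHW tensor
print(np.transpose(x, nchw_to_nhwc_perm(4)).shape)  # (1, 8, 8, 3)
```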
diff --git a/model-optimizer/extensions/middle/MarkSubgraphsWithCorrectLayout.py b/model-optimizer/extensions/middle/MarkSubgraphsWithCorrectLayout.py
new file mode 100644 (file)
index 0000000..34c4bff
--- /dev/null
@@ -0,0 +1,123 @@
+"""
+ Copyright (C) 2018-2020 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import logging as log
+from collections import deque
+
+from extensions.middle.InsertLayoutPropagationTransposes import InsertLayoutPropagationTranspose, \
+    mark_as_correct_data_layout
+from extensions.middle.pass_separator import PostMiddleStart
+from mo.graph.graph import Graph, Node
+from mo.middle.replacement import MiddleReplacementPattern
+
+
+class MarkSubGraphsWithCorrectLayout(MiddleReplacementPattern):
+    """
+    The transformation looks for layout-agnostic operations which do not have a layout (NCHW or NHWC) and makes the
+    necessary changes to infer that part of the topology in the original layout:
+    1. Prevents adding Transpose operations before and after "reinterp_shape"-like operations which change the rank of
+    the input and output tensors of this layout-agnostic op.
+    2. Disables attribute permutation for all intermediate ops between these "reinterp_shape" nodes.
+
+    For now the transformation is triggered only for the MatMul operation when it gets 4D or 5D tensors as input.
+    """
+    enabled = True
+    graph_condition = [lambda graph: graph.graph['layout'] == 'NHWC']
+    op_conditions = [lambda n: n.soft_get('op') == 'MatMul' and
+                               any([len(port.data.get_shape()) in (4, 5) for port in n.in_ports().values()]),
+                     ]
+
+    def run_after(self):
+        return [PostMiddleStart]
+
+    def run_before(self):
+        return [InsertLayoutPropagationTranspose]
+
+    @staticmethod
+    def get_input_nodes(node: Node):
+        return [src_port.get_source().node for src_port in node.in_ports().values()]
+
+    @staticmethod
+    def get_output_nodes(node: Node):
+        result = []
+        for out_port in node.out_ports().values():
+            for dest_port in out_port.get_destinations():
+                result.append(dest_port.node)
+        return result
+
+    def bfs(self, start_nodes: list, visited: set, condition: callable = None, forward: bool = True):
+        """
+        The function performs a BFS starting from the selected nodes in the forward or backward direction, adding
+        nodes by an optional condition
+        :param start_nodes: nodes to start the search from
+        :param visited: set of already visited nodes where traversal should not happen
+        :param condition: function getting a Node as input and returning whether the node should be included into the
+        result or not. If the value is None then the node is added unconditionally.
+        :param forward: boolean flag specifying the traversal direction
+        :return: the list of Nodes visited
+        """
+        assert visited is not None, 'The "visited" set must be defined'
+        assert start_nodes is not None and len(start_nodes) != 0, 'The list of start nodes must be specified'
+
+        result = list()
+        d = deque(start_nodes)
+        while len(d) != 0:
+            cur_node = d.popleft()
+            result.append(cur_node)
+            visited.add(cur_node)
+            if forward:
+                next_nodes = self.get_output_nodes(cur_node)
+            else:
+                next_nodes = self.get_input_nodes(cur_node)
+            for next_node in next_nodes:
+                if next_node not in visited and (condition is None or condition(next_node)):
+                    d.append(next_node)
+        return result
+
+    def find_and_replace_pattern(self, graph: Graph):
+        visited = set()
+        marked_nodes = set()
+        condition_forward = lambda n: not InsertLayoutPropagationTranspose.is_nhwc_to_nchw_transpose_needed(n)
+        condition_backward = lambda n: not InsertLayoutPropagationTranspose.is_nchw_to_nhwc_transpose_needed(n)
+        for node_condition in self.op_conditions:
+            for node in graph.get_op_nodes():
+                if node_condition(node):
+                    log.debug('Detected node "{}" as a node which should be executed in the original layout'
+                              ''.format(node.soft_get('name', node.id)))
+                    forward_visited_nodes = self.bfs([node], visited, condition_forward, True)
+                    backward_visited_nodes = self.bfs([node], visited, condition_backward, False)
+
+                    # find "reinterp_shape" like ops which change rank of input to 4D or 5D from smaller dimensions
+                    for back_node in backward_visited_nodes:
+                        for input_node in self.get_input_nodes(back_node):
+                            if input_node not in backward_visited_nodes and not condition_forward(input_node):
+                                marked_nodes.add(input_node)
+
+                    # find "reinterp_shape" like ops which change rank of input from 4D or 5D to smaller dimensions
+                    for forward_node in forward_visited_nodes:
+                        for output_node in self.get_output_nodes(forward_node):
+                            if output_node not in forward_visited_nodes and not condition_backward(output_node):
+                                marked_nodes.add(output_node)
+
+                    marked_nodes.update(forward_visited_nodes + backward_visited_nodes)
+
+        if len(marked_nodes):
+            log.debug('The following nodes will be executed in the original layout: {}'
+                      ''.format([n.soft_get('name', n.id) for n in marked_nodes]))
+
+            # mark all matched nodes as in correct layout and disable attributes permutation for them
+            for visited_node in marked_nodes:
+                mark_as_correct_data_layout(visited_node)
+                visited_node['nchw_layout'] = True
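
The `bfs` method above is a plain breadth-first traversal with a pluggable expansion condition. A self-contained sketch on a toy adjacency-list graph (hypothetical node names, for illustration only):

```python
from collections import deque

def bfs(start_nodes, visited, neighbors, condition=None):
    """Collect reachable nodes, expanding only through nodes that pass `condition`."""
    result, queue = [], deque(start_nodes)
    while queue:
        node = queue.popleft()
        result.append(node)
        visited.add(node)
        for nxt in neighbors(node):
            if nxt not in visited and (condition is None or condition(nxt)):
                queue.append(nxt)
    return result

forward_edges = {'matmul': ['add'], 'add': ['reshape'], 'reshape': []}
# stop expanding at the rank-changing 'reshape' node, mirroring condition_forward
print(bfs(['matmul'], set(), forward_edges.get, lambda n: n != 'reshape'))
# ['matmul', 'add']
```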
index f9873f1..d68ef5f 100644 (file)
@@ -50,6 +50,7 @@ class UnsqueezeTileReshapeBlockToInterpolate(MiddleReplacementPattern):
         ]
     """
     enabled = True
+    force_shape_inference = True
 
     def run_before(self):
         return [InterpolateSequenceToInterpolate]
index 8bcc46a..fa635bb 100644 (file)
@@ -76,7 +76,28 @@ class OneHot(Op):
 
         node.out_port(0).data.set_shape(output_shape)
 
-        # This operation should be inferred in original TF (NHWC) layout
+        indices = node.in_port(0).data.get_value()
+        depth = node.in_port(1).data.get_value()
+        on_value = node.in_port(2).data.get_value()
+        off_value = node.in_port(3).data.get_value()
+
+        if indices is not None and depth is not None and on_value is not None and off_value is not None:
+            onehot_value = np.full(output_shape, off_value)
+
+            for idx in np.ndindex(tuple(indices_shape)):
+                # the generic form works for any normalized axis, including 0 and len(output_shape) - 1
+                if -depth <= indices[idx] < depth:  # out-of-range indices keep off_value
+                    hot_idx = *idx[:axis], indices[idx], *idx[axis:]
+                    onehot_value[hot_idx] = on_value
+
+            node.out_port(0).data.set_value(onehot_value)
+
+        # This operation should be inferred in original layout
         node['reinterp_shape'] = True
         node['NCHW'] = True
 
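
A compact standalone NumPy sketch of the value inference added above (a hypothetical helper assuming TF OneHot semantics; negative indices wrap around, as the tests below confirm):

```python
import numpy as np

def one_hot(indices, depth, on_value=1.0, off_value=0.0, axis=-1):
    indices = np.asarray(indices)
    axis = axis if axis >= 0 else indices.ndim + 1 + axis  # normalize the axis
    out_shape = list(indices.shape)
    out_shape.insert(axis, depth)
    out = np.full(out_shape, off_value)
    for idx in np.ndindex(indices.shape):
        if -depth <= indices[idx] < depth:                 # out-of-range stays off_value
            out[(*idx[:axis], indices[idx], *idx[axis:])] = on_value
    return out

print(one_hot([1, 2], depth=4))
# [[0. 1. 0. 0.]
#  [0. 0. 1. 0.]]
```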
diff --git a/model-optimizer/extensions/ops/one_hot_test.py b/model-optimizer/extensions/ops/one_hot_test.py
new file mode 100644 (file)
index 0000000..c58e8c9
--- /dev/null
@@ -0,0 +1,73 @@
+"""
+ Copyright (C) 2018-2020 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+from generator import generator, generate
+
+from extensions.ops.one_hot import OneHot
+from mo.front.common.partial_infer.utils import int64_array, float_array
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph, regular_op_with_shaped_data, const_with_data, connect
+
+
+def generate_nodes(data, axis=-1, depth=4, on_value=1., off_value=0.):
+    return {
+        'indices': {'Op': 'Parameter', 'value': data, 'shape': int64_array(data.shape)},
+        'indices_d': {'kind': 'data', 'value': data, 'shape': int64_array(data.shape)},
+        **const_with_data('depth', int64_array(depth)),
+        **const_with_data('on_value', float_array(on_value)),
+        **const_with_data('off_value', float_array(off_value)),
+        **regular_op_with_shaped_data('one_hot', None, {'type': 'OneHot', 'axis': axis, 'Op': 'OneHot'})
+    }
+
+
+edges = [
+    *connect('indices:0', 'one_hot:0'),
+    *connect('depth:0', 'one_hot:1'),
+    *connect('on_value:0', 'one_hot:2'),
+    *connect('off_value:0', 'one_hot:3'),
+    ('one_hot', 'one_hot_d')
+]
+
+
+@generator
+class TestOneHotInfer(unittest.TestCase):
+    @generate(*[
+        # 0d input
+        (1, [0, 1, 0, 0]),
+        # 1d input
+        ([1, 2], [[0, 1, 0, 0], [0, 0, 1, 0]]),
+        # 2D input
+        ([[1, 2], [3, 4]], [[[0, 1, 0, 0], [0, 0, 1, 0]],
+                            [[0, 0, 0, 1], [0, 0, 0, 0]]]),
+        # 3d input
+        ([[[0, 2], [1, 2]], [[2, 1], [3, 0]]],
+         [[[[1, 0, 0, 0], [0, 0, 1, 0]], [[0, 1, 0, 0], [0, 0, 1, 0]]],
+          [[[0, 0, 1, 0], [0, 1, 0, 0]], [[0, 0, 0, 1], [1, 0, 0, 0]]]]),
+        # 1d input with negative indices
+        ([-2, 2], [[0, 0, 1, 0], [0, 0, 1, 0]]),
+        # check if axis is neither 0 nor -1
+        ([[1, 2], [3, 4]], [[[0, 0], [1, 0], [0, 1], [0, 0]],
+                            [[0, 0], [0, 0], [0, 0], [1, 0]]], 1)
+    ])
+    def test_infer(self, input_value, exp_value, axis=-1):
+        graph = build_graph(generate_nodes(int64_array(input_value), axis), edges)
+        onehot_node = Node(graph, 'one_hot')
+        OneHot.infer(onehot_node)
+        res_value = graph.node['one_hot_d']['value']
+        self.assertTrue(np.array_equal(exp_value, int64_array(res_value)))
index e1e8434..4d64b39 100644 (file)
@@ -16,6 +16,7 @@
 
 import numpy as np
 
+from mo.front.common.partial_infer.utils import int64_array, broadcast_shape
 from mo.graph.graph import Node, Graph
 from mo.ops.op import Op
 
@@ -41,16 +42,14 @@ class Select(Op):
         condition_node = node.in_node(0)
         resulting_tensors = [node.in_node(1), node.in_node(2)]
 
-        assert np.array_equal(resulting_tensors[0].shape, resulting_tensors[1].shape), \
-            "TensorFlow \'Select\' operation has 3 inputs: \'condition\', \'then\' and \'else\' tensors." \
-            "\'then\' and \'else\' tensors must have the same shape by TensorFlow reference"
-        output_shape = resulting_tensors[0].shape
+        a_shape = node.in_port(1).data.get_shape()
+        b_shape = node.in_port(2).data.get_shape()
+        output_shape = broadcast_shape(a_shape, b_shape)
 
         # Case with unknown condition
         if not condition_node.has_valid('value'):
             # infer only shapes
-            for out in node.out_nodes():
-                node.out_node(out).shape = np.array(output_shape)
+            node.out_port(0).data.set_shape(output_shape)
             return
 
         assert condition_node.value.size == 1
index 6d8d075..38ef546 100644 (file)
@@ -17,6 +17,7 @@
 import unittest
 
 import numpy as np
+from generator import generator, generate
 
 from extensions.ops.select import Select
 from mo.graph.graph import Node
@@ -24,18 +25,25 @@ from mo.utils.ir_engine.compare_graphs import compare_graphs
 from mo.utils.unittest.graph import build_graph_with_attrs
 
 
+@generator
 class TestSelect(unittest.TestCase):
     nodes = [
-        ('than', {'value': np.ones((2, 2)), 'kind': 'data', 'executable': True, 'shape': np.array([2, 2])}),
-        ('else', {'value': np.zeros((2, 2)), 'kind': 'data', 'executable': True, 'shape': np.array([2, 2])}),
-        ('condition', {'value': None, 'kind': 'data', 'executable': True}),
+        ('than', {'kind': 'op'}),
+        ('than_data', {'value': np.ones((2, 2)), 'kind': 'data', 'executable': True, 'shape': np.array([2, 2])}),
+        ('else', {'kind': 'op'}),
+        ('else_data', {'value': np.zeros((2, 2)), 'kind': 'data', 'executable': True, 'shape': np.array([2, 2])}),
+        ('condition', {'value': None, 'kind': 'op'}),
+        ('condition_data', {'value': None, 'kind': 'data', 'executable': True, 'shape': np.array([2, 2])}),
         ('select', {'type': 'Select', 'kind': 'op', 'op': 'Select'}),
         ('select_output', {'value': None, 'kind': 'data', 'executable': True, 'shape': None}),
     ]
     edges = [
-        ('condition', 'select', {'in': 0}),
-        ('than', 'select', {'in': 1}),
-        ('else', 'select', {'in': 2}),
+        ('condition', 'condition_data'),
+        ('condition_data', 'select', {'in': 0}),
+        ('than', 'than_data'),
+        ('than_data', 'select', {'in': 1}),
+        ('else', 'else_data'),
+        ('else_data', 'select', {'in': 2}),
         ('select', 'select_output', {'out': 0}),
     ]
 
@@ -57,13 +65,16 @@ class TestSelect(unittest.TestCase):
 
     def test_select_infer_condition_true(self):
         graph = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges,
-                                       update_nodes_attributes=[('condition', {'value': np.array([True])})])
+                                       update_nodes_attributes=[('condition', {'value': np.array([True])}),
+                                                                ('select_output', {'shape': np.array([2, 2]),
+                                                                                   'value': np.ones((2, 2))})
+                                                                ])
 
         # We should propagate shapes and values
         graph_ref = build_graph_with_attrs(nodes_with_attrs=self.nodes,
                                            edges_with_attrs=self.edges,
                                            update_nodes_attributes=[('select_output', {'shape': np.array([2, 2]),
-                                                                                       'value': np.ones((2,2))})])
+                                                                                       'value': np.ones((2, 2))})])
 
         tested_class = Select(graph=graph, attrs={})
 
@@ -75,7 +86,10 @@ class TestSelect(unittest.TestCase):
 
     def test_select_infer_condition_false(self):
         graph = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges,
-                                       update_nodes_attributes=[('condition', {'value': np.array([False])})])
+                                       update_nodes_attributes=[('condition', {'value': np.array([False])}),
+                                                                ('select_output', {'shape': np.array([2, 2]),
+                                                                                   'value': np.zeros((2, 2))})
+                                                                ])
 
         # We should propagate shapes and values
         graph_ref = build_graph_with_attrs(nodes_with_attrs=self.nodes,
@@ -93,19 +107,17 @@ class TestSelect(unittest.TestCase):
 
     def test_select_infer_assert_shapes(self):
         graph = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges,
-                                       update_nodes_attributes=[('else', {'shape': np.array([3,3]), 'value':np.zeros((3,3))})])
+                                       update_nodes_attributes=[('else_data', {'shape': np.array([3, 3]), 'value':np.zeros((3, 3))})])
 
         tested_class = Select(graph=graph, attrs={})
 
         node = Node(graph, 'select')
-        with self.assertRaisesRegex(AssertionError, "TensorFlow \'Select\' operation has 3 inputs: \'condition\',"
-                                                    " \'then\' and \'else\' tensors.\'then\' and \'else\' tensors"
-                                                    " must have the same shape by TensorFlow reference"):
+        with self.assertRaisesRegex(AssertionError, "Input shape do not broadcast"):
             tested_class.infer(node)
 
     def test_select_infer_assert_condition_bool(self):
         graph = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges,
-                                       update_nodes_attributes=[('condition', {'value': np.array([3])})])
+                                       update_nodes_attributes=[('condition_data', {'value': np.array([3])})])
 
         tested_class = Select(graph=graph, attrs={})
 
@@ -113,4 +125,39 @@ class TestSelect(unittest.TestCase):
         with self.assertRaisesRegex(AssertionError, "TensorFlow \'Select\' operation has 3 inputs: \'condition\',"
                                                     " \'then\' and \'else\' tensors. Value of \'condition\' tensor"
                                                     " must be boolen by TensorFlow reference"):
-            tested_class.infer(node)
\ No newline at end of file
+            tested_class.infer(node)
+
+    @generate(*[
+        ([5, 6], [1], [5, 6]),
+        ([15, 3, 5], [15, 1, 5], [15, 3, 5]),
+        ([2, 3, 4, 5], [], [2, 3, 4, 5]),
+        ([2, 3, 4, 5], [5], [2, 3, 4, 5]),
+        ([2, 3, 4, 5], [2, 1, 1, 5], [2, 3, 4, 5]),
+        ([2, 3, 4, 5], [1, 3, 1, 5], [2, 3, 4, 5]),
+    ])
+    def test_select_infer_condition_shapes_broadcast(self, else_data_shape, than_data_shape, select_output_shape):
+        graph = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges,
+                                       update_nodes_attributes=[('else_data', {'shape': np.array(else_data_shape),
+                                                                               'value': np.zeros(else_data_shape, dtype=np.float)}),
+                                                                ('than_data', {'shape': np.array(than_data_shape),
+                                                                               'value': np.zeros(than_data_shape, dtype=np.float)}),
+                                                                ('select_output', {'shape': np.array(select_output_shape),
+                                                                                   'value': np.zeros(select_output_shape, dtype=np.float)})
+                                                                ])
+
+        # We should propagate shapes and values
+        graph_ref = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges,
+                                           update_nodes_attributes=[
+                                               ('else_data', {'shape': np.array(else_data_shape),
+                                                              'value': np.zeros(else_data_shape, dtype=np.float)}),
+                                               ('than_data', {'shape': np.array(than_data_shape),
+                                                              'value': np.zeros(than_data_shape, dtype=np.float)}),
+                                               ('select_output', {'shape': np.array(select_output_shape), 'value': np.zeros(select_output_shape)})])
+
+        tested_class = Select(graph=graph, attrs={})
+
+        node = Node(graph, 'select')
+        tested_class.infer(node)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'select_output', check_op_attrs=True)
+        self.assertTrue(flag, resp)
index 5f8a0a4..613622c 100644 (file)
@@ -15,6 +15,7 @@
 """
 
 import math
+import numpy as np
 
 from mo.front.common.layout import get_batch_dim, get_features_dim, get_height_dim, get_width_dim, shape_for_layout
 from mo.graph.graph import Node, Graph
@@ -63,4 +64,4 @@ class UpsampleOp(Op):
         else:
             assert node.in_node(1).value is not None
             # generic output shape calculation to support 5D input shape case
-            node.out_node().shape = input_shape * node.in_node(1).value
+            node.out_node().shape = np.array(input_shape * node.in_node(1).value).astype(np.int64)
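
For example (values taken from the tests below), the int64 cast truncates the products:

```python
import numpy as np

input_shape = np.array([1, 5, 227, 227])
scales = np.array([1., 1., 2.5, 1.5])
print(np.array(input_shape * scales).astype(np.int64))  # [  1   5 567 340]
```

Note that 567.5 and 340.5 truncate toward zero, matching the expected test shapes.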
index 2882204..0f29ffb 100644 (file)
@@ -19,6 +19,7 @@ import unittest
 import numpy as np
 
 from extensions.ops.upsample import UpsampleOp
+from generator import generator, generate
 from mo.graph.graph import Node
 from mo.utils.unittest.graph import build_graph
 
@@ -29,30 +30,40 @@ nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'},
                     }
 
 
+@generator
 class TestUpsampleOp(unittest.TestCase):
-    def test_upsample_with_scales_infer(self):
+    @generate(*[
+        (np.array([1., 1., 2., 2.]), np.array([1, 3, 227, 227]), np.array([1, 3, 454, 454], dtype=np.int64)),
+        (np.array([1., 1., 2.5, 1.5]), np.array([1, 5, 227, 227]), np.array([1, 5, 567, 340], dtype=np.int64)),
+        (np.array([1., 1., 1.3, 0.7]), np.array([1, 14, 1023, 713]), np.array([1, 14, 1329, 499], dtype=np.int64)),
+    ])
+    def test_upsample_with_scales_infer(self, scales, input_shape, expected_shape):
         graph = build_graph(nodes_attributes,
                             [('node_1', 'upsample'),
                              ('upsample', 'node_3'),
                              ('node_3', 'op_output')
                              ],
                             {'node_3': {'shape': None},
-                             'node_1': {'shape': np.array([1, 3, 227, 227])},
+                             'node_1': {'shape': input_shape},
                              'upsample': {'mode': 'linear',
-                                          'height_scale': 2.,
-                                          'width_scale': 2.}
+                                          'height_scale': scales[2],
+                                          'width_scale': scales[3]}
                              })
 
         graph.graph['layout'] = 'NCHW'
         upsample_node = Node(graph, 'upsample')
         UpsampleOp.upsample_infer(upsample_node)
-        exp_shape = np.array([1, 3, 454, 454])
         res_shape = graph.node['node_3']['shape']
-        for i in range(0, len(exp_shape)):
-            self.assertEqual(exp_shape[i], res_shape[i])
+        for i in range(0, len(expected_shape)):
+            self.assertEqual(expected_shape[i], res_shape[i])
 
-    def test_upsample_with_second_input_infer(self):
-        nodes_attributes['scales'] = {'kind': 'data', 'value': np.array([1., 1., 2., 2.])}
+    @generate(*[
+        (np.array([1., 1., 2., 2.]), np.array([1, 3, 227, 227]), np.array([1, 3, 454, 454], dtype=np.int64)),
+        (np.array([1., 1., 2.5, 1.5]), np.array([1, 5, 227, 227]), np.array([1, 5, 567, 340], dtype=np.int64)),
+        (np.array([1., 1., 1.3, 0.7]), np.array([1, 14, 1023, 713]), np.array([1, 14, 1329, 499], dtype=np.int64)),
+    ])
+    def test_upsample_with_second_input_infer(self, scales, input_shape, expected_shape):
+        nodes_attributes['scales'] = {'kind': 'data', 'value': scales}
         graph = build_graph(nodes_attributes,
                             [('node_1', 'upsample'),
                              ('scales', 'upsample'),
@@ -60,7 +71,7 @@ class TestUpsampleOp(unittest.TestCase):
                              ('node_3', 'op_output')
                              ],
                             {'node_3': {'shape': None},
-                             'node_1': {'shape': np.array([1, 3, 227, 227])},
+                             'node_1': {'shape': input_shape},
                              'upsample': {'mode': 'linear',
                                           'height_scale': None,
                                           'width_scale': None}
@@ -69,7 +80,6 @@ class TestUpsampleOp(unittest.TestCase):
         graph.graph['layout'] = 'NCHW'
         upsample_node = Node(graph, 'upsample')
         UpsampleOp.upsample_infer(upsample_node)
-        exp_shape = np.array([1, 3, 454, 454])
         res_shape = graph.node['node_3']['shape']
-        for i in range(0, len(exp_shape)):
-            self.assertEqual(exp_shape[i], res_shape[i])
+        for i in range(0, len(expected_shape)):
+            self.assertEqual(expected_shape[i], res_shape[i])
index e88198e..4917c16 100644 (file)
@@ -95,3 +95,18 @@ def tf_window_op_pad_infer(input, window, stride, auto_pad, is_deconv=False):
         pad = None
         output = None
     return (pad, output)
+
+
+def broadcast_shape(first_shape, second_shape):
+    """
+    Perform NumPy-style broadcasting of two shapes, which may have different ranks
+    """
+    shape = first_shape if len(first_shape) > len(second_shape) else second_shape
+    new_shape = int64_array(shape)
+    for i in range(len(shape)):
+        a_val = first_shape[-i - 1] if i < len(first_shape) else 1
+        b_val = second_shape[-i - 1] if i < len(second_shape) else 1
+        assert a_val == 1 or b_val == 1 or a_val == b_val, "Input shapes do not broadcast"
+        new_val = b_val if a_val == 1 else a_val
+        new_shape[-i - 1] = new_val
+    return new_shape
\ No newline at end of file
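
The new helper mirrors NumPy's broadcasting rules on shapes alone; a quick standalone cross-check against `np.broadcast`:

```python
import numpy as np

for a, b in [([5, 6], [1]), ([15, 3, 5], [15, 1, 5]), ([2, 3, 4, 5], [1, 3, 1, 5])]:
    # np.broadcast computes the broadcast result shape for the given operands
    print(np.broadcast(np.empty(a), np.empty(b)).shape)
# (5, 6)
# (15, 3, 5)
# (2, 3, 4, 5)
```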
index 1ab415d..e95c429 100644 (file)
@@ -54,7 +54,23 @@ class LogSoftmax(Op):
             'infer': None,
             'kind': 'op',
             'axis': 1,
-            'type': None,  # the operation will be replaced with a Log(Softmax(x)) sub-graph
+            'type': None,  # the operation will be replaced with an x - Log(ReduceSum(Exp(x), axis)) sub-graph
+            'op': __class__.op,
+            'in_ports_count': 1,
+            'out_ports_count': 1,
+        }, attrs)
+
+class LogSoftmaxONNX(Op):
+    op = 'LogSoftmaxONNX'
+    enabled = False
+
+    def __init__(self, graph: Graph, attrs: dict):
+        super().__init__(graph, {
+            'infer': None,
+            'kind': 'op',
+            'axis': 1,
+            'type': None,  # the operation will be replaced with a
+                           # Reshape(LogSoftmax(FlattenONNX(x, axis), 1), x.shape) sub-graph
             'op': __class__.op,
             'in_ports_count': 1,
             'out_ports_count': 1,
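
The updated comment relies on the identity log(softmax(x)) = x - log(sum(exp(x), axis)); a quick NumPy check:

```python
import numpy as np

x = np.random.rand(2, 5)
reference = np.log(np.exp(x) / np.exp(x).sum(axis=1, keepdims=True))
rewritten = x - np.log(np.exp(x).sum(axis=1, keepdims=True))
assert np.allclose(reference, rewritten)
```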
index 2d7da0c..1349a6d 100644 (file)
@@ -39,12 +39,9 @@ def extend_mask_according_ellipsis(ellipsis_mask, shrink_axis_mask, length_outpu
     return attr_mask_extended
 
 
-def permute_array_with_ellipsis(node: Node, array: np.array, ins_value: int):
+def permute_array(node: Node, array: np.array):
     """
-    This function permutes masks according to permutation parameter. Several cases should be processed:
-    * Some dimensions can be omitted in mask according to ellipsis mask
-    * Mask length can be less than length of output dimensions plus shrinked dimensions
-    * Mask have the same or more length than output
+    This function permutes masks according to the permutation parameter. The mask must be at least as long as the output
     """
     attr_mask_extended = list(array)
 
@@ -52,32 +49,20 @@ def permute_array_with_ellipsis(node: Node, array: np.array, ins_value: int):
     if len(node.in_port(0).data.get_shape()) < 4 and len(node.out_port(0).data.get_shape()) < 4:
         return attr_mask_extended
 
-    # Length of mask is less than length of output ()plus shrink dimensions then we should extend it before permute
-    if len(attr_mask_extended) < len(node.out_port(0).data.get_shape()) + np.count_nonzero(node.shrink_axis_mask):
-        # ellipsis is set, add dimensions in right place otherwise insert in the end
-        attr_mask_extended = extend_mask_according_ellipsis(node.ellipsis_mask, node.shrink_axis_mask,
-                                                            len(node.out_port(0).data.get_shape()),
-                                                            attr_mask_extended, ins_value)
-
-        # permute extended mask
-        perm = PermuteAttrs.get_nhwc_to_nchw_permutation(len(attr_mask_extended))
-        attr_mask_extended = int64_array(attr_mask_extended)[perm.perm]
-        return attr_mask_extended
-    else:
-        perm_len = len(node.out_port(0).data.get_shape()) + np.count_nonzero(node.shrink_axis_mask)
-        perm = PermuteAttrs.get_nhwc_to_nchw_permutation(perm_len)
-        perm_list = list(perm.perm)
-        # if mask length is more than output, just add tail that will not be permuted to avoid error
-        for i in range(perm_len, len(attr_mask_extended)):
-            perm_list.append(i)
-        return int64_array(attr_mask_extended)[int64_array(perm_list)]
+    perm_len = len(node.out_port(0).data.get_shape()) + np.count_nonzero(node.shrink_axis_mask)
+    perm = PermuteAttrs.get_nhwc_to_nchw_permutation(perm_len)
+    perm_list = list(perm.perm)
+    # if mask length is more than output, just add tail that will not be permuted to avoid error
+    for i in range(perm_len, len(attr_mask_extended)):
+        perm_list.append(i)
+    return int64_array(attr_mask_extended)[int64_array(perm_list)]
 
 
 def permute_masks(node: Node, permutation: PermuteAttrs.Permutation, attr: str):
     if not node.has_valid(attr):
         return None
 
-    node[attr] = permute_array_with_ellipsis(node, node[attr], 0)
+    node[attr] = permute_array(node, node[attr])
     return node[attr]
 
 
@@ -108,6 +93,37 @@ class StridedSlice(Op):
     def infer(node: Node):
         tf_strided_slice_infer(node)
 
+        out_shape = node.out_port(0).data.get_shape()
+        assert out_shape is not None, \
+            'Output shape was not calculated for node {}'.format(node.name)
+        # extend inputs according to ellipsis mask and/or input_shape
+        for i_port in node.in_ports().values():
+            if i_port.idx == 0 or i_port.disconnected():
+                continue
+            old_value = i_port.data.get_value()
+            # additional check for a non-const input
+            # an error is raised in shape inference if a non-const input is added;
+            # this is a paranoid check in case the shape inference implementation changes
+            assert old_value is not None, \
+                '{} input of {} node is not constant: \'value\' attribute for edge ' \
+                'contains None'.format(i_port.idx, node.name)
+            # insert 0 for begin and end and 1 for stride
+            new_value = int64_array(extend_mask_according_ellipsis(node.ellipsis_mask, node.shrink_axis_mask,
+                                                                   len(out_shape), list(old_value),
+                                                                   int(i_port.idx == 3)))
+            # set_value additionally set_shape and propagate value to Const node
+            if np.all(new_value != old_value):
+                i_port.data.set_value(new_value)
+
+        # extend masks before removing ellipsis
+        for attr in ["new_axis_mask", "shrink_axis_mask", "begin_mask", "end_mask", "ellipsis_mask"]:
+            node[attr] = int64_array(extend_mask_according_ellipsis(node.ellipsis_mask, node.shrink_axis_mask,
+                                                                    len(out_shape), list(node[attr]), 0))
+
+        # masks and inputs are now extended to simplify future transformations, so the ellipsis mask can be cleared
+        idx = np.nonzero(node.ellipsis_mask)
+        node.ellipsis_mask[idx] = 0
+
         if node.graph.graph['layout'] == 'NHWC' and node.out_port(0).data.get_value() is None:
             PermuteAttrs.create_permute_attrs(node, attrs=[('shrink_axis_mask', 'input:0', permute_masks),
                                                            ('new_axis_mask', 'input:0', permute_masks),
@@ -115,7 +131,7 @@ class StridedSlice(Op):
                                                            ('begin_mask', 'input:0', permute_masks),
                                                            ('end_mask', 'input:0', permute_masks),
                                                            ])
-            # extend inputs according to ellipsis mask
+            # permute inputs
             in_shape = node.in_port(0).get_source().data.get_shape()
             assert in_shape is not None, \
                 'Input shape is unknown for 0 input of node {}'.format(node.name)
@@ -124,25 +140,6 @@ class StridedSlice(Op):
                 for i_port in node.in_ports().values():
                     if i_port.idx == 0 or i_port.disconnected():
                         continue
-                    old_value = i_port.data.get_value()
-                    # additional check for non-const input
-                    # error will be return in shape inference if non-const will be added
-                    # it is paranoid check for case if shape inference will be changed
-                    assert old_value is not None, \
-                        '{} input of {} node is not constant: \'value\' attribute for edge ' + \
-                        'contains None'.format(i_port.idx, node.name)
-                    # insert 0 for begin and end and 1 for stride
-                    new_value = permute_array_with_ellipsis(node, old_value, int(i_port.idx == 3))
+                    new_value = permute_array(node, i_port.data.get_value())
                     # set_value additionally set_shape and propagate value to Const node
                     i_port.data.set_value(new_value)
-
-            # extend masks before removing ellipsis
-            if np.any(node.ellipsis_mask):
-                for attr in ["new_axis_mask", "shrink_axis_mask", "begin_mask", "end_mask"]:
-                    node[attr] = int64_array(extend_mask_according_ellipsis(node.ellipsis_mask, node.shrink_axis_mask,
-                                                                            len(node.out_port(0).data.get_shape()),
-                                                                            list(node[attr]), 0))
-
-            # due to permutation from nhwc to nchw we will extend all masks and inputs
-            idx = np.nonzero(node.ellipsis_mask)
-            node.ellipsis_mask[idx] = 0
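
A simplified standalone sketch of what `extend_mask_according_ellipsis` does in the common case (shrink-axis bookkeeping omitted; the output matches `test_extend_mask` below): entries with `ins_value` (0 for masks/begin/end, 1 for stride) are inserted at the ellipsis position until the mask covers every output dimension.

```python
import numpy as np

def extend_mask(ellipsis_mask, mask, target_len, ins_value=0):
    mask = list(mask)
    if np.any(ellipsis_mask):
        pos = int(np.nonzero(ellipsis_mask)[0][0])
        while len(mask) < target_len:   # grow at the ellipsis position
            mask.insert(pos + 1, ins_value)
    else:
        while len(mask) < target_len:   # no ellipsis: pad at the end
            mask.append(ins_value)
    return mask

print(extend_mask(np.array([1, 0]), [0, 1], 4))  # [0, 0, 0, 1]
```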
index 64821ce..d8fa18c 100644 (file)
@@ -20,7 +20,7 @@ import numpy as np
 from mo.front.common.partial_infer.utils import int64_array
 from mo.graph.graph import Node
 from mo.ops.op import PermuteAttrs
-from mo.ops.strided_slice import extend_mask_according_ellipsis, permute_masks, permute_array_with_ellipsis, \
+from mo.ops.strided_slice import extend_mask_according_ellipsis, permute_masks, permute_array, \
     StridedSlice
 from mo.utils.error import Error
 from mo.utils.unittest.graph import build_graph
@@ -124,9 +124,15 @@ class TestPermutationStridedSlice(unittest.TestCase):
                              })
 
         slice_node = Node(graph, 'strided_slice')
+        slice_node['begin_mask'] = int64_array(extend_mask_according_ellipsis(slice_node['ellipsis_mask'],
+                                                                              slice_node['shrink_axis_mask'], 4,
+                                                                              list(slice_node['begin_mask']), 0))
         permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 3, 1, 2], inv=[0, 2, 3, 1]), 'begin_mask')
         self.assertTrue(np.array_equal(slice_node.begin_mask, np.array([1, 0, 0, 0])))
 
+        slice_node['end_mask'] = int64_array(extend_mask_according_ellipsis(slice_node['ellipsis_mask'],
+                                                                            slice_node['shrink_axis_mask'], 4,
+                                                                            list(slice_node['end_mask']), 0))
         permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 3, 1, 2], inv=[0, 2, 3, 1]), 'end_mask')
         self.assertTrue(np.array_equal(slice_node.end_mask, np.array([0, 0, 1, 0])))
 
@@ -194,9 +200,15 @@ class TestPermutationStridedSlice(unittest.TestCase):
                              })
 
         slice_node = Node(graph, 'strided_slice')
+        slice_node['begin_mask'] = int64_array(extend_mask_according_ellipsis(slice_node['ellipsis_mask'],
+                                                                              slice_node['shrink_axis_mask'], 5,
+                                                                              list(slice_node['begin_mask']), 0))
         permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 4, 1, 2, 3], inv=[0, 2, 3, 4, 1]), 'begin_mask')
         self.assertTrue(np.array_equal(slice_node.begin_mask, np.array([1, 0, 0, 0, 0])))
 
+        slice_node['end_mask'] = int64_array(extend_mask_according_ellipsis(slice_node['ellipsis_mask'],
+                                                                            slice_node['shrink_axis_mask'], 5,
+                                                                            list(slice_node['end_mask']), 0))
         permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 4, 1, 2, 3], inv=[0, 2, 3, 4, 1]), 'end_mask')
         self.assertTrue(np.array_equal(slice_node.end_mask, np.array([0, 0, 1, 0, 0])))
 
@@ -239,9 +251,15 @@ class TestPermutationStridedSlice(unittest.TestCase):
                              })
 
         slice_node = Node(graph, 'strided_slice')
+        slice_node['begin_mask'] = int64_array(extend_mask_according_ellipsis(slice_node['ellipsis_mask'],
+                                                                              slice_node['shrink_axis_mask'], 3,
+                                                                              list(slice_node['begin_mask']), 0))
         permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 3, 1, 2], inv=[0, 2, 3, 1]), 'begin_mask')
         self.assertTrue(np.array_equal(slice_node.begin_mask, np.array([1, 0, 0, 0])))
 
+        slice_node['end_mask'] = int64_array(extend_mask_according_ellipsis(slice_node['ellipsis_mask'],
+                                                                            slice_node['shrink_axis_mask'], 3,
+                                                                            list(slice_node['end_mask']), 0))
         permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 3, 1, 2], inv=[0, 2, 3, 1]), 'end_mask')
         self.assertTrue(np.array_equal(slice_node.end_mask, np.array([0, 0, 1, 0])))
 
@@ -268,9 +286,15 @@ class TestPermutationStridedSlice(unittest.TestCase):
                              })
 
         slice_node = Node(graph, 'strided_slice')
+        slice_node['begin_mask'] = int64_array(extend_mask_according_ellipsis(slice_node['ellipsis_mask'],
+                                                                              slice_node['shrink_axis_mask'], 4,
+                                                                              list(slice_node['begin_mask']), 0))
         permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 3, 1, 2], inv=[0, 2, 3, 1]), 'begin_mask')
         self.assertTrue(np.array_equal(slice_node.begin_mask, np.array([0, 0, 0, 0])))
 
+        slice_node['end_mask'] = int64_array(extend_mask_according_ellipsis(slice_node['ellipsis_mask'],
+                                                                            slice_node['shrink_axis_mask'], 4,
+                                                                            list(slice_node['end_mask']), 0))
         permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 3, 1, 2], inv=[0, 2, 3, 1]), 'end_mask')
         self.assertTrue(np.array_equal(slice_node.end_mask, np.array([1, 0, 0, 0])))
 
@@ -313,6 +337,88 @@ class TestPermutationStridedSlice(unittest.TestCase):
         self.assertTrue(np.array_equal(end_node.value, np.array([1, 0, 0, 0])))
         self.assertTrue(np.array_equal(stride_node.value, np.array([1, 2, 1, 1])))
 
+    def test_ss_shrink_only_short_ellipsis(self):
+        graph = build_graph(nodes_attributes,
+                            [('input', 'data_1'),
+                             ('data_1', 'strided_slice', {'in': 0}),
+                             ('begin', 'begin_data'),
+                             ('begin_data', 'strided_slice', {'in': 1}),
+                             ('end', 'end_data'),
+                             ('end_data', 'strided_slice', {'in': 2}),
+                             ('stride', 'stride_data'),
+                             ('stride_data', 'strided_slice', {'in': 3}),
+                             ('strided_slice', 'data_2')],
+                            {'data_1': {'shape': np.array([1, 127, 1, 54]), 'value': None},
+                             'begin': {'value': [0, 0, 0], 'shape': [3]},
+                             'end': {'value': [0, 0, 24], 'shape': [3]},
+                             'stride': {'value': [1, 1, 1], 'shape': [3]},
+                             'begin_data': {'value': [0, 0, 0], 'shape': [3]},
+                             'end_data': {'value': [0, 0, 24], 'shape': [3]},
+                             'stride_data': {'value': [1, 1, 1], 'shape': [3]},
+                             'strided_slice': {'begin_mask': np.array([0, 0, 1]), 'end_mask': np.array([0, 0, 1]),
+                                               'new_axis_mask': np.array([0, 0, 0]), 'shrink_axis_mask': [0, 1, 0],
+                                               'ellipsis_mask': np.array([1, 0, 0])},
+                             'data_2': {'shape': None}
+                             }, nodes_with_edges_only=True)
+        graph.graph['layout'] = 'NCHW'
+
+        slice_node = Node(graph, 'strided_slice')
+        begin_node = Node(graph, 'begin')
+        end_node = Node(graph, 'end')
+        stride_node = Node(graph, 'stride')
+        out_node = Node(graph, 'data_2')
+        StridedSlice.infer(slice_node)
+        self.assertTrue(np.array_equal(slice_node.begin_mask, np.array([0, 0, 0, 1])))
+        self.assertTrue(np.array_equal(slice_node.end_mask, np.array([0, 0, 0, 1])))
+        self.assertTrue(np.array_equal(slice_node.shrink_axis_mask, np.array([0, 0, 1, 0])))
+        self.assertTrue(np.array_equal(slice_node.new_axis_mask, np.array([0, 0, 0, 0])))
+        self.assertTrue(np.array_equal(slice_node.ellipsis_mask, np.array([0, 0, 0, 0])))
+        self.assertTrue(np.array_equal(begin_node.value, np.array([0, 0, 0, 0])))
+        self.assertTrue(np.array_equal(end_node.value, np.array([0, 0, 0, 24])))
+        self.assertTrue(np.array_equal(stride_node.value, np.array([1, 1, 1, 1])))
+        self.assertTrue(np.array_equal(out_node.shape, np.array([1, 127, 24])))
+
+    def test_ss_shrink_only_short(self):
+        graph = build_graph(nodes_attributes,
+                            [('input', 'data_1'),
+                             ('data_1', 'strided_slice', {'in': 0}),
+                             ('begin', 'begin_data'),
+                             ('begin_data', 'strided_slice', {'in': 1}),
+                             ('end', 'end_data'),
+                             ('end_data', 'strided_slice', {'in': 2}),
+                             ('stride', 'stride_data'),
+                             ('stride_data', 'strided_slice', {'in': 3}),
+                             ('strided_slice', 'data_2')],
+                            {'data_1': {'shape': np.array([1, 1, 127, 54]), 'value': None},
+                             'begin': {'value': [0, 0, 0], 'shape': [3]},
+                             'end': {'value': [0, 0, 0], 'shape': [3]},
+                             'stride': {'value': [1, 1, 1], 'shape': [3]},
+                             'begin_data': {'value': [0, 0, 0], 'shape': [3]},
+                             'end_data': {'value': [0, 0, 0], 'shape': [3]},
+                             'stride_data': {'value': [1, 1, 1], 'shape': [3]},
+                             'strided_slice': {'begin_mask': np.array([0, 0, 0]), 'end_mask': np.array([0, 0, 0]),
+                                               'new_axis_mask': np.array([0, 0, 0]), 'shrink_axis_mask': [0, 1, 0],
+                                               'ellipsis_mask': np.array([0, 0, 0])},
+                             'data_2': {'shape': None}
+                             }, nodes_with_edges_only=True)
+        graph.graph['layout'] = 'NCHW'
+
+        slice_node = Node(graph, 'strided_slice')
+        begin_node = Node(graph, 'begin')
+        end_node = Node(graph, 'end')
+        stride_node = Node(graph, 'stride')
+        out_node = Node(graph, 'data_2')
+        StridedSlice.infer(slice_node)
+        self.assertTrue(np.array_equal(slice_node.begin_mask, np.array([0, 0, 0, 0])))
+        self.assertTrue(np.array_equal(slice_node.end_mask, np.array([0, 0, 0, 0])))
+        self.assertTrue(np.array_equal(slice_node.shrink_axis_mask, np.array([0, 1, 0, 0])))
+        self.assertTrue(np.array_equal(slice_node.new_axis_mask, np.array([0, 0, 0, 0])))
+        self.assertTrue(np.array_equal(slice_node.ellipsis_mask, np.array([0, 0, 0, 0])))
+        self.assertTrue(np.array_equal(begin_node.value, np.array([0, 0, 0, 0])))
+        self.assertTrue(np.array_equal(end_node.value, np.array([0, 0, 0, 0])))
+        self.assertTrue(np.array_equal(stride_node.value, np.array([1, 1, 1, 1])))
+        self.assertTrue(np.array_equal(out_node.shape, np.array([1, 127, 54])))
+
     def test_permute_begin_end_ellipsis_new(self):
         # Testing constant path case
         graph = build_graph(nodes_attributes,
@@ -339,9 +445,15 @@ class TestPermutationStridedSlice(unittest.TestCase):
                              })
 
         slice_node = Node(graph, 'strided_slice')
+        slice_node['begin_mask'] = int64_array(extend_mask_according_ellipsis(slice_node['ellipsis_mask'],
+                                                                              slice_node['shrink_axis_mask'], 5,
+                                                                              list(slice_node['begin_mask']), 0))
         permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 4, 1, 2, 3], inv=[0, 2, 3, 4, 1]), 'begin_mask')
         self.assertTrue(np.array_equal(slice_node.begin_mask, np.array([1, 3, 2, 0, 0])))
 
+        slice_node['end_mask'] = int64_array(extend_mask_according_ellipsis(slice_node['ellipsis_mask'],
+                                                                            slice_node['shrink_axis_mask'], 5,
+                                                                            list(slice_node['end_mask']), 0))
         permute_masks(slice_node, PermuteAttrs.Permutation(perm=[0, 4, 1, 2, 3], inv=[0, 2, 3, 4, 1]), 'end_mask')
         self.assertTrue(np.array_equal(slice_node.end_mask, np.array([1, 3, 2, 0, 0])))
 
@@ -371,12 +483,16 @@ class TestPermutationStridedSlice(unittest.TestCase):
                              })
 
         slice_node = Node(graph, 'strided_slice')
-        slice_node.in_node(1).value = permute_array_with_ellipsis(slice_node,
-                                                                  slice_node.in_node(1).value, 0)
+        slice_node.in_node(1).value = int64_array(extend_mask_according_ellipsis(slice_node['ellipsis_mask'],
+                                                                                 slice_node['shrink_axis_mask'], 5,
+                                                                                 list(slice_node.in_node(1).value), 0))
+        slice_node.in_node(1).value = permute_array(slice_node, slice_node.in_node(1).value)
         self.assertTrue(np.array_equal(slice_node.in_node(1).value, np.array([0, 2, 1, 0, 0])))
 
-        slice_node.in_node(2).value = permute_array_with_ellipsis(slice_node,
-                                                                  slice_node.in_node(2).value, 0)
+        slice_node.in_node(2).value = int64_array(extend_mask_according_ellipsis(slice_node['ellipsis_mask'],
+                                                                                 slice_node['shrink_axis_mask'], 5,
+                                                                                 list(slice_node.in_node(2).value), 0))
+        slice_node.in_node(2).value = permute_array(slice_node, slice_node.in_node(2).value)
         self.assertTrue(np.array_equal(slice_node.in_node(2).value, np.array([1, 3, 2, 0, 0])))
 
     def test_extend_mask(self):
@@ -388,6 +504,16 @@ class TestPermutationStridedSlice(unittest.TestCase):
         mask = extend_mask_according_ellipsis(ellipsis_mask, shrink_mask, length_shape, list(mask), ins_value)
         self.assertEquals(mask, [0, 0, 0, 1])
 
+    def test_extend_mask_twice(self):
+        ellipsis_mask = int64_array([1, 0])
+        shrink_mask = int64_array([0, 0])
+        length_shape = 4
+        mask = int64_array([0, 1])
+        ins_value = 0
+        mask = extend_mask_according_ellipsis(ellipsis_mask, shrink_mask, length_shape, list(mask), ins_value)
+        mask = extend_mask_according_ellipsis(ellipsis_mask, shrink_mask, length_shape, list(mask), ins_value)
+        self.assertEquals(mask, [0, 0, 0, 1])
+
     def test_extend_mask_shrinked(self):
         ellipsis_mask = int64_array([1, 0])
         shrink_mask = int64_array([0, 1])
diff --git a/ngraph b/ngraph
index eaa6d35..a909d3e 160000 (submodule)
--- a/ngraph
+++ b/ngraph
@@ -1 +1 @@
-Subproject commit eaa6d35b7ed415e02b2401b528f31960123e5b71
+Subproject commit a909d3e0b6d12036be4d913e43b18408bd8bf0b6
diff --git a/tests/fuzz/CMakeLists.txt b/tests/fuzz/CMakeLists.txt
new file mode 100644 (file)
index 0000000..8ddccb0
--- /dev/null
@@ -0,0 +1,41 @@
+# Copyright (C) 2020 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+if(ENABLE_DOCKER)
+    cmake_minimum_required(VERSION 3.3 FATAL_ERROR)
+else()
+    if (APPLE)
+        # due to https://cmake.org/cmake/help/v3.12/policy/CMP0068.html
+        cmake_minimum_required(VERSION 3.9 FATAL_ERROR)
+    else()
+        cmake_minimum_required(VERSION 3.7.2 FATAL_ERROR)
+    endif()
+endif()
+
+set(OpenVINO_MAIN_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../..)
+set(CMAKE_MODULE_PATH "${OpenVINO_MAIN_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
+
+if (CMAKE_BUILD_TYPE STREQUAL "")
+    message(STATUS "CMAKE_BUILD_TYPE not defined, 'Release' will be used")
+    set(CMAKE_BUILD_TYPE "Release")
+endif()
+
+if (NOT TARGET IE::inference_engine)
+    find_package(InferenceEngineDeveloperPackage REQUIRED)
+endif()
+
+include(sanitizer)
+include(fuzzing)
+
+if (NOT ENABLE_FUZZING)
+    message(STATUS
+        "Fuzz tests will be built without fuzzer support. You can use them to\n"
+        "run crash reproducers and corpus inputs. Configure with ENABLE_FUZZING=ON\n"
+        "to build with a fuzzer.")
+else()
+    enable_fuzzing()
+endif()
+
+add_subdirectory(fuzz-testhelper)
+add_subdirectory(src)
diff --git a/tests/fuzz/README.md b/tests/fuzz/README.md
new file mode 100644 (file)
index 0000000..5f16f9f
--- /dev/null
@@ -0,0 +1,31 @@
+# Fuzzing Test Suite
+
+This test suite contains [fuzzing](https://en.wikipedia.org/wiki/Fuzzing) tests for [libFuzzer](https://llvm.org/docs/LibFuzzer.html) fuzzing engine.
+
+## Getting Started
+
+Each fuzzing test is an executable. It can run fuzzing to search for new
+failures and save a reproducer to a file. You can later run a fuzzing test with a
+reproducer to debug the failure it found.
+
+## Prerequisites
+
+There are no special prerequisites to reproduce and debug failures.
+
+To run fuzzing you will need [LLVM](https://apt.llvm.org/) components:
+- Clang and co.
+- libFuzzer
+- lld (linker)
+- libc++
+
+## Reproducing Failure Found by Fuzzing
+
+1. Build the `fuzz` test target:
+```bash
+cmake -DENABLE_TESTS=ON .. && ninja fuzz
+```
+
+2. Run the fuzzing test, passing a failure reproducer as a command-line argument:
+```bash
+./read_network-fuzzer crash-reproducer
+```
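
To run actual fuzzing (rather than replaying a reproducer), pass a corpus directory plus standard libFuzzer flags, for example:

```bash
mkdir -p corpus
./read_network-fuzzer -max_total_time=60 ./corpus
```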
diff --git a/tests/fuzz/fuzz-testhelper/CMakeLists.txt b/tests/fuzz/fuzz-testhelper/CMakeLists.txt
new file mode 100644 (file)
index 0000000..eefa61b
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) 2018-2020 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+set(TARGET_NAME fuzz-testhelper)
+
+file(
+    GLOB SRC_FILES
+    ${CMAKE_CURRENT_SOURCE_DIR}/*-testhelper.cc)
+
+add_library(
+    ${TARGET_NAME} STATIC
+    ${SRC_FILES})
+
+target_include_directories(${TARGET_NAME} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}")
diff --git a/tests/fuzz/fuzz-testhelper/main-testhelper.cc b/tests/fuzz/fuzz-testhelper/main-testhelper.cc
new file mode 100644 (file)
index 0000000..d6a0e94
--- /dev/null
@@ -0,0 +1,143 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+/*!
+\file
+\brief Replacement of libFuzzer main entrypoint.
+
+[libFuzzer](https://llvm.org/docs/LibFuzzer.html), part of the LLVM toolchain,
+implements a `main` entry point which runs in-process fuzzing. This file provides
+a simplified `main` entry point implementation which is limited to processing
+the inputs.
+*/
+
+#if !defined(WITH_LIBFUZZER)
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <cstdlib>
+#include <ctime>
+#include <fstream>
+#include <iostream>
+#include <map>
+#include <sstream>
+#include <string>
+#include <vector>
+#ifdef WIN32
+#include <windows.h>
+#else  // WIN32
+#include <dirent.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#endif  // WIN32
+
+/// Fuzzing target
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size);
+
+/// Get basename from path
+std::string basename(std::string const& path) {
+    std::string str = std::string("/") + path;
+    return str.substr(str.find_last_of("/\\") + 1);
+}
+
+/// Get directory content
+std::vector<std::string> list_dir(std::string const& path) {
+    std::vector<std::string> res;
+#ifdef WIN32
+    WIN32_FIND_DATA find_data;
+    HANDLE find_handle;
+    find_handle = FindFirstFile((path + "\\*").c_str(), &find_data);
+    if (INVALID_HANDLE_VALUE != find_handle) {
+        do {
+            std::string filename(find_data.cFileName);
+            if (filename == "." || filename == "..") continue;
+            res.push_back(path + "\\" + filename);
+        } while (FindNextFile(find_handle, &find_data));
+        FindClose(find_handle);
+    }
+#else   // WIN32
+    DIR* dir = opendir(path.c_str());
+    if (dir) {
+        struct dirent* entry;
+        while (NULL != (entry = readdir(dir))) {
+            if (DT_REG == entry->d_type) res.push_back(path + "/" + std::string(entry->d_name));
+        }
+        closedir(dir);
+        dir = NULL;
+    }
+#endif  // WIN32
+    return res;
+}
+
+// Check if file by given path is a directory.
+bool is_dir(std::string const& path) {
+#ifdef WIN32
+    return 0 != (FILE_ATTRIBUTE_DIRECTORY & GetFileAttributes(path.c_str()));
+#else   // WIN32
+    struct stat stat_res = {0};
+    stat(path.c_str(), &stat_res);
+    return S_IFDIR & stat_res.st_mode;
+#endif  // WIN32
+}
+
+// Print usage help
+void print_usage(const std::string& program_name, std::ostream* os) {
+    *os << "Usage: " << program_name << " INPUT" << std::endl;
+}
+
+/// Main entrypoint
+extern "C" int main(int argc, char* argv[]) {
+    std::string program_name = basename(argv[0]);
+
+    // Parse command line options
+    std::vector<std::string> positional;
+    for (int i = 1; i < argc; i++) {
+        std::string arg(argv[i]);
+        // Ignore all options but positional arguments
+        if ('-' == arg[0]) {
+            std::cout << "Ignoring option " << arg << std::endl;
+            continue;
+        }
+        positional.push_back(arg);
+    }
+    if (1 != positional.size()) {
+        std::cerr << program_name << ": error: wrong number of positional arguments." << std::endl;
+        print_usage(program_name, &std::cerr);
+        return -1;
+    }
+
+    // Run input files through test function
+    std::vector<std::string> input_files;
+    if (is_dir(positional[0])) {
+        std::cout << "Loading corpus dir: " << positional[0] << std::endl;
+        input_files = list_dir(positional[0]);
+    } else {
+        std::cout << "Running: " << positional[0] << std::endl;
+        input_files.push_back(positional[0]);
+    }
+    time_t time_total = 0;
+    for (auto const& path : input_files) {
+        std::ifstream test_file(path, std::ios::binary);
+        if (!test_file) {
+            std::cerr << program_name << ": error: failed to open \"" << path << "\"" << std::endl;
+            return -2;
+        }
+        std::ostringstream data;
+        data << test_file.rdbuf();
+        test_file.close();
+
+        time_t time_start = time(nullptr);
+        int fuzzer_res;
+        if (0 != (fuzzer_res = LLVMFuzzerTestOneInput((const uint8_t*)data.str().c_str(), data.str().size()))) {
+            std::cerr << program_name << ": error: testing \"" << path << "\" fails" << std::endl;
+            return fuzzer_res;
+        }
+        time_total += time(nullptr) - time_start;
+    }
+    std::cout << "Executed " << input_files.size() << " item(s) in " << time_total << " seconds" << std::endl;
+    return 0;
+}
+
+#endif  // !defined(WITH_LIBFUZZER)
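With this replacement entry point (any build where `WITH_LIBFUZZER` is not defined), a fuzz test binary becomes a plain input runner: it accepts exactly one positional argument, either a single file or a directory whose regular files are each fed to `LLVMFuzzerTestOneInput`. A hypothetical session against an assumed `corpus/` directory (the printed counts are illustrative):

```bash
./read_network-fuzzer corpus/
# Loading corpus dir: corpus/
# Executed 42 item(s) in 7 seconds
```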
diff --git a/tests/fuzz/fuzz-testhelper/main.h b/tests/fuzz/fuzz-testhelper/main.h
new file mode 100644 (file)
index 0000000..d3f4211
--- /dev/null
@@ -0,0 +1,23 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+/*!
+\file
+\brief Replacement of the libFuzzer `main` entry point for fuzz tests.
+
+Include this file in fuzz test code so that the Microsoft linker can resolve
+the `main` entry point from the static library. The Microsoft linker only
+resolves library symbols that are referenced, and `main` is not referenced
+by default.
+*/
+#ifndef TESTS_FUZZ_TESTHELPER_MAIN_H_
+#define TESTS_FUZZ_TESTHELPER_MAIN_H_
+
+#if !defined(WITH_LIBFUZZER)
+extern "C" int main(int argc, char* argv[]);
+// Make a reference to main so the linker resolves it from the static library.
+void* main_ptr_ = (void*)main;
+#endif  // !defined(WITH_LIBFUZZER)
+
+#endif  // TESTS_FUZZ_TESTHELPER_MAIN_H_
diff --git a/tests/fuzz/src/CMakeLists.txt b/tests/fuzz/src/CMakeLists.txt
new file mode 100644 (file)
index 0000000..71e59cd
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) 2018-2020 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+# Add a dummy `fuzz` target that combines all fuzz tests.
+add_custom_target(fuzz)
+
+# Build a fuzz test from every source file matching *-fuzzer.cc.
+# The fuzz test target name is the source file name without the extension.
+file(GLOB tests "*-fuzzer.cc")
+
+foreach(test_source ${tests})
+    get_filename_component(test_name ${test_source} NAME_WE)
+    add_fuzzer(${test_name} ${test_source})
+
+    target_link_libraries(${test_name} PRIVATE IE::inference_engine)
+
+    add_dependencies(fuzz ${test_name})
+endforeach()
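Given the glob above, each `<name>-fuzzer.cc` source yields its own binary named `<name>`, and the umbrella `fuzz` target depends on all of them. Assuming a Ninja build tree configured as in the documentation earlier in this change:

```bash
ninja fuzz                  # build every fuzz test
ninja read_network-fuzzer   # or only the target generated from read_network-fuzzer.cc
```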
diff --git a/tests/fuzz/src/read_network-fuzzer.cc b/tests/fuzz/src/read_network-fuzzer.cc
new file mode 100644 (file)
index 0000000..87fbce9
--- /dev/null
@@ -0,0 +1,46 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <ie_blob.h>
+
+#include <inference_engine.hpp>
+
+#define COUNT_OF(A) (sizeof(A) / sizeof(A[0]))
+const char kSplitSequence[] = {'F', 'U', 'Z', 'Z', '_', 'N', 'E', 'X', 'T', '_', 'F', 'I', 'E', 'L', 'D'};
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    size_t split_counter = 0;
+    size_t split[1] = {0};
+    if (size < sizeof(kSplitSequence)) return 0;  // we at least expect one separator
+    for (size_t i = 0; i < size - sizeof(kSplitSequence); i++)
+        if (0 == memcmp(data + i, kSplitSequence, sizeof(kSplitSequence))) {
+            split[split_counter++] = i;
+            if (COUNT_OF(split) <= split_counter) break;
+        }
+    if (COUNT_OF(split) != split_counter) return 0;  // not enough splits
+
+    // isolate xml data
+    size_t net_size = split[0];
+    std::string net((const char*)data, net_size);
+    size -= net_size + sizeof(kSplitSequence);
+    data += net_size + sizeof(kSplitSequence);
+
+    // isolate weights data
+    std::vector<uint8_t> weights(data, data + size);
+    auto weights_blob =
+        InferenceEngine::make_shared_blob<uint8_t>(InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, InferenceEngine::C), &weights[0]);
+
+    // Note: all remaining bytes were consumed as weights above; advancing
+    // data/size past another separator here would underflow, so we stop.
+
+    // read xml and set weights
+    try {
+        InferenceEngine::Core ie;
+        InferenceEngine::CNNNetwork network = ie.ReadNetwork(net, weights_blob);
+    } catch (const InferenceEngine::details::InferenceEngineException& error) {
+        return 0;  // fail gracefully on expected exceptions
+    }
+
+    return 0;
+}
\ No newline at end of file
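When the same target is built against libFuzzer proper (the `WITH_LIBFUZZER` configuration), the binary performs in-process fuzzing and treats positional arguments as corpus directories; standard libFuzzer options such as `-max_total_time` apply. A hypothetical invocation, reusing the seed input sketched earlier:

```bash
mkdir -p corpus && cp seed-input corpus/
./read_network-fuzzer -max_total_time=60 corpus/
```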
index 82a6c6c..32ef748 100644 (file)
 <?xml version="1.0"?>
 <attributes>
     <models>
-        <model path="caffe/FP32/alexnet/alexnet.xml" test="create_exenetwork" device="CPU" vmsize="753847" vmpeak="1528832" vmrss="14005" vmhwm="814655" />
-        <model path="caffe/FP32/alexnet/alexnet.xml" test="create_exenetwork" device="GPU" vmsize="580025" vmpeak="1743759" vmrss="234704" vmhwm="1462062" />
-        <model path="caffe/FP32/alexnet/alexnet.xml" test="infer_request_inference" device="CPU" vmsize="1339971" vmpeak="1528828" vmrss="555262" vmhwm="814805" />
-        <model path="caffe/FP32/alexnet/alexnet.xml" test="infer_request_inference" device="GPU" vmsize="1389159" vmpeak="1741154" vmrss="1036169" vmhwm="1460052" />
-        <model path="caffe/FP32/caffenet/caffenet.xml" test="create_exenetwork" device="CPU" vmsize="753843" vmpeak="1545451" vmrss="14234" vmhwm="821334" />
-        <model path="caffe/FP32/caffenet/caffenet.xml" test="create_exenetwork" device="GPU" vmsize="602206" vmpeak="1511325" vmrss="257501" vmhwm="1230284" />
-        <model path="caffe/FP32/caffenet/caffenet.xml" test="infer_request_inference" device="CPU" vmsize="1368206" vmpeak="1545456" vmrss="576774" vmhwm="821739" />
-        <model path="caffe/FP32/caffenet/caffenet.xml" test="infer_request_inference" device="GPU" vmsize="1423096" vmpeak="1511373" vmrss="1074752" vmhwm="1230732" />
-        <model path="caffe/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="CPU" vmsize="772626" vmpeak="985754" vmrss="95260" vmhwm="151496" />
-        <model path="caffe/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="GPU" vmsize="1044604" vmpeak="1154709" vmrss="699168" vmhwm="811104" />
-        <model path="caffe/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="CPU" vmsize="985525" vmpeak="1057614" vmrss="159306" vmhwm="159306" />
-        <model path="caffe/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="GPU" vmsize="1163289" vmpeak="1235379" vmrss="812961" vmhwm="812961" />
-        <model path="caffe/FP32/densenet_161/densenet_161.xml" test="create_exenetwork" device="CPU" vmsize="762770" vmpeak="1212248" vmrss="93570" vmhwm="426817" />
-        <model path="caffe/FP32/densenet_161/densenet_161.xml" test="create_exenetwork" device="GPU" vmsize="1127847" vmpeak="1586310" vmrss="782029" vmhwm="1304679" />
-        <model path="caffe/FP32/densenet_161/densenet_161.xml" test="infer_request_inference" device="CPU" vmsize="1351816" vmpeak="1423906" vmrss="353738" vmhwm="427644" />
-        <model path="caffe/FP32/densenet_161/densenet_161.xml" test="infer_request_inference" device="GPU" vmsize="1660304" vmpeak="1660304" vmrss="1309215" vmhwm="1309215" />
-        <model path="caffe/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="CPU" vmsize="791863" vmpeak="998329" vmrss="123059" vmhwm="240160" />
-        <model path="caffe/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="GPU" vmsize="1309598" vmpeak="1428944" vmrss="964066" vmhwm="1086751" />
-        <model path="caffe/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="CPU" vmsize="1060303" vmpeak="1132392" vmrss="238924" vmhwm="240416" />
-        <model path="caffe/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="GPU" vmsize="1435214" vmpeak="1507303" vmrss="1084969" vmhwm="1084969" />
-        <model path="caffe/FP32/densenet_201/densenet_201.xml" test="create_exenetwork" device="CPU" vmsize="864639" vmpeak="1153900" vmrss="147906" vmhwm="322590" />
-        <model path="caffe/FP32/densenet_201/densenet_201.xml" test="create_exenetwork" device="GPU" vmsize="1541161" vmpeak="1686282" vmrss="1195972" vmhwm="1337595" />
-        <model path="caffe/FP32/densenet_201/densenet_201.xml" test="infer_request_inference" device="CPU" vmsize="1181479" vmpeak="1253568" vmrss="315581" vmhwm="322700" />
-        <model path="caffe/FP32/densenet_201/densenet_201.xml" test="infer_request_inference" device="GPU" vmsize="1706760" vmpeak="1778849" vmrss="1356533" vmhwm="1356533" />
-        <model path="caffe/FP32/dilation/dilation.xml" test="create_exenetwork" device="CPU" vmsize="754428" vmpeak="3004311" vmrss="17613" vmhwm="1856210" />
-        <model path="caffe/FP32/dilation/dilation.xml" test="create_exenetwork" device="GPU" vmsize="710569" vmpeak="3363879" vmrss="365380" vmhwm="3081751" />
-        <model path="caffe/FP32/dilation/dilation.xml" test="infer_request_inference" device="CPU" vmsize="2487130" vmpeak="3004311" vmrss="1687936" vmhwm="1856448" />
-        <model path="caffe/FP32/dilation/dilation.xml" test="infer_request_inference" device="GPU" vmsize="2951748" vmpeak="3363804" vmrss="2597940" vmhwm="3080968" />
-        <model path="caffe/FP32/dpn_92/dpn_92.xml" test="create_exenetwork" device="CPU" vmsize="767157" vmpeak="1369376" vmrss="63338" vmhwm="540166" />
-        <model path="caffe/FP32/dpn_92/dpn_92.xml" test="create_exenetwork" device="GPU" vmsize="1155101" vmpeak="1701180" vmrss="809938" vmhwm="1420152" />
-        <model path="caffe/FP32/dpn_92/dpn_92.xml" test="infer_request_inference" device="CPU" vmsize="1299262" vmpeak="1373882" vmrss="431758" vmhwm="540214" />
-        <model path="caffe/FP32/dpn_92/dpn_92.xml" test="infer_request_inference" device="GPU" vmsize="1647738" vmpeak="1719828" vmrss="1296350" vmhwm="1419092" />
-        <model path="caffe/FP32/fcn_alexnet/fcn_alexnet.xml" test="create_exenetwork" device="CPU" vmsize="753711" vmpeak="1642832" vmrss="14014" vmhwm="789109" />
-        <model path="caffe/FP32/fcn_alexnet/fcn_alexnet.xml" test="create_exenetwork" device="GPU" vmsize="595430" vmpeak="1690484" vmrss="250496" vmhwm="1409205" />
-        <model path="caffe/FP32/fcn_alexnet/fcn_alexnet.xml" test="infer_request_inference" device="CPU" vmsize="1494464" vmpeak="1642832" vmrss="679214" vmhwm="789412" />
-        <model path="caffe/FP32/fcn_alexnet/fcn_alexnet.xml" test="infer_request_inference" device="GPU" vmsize="1450746" vmpeak="1693172" vmrss="1097681" vmhwm="1412254" />
-        <model path="caffe/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="CPU" vmsize="919740" vmpeak="1521955" vmrss="234520" vmhwm="792022" />
-        <model path="caffe/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="GPU" vmsize="1666363" vmpeak="2175012" vmrss="1321245" vmhwm="1893936" />
-        <model path="caffe/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="CPU" vmsize="1436982" vmpeak="1521955" vmrss="643614" vmhwm="793218" />
-        <model path="caffe/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="GPU" vmsize="2138818" vmpeak="2210907" vmrss="1786162" vmhwm="1893760" />
-        <model path="caffe/FP32/inception_v1/inception_v1.xml" test="create_exenetwork" device="CPU" vmsize="757262" vmpeak="978832" vmrss="81408" vmhwm="124238" />
-        <model path="caffe/FP32/inception_v1/inception_v1.xml" test="create_exenetwork" device="GPU" vmsize="810590" vmpeak="929139" vmrss="464868" vmhwm="503813" />
-        <model path="caffe/FP32/inception_v1/inception_v1.xml" test="infer_request_inference" device="CPU" vmsize="928637" vmpeak="1000727" vmrss="130719" vmhwm="130719" />
-        <model path="caffe/FP32/inception_v1/inception_v1.xml" test="infer_request_inference" device="GPU" vmsize="859478" vmpeak="931568" vmrss="507540" vmhwm="507540" />
-        <model path="caffe/FP32/inception_v2/inception_v2.xml" test="create_exenetwork" device="CPU" vmsize="766726" vmpeak="925245" vmrss="33382" vmhwm="180268" />
-        <model path="caffe/FP32/inception_v2/inception_v2.xml" test="create_exenetwork" device="GPU" vmsize="775117" vmpeak="913347" vmrss="430157" vmhwm="605598" />
-        <model path="caffe/FP32/inception_v2/inception_v2.xml" test="infer_request_inference" device="CPU" vmsize="927163" vmpeak="999253" vmrss="141869" vmhwm="181156" />
-        <model path="caffe/FP32/inception_v2/inception_v2.xml" test="infer_request_inference" device="GPU" vmsize="924752" vmpeak="996842" vmrss="571590" vmhwm="602839" />
-        <model path="caffe/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="CPU" vmsize="767003" vmpeak="1090526" vmrss="34900" vmhwm="348172" />
-        <model path="caffe/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="GPU" vmsize="948046" vmpeak="1182082" vmrss="602624" vmhwm="900169" />
-        <model path="caffe/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="CPU" vmsize="1051481" vmpeak="1123570" vmrss="257219" vmhwm="348541" />
-        <model path="caffe/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="GPU" vmsize="1187106" vmpeak="1259196" vmrss="834438" vmhwm="902800" />
-        <model path="caffe/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="CPU" vmsize="764315" vmpeak="1326938" vmrss="63725" vmhwm="603213" />
-        <model path="caffe/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="GPU" vmsize="1183410" vmpeak="1680448" vmrss="837953" vmhwm="1398870" />
-        <model path="caffe/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="CPU" vmsize="1227798" vmpeak="1326908" vmrss="438160" vmhwm="602434" />
-        <model path="caffe/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="GPU" vmsize="1633997" vmpeak="1706086" vmrss="1281693" vmhwm="1395878" />
-        <model path="caffe/FP32/lenet/lenet.xml" test="create_exenetwork" device="CPU" vmsize="753605" vmpeak="876330" vmrss="15571" vmhwm="29106" />
-        <model path="caffe/FP32/lenet/lenet.xml" test="create_exenetwork" device="GPU" vmsize="566693" vmpeak="658486" vmrss="220783" vmhwm="232452" />
-        <model path="caffe/FP32/lenet/lenet.xml" test="infer_request_inference" device="CPU" vmsize="808486" vmpeak="880576" vmrss="29084" vmhwm="29084" />
-        <model path="caffe/FP32/lenet/lenet.xml" test="infer_request_inference" device="GPU" vmsize="586401" vmpeak="658490" vmrss="232764" vmhwm="232764" />
-        <model path="caffe/FP32/mobilenet/mobilenet.xml" test="create_exenetwork" device="CPU" vmsize="754864" vmpeak="893692" vmrss="54617" vmhwm="81584" />
-        <model path="caffe/FP32/mobilenet/mobilenet.xml" test="create_exenetwork" device="GPU" vmsize="642527" vmpeak="750424" vmrss="296678" vmhwm="362300" />
-        <model path="caffe/FP32/mobilenet/mobilenet.xml" test="infer_request_inference" device="CPU" vmsize="831336" vmpeak="903425" vmrss="85654" vmhwm="85654" />
-        <model path="caffe/FP32/mobilenet/mobilenet.xml" test="infer_request_inference" device="GPU" vmsize="716047" vmpeak="788136" vmrss="364434" vmhwm="364434" />
-        <model path="caffe/FP32/mobilenet_v2/mobilenet_v2.xml" test="create_exenetwork" device="CPU" vmsize="756813" vmpeak="819698" vmrss="54410" vmhwm="78289" />
-        <model path="caffe/FP32/mobilenet_v2/mobilenet_v2.xml" test="create_exenetwork" device="GPU" vmsize="758705" vmpeak="862466" vmrss="412966" vmhwm="437131" />
-        <model path="caffe/FP32/mobilenet_v2/mobilenet_v2.xml" test="infer_request_inference" device="CPU" vmsize="840967" vmpeak="840967" vmrss="82860" vmhwm="82860" />
-        <model path="caffe/FP32/mobilenet_v2/mobilenet_v2.xml" test="infer_request_inference" device="GPU" vmsize="787182" vmpeak="859271" vmrss="436801" vmhwm="436801" />
-        <model path="caffe/FP32/mtcnn_o/mtcnn_o.xml" test="create_exenetwork" device="CPU" vmsize="753715" vmpeak="876299" vmrss="17512" vmhwm="28402" />
-        <model path="caffe/FP32/mtcnn_o/mtcnn_o.xml" test="create_exenetwork" device="GPU" vmsize="583092" vmpeak="674744" vmrss="238220" vmhwm="249722" />
-        <model path="caffe/FP32/mtcnn_o/mtcnn_o.xml" test="infer_request_inference" device="CPU" vmsize="808209" vmpeak="808209" vmrss="27865" vmhwm="27865" />
-        <model path="caffe/FP32/mtcnn_o/mtcnn_o.xml" test="infer_request_inference" device="GPU" vmsize="600714" vmpeak="672804" vmrss="246967" vmhwm="246967" />
-        <model path="caffe/FP32/mtcnn_p/mtcnn_p.xml" test="create_exenetwork" device="CPU" vmsize="763677" vmpeak="874535" vmrss="13318" vmhwm="35327" />
-        <model path="caffe/FP32/mtcnn_p/mtcnn_p.xml" test="create_exenetwork" device="GPU" vmsize="570521" vmpeak="662182" vmrss="224774" vmhwm="351410" />
-        <model path="caffe/FP32/mtcnn_p/mtcnn_p.xml" test="infer_request_inference" device="CPU" vmsize="901260" vmpeak="973350" vmrss="108037" vmhwm="108037" />
-        <model path="caffe/FP32/mtcnn_p/mtcnn_p.xml" test="infer_request_inference" device="GPU" vmsize="685115" vmpeak="757204" vmrss="331421" vmhwm="351529" />
-        <model path="caffe/FP32/mtcnn_r/mtcnn_r.xml" test="create_exenetwork" device="CPU" vmsize="753711" vmpeak="803228" vmrss="14806" vmhwm="25911" />
-        <model path="caffe/FP32/mtcnn_r/mtcnn_r.xml" test="create_exenetwork" device="GPU" vmsize="577280" vmpeak="667673" vmrss="232029" vmhwm="242580" />
-        <model path="caffe/FP32/mtcnn_r/mtcnn_r.xml" test="infer_request_inference" device="CPU" vmsize="806102" vmpeak="806102" vmrss="25352" vmhwm="25352" />
-        <model path="caffe/FP32/mtcnn_r/mtcnn_r.xml" test="infer_request_inference" device="GPU" vmsize="593340" vmpeak="665429" vmrss="240200" vmhwm="240200" />
-        <model path="caffe/FP32/openpose_face/openpose_face.xml" test="create_exenetwork" device="CPU" vmsize="764711" vmpeak="1279238" vmrss="23544" vmhwm="528431" />
-        <model path="caffe/FP32/openpose_face/openpose_face.xml" test="create_exenetwork" device="GPU" vmsize="890428" vmpeak="1316884" vmrss="544882" vmhwm="1035192" />
-        <model path="caffe/FP32/openpose_face/openpose_face.xml" test="infer_request_inference" device="CPU" vmsize="1187529" vmpeak="1279207" vmrss="398512" vmhwm="528730" />
-        <model path="caffe/FP32/openpose_face/openpose_face.xml" test="infer_request_inference" device="GPU" vmsize="1288707" vmpeak="1360796" vmrss="935778" vmhwm="1038888" />
-        <model path="caffe/FP32/openpose_hand/openpose_hand.xml" test="create_exenetwork" device="CPU" vmsize="755634" vmpeak="1259024" vmrss="23342" vmhwm="507980" />
-        <model path="caffe/FP32/openpose_hand/openpose_hand.xml" test="create_exenetwork" device="GPU" vmsize="845886" vmpeak="1297898" vmrss="500957" vmhwm="1016822" />
-        <model path="caffe/FP32/openpose_hand/openpose_hand.xml" test="infer_request_inference" device="CPU" vmsize="1327246" vmpeak="1327246" vmrss="384634" vmhwm="507522" />
-        <model path="caffe/FP32/openpose_hand/openpose_hand.xml" test="infer_request_inference" device="GPU" vmsize="1277117" vmpeak="1300490" vmrss="923674" vmhwm="1018956" />
-        <model path="caffe/FP32/openpose_pose_coco/openpose_pose_coco.xml" test="create_exenetwork" device="CPU" vmsize="757556" vmpeak="1471373" vmrss="32780" vmhwm="716861" />
-        <model path="caffe/FP32/openpose_pose_coco/openpose_pose_coco.xml" test="create_exenetwork" device="GPU" vmsize="1153103" vmpeak="1684306" vmrss="807426" vmhwm="1402513" />
-        <model path="caffe/FP32/openpose_pose_coco/openpose_pose_coco.xml" test="infer_request_inference" device="CPU" vmsize="1397686" vmpeak="1471373" vmrss="528620" vmhwm="717728" />
-        <model path="caffe/FP32/openpose_pose_coco/openpose_pose_coco.xml" test="infer_request_inference" device="GPU" vmsize="1597785" vmpeak="1680465" vmrss="1244672" vmhwm="1399217" />
-        <model path="caffe/FP32/places205_alexnet/places205_alexnet.xml" test="create_exenetwork" device="CPU" vmsize="753711" vmpeak="1485853" vmrss="14330" vmhwm="773766" />
-        <model path="caffe/FP32/places205_alexnet/places205_alexnet.xml" test="create_exenetwork" device="GPU" vmsize="604573" vmpeak="1684861" vmrss="259556" vmhwm="1403600" />
-        <model path="caffe/FP32/places205_alexnet/places205_alexnet.xml" test="infer_request_inference" device="CPU" vmsize="1311107" vmpeak="1485862" vmrss="528448" vmhwm="773656" />
-        <model path="caffe/FP32/places205_alexnet/places205_alexnet.xml" test="infer_request_inference" device="GPU" vmsize="1346840" vmpeak="1684896" vmrss="993942" vmhwm="1403886" />
-        <model path="caffe/FP32/places205_googlenet/places205_googlenet.xml" test="create_exenetwork" device="CPU" vmsize="757187" vmpeak="831362" vmrss="78795" vmhwm="113814" />
-        <model path="caffe/FP32/places205_googlenet/places205_googlenet.xml" test="create_exenetwork" device="GPU" vmsize="805270" vmpeak="920321" vmrss="460319" vmhwm="495638" />
-        <model path="caffe/FP32/places205_googlenet/places205_googlenet.xml" test="infer_request_inference" device="CPU" vmsize="852781" vmpeak="852781" vmrss="119033" vmhwm="119033" />
-        <model path="caffe/FP32/places205_googlenet/places205_googlenet.xml" test="infer_request_inference" device="GPU" vmsize="847052" vmpeak="919142" vmrss="494916" vmhwm="494916" />
-        <model path="caffe/FP32/resnet_18/resnet_18.xml" test="create_exenetwork" device="CPU" vmsize="754248" vmpeak="925443" vmrss="16878" vmhwm="177663" />
-        <model path="caffe/FP32/resnet_18/resnet_18.xml" test="create_exenetwork" device="GPU" vmsize="657659" vmpeak="799510" vmrss="312070" vmhwm="466153" />
-        <model path="caffe/FP32/resnet_18/resnet_18.xml" test="infer_request_inference" device="CPU" vmsize="920163" vmpeak="920163" vmrss="131859" vmhwm="176726" />
-        <model path="caffe/FP32/resnet_18/resnet_18.xml" test="infer_request_inference" device="GPU" vmsize="775350" vmpeak="847440" vmrss="422919" vmhwm="467610" />
-        <model path="caffe/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="CPU" vmsize="760584" vmpeak="1338202" vmrss="43243" vmhwm="616928" />
-        <model path="caffe/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="GPU" vmsize="1104862" vmpeak="1557006" vmrss="759030" vmhwm="1275071" />
-        <model path="caffe/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="CPU" vmsize="1224172" vmpeak="1338172" vmrss="434944" vmhwm="616849" />
-        <model path="caffe/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="GPU" vmsize="1452145" vmpeak="1558106" vmrss="1099428" vmhwm="1276787" />
-        <model path="caffe/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="CPU" vmsize="764878" vmpeak="1551919" vmrss="58638" vmhwm="828383" />
-        <model path="caffe/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="GPU" vmsize="1315120" vmpeak="1977250" vmrss="968858" vmhwm="1694796" />
-        <model path="caffe/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="CPU" vmsize="1526166" vmpeak="1598256" vmrss="582401" vmhwm="829598" />
-        <model path="caffe/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="GPU" vmsize="1804748" vmpeak="1975855" vmrss="1451397" vmhwm="1693419" />
-        <model path="caffe/FP32/resnet_v1_269/resnet_v1_269.xml" test="create_exenetwork" device="CPU" vmsize="927665" vmpeak="2236845" vmrss="224034" vmhwm="1396458" />
-        <model path="caffe/FP32/resnet_v1_269/resnet_v1_269.xml" test="create_exenetwork" device="GPU" vmsize="1988676" vmpeak="3156291" vmrss="1643919" vmhwm="2874946" />
-        <model path="caffe/FP32/resnet_v1_269/resnet_v1_269.xml" test="infer_request_inference" device="CPU" vmsize="2016999" vmpeak="2236955" vmrss="1117754" vmhwm="1396128" />
-        <model path="caffe/FP32/resnet_v1_269/resnet_v1_269.xml" test="infer_request_inference" device="GPU" vmsize="2845849" vmpeak="3165219" vmrss="2493550" vmhwm="2883091" />
-        <model path="caffe/FP32/resnet_v1_50/resnet_v1_50.xml" test="create_exenetwork" device="CPU" vmsize="766101" vmpeak="1079971" vmrss="27359" vmhwm="362142" />
-        <model path="caffe/FP32/resnet_v1_50/resnet_v1_50.xml" test="create_exenetwork" device="GPU" vmsize="834856" vmpeak="1080094" vmrss="490089" vmhwm="799312" />
-        <model path="caffe/FP32/resnet_v1_50/resnet_v1_50.xml" test="infer_request_inference" device="CPU" vmsize="1046381" vmpeak="1118471" vmrss="260528" vmhwm="362203" />
-        <model path="caffe/FP32/resnet_v1_50/resnet_v1_50.xml" test="infer_request_inference" device="GPU" vmsize="1060109" vmpeak="1132199" vmrss="707876" vmhwm="804108" />
-        <model path="caffe/FP32/se_bn_inception/se_bn_inception.xml" test="create_exenetwork" device="CPU" vmsize="758516" vmpeak="930397" vmrss="40572" vmhwm="194062" />
-        <model path="caffe/FP32/se_bn_inception/se_bn_inception.xml" test="create_exenetwork" device="GPU" vmsize="873061" vmpeak="1013430" vmrss="528167" vmhwm="692564" />
-        <model path="caffe/FP32/se_bn_inception/se_bn_inception.xml" test="infer_request_inference" device="CPU" vmsize="957620" vmpeak="1029710" vmrss="152754" vmhwm="194656" />
-        <model path="caffe/FP32/se_bn_inception/se_bn_inception.xml" test="infer_request_inference" device="GPU" vmsize="1014305" vmpeak="1086395" vmrss="662525" vmhwm="694821" />
-        <model path="caffe/FP32/se_resnext_50/se_resnext_50.xml" test="create_exenetwork" device="CPU" vmsize="759382" vmpeak="1174707" vmrss="39265" vmhwm="401856" />
-        <model path="caffe/FP32/se_resnext_50/se_resnext_50.xml" test="create_exenetwork" device="GPU" vmsize="983083" vmpeak="1257471" vmrss="637335" vmhwm="975444" />
-        <model path="caffe/FP32/se_resnext_50/se_resnext_50.xml" test="infer_request_inference" device="CPU" vmsize="1140730" vmpeak="1174672" vmrss="315977" vmhwm="401508" />
-        <model path="caffe/FP32/se_resnext_50/se_resnext_50.xml" test="infer_request_inference" device="GPU" vmsize="1251214" vmpeak="1323304" vmrss="899034" vmhwm="976474" />
-        <model path="caffe/FP32/squeezenet_v1.0/squeezenet_v1.0.xml" test="create_exenetwork" device="CPU" vmsize="754890" vmpeak="815095" vmrss="28833" vmhwm="43881" />
-        <model path="caffe/FP32/squeezenet_v1.0/squeezenet_v1.0.xml" test="create_exenetwork" device="GPU" vmsize="651974" vmpeak="746719" vmrss="306455" vmhwm="321345" />
-        <model path="caffe/FP32/squeezenet_v1.0/squeezenet_v1.0.xml" test="infer_request_inference" device="CPU" vmsize="824942" vmpeak="897032" vmrss="48567" vmhwm="48567" />
-        <model path="caffe/FP32/squeezenet_v1.0/squeezenet_v1.0.xml" test="infer_request_inference" device="GPU" vmsize="676328" vmpeak="748418" vmrss="324860" vmhwm="324860" />
-        <model path="caffe/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="CPU" vmsize="758212" vmpeak="813208" vmrss="29691" vmhwm="44220" />
-        <model path="caffe/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="GPU" vmsize="611789" vmpeak="706534" vmrss="266244" vmhwm="324007" />
-        <model path="caffe/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="CPU" vmsize="818549" vmpeak="890639" vmrss="47141" vmhwm="47141" />
-        <model path="caffe/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="GPU" vmsize="677705" vmpeak="749795" vmrss="326163" vmhwm="326163" />
-        <model path="caffe/FP32/ssd_googlenet/ssd_googlenet.xml" test="create_exenetwork" device="CPU" vmsize="757534" vmpeak="911495" vmrss="36445" vmhwm="182050" />
-        <model path="caffe/FP32/ssd_googlenet/ssd_googlenet.xml" test="create_exenetwork" device="GPU" vmsize="835683" vmpeak="973280" vmrss="490613" vmhwm="658640" />
-        <model path="caffe/FP32/ssd_googlenet/ssd_googlenet.xml" test="infer_request_inference" device="CPU" vmsize="941076" vmpeak="1013166" vmrss="148222" vmhwm="183185" />
-        <model path="caffe/FP32/ssd_googlenet/ssd_googlenet.xml" test="infer_request_inference" device="GPU" vmsize="989608" vmpeak="1061698" vmrss="637709" vmhwm="661746" />
-        <model path="caffe/FP32/ssd_mobilenet/ssd_mobilenet.xml" test="create_exenetwork" device="CPU" vmsize="757174" vmpeak="901648" vmrss="73409" vmhwm="106537" />
-        <model path="caffe/FP32/ssd_mobilenet/ssd_mobilenet.xml" test="create_exenetwork" device="GPU" vmsize="801644" vmpeak="915186" vmrss="456517" vmhwm="490520" />
-        <model path="caffe/FP32/ssd_mobilenet/ssd_mobilenet.xml" test="infer_request_inference" device="CPU" vmsize="847932" vmpeak="847932" vmrss="116410" vmhwm="116410" />
-        <model path="caffe/FP32/ssd_mobilenet/ssd_mobilenet.xml" test="infer_request_inference" device="GPU" vmsize="843022" vmpeak="915112" vmrss="490864" vmhwm="490864" />
-        <model path="caffe/FP32/ssd_squeezenet/ssd_squeezenet.xml" test="create_exenetwork" device="CPU" vmsize="765393" vmpeak="900402" vmrss="71544" vmhwm="105032" />
-        <model path="caffe/FP32/ssd_squeezenet/ssd_squeezenet.xml" test="create_exenetwork" device="GPU" vmsize="759668" vmpeak="872762" vmrss="414493" vmhwm="497701" />
-        <model path="caffe/FP32/ssd_squeezenet/ssd_squeezenet.xml" test="infer_request_inference" device="CPU" vmsize="848438" vmpeak="900754" vmrss="113590" vmhwm="113590" />
-        <model path="caffe/FP32/ssd_squeezenet/ssd_squeezenet.xml" test="infer_request_inference" device="GPU" vmsize="847620" vmpeak="919710" vmrss="495730" vmhwm="495730" />
-        <model path="caffe/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="create_exenetwork" device="CPU" vmsize="755374" vmpeak="1146156" vmrss="22026" vmhwm="370176" />
-        <model path="caffe/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="create_exenetwork" device="GPU" vmsize="768451" vmpeak="1074730" vmrss="423662" vmhwm="794266" />
-        <model path="caffe/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="infer_request_inference" device="CPU" vmsize="1113609" vmpeak="1185698" vmrss="313513" vmhwm="370035" />
-        <model path="caffe/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="infer_request_inference" device="GPU" vmsize="1134227" vmpeak="1206317" vmrss="783006" vmhwm="795000" />
-        <model path="caffe/FP32/ssd_vgg16_512/ssd_vgg16_512.xml" test="create_exenetwork" device="CPU" vmsize="755796" vmpeak="1267802" vmrss="23746" vmhwm="383983" />
-        <model path="caffe/FP32/ssd_vgg16_512/ssd_vgg16_512.xml" test="create_exenetwork" device="GPU" vmsize="794565" vmpeak="1272634" vmrss="449394" vmhwm="991632" />
-        <model path="caffe/FP32/ssd_vgg16_512/ssd_vgg16_512.xml" test="infer_request_inference" device="CPU" vmsize="1234050" vmpeak="1306140" vmrss="421194" vmhwm="421194" />
-        <model path="caffe/FP32/ssd_vgg16_512/ssd_vgg16_512.xml" test="infer_request_inference" device="GPU" vmsize="1348960" vmpeak="1421050" vmrss="999050" vmhwm="999050" />
-        <model path="caffe/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="CPU" vmsize="754006" vmpeak="2548497" vmrss="15598" vmhwm="1808624" />
-        <model path="caffe/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="GPU" vmsize="668602" vmpeak="3326708" vmrss="323791" vmhwm="3045328" />
-        <model path="caffe/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="CPU" vmsize="2027181" vmpeak="2548497" vmrss="1242560" vmhwm="1808730" />
-        <model path="caffe/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="GPU" vmsize="2441076" vmpeak="3326708" vmrss="2088055" vmhwm="3045050" />
-        <model path="caffe/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="CPU" vmsize="754212" vmpeak="2618030" vmrss="15510" vmhwm="1877383" />
-        <model path="caffe/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="GPU" vmsize="739222" vmpeak="3397112" vmrss="393866" vmhwm="3115085" />
-        <model path="caffe/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="CPU" vmsize="2073794" vmpeak="2618030" vmrss="1289741" vmhwm="1878289" />
-        <model path="caffe/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="GPU" vmsize="2518340" vmpeak="3397081" vmrss="2165196" vmhwm="3114975" />
-        <model path="caffe/FP32/vnect/vnect.xml" test="create_exenetwork" device="CPU" vmsize="764940" vmpeak="947157" vmrss="27988" vmhwm="223726" />
-        <model path="caffe/FP32/vnect/vnect.xml" test="create_exenetwork" device="GPU" vmsize="789223" vmpeak="941683" vmrss="443788" vmhwm="641476" />
-        <model path="caffe/FP32/vnect/vnect.xml" test="infer_request_inference" device="CPU" vmsize="962187" vmpeak="1034277" vmrss="177848" vmhwm="224180" />
-        <model path="caffe/FP32/vnect/vnect.xml" test="infer_request_inference" device="GPU" vmsize="969069" vmpeak="1041158" vmrss="616990" vmhwm="641977" />
-        <model path="caffe/FP32/wrn_50_2/wrn_50_2.xml" test="create_exenetwork" device="CPU" vmsize="755651" vmpeak="1654985" vmrss="24921" vmhwm="920400" />
-        <model path="caffe/FP32/wrn_50_2/wrn_50_2.xml" test="create_exenetwork" device="GPU" vmsize="936892" vmpeak="1838610" vmrss="590994" vmhwm="1556526" />
-        <model path="caffe/FP32/wrn_50_2/wrn_50_2.xml" test="infer_request_inference" device="CPU" vmsize="1433352" vmpeak="1654989" vmrss="639456" vmhwm="918693" />
-        <model path="caffe/FP32/wrn_50_2/wrn_50_2.xml" test="infer_request_inference" device="GPU" vmsize="1613176" vmpeak="1824922" vmrss="1259940" vmhwm="1543031" />
-        <model path="caffe/FP32/yolo_v1_full/yolo_v1_full.xml" test="create_exenetwork" device="CPU" vmsize="754692" vmpeak="4259393" vmrss="18013" vmhwm="3532412" />
-        <model path="caffe/FP32/yolo_v1_full/yolo_v1_full.xml" test="create_exenetwork" device="GPU" vmsize="719105" vmpeak="5906194" vmrss="373648" vmhwm="5623600" />
-        <model path="caffe/FP32/yolo_v1_full/yolo_v1_full.xml" test="infer_request_inference" device="CPU" vmsize="3167040" vmpeak="4259380" vmrss="2378362" vmhwm="3531237" />
-        <model path="caffe/FP32/yolo_v1_full/yolo_v1_full.xml" test="infer_request_inference" device="GPU" vmsize="4165801" vmpeak="5903801" vmrss="3812393" vmhwm="5621585" />
-        <model path="caffe/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="create_exenetwork" device="CPU" vmsize="753860" vmpeak="1101161" vmrss="14599" vmhwm="375399" />
-        <model path="caffe/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="create_exenetwork" device="GPU" vmsize="577640" vmpeak="1037480" vmrss="232443" vmhwm="755972" />
-        <model path="caffe/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="infer_request_inference" device="CPU" vmsize="1059828" vmpeak="1131917" vmrss="272879" vmhwm="374721" />
-        <model path="caffe/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="infer_request_inference" device="GPU" vmsize="957453" vmpeak="1037445" vmrss="605026" vmhwm="756606" />
-        <model path="caffe/FP32/yolo_v2/yolo_v2.xml" test="create_exenetwork" device="CPU" vmsize="754344" vmpeak="1422647" vmrss="16790" vmhwm="680072" />
-        <model path="caffe/FP32/yolo_v2/yolo_v2.xml" test="create_exenetwork" device="GPU" vmsize="678964" vmpeak="1435790" vmrss="334017" vmhwm="1154573" />
-        <model path="caffe/FP32/yolo_v2/yolo_v2.xml" test="infer_request_inference" device="CPU" vmsize="1279823" vmpeak="1422647" vmrss="490692" vmhwm="680526" />
-        <model path="caffe/FP32/yolo_v2/yolo_v2.xml" test="infer_request_inference" device="GPU" vmsize="1325156" vmpeak="1438571" vmrss="972140" vmhwm="1157138" />
-        <model path="caffe/FP32/yolo_v2_tiny/yolo_v2_tiny.xml" test="create_exenetwork" device="CPU" vmsize="753733" vmpeak="954430" vmrss="14278" vmhwm="229913" />
-        <model path="caffe/FP32/yolo_v2_tiny/yolo_v2_tiny.xml" test="create_exenetwork" device="GPU" vmsize="568880" vmpeak="814976" vmrss="223907" vmhwm="533808" />
-        <model path="caffe/FP32/yolo_v2_tiny/yolo_v2_tiny.xml" test="infer_request_inference" device="CPU" vmsize="1032882" vmpeak="1032882" vmrss="174631" vmhwm="230243" />
-        <model path="caffe/FP32/yolo_v2_tiny/yolo_v2_tiny.xml" test="infer_request_inference" device="GPU" vmsize="810031" vmpeak="816178" vmrss="456856" vmhwm="534503" />
-        <model path="caffe/FP32/yolo_v3/yolo_v3.xml" test="create_exenetwork" device="CPU" vmsize="756852" vmpeak="1587154" vmrss="31460" vmhwm="837570" />
-        <model path="caffe/FP32/yolo_v3/yolo_v3.xml" test="create_exenetwork" device="GPU" vmsize="1159840" vmpeak="1822444" vmrss="813969" vmhwm="1540343" />
-        <model path="caffe/FP32/yolo_v3/yolo_v3.xml" test="infer_request_inference" device="CPU" vmsize="1554462" vmpeak="1626552" vmrss="609677" vmhwm="836655" />
-        <model path="caffe/FP32/yolo_v3/yolo_v3.xml" test="infer_request_inference" device="GPU" vmsize="1735610" vmpeak="1821749" vmrss="1383285" vmhwm="1540598" />
-        <model path="mxnet/FP32/caffenet/caffenet.xml" test="create_exenetwork" device="CPU" vmsize="753856" vmpeak="1528538" vmrss="14414" vmhwm="815491" />
-        <model path="mxnet/FP32/caffenet/caffenet.xml" test="create_exenetwork" device="GPU" vmsize="580030" vmpeak="1741062" vmrss="235624" vmhwm="1460386" />
-        <model path="mxnet/FP32/caffenet/caffenet.xml" test="infer_request_inference" device="CPU" vmsize="1339681" vmpeak="1528538" vmrss="556146" vmhwm="815262" />
-        <model path="mxnet/FP32/caffenet/caffenet.xml" test="infer_request_inference" device="GPU" vmsize="1389097" vmpeak="1741093" vmrss="1036178" vmhwm="1460060" />
-        <model path="mxnet/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="CPU" vmsize="772622" vmpeak="985749" vmrss="95431" vmhwm="151087" />
-        <model path="mxnet/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="GPU" vmsize="1141962" vmpeak="1252068" vmrss="796734" vmhwm="827217" />
-        <model path="mxnet/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="CPU" vmsize="985239" vmpeak="1057328" vmrss="158532" vmhwm="158532" />
-        <model path="mxnet/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="GPU" vmsize="1171425" vmpeak="1243514" vmrss="818624" vmhwm="818624" />
-        <model path="mxnet/FP32/densenet_161/densenet_161.xml" test="create_exenetwork" device="CPU" vmsize="762731" vmpeak="1211720" vmrss="93486" vmhwm="426896" />
-        <model path="mxnet/FP32/densenet_161/densenet_161.xml" test="create_exenetwork" device="GPU" vmsize="1312801" vmpeak="1592839" vmrss="967252" vmhwm="1311569" />
-        <model path="mxnet/FP32/densenet_161/densenet_161.xml" test="infer_request_inference" device="CPU" vmsize="1198124" vmpeak="1270214" vmrss="353051" vmhwm="427319" />
-        <model path="mxnet/FP32/densenet_161/densenet_161.xml" test="infer_request_inference" device="GPU" vmsize="1657339" vmpeak="1729428" vmrss="1304820" vmhwm="1304820" />
-        <model path="mxnet/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="CPU" vmsize="796360" vmpeak="1002408" vmrss="123094" vmhwm="239945" />
-        <model path="mxnet/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="GPU" vmsize="1352916" vmpeak="1472262" vmrss="1007630" vmhwm="1084727" />
-        <model path="mxnet/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="CPU" vmsize="1059880" vmpeak="1059880" vmrss="239307" vmhwm="241753" />
-        <model path="mxnet/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="GPU" vmsize="1437656" vmpeak="1509745" vmrss="1084828" vmhwm="1084828" />
-        <model path="mxnet/FP32/densenet_201/densenet_201.xml" test="create_exenetwork" device="CPU" vmsize="864635" vmpeak="1154040" vmrss="148830" vmhwm="322528" />
-        <model path="mxnet/FP32/densenet_201/densenet_201.xml" test="create_exenetwork" device="GPU" vmsize="1505042" vmpeak="1650162" vmrss="1159906" vmhwm="1343711" />
-        <model path="mxnet/FP32/densenet_201/densenet_201.xml" test="infer_request_inference" device="CPU" vmsize="1181056" vmpeak="1253146" vmrss="315048" vmhwm="322282" />
-        <model path="mxnet/FP32/densenet_201/densenet_201.xml" test="infer_request_inference" device="GPU" vmsize="1719256" vmpeak="1791345" vmrss="1366767" vmhwm="1366767" />
-        <model path="mxnet/FP32/dpn_92/dpn_92.xml" test="create_exenetwork" device="CPU" vmsize="767976" vmpeak="1370195" vmrss="63456" vmhwm="539897" />
-        <model path="mxnet/FP32/dpn_92/dpn_92.xml" test="create_exenetwork" device="GPU" vmsize="1313452" vmpeak="1701664" vmrss="968145" vmhwm="1420434" />
-        <model path="mxnet/FP32/dpn_92/dpn_92.xml" test="infer_request_inference" device="CPU" vmsize="1295571" vmpeak="1370195" vmrss="430610" vmhwm="539536" />
-        <model path="mxnet/FP32/dpn_92/dpn_92.xml" test="infer_request_inference" device="GPU" vmsize="1651421" vmpeak="1723510" vmrss="1299738" vmhwm="1422326" />
-        <model path="mxnet/FP32/fcn8s_vgg16/fcn8s_vgg16.xml" test="create_exenetwork" device="CPU" vmsize="754212" vmpeak="3124338" vmrss="17362" vmhwm="1770388" />
-        <model path="mxnet/FP32/fcn8s_vgg16/fcn8s_vgg16.xml" test="create_exenetwork" device="GPU" vmsize="669583" vmpeak="3628222" vmrss="324363" vmhwm="3347071" />
-        <model path="mxnet/FP32/fcn8s_vgg16/fcn8s_vgg16.xml" test="infer_request_inference" device="CPU" vmsize="2705824" vmpeak="3124338" vmrss="1906933" vmhwm="1906933" />
-        <model path="mxnet/FP32/fcn8s_vgg16/fcn8s_vgg16.xml" test="infer_request_inference" device="GPU" vmsize="3710449" vmpeak="3782539" vmrss="3356861" vmhwm="3356861" />
-        <model path="mxnet/FP32/full_imagenet_network/full_imagenet_network.xml" test="create_exenetwork" device="CPU" vmsize="756870" vmpeak="1192276" vmrss="32300" vmhwm="470417" />
-        <model path="mxnet/FP32/full_imagenet_network/full_imagenet_network.xml" test="create_exenetwork" device="GPU" vmsize="772970" vmpeak="1363872" vmrss="428054" vmhwm="1079412" />
-        <model path="mxnet/FP32/full_imagenet_network/full_imagenet_network.xml" test="infer_request_inference" device="CPU" vmsize="1123746" vmpeak="1195836" vmrss="335288" vmhwm="470162" />
-        <model path="mxnet/FP32/full_imagenet_network/full_imagenet_network.xml" test="infer_request_inference" device="GPU" vmsize="1219618" vmpeak="1362376" vmrss="875415" vmhwm="1077560" />
-        <model path="mxnet/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="CPU" vmsize="848157" vmpeak="1522730" vmrss="178424" vmhwm="792470" />
-        <model path="mxnet/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="GPU" vmsize="1549574" vmpeak="2182501" vmrss="1203804" vmhwm="1900742" />
-        <model path="mxnet/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="CPU" vmsize="1437730" vmpeak="1522730" vmrss="644402" vmhwm="794024" />
-        <model path="mxnet/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="GPU" vmsize="2145426" vmpeak="2217516" vmrss="1793162" vmhwm="1899854" />
-        <model path="mxnet/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="CPU" vmsize="756584" vmpeak="925636" vmrss="32982" vmhwm="182529" />
-        <model path="mxnet/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="GPU" vmsize="769230" vmpeak="907847" vmrss="423874" vmhwm="604982" />
-        <model path="mxnet/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="CPU" vmsize="928659" vmpeak="928659" vmrss="142304" vmhwm="182353" />
-        <model path="mxnet/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="GPU" vmsize="926103" vmpeak="998192" vmrss="572985" vmhwm="603592" />
-        <model path="mxnet/FP32/inception_v3_no_batchnorm/inception_v3_no_batchnorm.xml" test="create_exenetwork" device="CPU" vmsize="757851" vmpeak="1078682" vmrss="34751" vmhwm="348154" />
-        <model path="mxnet/FP32/inception_v3_no_batchnorm/inception_v3_no_batchnorm.xml" test="create_exenetwork" device="GPU" vmsize="911473" vmpeak="1183102" vmrss="565549" vmhwm="900992" />
-        <model path="mxnet/FP32/inception_v3_no_batchnorm/inception_v3_no_batchnorm.xml" test="infer_request_inference" device="CPU" vmsize="1051652" vmpeak="1123742" vmrss="258231" vmhwm="349131" />
-        <model path="mxnet/FP32/inception_v3_no_batchnorm/inception_v3_no_batchnorm.xml" test="infer_request_inference" device="GPU" vmsize="1182570" vmpeak="1254660" vmrss="829659" vmhwm="899540" />
-        <model path="mxnet/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="CPU" vmsize="764319" vmpeak="1327506" vmrss="61375" vmhwm="601048" />
-        <model path="mxnet/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="GPU" vmsize="1206559" vmpeak="1676272" vmrss="860362" vmhwm="1393906" />
-        <model path="mxnet/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="CPU" vmsize="1228396" vmpeak="1327475" vmrss="441135" vmhwm="603394" />
-        <model path="mxnet/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="GPU" vmsize="1637486" vmpeak="1709576" vmrss="1285376" vmhwm="1398377" />
-        <model path="mxnet/FP32/location_net/location_net.xml" test="create_exenetwork" device="CPU" vmsize="761046" vmpeak="1754029" vmrss="43916" vmhwm="1002368" />
-        <model path="mxnet/FP32/location_net/location_net.xml" test="create_exenetwork" device="GPU" vmsize="1026110" vmpeak="2108686" vmrss="680191" vmhwm="1826792" />
-        <model path="mxnet/FP32/location_net/location_net.xml" test="infer_request_inference" device="CPU" vmsize="1512095" vmpeak="1753998" vmrss="701483" vmhwm="1002333" />
-        <model path="mxnet/FP32/location_net/location_net.xml" test="infer_request_inference" device="GPU" vmsize="1880973" vmpeak="2110306" vmrss="1532348" vmhwm="1828952" />
-        <model path="mxnet/FP32/lresnet100e/lresnet100e.xml" test="create_exenetwork" device="CPU" vmsize="759695" vmpeak="1636430" vmrss="38011" vmhwm="883225" />
-        <model path="mxnet/FP32/lresnet100e/lresnet100e.xml" test="create_exenetwork" device="GPU" vmsize="1118880" vmpeak="1994964" vmrss="773102" vmhwm="1713034" />
-        <model path="mxnet/FP32/lresnet100e/lresnet100e.xml" test="infer_request_inference" device="CPU" vmsize="1430871" vmpeak="1636434" vmrss="617078" vmhwm="882886" />
-        <model path="mxnet/FP32/lresnet100e/lresnet100e.xml" test="infer_request_inference" device="GPU" vmsize="1804484" vmpeak="1993530" vmrss="1450724" vmhwm="1711340" />
-        <model path="mxnet/FP32/mobilenet/mobilenet.xml" test="create_exenetwork" device="CPU" vmsize="754872" vmpeak="821893" vmrss="55070" vmhwm="82354" />
-        <model path="mxnet/FP32/mobilenet/mobilenet.xml" test="create_exenetwork" device="GPU" vmsize="626304" vmpeak="734201" vmrss="280918" vmhwm="362925" />
-        <model path="mxnet/FP32/mobilenet/mobilenet.xml" test="infer_request_inference" device="CPU" vmsize="831344" vmpeak="903434" vmrss="86495" vmhwm="86495" />
-        <model path="mxnet/FP32/mobilenet/mobilenet.xml" test="infer_request_inference" device="GPU" vmsize="718357" vmpeak="790446" vmrss="367096" vmhwm="367096" />
-        <model path="mxnet/FP32/mobilenet_v2/mobilenet_v2.xml" test="create_exenetwork" device="CPU" vmsize="756826" vmpeak="819711" vmrss="53961" vmhwm="77206" />
-        <model path="mxnet/FP32/mobilenet_v2/mobilenet_v2.xml" test="create_exenetwork" device="GPU" vmsize="758023" vmpeak="861784" vmrss="412702" vmhwm="436805" />
-        <model path="mxnet/FP32/mobilenet_v2/mobilenet_v2.xml" test="infer_request_inference" device="CPU" vmsize="836470" vmpeak="891765" vmrss="83050" vmhwm="83050" />
-        <model path="mxnet/FP32/mobilenet_v2/mobilenet_v2.xml" test="infer_request_inference" device="GPU" vmsize="788986" vmpeak="861075" vmrss="437646" vmhwm="437646" />
-        <model path="mxnet/FP32/mtcnn_o/mtcnn_o.xml" test="create_exenetwork" device="CPU" vmsize="762731" vmpeak="804491" vmrss="17490" vmhwm="28454" />
-        <model path="mxnet/FP32/mtcnn_o/mtcnn_o.xml" test="create_exenetwork" device="GPU" vmsize="578894" vmpeak="670546" vmrss="233547" vmhwm="245172" />
-        <model path="mxnet/FP32/mtcnn_o/mtcnn_o.xml" test="infer_request_inference" device="CPU" vmsize="808209" vmpeak="808209" vmrss="28314" vmhwm="28314" />
-        <model path="mxnet/FP32/mtcnn_o/mtcnn_o.xml" test="infer_request_inference" device="GPU" vmsize="600507" vmpeak="672597" vmrss="247596" vmhwm="247596" />
-        <model path="mxnet/FP32/mtcnn_p/mtcnn_p.xml" test="create_exenetwork" device="CPU" vmsize="753530" vmpeak="881588" vmrss="13208" vmhwm="35261" />
-        <model path="mxnet/FP32/mtcnn_p/mtcnn_p.xml" test="create_exenetwork" device="GPU" vmsize="570042" vmpeak="661702" vmrss="224870" vmhwm="353003" />
-        <model path="mxnet/FP32/mtcnn_p/mtcnn_p.xml" test="infer_request_inference" device="CPU" vmsize="901260" vmpeak="901260" vmrss="107390" vmhwm="107390" />
-        <model path="mxnet/FP32/mtcnn_p/mtcnn_p.xml" test="infer_request_inference" device="GPU" vmsize="686408" vmpeak="758498" vmrss="332895" vmhwm="351907" />
-        <model path="mxnet/FP32/mtcnn_r/mtcnn_r.xml" test="create_exenetwork" device="CPU" vmsize="753711" vmpeak="803228" vmrss="14546" vmhwm="25586" />
-        <model path="mxnet/FP32/mtcnn_r/mtcnn_r.xml" test="create_exenetwork" device="GPU" vmsize="577288" vmpeak="667682" vmrss="231642" vmhwm="242167" />
-        <model path="mxnet/FP32/mtcnn_r/mtcnn_r.xml" test="infer_request_inference" device="CPU" vmsize="806102" vmpeak="806102" vmrss="24468" vmhwm="24468" />
-        <model path="mxnet/FP32/mtcnn_r/mtcnn_r.xml" test="infer_request_inference" device="GPU" vmsize="595588" vmpeak="667678" vmrss="242246" vmhwm="242246" />
-        <model path="mxnet/FP32/nin/nin.xml" test="create_exenetwork" device="CPU" vmsize="753838" vmpeak="907420" vmrss="80674" vmhwm="122086" />
-        <model path="mxnet/FP32/nin/nin.xml" test="create_exenetwork" device="GPU" vmsize="675633" vmpeak="798283" vmrss="330184" vmhwm="372754" />
-        <model path="mxnet/FP32/nin/nin.xml" test="infer_request_inference" device="CPU" vmsize="841390" vmpeak="913479" vmrss="123776" vmhwm="123776" />
-        <model path="mxnet/FP32/nin/nin.xml" test="infer_request_inference" device="GPU" vmsize="726066" vmpeak="798155" vmrss="390764" vmhwm="390764" />
-        <model path="mxnet/FP32/nst_vgg19/nst_vgg19.xml" test="create_exenetwork" device="CPU" vmsize="754080" vmpeak="884950" vmrss="35930" vmhwm="56368" />
-        <model path="mxnet/FP32/nst_vgg19/nst_vgg19.xml" test="create_exenetwork" device="GPU" vmsize="613082" vmpeak="713020" vmrss="267753" vmhwm="358019" />
-        <model path="mxnet/FP32/nst_vgg19/nst_vgg19.xml" test="infer_request_inference" device="CPU" vmsize="847726" vmpeak="919815" vmrss="83300" vmhwm="83300" />
-        <model path="mxnet/FP32/nst_vgg19/nst_vgg19.xml" test="infer_request_inference" device="GPU" vmsize="710754" vmpeak="782843" vmrss="357442" vmhwm="357442" />
-        <model path="mxnet/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="CPU" vmsize="760821" vmpeak="1370292" vmrss="44242" vmhwm="618965" />
-        <model path="mxnet/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="GPU" vmsize="1077643" vmpeak="1594964" vmrss="731733" vmhwm="1313127" />
-        <model path="mxnet/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="CPU" vmsize="1256200" vmpeak="1370261" vmrss="444043" vmhwm="617852" />
-        <model path="mxnet/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="GPU" vmsize="1494732" vmpeak="1596218" vmrss="1141690" vmhwm="1314187" />
-        <model path="mxnet/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="CPU" vmsize="765322" vmpeak="1593790" vmrss="61120" vmhwm="831661" />
-        <model path="mxnet/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="GPU" vmsize="1339184" vmpeak="2040148" vmrss="993968" vmhwm="1758746" />
-        <model path="mxnet/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="CPU" vmsize="1414652" vmpeak="1593754" vmrss="594426" vmhwm="832220" />
-        <model path="mxnet/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="GPU" vmsize="1871271" vmpeak="2037904" vmrss="1518501" vmhwm="1756343" />
-        <model path="mxnet/FP32/resnet_v2_101/resnet_v2_101.xml" test="create_exenetwork" device="CPU" vmsize="760650" vmpeak="1369557" vmrss="43384" vmhwm="618015" />
-        <model path="mxnet/FP32/resnet_v2_101/resnet_v2_101.xml" test="create_exenetwork" device="GPU" vmsize="1022863" vmpeak="1592206" vmrss="676698" vmhwm="1309880" />
-        <model path="mxnet/FP32/resnet_v2_101/resnet_v2_101.xml" test="infer_request_inference" device="CPU" vmsize="1255557" vmpeak="1369522" vmrss="445350" vmhwm="618750" />
-        <model path="mxnet/FP32/resnet_v2_101/resnet_v2_101.xml" test="infer_request_inference" device="GPU" vmsize="1490077" vmpeak="1591563" vmrss="1137444" vmhwm="1309910" />
-        <model path="mxnet/FP32/resnet_v2_152/resnet_v2_152.xml" test="create_exenetwork" device="CPU" vmsize="765204" vmpeak="1593108" vmrss="61124" vmhwm="831353" />
-        <model path="mxnet/FP32/resnet_v2_152/resnet_v2_152.xml" test="create_exenetwork" device="GPU" vmsize="1340754" vmpeak="2034586" vmrss="995636" vmhwm="1753100" />
-        <model path="mxnet/FP32/resnet_v2_152/resnet_v2_152.xml" test="infer_request_inference" device="CPU" vmsize="1413992" vmpeak="1593077" vmrss="592710" vmhwm="831098" />
-        <model path="mxnet/FP32/resnet_v2_152/resnet_v2_152.xml" test="infer_request_inference" device="GPU" vmsize="1867096" vmpeak="2036610" vmrss="1514532" vmhwm="1755089" />
-        <model path="mxnet/FP32/resnext_101/resnext_101.xml" test="create_exenetwork" device="CPU" vmsize="766911" vmpeak="1356080" vmrss="64389" vmhwm="623026" />
-        <model path="mxnet/FP32/resnext_101/resnext_101.xml" test="create_exenetwork" device="GPU" vmsize="1105068" vmpeak="1552320" vmrss="759990" vmhwm="1271340" />
-        <model path="mxnet/FP32/resnext_101/resnext_101.xml" test="infer_request_inference" device="CPU" vmsize="1258699" vmpeak="1356084" vmrss="468780" vmhwm="623788" />
-        <model path="mxnet/FP32/resnext_101/resnext_101.xml" test="infer_request_inference" device="GPU" vmsize="1478730" vmpeak="1553591" vmrss="1126364" vmhwm="1272167" />
-        <model path="mxnet/FP32/resnext_101_64x4d/resnext_101_64x4d.xml" test="create_exenetwork" device="CPU" vmsize="761239" vmpeak="1894468" vmrss="40691" vmhwm="1139410" />
-        <model path="mxnet/FP32/resnext_101_64x4d/resnext_101_64x4d.xml" test="create_exenetwork" device="GPU" vmsize="1418938" vmpeak="2248351" vmrss="1073886" vmhwm="1967262" />
-        <model path="mxnet/FP32/resnext_101_64x4d/resnext_101_64x4d.xml" test="infer_request_inference" device="CPU" vmsize="1618592" vmpeak="1894499" vmrss="810946" vmhwm="1140422" />
-        <model path="mxnet/FP32/resnext_101_64x4d/resnext_101_64x4d.xml" test="infer_request_inference" device="GPU" vmsize="1996112" vmpeak="2247322" vmrss="1660700" vmhwm="1965405" />
-        <model path="mxnet/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="CPU" vmsize="754987" vmpeak="880664" vmrss="29475" vmhwm="43832" />
-        <model path="mxnet/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="GPU" vmsize="616360" vmpeak="711106" vmrss="270859" vmhwm="322498" />
-        <model path="mxnet/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="CPU" vmsize="818562" vmpeak="818562" vmrss="47141" vmhwm="47141" />
-        <model path="mxnet/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="GPU" vmsize="674124" vmpeak="746213" vmrss="322731" vmhwm="322731" />
-        <model path="mxnet/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="create_exenetwork" device="CPU" vmsize="755224" vmpeak="1146433" vmrss="21806" vmhwm="370044" />
-        <model path="mxnet/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="create_exenetwork" device="GPU" vmsize="775324" vmpeak="1077709" vmrss="430342" vmhwm="796857" />
-        <model path="mxnet/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="infer_request_inference" device="CPU" vmsize="1113904" vmpeak="1185993" vmrss="312527" vmhwm="370946" />
-        <model path="mxnet/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="infer_request_inference" device="GPU" vmsize="1137391" vmpeak="1137391" vmrss="785391" vmhwm="793201" />
-        <model path="mxnet/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="CPU" vmsize="754133" vmpeak="2548906" vmrss="14955" vmhwm="1807044" />
-        <model path="mxnet/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="GPU" vmsize="668619" vmpeak="3326725" vmrss="322691" vmhwm="3044404" />
-        <model path="mxnet/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="CPU" vmsize="2027476" vmpeak="2548906" vmrss="1242678" vmhwm="1808470" />
-        <model path="mxnet/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="GPU" vmsize="2438563" vmpeak="3326725" vmrss="2085028" vmhwm="3044505" />
-        <model path="mxnet/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="CPU" vmsize="754226" vmpeak="2618325" vmrss="15708" vmhwm="1877977" />
-        <model path="mxnet/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="GPU" vmsize="741092" vmpeak="3397116" vmrss="396074" vmhwm="3115345" />
-        <model path="mxnet/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="CPU" vmsize="2074089" vmpeak="2618325" vmrss="1290049" vmhwm="1878672" />
-        <model path="mxnet/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="GPU" vmsize="2518436" vmpeak="3397178" vmrss="2165728" vmhwm="3115459" />
-        <model path="mxnet/FP32/yolo_v1_full/yolo_v1_full.xml" test="create_exenetwork" device="CPU" vmsize="754701" vmpeak="4259684" vmrss="17626" vmhwm="3531853" />
-        <model path="mxnet/FP32/yolo_v1_full/yolo_v1_full.xml" test="create_exenetwork" device="GPU" vmsize="747582" vmpeak="5921322" vmrss="402490" vmhwm="5639084" />
-        <model path="mxnet/FP32/yolo_v1_full/yolo_v1_full.xml" test="infer_request_inference" device="CPU" vmsize="3095241" vmpeak="4259670" vmrss="2379062" vmhwm="3530652" />
-        <model path="mxnet/FP32/yolo_v1_full/yolo_v1_full.xml" test="infer_request_inference" device="GPU" vmsize="4163667" vmpeak="5923566" vmrss="3810193" vmhwm="5640967" />
-        <model path="mxnet/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="create_exenetwork" device="CPU" vmsize="754023" vmpeak="1334414" vmrss="15254" vmhwm="608322" />
-        <model path="mxnet/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="create_exenetwork" device="GPU" vmsize="600701" vmpeak="1330978" vmrss="255912" vmhwm="1049844" />
-        <model path="mxnet/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="infer_request_inference" device="CPU" vmsize="1215838" vmpeak="1334383" vmrss="428331" vmhwm="607442" />
-        <model path="mxnet/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="infer_request_inference" device="GPU" vmsize="1199972" vmpeak="1330384" vmrss="847391" vmhwm="1049228" />
-        <model path="onnx/FP32/ssd_resnet34/ssd_resnet34.xml" test="create_exenetwork" device="CPU" vmsize="755387" vmpeak="1175570" vmrss="25374" vmhwm="306904" />
-        <model path="onnx/FP32/ssd_resnet34/ssd_resnet34.xml" test="create_exenetwork" device="GPU" vmsize="805222" vmpeak="1346307" vmrss="460781" vmhwm="1065873" />
-        <model path="onnx/FP32/ssd_resnet34/ssd_resnet34.xml" test="infer_request_inference" device="CPU" vmsize="1188580" vmpeak="1260670" vmrss="336036" vmhwm="336036" />
-        <model path="onnx/FP32/ssd_resnet34/ssd_resnet34.xml" test="infer_request_inference" device="GPU" vmsize="1449408" vmpeak="1521498" vmrss="1096792" vmhwm="1096792" />
-        <model path="onnx/FP32/ssd_resnet34_new/ssd_resnet34_new.xml" test="create_exenetwork" device="CPU" vmsize="756822" vmpeak="1181615" vmrss="28468" vmhwm="309716" />
-        <model path="onnx/FP32/ssd_resnet34_new/ssd_resnet34_new.xml" test="create_exenetwork" device="GPU" vmsize="819271" vmpeak="2432738" vmrss="474764" vmhwm="1101047" />
-        <model path="onnx/FP32/ssd_resnet34_new/ssd_resnet34_new.xml" test="infer_request_inference" device="CPU" vmsize="1189117" vmpeak="1261207" vmrss="333788" vmhwm="333788" />
-        <model path="onnx/FP32/ssd_resnet34_new/ssd_resnet34_new.xml" test="infer_request_inference" device="GPU" vmsize="2539222" vmpeak="2611312" vmrss="2191604" vmhwm="2191604" />
-        <model path="pytorch/FP32/inceptionv3_pretrained/inceptionv3_pretrained.xml" test="create_exenetwork" device="CPU" vmsize="757878" vmpeak="1077934" vmrss="35261" vmhwm="348964" />
-        <model path="pytorch/FP32/inceptionv3_pretrained/inceptionv3_pretrained.xml" test="create_exenetwork" device="GPU" vmsize="899610" vmpeak="1179116" vmrss="553863" vmhwm="896997" />
-        <model path="pytorch/FP32/inceptionv3_pretrained/inceptionv3_pretrained.xml" test="infer_request_inference" device="CPU" vmsize="1050878" vmpeak="1077876" vmrss="256506" vmhwm="347974" />
-        <model path="pytorch/FP32/inceptionv3_pretrained/inceptionv3_pretrained.xml" test="infer_request_inference" device="GPU" vmsize="1179239" vmpeak="1251329" vmrss="826553" vmhwm="897714" />
-        <model path="pytorch/FP32/resnet50_pretrained/resnet50_pretrained.xml" test="create_exenetwork" device="CPU" vmsize="760456" vmpeak="1096708" vmrss="27315" vmhwm="361944" />
-        <model path="pytorch/FP32/resnet50_pretrained/resnet50_pretrained.xml" test="create_exenetwork" device="GPU" vmsize="834275" vmpeak="1073569" vmrss="489086" vmhwm="792343" />
-        <model path="pytorch/FP32/resnet50_pretrained/resnet50_pretrained.xml" test="infer_request_inference" device="CPU" vmsize="1058622" vmpeak="1130712" vmrss="267682" vmhwm="362749" />
-        <model path="pytorch/FP32/resnet50_pretrained/resnet50_pretrained.xml" test="infer_request_inference" device="GPU" vmsize="1050852" vmpeak="1122941" vmrss="697576" vmhwm="791040" />
-        <model path="pytorch/FP32/resnet50_torchvision/resnet50_torchvision.xml" test="create_exenetwork" device="CPU" vmsize="755950" vmpeak="1092203" vmrss="27640" vmhwm="362740" />
-        <model path="pytorch/FP32/resnet50_torchvision/resnet50_torchvision.xml" test="create_exenetwork" device="GPU" vmsize="835951" vmpeak="1073516" vmrss="490674" vmhwm="792224" />
-        <model path="pytorch/FP32/resnet50_torchvision/resnet50_torchvision.xml" test="infer_request_inference" device="CPU" vmsize="1058626" vmpeak="1130716" vmrss="266516" vmhwm="361992" />
-        <model path="pytorch/FP32/resnet50_torchvision/resnet50_torchvision.xml" test="infer_request_inference" device="GPU" vmsize="1050218" vmpeak="1071435" vmrss="696669" vmhwm="789848" />
-        <model path="pytorch/FP32/squeezenet_v1.1_pretrained/squeezenet_v1.1_pretrained.xml" test="create_exenetwork" device="CPU" vmsize="754872" vmpeak="880550" vmrss="29603" vmhwm="43212" />
-        <model path="pytorch/FP32/squeezenet_v1.1_pretrained/squeezenet_v1.1_pretrained.xml" test="create_exenetwork" device="GPU" vmsize="648881" vmpeak="743626" vmrss="303424" vmhwm="318348" />
-        <model path="pytorch/FP32/squeezenet_v1.1_pretrained/squeezenet_v1.1_pretrained.xml" test="infer_request_inference" device="CPU" vmsize="818246" vmpeak="818246" vmrss="46534" vmhwm="46534" />
-        <model path="pytorch/FP32/squeezenet_v1.1_pretrained/squeezenet_v1.1_pretrained.xml" test="infer_request_inference" device="GPU" vmsize="674146" vmpeak="746235" vmrss="320315" vmhwm="320315" />
-        <model path="tf/1.14.0/FP32/bert_base_uncased/bert_base_uncased.xml" test="create_exenetwork" device="CPU" vmsize="764755" vmpeak="2092574" vmrss="38016" vmhwm="1352450" />
-        <model path="tf/1.14.0/FP32/bert_base_uncased/bert_base_uncased.xml" test="create_exenetwork" device="GPU" vmsize="1578328" vmpeak="3355976" vmrss="1233474" vmhwm="3074953" />
-        <model path="tf/1.14.0/FP32/bert_base_uncased/bert_base_uncased.xml" test="infer_request_inference" device="CPU" vmsize="1802838" vmpeak="2092587" vmrss="994188" vmhwm="1352709" />
-        <model path="tf/1.14.0/FP32/bert_base_uncased/bert_base_uncased.xml" test="infer_request_inference" device="GPU" vmsize="2958472" vmpeak="3352694" vmrss="2607677" vmhwm="3072185" />
-        <model path="tf/1.14.0/FP32/bert_xnli/bert_xnli.xml" test="create_exenetwork" device="CPU" vmsize="765124" vmpeak="2035453" vmrss="39745" vmhwm="1292420" />
-        <model path="tf/1.14.0/FP32/bert_xnli/bert_xnli.xml" test="create_exenetwork" device="GPU" vmsize="1939801" vmpeak="3261715" vmrss="1594617" vmhwm="2980577" />
-        <model path="tf/1.14.0/FP32/bert_xnli/bert_xnli.xml" test="infer_request_inference" device="CPU" vmsize="1750196" vmpeak="2039945" vmrss="935774" vmhwm="1291963" />
-        <model path="tf/1.14.0/FP32/bert_xnli/bert_xnli.xml" test="infer_request_inference" device="GPU" vmsize="2902235" vmpeak="3265460" vmrss="2551727" vmhwm="2984352" />
-        <model path="tf/1.14.0/FP32/cmu/cmu.xml" test="create_exenetwork" device="CPU" vmsize="757587" vmpeak="1547678" vmrss="33004" vmhwm="718973" />
-        <model path="tf/1.14.0/FP32/cmu/cmu.xml" test="create_exenetwork" device="GPU" vmsize="1154670" vmpeak="1678943" vmrss="809811" vmhwm="1398284" />
-        <model path="tf/1.14.0/FP32/cmu/cmu.xml" test="infer_request_inference" device="CPU" vmsize="1553134" vmpeak="1553134" vmrss="606232" vmhwm="719791" />
-        <model path="tf/1.14.0/FP32/cmu/cmu.xml" test="infer_request_inference" device="GPU" vmsize="1753910" vmpeak="1826000" vmrss="1400234" vmhwm="1400234" />
-        <model path="tf/1.14.0/FP32/deeplab_v3/deeplab_v3.xml" test="create_exenetwork" device="CPU" vmsize="757160" vmpeak="867486" vmrss="41307" vmhwm="62678" />
-        <model path="tf/1.14.0/FP32/deeplab_v3/deeplab_v3.xml" test="create_exenetwork" device="GPU" vmsize="743283" vmpeak="841055" vmrss="398604" vmhwm="537209" />
-        <model path="tf/1.14.0/FP32/deeplab_v3/deeplab_v3.xml" test="infer_request_inference" device="CPU" vmsize="888087" vmpeak="960176" vmrss="114166" vmhwm="114166" />
-        <model path="tf/1.14.0/FP32/deeplab_v3/deeplab_v3.xml" test="infer_request_inference" device="GPU" vmsize="894339" vmpeak="966429" vmrss="541912" vmhwm="541912" />
-        <model path="tf/1.14.0/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="CPU" vmsize="772728" vmpeak="951218" vmrss="95840" vmhwm="151676" />
-        <model path="tf/1.14.0/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="GPU" vmsize="1135195" vmpeak="1245301" vmrss="789848" vmhwm="820410" />
-        <model path="tf/1.14.0/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="CPU" vmsize="985450" vmpeak="1057540" vmrss="159046" vmhwm="159046" />
-        <model path="tf/1.14.0/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="GPU" vmsize="1171152" vmpeak="1243242" vmrss="818598" vmhwm="818598" />
-        <model path="tf/1.14.0/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="CPU" vmsize="864168" vmpeak="998263" vmrss="126266" vmhwm="241604" />
-        <model path="tf/1.14.0/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="GPU" vmsize="1353237" vmpeak="1472583" vmrss="1007978" vmhwm="1094614" />
-        <model path="tf/1.14.0/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="CPU" vmsize="1060316" vmpeak="1132406" vmrss="238326" vmhwm="240724" />
-        <model path="tf/1.14.0/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="GPU" vmsize="1447146" vmpeak="1519236" vmrss="1094759" vmhwm="1097835" />
-        <model path="tf/1.14.0/FP32/dssd_avigilon/dssd_avigilon.xml" test="create_exenetwork" device="CPU" vmsize="757156" vmpeak="826843" vmrss="69031" vmhwm="100887" />
-        <model path="tf/1.14.0/FP32/dssd_avigilon/dssd_avigilon.xml" test="create_exenetwork" device="GPU" vmsize="796250" vmpeak="906813" vmrss="451171" vmhwm="482077" />
-        <model path="tf/1.14.0/FP32/dssd_avigilon/dssd_avigilon.xml" test="infer_request_inference" device="CPU" vmsize="849041" vmpeak="849041" vmrss="104464" vmhwm="104464" />
-        <model path="tf/1.14.0/FP32/dssd_avigilon/dssd_avigilon.xml" test="infer_request_inference" device="GPU" vmsize="833984" vmpeak="906074" vmrss="481786" vmhwm="481786" />
-        <model path="tf/1.14.0/FP32/facenet/facenet.xml" test="create_exenetwork" device="CPU" vmsize="760786" vmpeak="1139173" vmrss="66413" vmhwm="353346" />
-        <model path="tf/1.14.0/FP32/facenet/facenet.xml" test="create_exenetwork" device="GPU" vmsize="1055560" vmpeak="1255601" vmrss="710595" vmhwm="974815" />
-        <model path="tf/1.14.0/FP32/facenet/facenet.xml" test="infer_request_inference" device="CPU" vmsize="1097984" vmpeak="1170074" vmrss="281050" vmhwm="352228" />
-        <model path="tf/1.14.0/FP32/facenet/facenet.xml" test="infer_request_inference" device="GPU" vmsize="1259253" vmpeak="1331343" vmrss="906562" vmhwm="976483" />
-        <model path="tf/1.14.0/FP32/faster_rcnn_inception_resnet_v2_atrous_coco/faster_rcnn_inception_resnet_v2_atrous_coco.xml" test="create_exenetwork" device="CPU" vmsize="920884" vmpeak="2443892" vmrss="237186" vmhwm="851215" />
-        <model path="tf/1.14.0/FP32/faster_rcnn_inception_resnet_v2_atrous_coco/faster_rcnn_inception_resnet_v2_atrous_coco.xml" test="create_exenetwork" device="GPU" vmsize="1751376" vmpeak="4164239" vmrss="1406411" vmhwm="3883422" />
-        <model path="tf/1.14.0/FP32/faster_rcnn_inception_v2_coco/faster_rcnn_inception_v2_coco.xml" test="create_exenetwork" device="CPU" vmsize="757323" vmpeak="986519" vmrss="35006" vmhwm="212911" />
-        <model path="tf/1.14.0/FP32/faster_rcnn_inception_v2_coco/faster_rcnn_inception_v2_coco.xml" test="create_exenetwork" device="GPU" vmsize="862219" vmpeak="1179283" vmrss="516881" vmhwm="897930" />
-        <model path="tf/1.14.0/FP32/faster_rcnn_resnet101_coco/faster_rcnn_resnet101_coco.xml" test="create_exenetwork" device="CPU" vmsize="761538" vmpeak="1491811" vmrss="45667" vmhwm="671554" />
-        <model path="tf/1.14.0/FP32/faster_rcnn_resnet101_coco/faster_rcnn_resnet101_coco.xml" test="create_exenetwork" device="GPU" vmsize="1126884" vmpeak="1800550" vmrss="781739" vmhwm="1519302" />
-        <model path="tf/1.14.0/FP32/faster_rcnn_resnet50_coco/faster_rcnn_resnet50_coco.xml" test="create_exenetwork" device="CPU" vmsize="766964" vmpeak="1233342" vmrss="29568" vmhwm="415509" />
-        <model path="tf/1.14.0/FP32/faster_rcnn_resnet50_coco/faster_rcnn_resnet50_coco.xml" test="create_exenetwork" device="GPU" vmsize="897432" vmpeak="1347007" vmrss="553357" vmhwm="1067290" />
-        <model path="tf/1.14.0/FP32/i3d_rgb/i3d_rgb.xml" test="create_exenetwork" device="CPU" vmsize="756562" vmpeak="1099533" vmrss="30078" vmhwm="245590" />
-        <model path="tf/1.14.0/FP32/i3d_rgb/i3d_rgb.xml" test="create_exenetwork" device="GPU" vmsize="764170" vmpeak="1353149" vmrss="419267" vmhwm="1072244" />
-        <model path="tf/1.14.0/FP32/i3d_rgb/i3d_rgb.xml" test="infer_request_inference" device="CPU" vmsize="1478496" vmpeak="1478496" vmrss="332820" vmhwm="332820" />
-        <model path="tf/1.14.0/FP32/i3d_rgb/i3d_rgb.xml" test="infer_request_inference" device="GPU" vmsize="1423364" vmpeak="1495454" vmrss="1070973" vmhwm="1172441" />
-        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.0/icv_squeezenet_v1.0.xml" test="create_exenetwork" device="CPU" vmsize="755092" vmpeak="815298" vmrss="28811" vmhwm="43687" />
-        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.0/icv_squeezenet_v1.0.xml" test="create_exenetwork" device="GPU" vmsize="620734" vmpeak="715479" vmrss="274991" vmhwm="324935" />
-        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.0/icv_squeezenet_v1.0.xml" test="infer_request_inference" device="CPU" vmsize="825268" vmpeak="825268" vmrss="48439" vmhwm="48439" />
-        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.0/icv_squeezenet_v1.0.xml" test="infer_request_inference" device="GPU" vmsize="680592" vmpeak="752681" vmrss="326972" vmhwm="326972" />
-        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.1/icv_squeezenet_v1.1.xml" test="create_exenetwork" device="CPU" vmsize="765182" vmpeak="880712" vmrss="29827" vmhwm="44149" />
-        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.1/icv_squeezenet_v1.1.xml" test="create_exenetwork" device="GPU" vmsize="612620" vmpeak="707366" vmrss="266855" vmhwm="323734" />
-        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.1/icv_squeezenet_v1.1.xml" test="infer_request_inference" device="CPU" vmsize="818879" vmpeak="818879" vmrss="46534" vmhwm="46534" />
-        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.1/icv_squeezenet_v1.1.xml" test="infer_request_inference" device="GPU" vmsize="681010" vmpeak="753099" vmrss="326902" vmhwm="326902" />
-        <model path="tf/1.14.0/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="CPU" vmsize="848056" vmpeak="1522360" vmrss="147382" vmhwm="794481" />
-        <model path="tf/1.14.0/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="GPU" vmsize="1699992" vmpeak="2187231" vmrss="1354892" vmhwm="1906344" />
-        <model path="tf/1.14.0/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="CPU" vmsize="1437365" vmpeak="1522364" vmrss="643724" vmhwm="793755" />
-        <model path="tf/1.14.0/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="GPU" vmsize="2152515" vmpeak="2224604" vmrss="1800026" vmhwm="1900395" />
-        <model path="tf/1.14.0/FP32/inception_v1/inception_v1.xml" test="create_exenetwork" device="CPU" vmsize="757526" vmpeak="905132" vmrss="83195" vmhwm="119653" />
-        <model path="tf/1.14.0/FP32/inception_v1/inception_v1.xml" test="create_exenetwork" device="GPU" vmsize="815988" vmpeak="932663" vmrss="470742" vmhwm="507760" />
-        <model path="tf/1.14.0/FP32/inception_v1/inception_v1.xml" test="infer_request_inference" device="CPU" vmsize="1007820" vmpeak="1007820" vmrss="123926" vmhwm="123926" />
-        <model path="tf/1.14.0/FP32/inception_v1/inception_v1.xml" test="infer_request_inference" device="GPU" vmsize="861520" vmpeak="933609" vmrss="507870" vmhwm="507870" />
-        <model path="tf/1.14.0/FP32/inception_v2/inception_v2.xml" test="create_exenetwork" device="CPU" vmsize="756756" vmpeak="925425" vmrss="34007" vmhwm="180769" />
-        <model path="tf/1.14.0/FP32/inception_v2/inception_v2.xml" test="create_exenetwork" device="GPU" vmsize="824168" vmpeak="962403" vmrss="478737" vmhwm="610280" />
-        <model path="tf/1.14.0/FP32/inception_v2/inception_v2.xml" test="infer_request_inference" device="CPU" vmsize="927669" vmpeak="999759" vmrss="141772" vmhwm="181966" />
-        <model path="tf/1.14.0/FP32/inception_v2/inception_v2.xml" test="infer_request_inference" device="GPU" vmsize="936755" vmpeak="1008845" vmrss="583963" vmhwm="611516" />
-        <model path="tf/1.14.0/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="CPU" vmsize="759013" vmpeak="1063559" vmrss="51255" vmhwm="349113" />
-        <model path="tf/1.14.0/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="GPU" vmsize="925958" vmpeak="1184101" vmrss="580056" vmhwm="902325" />
-        <model path="tf/1.14.0/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="CPU" vmsize="1043583" vmpeak="1115672" vmrss="263520" vmhwm="349034" />
-        <model path="tf/1.14.0/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="GPU" vmsize="1189548" vmpeak="1261638" vmrss="836646" vmhwm="903676" />
-        <model path="tf/1.14.0/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="CPU" vmsize="764574" vmpeak="1327493" vmrss="64108" vmhwm="603842" />
-        <model path="tf/1.14.0/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="GPU" vmsize="1221717" vmpeak="1686643" vmrss="875617" vmhwm="1404475" />
-        <model path="tf/1.14.0/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="CPU" vmsize="1381556" vmpeak="1403402" vmrss="440356" vmhwm="602751" />
-        <model path="tf/1.14.0/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="GPU" vmsize="1641921" vmpeak="1714011" vmrss="1289340" vmhwm="1405430" />
-        <model path="tf/1.14.0/FP32/mask_rcnn_resnet101_atrous_coco/mask_rcnn_resnet101_atrous_coco.xml" test="create_exenetwork" device="CPU" vmsize="762119" vmpeak="2738828" vmrss="47203" vmhwm="947557" />
-        <model path="tf/1.14.0/FP32/mask_rcnn_resnet101_atrous_coco/mask_rcnn_resnet101_atrous_coco.xml" test="create_exenetwork" device="GPU" vmsize="1295483" vmpeak="4189812" vmrss="949788" vmhwm="3908550" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_0.25_128/mobilenet_v1_0.25_128.xml" test="create_exenetwork" device="CPU" vmsize="763840" vmpeak="805556" vmrss="21938" vmhwm="33264" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_0.25_128/mobilenet_v1_0.25_128.xml" test="create_exenetwork" device="GPU" vmsize="652572" vmpeak="744180" vmrss="306754" vmhwm="318432" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_0.25_128/mobilenet_v1_0.25_128.xml" test="infer_request_inference" device="CPU" vmsize="814000" vmpeak="814000" vmrss="33391" vmhwm="33391" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_0.25_128/mobilenet_v1_0.25_128.xml" test="infer_request_inference" device="GPU" vmsize="672144" vmpeak="744233" vmrss="319026" vmhwm="319026" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_0.5_160/mobilenet_v1_0.5_160.xml" test="create_exenetwork" device="CPU" vmsize="754705" vmpeak="881188" vmrss="29282" vmhwm="44836" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_0.5_160/mobilenet_v1_0.5_160.xml" test="create_exenetwork" device="GPU" vmsize="614209" vmpeak="709759" vmrss="268778" vmhwm="326845" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_0.5_160/mobilenet_v1_0.5_160.xml" test="infer_request_inference" device="CPU" vmsize="818228" vmpeak="890318" vmrss="45513" vmhwm="45513" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_0.5_160/mobilenet_v1_0.5_160.xml" test="infer_request_inference" device="GPU" vmsize="682484" vmpeak="754573" vmrss="328966" vmhwm="328966" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_1.0_224/mobilenet_v1_1.0_224.xml" test="create_exenetwork" device="CPU" vmsize="754903" vmpeak="821928" vmrss="55237" vmhwm="82768" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_1.0_224/mobilenet_v1_1.0_224.xml" test="create_exenetwork" device="GPU" vmsize="643887" vmpeak="751788" vmrss="298685" vmhwm="367602" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_1.0_224/mobilenet_v1_1.0_224.xml" test="infer_request_inference" device="CPU" vmsize="831111" vmpeak="831111" vmrss="86732" vmhwm="86732" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_1.0_224/mobilenet_v1_1.0_224.xml" test="infer_request_inference" device="GPU" vmsize="720979" vmpeak="793069" vmrss="367584" vmhwm="367584" />
-        <model path="tf/1.14.0/FP32/mobilenet_v2_1.0_224/mobilenet_v2_1.0_224.xml" test="create_exenetwork" device="CPU" vmsize="756870" vmpeak="819759" vmrss="54586" vmhwm="78570" />
-        <model path="tf/1.14.0/FP32/mobilenet_v2_1.0_224/mobilenet_v2_1.0_224.xml" test="create_exenetwork" device="GPU" vmsize="705724" vmpeak="809490" vmrss="360267" vmhwm="435512" />
-        <model path="tf/1.14.0/FP32/mobilenet_v2_1.0_224/mobilenet_v2_1.0_224.xml" test="infer_request_inference" device="CPU" vmsize="835978" vmpeak="835978" vmrss="82583" vmhwm="82583" />
-        <model path="tf/1.14.0/FP32/mobilenet_v2_1.0_224/mobilenet_v2_1.0_224.xml" test="infer_request_inference" device="GPU" vmsize="788902" vmpeak="860992" vmrss="435727" vmhwm="435727" />
-        <model path="tf/1.14.0/FP32/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224.xml" test="create_exenetwork" device="CPU" vmsize="756725" vmpeak="831080" vmrss="76414" vmhwm="111914" />
-        <model path="tf/1.14.0/FP32/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224.xml" test="create_exenetwork" device="GPU" vmsize="787058" vmpeak="902290" vmrss="441399" vmhwm="476911" />
-        <model path="tf/1.14.0/FP32/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224.xml" test="infer_request_inference" device="CPU" vmsize="847299" vmpeak="847299" vmrss="120969" vmhwm="120969" />
-        <model path="tf/1.14.0/FP32/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224.xml" test="infer_request_inference" device="GPU" vmsize="828920" vmpeak="901010" vmrss="475939" vmhwm="475939" />
-        <model path="tf/1.14.0/FP32/ncf/ncf.xml" test="create_exenetwork" device="CPU" vmsize="760988" vmpeak="1018754" vmrss="14484" vmhwm="296612" />
-        <model path="tf/1.14.0/FP32/ncf/ncf.xml" test="create_exenetwork" device="GPU" vmsize="600859" vmpeak="965967" vmrss="255569" vmhwm="685150" />
-        <model path="tf/1.14.0/FP32/ncf/ncf.xml" test="infer_request_inference" device="CPU" vmsize="1095155" vmpeak="1167245" vmrss="304607" vmhwm="304607" />
-        <model path="tf/1.14.0/FP32/ncf/ncf.xml" test="infer_request_inference" device="GPU" vmsize="1004577" vmpeak="1076666" vmrss="651943" vmhwm="689915" />
-        <model path="tf/1.14.0/FP32/resnet_v1.5_50/resnet_v1.5_50.xml" test="create_exenetwork" device="CPU" vmsize="756096" vmpeak="1100136" vmrss="27812" vmhwm="362344" />
-        <model path="tf/1.14.0/FP32/resnet_v1.5_50/resnet_v1.5_50.xml" test="create_exenetwork" device="GPU" vmsize="822830" vmpeak="1073947" vmrss="477193" vmhwm="792264" />
-        <model path="tf/1.14.0/FP32/resnet_v1.5_50/resnet_v1.5_50.xml" test="infer_request_inference" device="CPU" vmsize="1060571" vmpeak="1132661" vmrss="269808" vmhwm="362771" />
-        <model path="tf/1.14.0/FP32/resnet_v1.5_50/resnet_v1.5_50.xml" test="infer_request_inference" device="GPU" vmsize="1054684" vmpeak="1075272" vmrss="702310" vmhwm="794314" />
-        <model path="tf/1.14.0/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="CPU" vmsize="760764" vmpeak="1338383" vmrss="42706" vmhwm="617047" />
-        <model path="tf/1.14.0/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="GPU" vmsize="1108602" vmpeak="1561885" vmrss="762616" vmhwm="1279700" />
-        <model path="tf/1.14.0/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="CPU" vmsize="1279819" vmpeak="1338409" vmrss="435102" vmhwm="617865" />
-        <model path="tf/1.14.0/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="GPU" vmsize="1455146" vmpeak="1561388" vmrss="1101755" vmhwm="1279845" />
-        <model path="tf/1.14.0/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="CPU" vmsize="765221" vmpeak="1552262" vmrss="59875" vmhwm="829250" />
-        <model path="tf/1.14.0/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="GPU" vmsize="1322098" vmpeak="1985359" vmrss="976223" vmhwm="1703319" />
-        <model path="tf/1.14.0/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="CPU" vmsize="1373006" vmpeak="1552293" vmrss="581891" vmhwm="829848" />
-        <model path="tf/1.14.0/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="GPU" vmsize="1814348" vmpeak="1986380" vmrss="1461099" vmhwm="1704714" />
-        <model path="tf/1.14.0/FP32/resnet_v1_50/resnet_v1_50.xml" test="create_exenetwork" device="CPU" vmsize="766088" vmpeak="1079958" vmrss="27324" vmhwm="362155" />
-        <model path="tf/1.14.0/FP32/resnet_v1_50/resnet_v1_50.xml" test="create_exenetwork" device="GPU" vmsize="838965" vmpeak="1085884" vmrss="493407" vmhwm="804324" />
-        <model path="tf/1.14.0/FP32/resnet_v1_50/resnet_v1_50.xml" test="infer_request_inference" device="CPU" vmsize="1046157" vmpeak="1118246" vmrss="260515" vmhwm="362810" />
-        <model path="tf/1.14.0/FP32/resnet_v1_50/resnet_v1_50.xml" test="infer_request_inference" device="GPU" vmsize="1057223" vmpeak="1080772" vmrss="704066" vmhwm="799440" />
-        <model path="tf/1.14.0/FP32/resnet_v2_101/resnet_v2_101.xml" test="create_exenetwork" device="CPU" vmsize="761754" vmpeak="1365104" vmrss="45179" vmhwm="620879" />
-        <model path="tf/1.14.0/FP32/resnet_v2_101/resnet_v2_101.xml" test="create_exenetwork" device="GPU" vmsize="1120737" vmpeak="1613546" vmrss="774637" vmhwm="1331308" />
-        <model path="tf/1.14.0/FP32/resnet_v2_101/resnet_v2_101.xml" test="infer_request_inference" device="CPU" vmsize="1251346" vmpeak="1365135" vmrss="446415" vmhwm="620241" />
-        <model path="tf/1.14.0/FP32/resnet_v2_101/resnet_v2_101.xml" test="infer_request_inference" device="GPU" vmsize="1515817" vmpeak="1613858" vmrss="1162572" vmhwm="1331968" />
-        <model path="tf/1.14.0/FP32/resnet_v2_152/resnet_v2_152.xml" test="create_exenetwork" device="CPU" vmsize="839823" vmpeak="1569361" vmrss="155029" vmhwm="833157" />
-        <model path="tf/1.14.0/FP32/resnet_v2_152/resnet_v2_152.xml" test="create_exenetwork" device="GPU" vmsize="1363960" vmpeak="2068752" vmrss="1018507" vmhwm="1787042" />
-        <model path="tf/1.14.0/FP32/resnet_v2_152/resnet_v2_152.xml" test="infer_request_inference" device="CPU" vmsize="1476041" vmpeak="1569392" vmrss="679918" vmhwm="833914" />
-        <model path="tf/1.14.0/FP32/resnet_v2_152/resnet_v2_152.xml" test="infer_request_inference" device="GPU" vmsize="1904799" vmpeak="2060317" vmrss="1551756" vmhwm="1778167" />
-        <model path="tf/1.14.0/FP32/resnet_v2_50/resnet_v2_50.xml" test="create_exenetwork" device="CPU" vmsize="756602" vmpeak="1096774" vmrss="28393" vmhwm="363391" />
-        <model path="tf/1.14.0/FP32/resnet_v2_50/resnet_v2_50.xml" test="create_exenetwork" device="GPU" vmsize="845226" vmpeak="1103374" vmrss="500051" vmhwm="821986" />
-        <model path="tf/1.14.0/FP32/resnet_v2_50/resnet_v2_50.xml" test="infer_request_inference" device="CPU" vmsize="1063304" vmpeak="1135393" vmrss="271220" vmhwm="364399" />
-        <model path="tf/1.14.0/FP32/resnet_v2_50/resnet_v2_50.xml" test="infer_request_inference" device="GPU" vmsize="1092159" vmpeak="1105997" vmrss="738276" vmhwm="823983" />
-        <model path="tf/1.14.0/FP32/rfcn_resnet101_coco/rfcn_resnet101_coco.xml" test="create_exenetwork" device="CPU" vmsize="838816" vmpeak="1561762" vmrss="116930" vmhwm="752906" />
-        <model path="tf/1.14.0/FP32/rfcn_resnet101_coco/rfcn_resnet101_coco.xml" test="create_exenetwork" device="GPU" vmsize="1674490" vmpeak="2318250" vmrss="1329842" vmhwm="2034986" />
-        <model path="tf/1.14.0/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="CPU" vmsize="755062" vmpeak="880739" vmrss="28415" vmhwm="43480" />
-        <model path="tf/1.14.0/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="GPU" vmsize="609298" vmpeak="704044" vmrss="263868" vmhwm="323488" />
-        <model path="tf/1.14.0/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="CPU" vmsize="825048" vmpeak="897138" vmrss="49108" vmhwm="49108" />
-        <model path="tf/1.14.0/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="GPU" vmsize="675844" vmpeak="747934" vmrss="322753" vmhwm="322753" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_coco/ssd_mobilenet_v1_coco.xml" test="create_exenetwork" device="CPU" vmsize="756804" vmpeak="978252" vmrss="70514" vmhwm="120370" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_coco/ssd_mobilenet_v1_coco.xml" test="create_exenetwork" device="GPU" vmsize="831318" vmpeak="949744" vmrss="485619" vmhwm="524550" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_coco/ssd_mobilenet_v1_coco.xml" test="infer_request_inference" device="CPU" vmsize="925689" vmpeak="997779" vmrss="130244" vmhwm="130244" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_coco/ssd_mobilenet_v1_coco.xml" test="infer_request_inference" device="GPU" vmsize="878099" vmpeak="950188" vmrss="525395" vmhwm="525395" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco/ssd_mobilenet_v1_fpn_coco.xml" test="create_exenetwork" device="CPU" vmsize="759435" vmpeak="1442861" vmrss="34680" vmhwm="509454" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco/ssd_mobilenet_v1_fpn_coco.xml" test="create_exenetwork" device="GPU" vmsize="1012906" vmpeak="1460487" vmrss="667977" vmhwm="1179833" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco/ssd_mobilenet_v1_fpn_coco.xml" test="infer_request_inference" device="CPU" vmsize="1368043" vmpeak="1442861" vmrss="427737" vmhwm="509533" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco/ssd_mobilenet_v1_fpn_coco.xml" test="infer_request_inference" device="GPU" vmsize="1542648" vmpeak="1542648" vmrss="1195304" vmhwm="1195304" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco_602x602/ssd_mobilenet_v1_fpn_coco_602x602.xml" test="create_exenetwork" device="CPU" vmsize="759558" vmpeak="1426185" vmrss="33862" vmhwm="507768" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco_602x602/ssd_mobilenet_v1_fpn_coco_602x602.xml" test="create_exenetwork" device="GPU" vmsize="1010358" vmpeak="1414454" vmrss="665451" vmhwm="1133941" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco_602x602/ssd_mobilenet_v1_fpn_coco_602x602.xml" test="infer_request_inference" device="CPU" vmsize="1350650" vmpeak="1426185" vmrss="421828" vmhwm="509168" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco_602x602/ssd_mobilenet_v1_fpn_coco_602x602.xml" test="infer_request_inference" device="GPU" vmsize="1493681" vmpeak="1565770" vmrss="1145416" vmhwm="1145416" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v2_coco/ssd_mobilenet_v2_coco.xml" test="create_exenetwork" device="CPU" vmsize="761433" vmpeak="985784" vmrss="41514" vmhwm="254610" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v2_coco/ssd_mobilenet_v2_coco.xml" test="create_exenetwork" device="GPU" vmsize="876933" vmpeak="1078919" vmrss="531814" vmhwm="798001" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v2_coco/ssd_mobilenet_v2_coco.xml" test="infer_request_inference" device="CPU" vmsize="1028508" vmpeak="1064698" vmrss="201212" vmhwm="254390" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v2_coco/ssd_mobilenet_v2_coco.xml" test="infer_request_inference" device="GPU" vmsize="1091807" vmpeak="1163896" vmrss="739525" vmhwm="798023" />
-        <model path="tf/1.14.0/FP32/unet2d/unet2d.xml" test="create_exenetwork" device="CPU" vmsize="754067" vmpeak="1169247" vmrss="15686" vmhwm="429523" />
-        <model path="tf/1.14.0/FP32/unet2d/unet2d.xml" test="create_exenetwork" device="GPU" vmsize="682413" vmpeak="1130109" vmrss="337194" vmhwm="848733" />
-        <model path="tf/1.14.0/FP32/unet2d/unet2d.xml" test="infer_request_inference" device="CPU" vmsize="1106463" vmpeak="1178553" vmrss="321428" vmhwm="429871" />
-        <model path="tf/1.14.0/FP32/unet2d/unet2d.xml" test="infer_request_inference" device="GPU" vmsize="1083904" vmpeak="1155994" vmrss="730976" vmhwm="845882" />
-        <model path="tf/1.14.0/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="CPU" vmsize="754010" vmpeak="2548502" vmrss="15452" vmhwm="1807863" />
-        <model path="tf/1.14.0/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="GPU" vmsize="686602" vmpeak="3327385" vmrss="340982" vmhwm="3045398" />
-        <model path="tf/1.14.0/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="CPU" vmsize="2026776" vmpeak="2548502" vmrss="1241011" vmhwm="1808730" />
-        <model path="tf/1.14.0/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="GPU" vmsize="2438568" vmpeak="3312188" vmrss="2084328" vmhwm="3029980" />
-        <model path="tf/1.14.0/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="CPU" vmsize="754168" vmpeak="2617986" vmrss="16073" vmhwm="1877000" />
-        <model path="tf/1.14.0/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="GPU" vmsize="612194" vmpeak="3415310" vmrss="266732" vmhwm="3133363" />
-        <model path="tf/1.14.0/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="CPU" vmsize="2145479" vmpeak="2617885" vmrss="1287272" vmhwm="1877568" />
-        <model path="tf/1.14.0/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="GPU" vmsize="2521367" vmpeak="3415297" vmrss="2167426" vmhwm="3133059" />
-        <model path="tf/1.14.0/FP32/yolo_v2/yolo_v2.xml" test="create_exenetwork" device="CPU" vmsize="754344" vmpeak="1426625" vmrss="17173" vmhwm="684173" />
-        <model path="tf/1.14.0/FP32/yolo_v2/yolo_v2.xml" test="create_exenetwork" device="GPU" vmsize="684424" vmpeak="1460949" vmrss="339600" vmhwm="1180036" />
-        <model path="tf/1.14.0/FP32/yolo_v2/yolo_v2.xml" test="infer_request_inference" device="CPU" vmsize="1282802" vmpeak="1426625" vmrss="493737" vmhwm="684802" />
-        <model path="tf/1.14.0/FP32/yolo_v2/yolo_v2.xml" test="infer_request_inference" device="GPU" vmsize="1331783" vmpeak="1443006" vmrss="978560" vmhwm="1161124" />
-        <model path="tf/1.14.0/FP32/yolo_v2_tiny_voc/yolo_v2_tiny_voc.xml" test="create_exenetwork" device="CPU" vmsize="753724" vmpeak="954421" vmrss="14414" vmhwm="229578" />
-        <model path="tf/1.14.0/FP32/yolo_v2_tiny_voc/yolo_v2_tiny_voc.xml" test="create_exenetwork" device="GPU" vmsize="569179" vmpeak="816648" vmrss="224250" vmhwm="535449" />
-        <model path="tf/1.14.0/FP32/yolo_v2_tiny_voc/yolo_v2_tiny_voc.xml" test="infer_request_inference" device="CPU" vmsize="960810" vmpeak="960810" vmrss="174231" vmhwm="229807" />
-        <model path="tf/1.14.0/FP32/yolo_v2_tiny_voc/yolo_v2_tiny_voc.xml" test="infer_request_inference" device="GPU" vmsize="808627" vmpeak="880717" vmrss="455677" vmhwm="533002" />
-        <model path="tf/1.14.0/FP32/yolo_v2_voc/yolo_v2_voc.xml" test="create_exenetwork" device="CPU" vmsize="754344" vmpeak="1422647" vmrss="17437" vmhwm="680666" />
-        <model path="tf/1.14.0/FP32/yolo_v2_voc/yolo_v2_voc.xml" test="create_exenetwork" device="GPU" vmsize="686316" vmpeak="1436296" vmrss="340586" vmhwm="1154617" />
-        <model path="tf/1.14.0/FP32/yolo_v2_voc/yolo_v2_voc.xml" test="infer_request_inference" device="CPU" vmsize="1279797" vmpeak="1422616" vmrss="490982" vmhwm="680147" />
-        <model path="tf/1.14.0/FP32/yolo_v2_voc/yolo_v2_voc.xml" test="infer_request_inference" device="GPU" vmsize="1330780" vmpeak="1442570" vmrss="978392" vmhwm="1161490" />
-        <model path="tf/1.14.0/FP32/yolo_v3/yolo_v3.xml" test="create_exenetwork" device="CPU" vmsize="756958" vmpeak="1587260" vmrss="31108" vmhwm="836506" />
-        <model path="tf/1.14.0/FP32/yolo_v3/yolo_v3.xml" test="create_exenetwork" device="GPU" vmsize="1163712" vmpeak="1824596" vmrss="819011" vmhwm="1543559" />
-        <model path="tf/1.14.0/FP32/yolo_v3/yolo_v3.xml" test="infer_request_inference" device="CPU" vmsize="1405879" vmpeak="1591766" vmrss="610302" vmhwm="836594" />
-        <model path="tf/1.14.0/FP32/yolo_v3/yolo_v3.xml" test="infer_request_inference" device="GPU" vmsize="1734233" vmpeak="1823470" vmrss="1381925" vmhwm="1542178" />
-        <model path="tf/1.14.0/FP32/yolo_v3_tiny/yolo_v3_tiny.xml" test="create_exenetwork" device="CPU" vmsize="753975" vmpeak="895633" vmrss="15637" vmhwm="140927" />
-        <model path="tf/1.14.0/FP32/yolo_v3_tiny/yolo_v3_tiny.xml" test="create_exenetwork" device="GPU" vmsize="599332" vmpeak="728939" vmrss="254029" vmhwm="412566" />
-        <model path="tf/1.14.0/FP32/yolo_v3_tiny/yolo_v3_tiny.xml" test="infer_request_inference" device="CPU" vmsize="903469" vmpeak="975559" vmrss="116124" vmhwm="141182" />
-        <model path="tf/1.14.0/FP32/yolo_v3_tiny/yolo_v3_tiny.xml" test="infer_request_inference" device="GPU" vmsize="741738" vmpeak="813828" vmrss="389259" vmhwm="413476" />
+        <model path="caffe/FP32/alexnet/alexnet.xml" test="create_exenetwork" device="CPU" vmsize="1321668" vmpeak="1631245" vmrss="657919" vmhwm="967408" />
+        <model path="caffe/FP32/alexnet/alexnet.xml" test="create_exenetwork" device="GPU" vmsize="1563796" vmpeak="2064987" vmrss="1227532" vmhwm="1728485" />
+        <model path="caffe/FP32/alexnet/alexnet.xml" test="infer_request_inference" device="CPU" vmsize="1589073" vmpeak="1631151" vmrss="659287" vmhwm="966721" />
+        <model path="caffe/FP32/alexnet/alexnet.xml" test="infer_request_inference" device="GPU" vmsize="1557202" vmpeak="1973197" vmrss="1079972" vmhwm="1580035" />
+        <model path="caffe/FP32/caffenet/caffenet.xml" test="create_exenetwork" device="CPU" vmsize="1341314" vmpeak="1650890" vmrss="665329" vmhwm="974724" />
+        <model path="caffe/FP32/caffenet/caffenet.xml" test="create_exenetwork" device="GPU" vmsize="1591844" vmpeak="1793074" vmrss="1255238" vmhwm="1456566" />
+        <model path="caffe/FP32/caffenet/caffenet.xml" test="infer_request_inference" device="CPU" vmsize="1441388" vmpeak="1650797" vmrss="682999" vmhwm="973897" />
+        <model path="caffe/FP32/caffenet/caffenet.xml" test="infer_request_inference" device="GPU" vmsize="1605884" vmpeak="1696297" vmrss="1128160" vmhwm="1303270" />
+        <model path="caffe/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="CPU" vmsize="903562" vmpeak="903562" vmrss="180684" vmhwm="180684" />
+        <model path="caffe/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="GPU" vmsize="1301939" vmpeak="1301939" vmrss="964126" vmhwm="964126" />
+        <model path="caffe/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="CPU" vmsize="1170582" vmpeak="1255779" vmrss="189836" vmhwm="189836" />
+        <model path="caffe/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="GPU" vmsize="1057290" vmpeak="1142486" vmrss="582316" vmhwm="582316" />
+        <model path="caffe/FP32/densenet_161/densenet_161.xml" test="create_exenetwork" device="CPU" vmsize="1155512" vmpeak="1257531" vmrss="406551" vmhwm="508289" />
+        <model path="caffe/FP32/densenet_161/densenet_161.xml" test="create_exenetwork" device="GPU" vmsize="1884636" vmpeak="1884636" vmrss="1547655" vmhwm="1547655" />
+        <model path="caffe/FP32/densenet_161/densenet_161.xml" test="infer_request_inference" device="CPU" vmsize="1241500" vmpeak="1326696" vmrss="419666" vmhwm="506740" />
+        <model path="caffe/FP32/densenet_161/densenet_161.xml" test="infer_request_inference" device="GPU" vmsize="1583504" vmpeak="1668700" vmrss="1108941" vmhwm="1108941" />
+        <model path="caffe/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="CPU" vmsize="992170" vmpeak="1004790" vmrss="275704" vmhwm="288189" />
+        <model path="caffe/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="GPU" vmsize="1487241" vmpeak="1487241" vmrss="1150458" vmhwm="1150458" />
+        <model path="caffe/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="CPU" vmsize="1259122" vmpeak="1259122" vmrss="283545" vmhwm="286317" />
+        <model path="caffe/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="GPU" vmsize="1294259" vmpeak="1379456" vmrss="819712" vmhwm="819712" />
+        <model path="caffe/FP32/densenet_201/densenet_201.xml" test="create_exenetwork" device="CPU" vmsize="1135388" vmpeak="1188803" vmrss="366688" vmhwm="384436" />
+        <model path="caffe/FP32/densenet_201/densenet_201.xml" test="create_exenetwork" device="GPU" vmsize="1903132" vmpeak="1903132" vmrss="1341693" vmhwm="1509783" />
+        <model path="caffe/FP32/densenet_201/densenet_201.xml" test="infer_request_inference" device="CPU" vmsize="1221381" vmpeak="1306578" vmrss="376038" vmhwm="384514" />
+        <model path="caffe/FP32/densenet_201/densenet_201.xml" test="infer_request_inference" device="GPU" vmsize="1517360" vmpeak="1602556" vmrss="1041424" vmhwm="1041424" />
+        <model path="caffe/FP32/dilation/dilation.xml" test="create_exenetwork" device="CPU" vmsize="2658385" vmpeak="3374820" vmrss="1479264" vmhwm="2195507" />
+        <model path="caffe/FP32/dilation/dilation.xml" test="create_exenetwork" device="GPU" vmsize="3398751" vmpeak="3980990" vmrss="3009406" vmhwm="3589695" />
+        <model path="caffe/FP32/dilation/dilation.xml" test="infer_request_inference" device="CPU" vmsize="2763358" vmpeak="3374727" vmrss="1996228" vmhwm="2195658" />
+        <model path="caffe/FP32/dilation/dilation.xml" test="infer_request_inference" device="GPU" vmsize="3381653" vmpeak="3900676" vmrss="2904111" vmhwm="3506760" />
+        <model path="caffe/FP32/dpn_92/dpn_92.xml" test="create_exenetwork" device="CPU" vmsize="1254858" vmpeak="1436120" vmrss="461666" vmhwm="642226" />
+        <model path="caffe/FP32/dpn_92/dpn_92.xml" test="create_exenetwork" device="GPU" vmsize="1880288" vmpeak="2024947" vmrss="1544847" vmhwm="1688965" />
+        <model path="caffe/FP32/dpn_92/dpn_92.xml" test="infer_request_inference" device="CPU" vmsize="1529008" vmpeak="1529008" vmrss="505601" vmhwm="640972" />
+        <model path="caffe/FP32/dpn_92/dpn_92.xml" test="infer_request_inference" device="GPU" vmsize="1560561" vmpeak="1620039" vmrss="1084423" vmhwm="1227179" />
+        <model path="caffe/FP32/fcn_alexnet/fcn_alexnet.xml" test="create_exenetwork" device="CPU" vmsize="1467497" vmpeak="1765602" vmrss="637795" vmhwm="935719" />
+        <model path="caffe/FP32/fcn_alexnet/fcn_alexnet.xml" test="create_exenetwork" device="GPU" vmsize="1611261" vmpeak="2008177" vmrss="1219769" vmhwm="1615723" />
+        <model path="caffe/FP32/fcn_alexnet/fcn_alexnet.xml" test="infer_request_inference" device="CPU" vmsize="1771364" vmpeak="1771364" vmrss="805464" vmhwm="935511" />
+        <model path="caffe/FP32/fcn_alexnet/fcn_alexnet.xml" test="infer_request_inference" device="GPU" vmsize="1605936" vmpeak="1895415" vmrss="1127750" vmhwm="1502191" />
+        <model path="caffe/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="CPU" vmsize="1436468" vmpeak="1623923" vmrss="753001" vmhwm="940030" />
+        <model path="caffe/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="GPU" vmsize="2477649" vmpeak="2606604" vmrss="1727107" vmhwm="1917645" />
+        <model path="caffe/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="CPU" vmsize="1704596" vmpeak="1704596" vmrss="763807" vmhwm="939510" />
+        <model path="caffe/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="GPU" vmsize="2069168" vmpeak="2154365" vmrss="1592208" vmhwm="1718236" />
+        <model path="caffe/FP32/inception_v1/inception_v1.xml" test="create_exenetwork" device="CPU" vmsize="755742" vmpeak="920202" vmrss="149593" vmhwm="149593" />
+        <model path="caffe/FP32/inception_v1/inception_v1.xml" test="create_exenetwork" device="GPU" vmsize="941834" vmpeak="941834" vmrss="605690" vmhwm="605690" />
+        <model path="caffe/FP32/inception_v1/inception_v1.xml" test="infer_request_inference" device="CPU" vmsize="1012616" vmpeak="1012616" vmrss="154793" vmhwm="154793" />
+        <model path="caffe/FP32/inception_v1/inception_v1.xml" test="infer_request_inference" device="GPU" vmsize="827018" vmpeak="912215" vmrss="350012" vmhwm="350012" />
+        <model path="caffe/FP32/inception_v2/inception_v2.xml" test="create_exenetwork" device="CPU" vmsize="833872" vmpeak="886454" vmrss="162780" vmhwm="214853" />
+        <model path="caffe/FP32/inception_v2/inception_v2.xml" test="create_exenetwork" device="GPU" vmsize="1017369" vmpeak="1055308" vmrss="681980" vmhwm="719721" />
+        <model path="caffe/FP32/inception_v2/inception_v2.xml" test="infer_request_inference" device="CPU" vmsize="920363" vmpeak="1005560" vmrss="167133" vmhwm="214895" />
+        <model path="caffe/FP32/inception_v2/inception_v2.xml" test="infer_request_inference" device="GPU" vmsize="898206" vmpeak="983403" vmrss="419707" vmhwm="455660" />
+        <model path="caffe/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="CPU" vmsize="980382" vmpeak="1099368" vmrss="295952" vmhwm="414325" />
+        <model path="caffe/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="GPU" vmsize="1322427" vmpeak="1407354" vmrss="987646" vmhwm="1072141" />
+        <model path="caffe/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="CPU" vmsize="1248421" vmpeak="1248421" vmrss="307860" vmhwm="415298" />
+        <model path="caffe/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="GPU" vmsize="1158170" vmpeak="1243366" vmrss="680934" vmhwm="763703" />
+        <model path="caffe/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="CPU" vmsize="1188829" vmpeak="1392934" vmrss="513037" vmhwm="716632" />
+        <model path="caffe/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="GPU" vmsize="1859291" vmpeak="1997377" vmrss="1524088" vmhwm="1661504" />
+        <model path="caffe/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="CPU" vmsize="1456962" vmpeak="1456962" vmrss="521965" vmhwm="715650" />
+        <model path="caffe/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="GPU" vmsize="1605110" vmpeak="1690306" vmrss="1127874" vmhwm="1262539" />
+        <model path="caffe/FP32/lenet/lenet.xml" test="create_exenetwork" device="CPU" vmsize="694122" vmpeak="774706" vmrss="35958" vmhwm="35958" />
+        <model path="caffe/FP32/lenet/lenet.xml" test="create_exenetwork" device="GPU" vmsize="617312" vmpeak="617312" vmrss="281574" vmhwm="281574" />
+        <model path="caffe/FP32/lenet/lenet.xml" test="infer_request_inference" device="CPU" vmsize="961006" vmpeak="1046203" vmrss="35443" vmhwm="35443" />
+        <model path="caffe/FP32/lenet/lenet.xml" test="infer_request_inference" device="GPU" vmsize="610729" vmpeak="695926" vmrss="132324" vmhwm="132324" />
+        <model path="caffe/FP32/mobilenet/mobilenet.xml" test="create_exenetwork" device="CPU" vmsize="720948" vmpeak="795828" vmrss="98992" vmhwm="98992" />
+        <model path="caffe/FP32/mobilenet/mobilenet.xml" test="create_exenetwork" device="GPU" vmsize="770952" vmpeak="770952" vmrss="435333" vmhwm="435333" />
+        <model path="caffe/FP32/mobilenet/mobilenet.xml" test="infer_request_inference" device="CPU" vmsize="987984" vmpeak="1073181" vmrss="103136" vmhwm="103136" />
+        <model path="caffe/FP32/mobilenet/mobilenet.xml" test="infer_request_inference" device="GPU" vmsize="727896" vmpeak="813092" vmrss="252522" vmhwm="252522" />
+        <model path="caffe/FP32/mobilenet_v2/mobilenet_v2.xml" test="create_exenetwork" device="CPU" vmsize="727100" vmpeak="727100" vmrss="92372" vmhwm="92372" />
+        <model path="caffe/FP32/mobilenet_v2/mobilenet_v2.xml" test="create_exenetwork" device="GPU" vmsize="858800" vmpeak="858800" vmrss="523712" vmhwm="523712" />
+        <model path="caffe/FP32/mobilenet_v2/mobilenet_v2.xml" test="infer_request_inference" device="CPU" vmsize="994151" vmpeak="1079348" vmrss="100588" vmhwm="100588" />
+        <model path="caffe/FP32/mobilenet_v2/mobilenet_v2.xml" test="infer_request_inference" device="GPU" vmsize="763750" vmpeak="848946" vmrss="288984" vmhwm="288984" />
+        <model path="caffe/FP32/mtcnn_o/mtcnn_o.xml" test="create_exenetwork" device="CPU" vmsize="694023" vmpeak="774893" vmrss="34673" vmhwm="34673" />
+        <model path="caffe/FP32/mtcnn_o/mtcnn_o.xml" test="create_exenetwork" device="GPU" vmsize="631940" vmpeak="631940" vmrss="288189" vmhwm="288189" />
+        <model path="caffe/FP32/mtcnn_o/mtcnn_o.xml" test="infer_request_inference" device="CPU" vmsize="960580" vmpeak="1045777" vmrss="35604" vmhwm="35604" />
+        <model path="caffe/FP32/mtcnn_o/mtcnn_o.xml" test="infer_request_inference" device="GPU" vmsize="618436" vmpeak="703632" vmrss="140368" vmhwm="140368" />
+        <model path="caffe/FP32/mtcnn_p/mtcnn_p.xml" test="create_exenetwork" device="CPU" vmsize="783447" vmpeak="866314" vmrss="43825" vmhwm="43825" />
+        <model path="caffe/FP32/mtcnn_p/mtcnn_p.xml" test="create_exenetwork" device="GPU" vmsize="728395" vmpeak="756038" vmrss="383780" vmhwm="410545" />
+        <model path="caffe/FP32/mtcnn_p/mtcnn_p.xml" test="infer_request_inference" device="CPU" vmsize="979997" vmpeak="979997" vmrss="128320" vmhwm="128320" />
+        <model path="caffe/FP32/mtcnn_p/mtcnn_p.xml" test="infer_request_inference" device="GPU" vmsize="763287" vmpeak="848484" vmrss="284648" vmhwm="284648" />
+        <model path="caffe/FP32/mtcnn_r/mtcnn_r.xml" test="create_exenetwork" device="CPU" vmsize="691485" vmpeak="691485" vmrss="30253" vmhwm="30253" />
+        <model path="caffe/FP32/mtcnn_r/mtcnn_r.xml" test="create_exenetwork" device="GPU" vmsize="520577" vmpeak="523374" vmrss="126614" vmhwm="129084" />
+        <model path="caffe/FP32/mtcnn_r/mtcnn_r.xml" test="infer_request_inference" device="CPU" vmsize="963367" vmpeak="1048564" vmrss="33337" vmhwm="33337" />
+        <model path="caffe/FP32/mtcnn_r/mtcnn_r.xml" test="infer_request_inference" device="GPU" vmsize="605597" vmpeak="690794" vmrss="128091" vmhwm="129911" />
+        <model path="caffe/FP32/openpose_face/openpose_face.xml" test="create_exenetwork" device="CPU" vmsize="1141790" vmpeak="1336405" vmrss="431813" vmhwm="626236" />
+        <model path="caffe/FP32/openpose_face/openpose_face.xml" test="create_exenetwork" device="GPU" vmsize="1443811" vmpeak="1566063" vmrss="1055756" vmhwm="1177592" />
+        <model path="caffe/FP32/openpose_face/openpose_face.xml" test="infer_request_inference" device="CPU" vmsize="1409517" vmpeak="1409517" vmrss="472004" vmhwm="625461" />
+        <model path="caffe/FP32/openpose_face/openpose_face.xml" test="infer_request_inference" device="GPU" vmsize="1361157" vmpeak="1446354" vmrss="883168" vmhwm="1005030" />
+        <model path="caffe/FP32/openpose_hand/openpose_hand.xml" test="create_exenetwork" device="CPU" vmsize="1125716" vmpeak="1312344" vmrss="413764" vmhwm="600215" />
+        <model path="caffe/FP32/openpose_hand/openpose_hand.xml" test="create_exenetwork" device="GPU" vmsize="1426141" vmpeak="1538960" vmrss="1037488" vmhwm="1149792" />
+        <model path="caffe/FP32/openpose_hand/openpose_hand.xml" test="infer_request_inference" device="CPU" vmsize="1212156" vmpeak="1312438" vmrss="455239" vmhwm="601276" />
+        <model path="caffe/FP32/openpose_hand/openpose_hand.xml" test="infer_request_inference" device="GPU" vmsize="1337679" vmpeak="1365301" vmrss="859944" vmhwm="972233" />
+        <model path="caffe/FP32/openpose_pose_coco/openpose_pose_coco.xml" test="create_exenetwork" device="CPU" vmsize="1299688" vmpeak="1563577" vmrss="586242" vmhwm="849924" />
+        <model path="caffe/FP32/openpose_pose_coco/openpose_pose_coco.xml" test="create_exenetwork" device="GPU" vmsize="1812174" vmpeak="1997912" vmrss="1424103" vmhwm="1609166" />
+        <model path="caffe/FP32/openpose_pose_coco/openpose_pose_coco.xml" test="infer_request_inference" device="CPU" vmsize="1386018" vmpeak="1563577" vmrss="626147" vmhwm="849420" />
+        <model path="caffe/FP32/openpose_pose_coco/openpose_pose_coco.xml" test="infer_request_inference" device="GPU" vmsize="1652414" vmpeak="1755286" vmrss="1174087" vmhwm="1361599" />
+        <model path="caffe/FP32/places205_alexnet/places205_alexnet.xml" test="create_exenetwork" device="CPU" vmsize="1287572" vmpeak="1580612" vmrss="624582" vmhwm="917441" />
+        <model path="caffe/FP32/places205_alexnet/places205_alexnet.xml" test="create_exenetwork" device="GPU" vmsize="1513813" vmpeak="1998531" vmrss="1151737" vmhwm="1636216" />
+        <model path="caffe/FP32/places205_alexnet/places205_alexnet.xml" test="infer_request_inference" device="CPU" vmsize="1464517" vmpeak="1580597" vmrss="626922" vmhwm="916905" />
+        <model path="caffe/FP32/places205_alexnet/places205_alexnet.xml" test="infer_request_inference" device="GPU" vmsize="1498551" vmpeak="1889992" vmrss="1020489" vmhwm="1496653" />
+        <model path="caffe/FP32/places205_googlenet/places205_googlenet.xml" test="create_exenetwork" device="CPU" vmsize="746007" vmpeak="746007" vmrss="136240" vmhwm="136240" />
+        <model path="caffe/FP32/places205_googlenet/places205_googlenet.xml" test="create_exenetwork" device="GPU" vmsize="926957" vmpeak="926957" vmrss="577309" vmhwm="577309" />
+        <model path="caffe/FP32/places205_googlenet/places205_googlenet.xml" test="infer_request_inference" device="CPU" vmsize="1013547" vmpeak="1013547" vmrss="142885" vmhwm="142885" />
+        <model path="caffe/FP32/places205_googlenet/places205_googlenet.xml" test="infer_request_inference" device="GPU" vmsize="813794" vmpeak="898991" vmrss="336570" vmhwm="336570" />
+        <model path="caffe/FP32/resnet_18/resnet_18.xml" test="create_exenetwork" device="CPU" vmsize="824631" vmpeak="897722" vmrss="151590" vmhwm="210714" />
+        <model path="caffe/FP32/resnet_18/resnet_18.xml" test="create_exenetwork" device="GPU" vmsize="838567" vmpeak="891956" vmrss="503739" vmhwm="557273" />
+        <model path="caffe/FP32/resnet_18/resnet_18.xml" test="infer_request_inference" device="CPU" vmsize="910988" vmpeak="996184" vmrss="158886" vmhwm="211936" />
+        <model path="caffe/FP32/resnet_18/resnet_18.xml" test="infer_request_inference" device="GPU" vmsize="818776" vmpeak="903973" vmrss="341322" vmhwm="391955" />
+        <model path="caffe/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="CPU" vmsize="1184934" vmpeak="1406100" vmrss="511170" vmhwm="731827" />
+        <model path="caffe/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="GPU" vmsize="1640386" vmpeak="1850810" vmrss="1305855" vmhwm="1515966" />
+        <model path="caffe/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="CPU" vmsize="1452578" vmpeak="1452578" vmrss="518258" vmhwm="732508" />
+        <model path="caffe/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="GPU" vmsize="1479166" vmpeak="1604392" vmrss="1000901" vmhwm="1210248" />
+        <model path="caffe/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="CPU" vmsize="1360918" vmpeak="1658852" vmrss="684892" vmhwm="982316" />
+        <model path="caffe/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="GPU" vmsize="2023595" vmpeak="2311010" vmrss="1620923" vmhwm="1906216" />
+        <model path="caffe/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="CPU" vmsize="1628577" vmpeak="1713774" vmrss="691672" vmhwm="982930" />
+        <model path="caffe/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="GPU" vmsize="1814176" vmpeak="2016393" vmrss="1336238" vmhwm="1622244" />
+        <model path="caffe/FP32/resnet_v1_269/resnet_v1_269.xml" test="create_exenetwork" device="CPU" vmsize="2119015" vmpeak="2465268" vmrss="1307748" vmhwm="1653490" />
+        <model path="caffe/FP32/resnet_v1_269/resnet_v1_269.xml" test="create_exenetwork" device="GPU" vmsize="3063808" vmpeak="3522360" vmrss="2673543" vmhwm="3130623" />
+        <model path="caffe/FP32/resnet_v1_269/resnet_v1_269.xml" test="infer_request_inference" device="CPU" vmsize="2386618" vmpeak="2465538" vmrss="1321663" vmhwm="1652372" />
+        <model path="caffe/FP32/resnet_v1_269/resnet_v1_269.xml" test="infer_request_inference" device="GPU" vmsize="2799269" vmpeak="3172618" vmrss="2321664" vmhwm="2777736" />
+        <model path="caffe/FP32/resnet_v1_50/resnet_v1_50.xml" test="create_exenetwork" device="CPU" vmsize="974698" vmpeak="1100762" vmrss="304220" vmhwm="429774" />
+        <model path="caffe/FP32/resnet_v1_50/resnet_v1_50.xml" test="create_exenetwork" device="GPU" vmsize="1173671" vmpeak="1286625" vmrss="838682" vmhwm="951636" />
+        <model path="caffe/FP32/resnet_v1_50/resnet_v1_50.xml" test="infer_request_inference" device="CPU" vmsize="1242233" vmpeak="1242233" vmrss="310086" vmhwm="429150" />
+        <model path="caffe/FP32/resnet_v1_50/resnet_v1_50.xml" test="infer_request_inference" device="GPU" vmsize="1090726" vmpeak="1175922" vmrss="613813" vmhwm="726200" />
+        <model path="caffe/FP32/se_bn_inception/se_bn_inception.xml" test="create_exenetwork" device="CPU" vmsize="870022" vmpeak="924336" vmrss="179088" vmhwm="232892" />
+        <model path="caffe/FP32/se_bn_inception/se_bn_inception.xml" test="create_exenetwork" device="GPU" vmsize="1125753" vmpeak="1166344" vmrss="786666" vmhwm="827138" />
+        <model path="caffe/FP32/se_bn_inception/se_bn_inception.xml" test="infer_request_inference" device="CPU" vmsize="1137541" vmpeak="1137541" vmrss="184485" vmhwm="232949" />
+        <model path="caffe/FP32/se_bn_inception/se_bn_inception.xml" test="infer_request_inference" device="GPU" vmsize="955177" vmpeak="1040374" vmrss="477032" vmhwm="519178" />
+        <model path="caffe/FP32/se_resnext_50/se_resnext_50.xml" test="create_exenetwork" device="CPU" vmsize="1074985" vmpeak="1208168" vmrss="344406" vmhwm="477089" />
+        <model path="caffe/FP32/se_resnext_50/se_resnext_50.xml" test="create_exenetwork" device="GPU" vmsize="1383397" vmpeak="1496918" vmrss="980408" vmhwm="1092702" />
+        <model path="caffe/FP32/se_resnext_50/se_resnext_50.xml" test="infer_request_inference" device="CPU" vmsize="1168200" vmpeak="1253397" vmrss="374275" vmhwm="477698" />
+        <model path="caffe/FP32/se_resnext_50/se_resnext_50.xml" test="infer_request_inference" device="GPU" vmsize="1240657" vmpeak="1325854" vmrss="762725" vmhwm="854386" />
+        <model path="caffe/FP32/squeezenet_v1.0/squeezenet_v1.0.xml" test="create_exenetwork" device="CPU" vmsize="713351" vmpeak="787898" vmrss="52858" vmhwm="52858" />
+        <model path="caffe/FP32/squeezenet_v1.0/squeezenet_v1.0.xml" test="create_exenetwork" device="GPU" vmsize="719794" vmpeak="719794" vmrss="384508" vmhwm="384508" />
+        <model path="caffe/FP32/squeezenet_v1.0/squeezenet_v1.0.xml" test="infer_request_inference" device="CPU" vmsize="980522" vmpeak="980522" vmrss="59456" vmhwm="59456" />
+        <model path="caffe/FP32/squeezenet_v1.0/squeezenet_v1.0.xml" test="infer_request_inference" device="GPU" vmsize="686613" vmpeak="771810" vmrss="211426" vmhwm="211426" />
+        <model path="caffe/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="CPU" vmsize="705796" vmpeak="705796" vmrss="52405" vmhwm="52405" />
+        <model path="caffe/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="GPU" vmsize="724984" vmpeak="724984" vmrss="390031" vmhwm="390031" />
+        <model path="caffe/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="CPU" vmsize="791918" vmpeak="877115" vmrss="56269" vmhwm="56269" />
+        <model path="caffe/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="GPU" vmsize="674590" vmpeak="759787" vmrss="199139" vmhwm="199139" />
+        <model path="caffe/FP32/ssd_googlenet/ssd_googlenet.xml" test="create_exenetwork" device="CPU" vmsize="850278" vmpeak="901976" vmrss="168672" vmhwm="218660" />
+        <model path="caffe/FP32/ssd_googlenet/ssd_googlenet.xml" test="create_exenetwork" device="GPU" vmsize="1092353" vmpeak="1123298" vmrss="689566" vmhwm="762699" />
+        <model path="caffe/FP32/ssd_googlenet/ssd_googlenet.xml" test="infer_request_inference" device="CPU" vmsize="1118015" vmpeak="1118015" vmrss="177444" vmhwm="218670" />
+        <model path="caffe/FP32/ssd_googlenet/ssd_googlenet.xml" test="infer_request_inference" device="GPU" vmsize="944564" vmpeak="1029761" vmrss="467672" vmhwm="495326" />
+        <model path="caffe/FP32/ssd_mobilenet/ssd_mobilenet.xml" test="create_exenetwork" device="CPU" vmsize="740069" vmpeak="740069" vmrss="128315" vmhwm="128315" />
+        <model path="caffe/FP32/ssd_mobilenet/ssd_mobilenet.xml" test="create_exenetwork" device="GPU" vmsize="791986" vmpeak="791986" vmrss="456830" vmhwm="456830" />
+        <model path="caffe/FP32/ssd_mobilenet/ssd_mobilenet.xml" test="infer_request_inference" device="CPU" vmsize="1188891" vmpeak="1274088" vmrss="138252" vmhwm="138252" />
+        <model path="caffe/FP32/ssd_mobilenet/ssd_mobilenet.xml" test="infer_request_inference" device="GPU" vmsize="819218" vmpeak="904415" vmrss="342066" vmhwm="342066" />
+        <model path="caffe/FP32/ssd_squeezenet/ssd_squeezenet.xml" test="create_exenetwork" device="CPU" vmsize="740714" vmpeak="803946" vmrss="126521" vmhwm="126521" />
+        <model path="caffe/FP32/ssd_squeezenet/ssd_squeezenet.xml" test="create_exenetwork" device="GPU" vmsize="925225" vmpeak="925225" vmrss="519417" vmhwm="586206" />
+        <model path="caffe/FP32/ssd_squeezenet/ssd_squeezenet.xml" test="infer_request_inference" device="CPU" vmsize="1008446" vmpeak="1093643" vmrss="135714" vmhwm="135714" />
+        <model path="caffe/FP32/ssd_squeezenet/ssd_squeezenet.xml" test="infer_request_inference" device="GPU" vmsize="824470" vmpeak="909667" vmrss="348103" vmhwm="348103" />
+        <model path="caffe/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="create_exenetwork" device="CPU" vmsize="1046843" vmpeak="1178897" vmrss="308848" vmhwm="440377" />
+        <model path="caffe/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="create_exenetwork" device="GPU" vmsize="1151961" vmpeak="1168070" vmrss="815692" vmhwm="831932" />
+        <model path="caffe/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="infer_request_inference" device="CPU" vmsize="1321751" vmpeak="1321751" vmrss="373412" vmhwm="440299" />
+        <model path="caffe/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="infer_request_inference" device="GPU" vmsize="1200820" vmpeak="1286017" vmrss="725717" vmhwm="734500" />
+        <model path="caffe/FP32/ssd_vgg16_512/ssd_vgg16_512.xml" test="create_exenetwork" device="CPU" vmsize="1186697" vmpeak="1322895" vmrss="323164" vmhwm="457116" />
+        <model path="caffe/FP32/ssd_vgg16_512/ssd_vgg16_512.xml" test="create_exenetwork" device="GPU" vmsize="1522606" vmpeak="1522606" vmrss="1120277" vmhwm="1120277" />
+        <model path="caffe/FP32/ssd_vgg16_512/ssd_vgg16_512.xml" test="infer_request_inference" device="CPU" vmsize="1288424" vmpeak="1373621" vmrss="500370" vmhwm="500370" />
+        <model path="caffe/FP32/ssd_vgg16_512/ssd_vgg16_512.xml" test="infer_request_inference" device="GPU" vmsize="1449448" vmpeak="1534644" vmrss="973845" vmhwm="973845" />
+        <model path="caffe/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="CPU" vmsize="2133809" vmpeak="2836407" vmrss="1438444" vmhwm="2140850" />
+        <model path="caffe/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="GPU" vmsize="2707359" vmpeak="3834188" vmrss="2314816" vmhwm="3441464" />
+        <model path="caffe/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="CPU" vmsize="2401339" vmpeak="3101945" vmrss="1469098" vmhwm="2139987" />
+        <model path="caffe/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="GPU" vmsize="2792654" vmpeak="3834136" vmrss="2314577" vmhwm="3440408" />
+        <model path="caffe/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="CPU" vmsize="2188804" vmpeak="2918375" vmrss="1492623" vmhwm="2222001" />
+        <model path="caffe/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="GPU" vmsize="2898989" vmpeak="4025117" vmrss="2481081" vmhwm="3626459" />
+        <model path="caffe/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="CPU" vmsize="2275379" vmpeak="2918474" vmrss="1523834" vmhwm="2221715" />
+        <model path="caffe/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="GPU" vmsize="2876250" vmpeak="3944834" vmrss="2398682" vmhwm="3551002" />
+        <model path="caffe/FP32/vnect/vnect.xml" test="create_exenetwork" device="CPU" vmsize="873480" vmpeak="943924" vmrss="196320" vmhwm="266656" />
+        <model path="caffe/FP32/vnect/vnect.xml" test="create_exenetwork" device="GPU" vmsize="1067367" vmpeak="1101604" vmrss="730048" vmhwm="764051" />
+        <model path="caffe/FP32/vnect/vnect.xml" test="infer_request_inference" device="CPU" vmsize="961745" vmpeak="1046942" vmrss="212149" vmhwm="266546" />
+        <model path="caffe/FP32/vnect/vnect.xml" test="infer_request_inference" device="GPU" vmsize="976471" vmpeak="1061668" vmrss="499335" vmhwm="528736" />
+        <model path="caffe/FP32/wrn_50_2/wrn_50_2.xml" test="create_exenetwork" device="CPU" vmsize="1428580" vmpeak="1776923" vmrss="741670" vmhwm="1089587" />
+        <model path="caffe/FP32/wrn_50_2/wrn_50_2.xml" test="create_exenetwork" device="GPU" vmsize="1842729" vmpeak="2177494" vmrss="1452183" vmhwm="1785934" />
+        <model path="caffe/FP32/wrn_50_2/wrn_50_2.xml" test="infer_request_inference" device="CPU" vmsize="1514890" vmpeak="1776834" vmrss="756730" vmhwm="1088464" />
+        <model path="caffe/FP32/wrn_50_2/wrn_50_2.xml" test="infer_request_inference" device="GPU" vmsize="1753476" vmpeak="2003045" vmrss="1275523" vmhwm="1608807" />
+        <model path="caffe/FP32/yolo_v1_full/yolo_v1_full.xml" test="create_exenetwork" device="CPU" vmsize="3478618" vmpeak="4858219" vmrss="2796794" vmhwm="4176062" />
+        <model path="caffe/FP32/yolo_v1_full/yolo_v1_full.xml" test="create_exenetwork" device="GPU" vmsize="4842442" vmpeak="6987687" vmrss="4397738" vmhwm="6544928" />
+        <model path="caffe/FP32/yolo_v1_full/yolo_v1_full.xml" test="infer_request_inference" device="CPU" vmsize="3567340" vmpeak="4858193" vmrss="2814666" vmhwm="4176177" />
+        <model path="caffe/FP32/yolo_v1_full/yolo_v1_full.xml" test="infer_request_inference" device="GPU" vmsize="4814217" vmpeak="6932785" vmrss="4335193" vmhwm="6538194" />
+        <model path="caffe/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="create_exenetwork" device="CPU" vmsize="998956" vmpeak="1136428" vmrss="307600" vmhwm="444735" />
+        <model path="caffe/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="create_exenetwork" device="GPU" vmsize="1052719" vmpeak="1232316" vmrss="717854" vmhwm="897540" />
+        <model path="caffe/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="infer_request_inference" device="CPU" vmsize="1258004" vmpeak="1258004" vmrss="326175" vmhwm="443996" />
+        <model path="caffe/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="infer_request_inference" device="GPU" vmsize="1059619" vmpeak="1138789" vmrss="582155" vmhwm="745664" />
+        <model path="caffe/FP32/yolo_v2/yolo_v2.xml" test="create_exenetwork" device="CPU" vmsize="1249211" vmpeak="1506304" vmrss="550752" vmhwm="807762" />
+        <model path="caffe/FP32/yolo_v2/yolo_v2.xml" test="create_exenetwork" device="GPU" vmsize="1492743" vmpeak="1714642" vmrss="1095354" vmhwm="1316988" />
+        <model path="caffe/FP32/yolo_v2/yolo_v2.xml" test="infer_request_inference" device="CPU" vmsize="1427483" vmpeak="1512680" vmrss="582514" vmhwm="806858" />
+        <model path="caffe/FP32/yolo_v2/yolo_v2.xml" test="infer_request_inference" device="GPU" vmsize="1456343" vmpeak="1595287" vmrss="978369" vmhwm="1201579" />
+        <model path="caffe/FP32/yolo_v2_tiny/yolo_v2_tiny.xml" test="create_exenetwork" device="CPU" vmsize="871930" vmpeak="952359" vmrss="193388" vmhwm="273634" />
+        <model path="caffe/FP32/yolo_v2_tiny/yolo_v2_tiny.xml" test="create_exenetwork" device="GPU" vmsize="878768" vmpeak="973180" vmrss="533348" vmhwm="627848" />
+        <model path="caffe/FP32/yolo_v2_tiny/yolo_v2_tiny.xml" test="infer_request_inference" device="CPU" vmsize="959909" vmpeak="1045106" vmrss="208156" vmhwm="273530" />
+        <model path="caffe/FP32/yolo_v2_tiny/yolo_v2_tiny.xml" test="infer_request_inference" device="GPU" vmsize="883818" vmpeak="969014" vmrss="406442" vmhwm="476595" />
+        <model path="caffe/FP32/yolo_v3/yolo_v3.xml" test="create_exenetwork" device="CPU" vmsize="1388405" vmpeak="1700311" vmrss="680352" vmhwm="991998" />
+        <model path="caffe/FP32/yolo_v3/yolo_v3.xml" test="create_exenetwork" device="GPU" vmsize="1970503" vmpeak="2164422" vmrss="1583935" vmhwm="1777209" />
+        <model path="caffe/FP32/yolo_v3/yolo_v3.xml" test="infer_request_inference" device="CPU" vmsize="1661649" vmpeak="1746846" vmrss="723148" vmhwm="991354" />
+        <model path="caffe/FP32/yolo_v3/yolo_v3.xml" test="infer_request_inference" device="GPU" vmsize="1812694" vmpeak="1917910" vmrss="1335609" vmhwm="1524931" />
+        <model path="mxnet/FP32/caffenet/caffenet.xml" test="create_exenetwork" device="CPU" vmsize="1321320" vmpeak="1630896" vmrss="658730" vmhwm="968125" />
+        <model path="mxnet/FP32/caffenet/caffenet.xml" test="create_exenetwork" device="GPU" vmsize="1563660" vmpeak="2064852" vmrss="1226097" vmhwm="1727050" />
+        <model path="mxnet/FP32/caffenet/caffenet.xml" test="infer_request_inference" device="CPU" vmsize="1679251" vmpeak="1849645" vmrss="659406" vmhwm="966815" />
+        <model path="mxnet/FP32/caffenet/caffenet.xml" test="infer_request_inference" device="GPU" vmsize="1557181" vmpeak="1973176" vmrss="1079998" vmhwm="1579983" />
+        <model path="mxnet/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="CPU" vmsize="908549" vmpeak="908549" vmrss="180804" vmhwm="180804" />
+        <model path="mxnet/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="GPU" vmsize="1315620" vmpeak="1315620" vmrss="978213" vmhwm="978213" />
+        <model path="mxnet/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="CPU" vmsize="1170239" vmpeak="1255436" vmrss="189326" vmhwm="189326" />
+        <model path="mxnet/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="GPU" vmsize="1068553" vmpeak="1153750" vmrss="590298" vmhwm="590298" />
+        <model path="mxnet/FP32/densenet_161/densenet_161.xml" test="create_exenetwork" device="CPU" vmsize="1160718" vmpeak="1262736" vmrss="405376" vmhwm="507317" />
+        <model path="mxnet/FP32/densenet_161/densenet_161.xml" test="create_exenetwork" device="GPU" vmsize="1898410" vmpeak="1898410" vmrss="1560884" vmhwm="1560884" />
+        <model path="mxnet/FP32/densenet_161/densenet_161.xml" test="infer_request_inference" device="CPU" vmsize="1240917" vmpeak="1326114" vmrss="419094" vmhwm="507306" />
+        <model path="mxnet/FP32/densenet_161/densenet_161.xml" test="infer_request_inference" device="GPU" vmsize="1594502" vmpeak="1679698" vmrss="1116954" vmhwm="1116954" />
+        <model path="mxnet/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="CPU" vmsize="991671" vmpeak="1004291" vmrss="275397" vmhwm="287918" />
+        <model path="mxnet/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="GPU" vmsize="1616690" vmpeak="1618188" vmrss="1278908" vmhwm="1280494" />
+        <model path="mxnet/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="CPU" vmsize="1258623" vmpeak="1258623" vmrss="284320" vmhwm="287606" />
+        <model path="mxnet/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="GPU" vmsize="1303156" vmpeak="1388353" vmrss="824928" vmhwm="824928" />
+        <model path="mxnet/FP32/densenet_201/densenet_201.xml" test="create_exenetwork" device="CPU" vmsize="1134889" vmpeak="1188636" vmrss="367130" vmhwm="384935" />
+        <model path="mxnet/FP32/densenet_201/densenet_201.xml" test="create_exenetwork" device="GPU" vmsize="1865047" vmpeak="1865047" vmrss="1527947" vmhwm="1527947" />
+        <model path="mxnet/FP32/densenet_201/densenet_201.xml" test="infer_request_inference" device="CPU" vmsize="1220882" vmpeak="1306078" vmrss="376006" vmhwm="384217" />
+        <model path="mxnet/FP32/densenet_201/densenet_201.xml" test="infer_request_inference" device="GPU" vmsize="1551019" vmpeak="1636216" vmrss="1071928" vmhwm="1071928" />
+        <model path="mxnet/FP32/dpn_92/dpn_92.xml" test="create_exenetwork" device="CPU" vmsize="1255898" vmpeak="1437160" vmrss="461385" vmhwm="642049" />
+        <model path="mxnet/FP32/dpn_92/dpn_92.xml" test="create_exenetwork" device="GPU" vmsize="1800479" vmpeak="1945580" vmrss="1462780" vmhwm="1607470" />
+        <model path="mxnet/FP32/dpn_92/dpn_92.xml" test="infer_request_inference" device="CPU" vmsize="1530053" vmpeak="1530053" vmrss="505570" vmhwm="641368" />
+        <model path="mxnet/FP32/dpn_92/dpn_92.xml" test="infer_request_inference" device="GPU" vmsize="1561955" vmpeak="1619753" vmrss="1084324" vmhwm="1225473" />
+        <model path="mxnet/FP32/fcn8s_vgg16/fcn8s_vgg16.xml" test="create_exenetwork" device="CPU" vmsize="2833797" vmpeak="3516609" vmrss="1409798" vmhwm="2092417" />
+        <model path="mxnet/FP32/fcn8s_vgg16/fcn8s_vgg16.xml" test="create_exenetwork" device="GPU" vmsize="4293634" vmpeak="4293634" vmrss="3955525" vmhwm="3955525" />
+        <model path="mxnet/FP32/fcn8s_vgg16/fcn8s_vgg16.xml" test="infer_request_inference" device="CPU" vmsize="3022032" vmpeak="3516609" vmrss="2255333" vmhwm="2255333" />
+        <model path="mxnet/FP32/fcn8s_vgg16/fcn8s_vgg16.xml" test="infer_request_inference" device="GPU" vmsize="4277993" vmpeak="4363190" vmrss="3799333" vmhwm="3799333" />
+        <model path="mxnet/FP32/full_imagenet_network/full_imagenet_network.xml" test="create_exenetwork" device="CPU" vmsize="1066384" vmpeak="1233736" vmrss="390972" vmhwm="557528" />
+        <model path="mxnet/FP32/full_imagenet_network/full_imagenet_network.xml" test="create_exenetwork" device="GPU" vmsize="1358442" vmpeak="1615062" vmrss="1020947" vmhwm="1273121" />
+        <model path="mxnet/FP32/full_imagenet_network/full_imagenet_network.xml" test="infer_request_inference" device="CPU" vmsize="1243392" vmpeak="1328589" vmrss="398580" vmhwm="558469" />
+        <model path="mxnet/FP32/full_imagenet_network/full_imagenet_network.xml" test="infer_request_inference" device="GPU" vmsize="1256070" vmpeak="1398212" vmrss="778549" vmhwm="1001192" />
+        <model path="mxnet/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="CPU" vmsize="1437560" vmpeak="1625010" vmrss="754254" vmhwm="941142" />
+        <model path="mxnet/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="GPU" vmsize="2281713" vmpeak="2410668" vmrss="1943780" vmhwm="2072428" />
+        <model path="mxnet/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="CPU" vmsize="1524473" vmpeak="1625005" vmrss="763001" vmhwm="940264" />
+        <model path="mxnet/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="GPU" vmsize="2070671" vmpeak="2155868" vmrss="1593108" vmhwm="1719125" />
+        <model path="mxnet/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="CPU" vmsize="835629" vmpeak="889226" vmrss="164216" vmhwm="217245" />
+        <model path="mxnet/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="GPU" vmsize="983507" vmpeak="1024665" vmrss="645985" vmhwm="686930" />
+        <model path="mxnet/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="CPU" vmsize="927451" vmpeak="1012648" vmrss="168360" vmhwm="216569" />
+        <model path="mxnet/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="GPU" vmsize="900712" vmpeak="985909" vmrss="423519" vmhwm="463533" />
+        <model path="mxnet/FP32/inception_v3_no_batchnorm/inception_v3_no_batchnorm.xml" test="create_exenetwork" device="CPU" vmsize="980636" vmpeak="1099706" vmrss="296680" vmhwm="415194" />
+        <model path="mxnet/FP32/inception_v3_no_batchnorm/inception_v3_no_batchnorm.xml" test="create_exenetwork" device="GPU" vmsize="1326213" vmpeak="1409371" vmrss="988488" vmhwm="1071366" />
+        <model path="mxnet/FP32/inception_v3_no_batchnorm/inception_v3_no_batchnorm.xml" test="infer_request_inference" device="CPU" vmsize="1248691" vmpeak="1248691" vmrss="306857" vmhwm="414752" />
+        <model path="mxnet/FP32/inception_v3_no_batchnorm/inception_v3_no_batchnorm.xml" test="infer_request_inference" device="GPU" vmsize="1163032" vmpeak="1248228" vmrss="685843" vmhwm="765507" />
+        <model path="mxnet/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="CPU" vmsize="1189531" vmpeak="1393636" vmrss="513661" vmhwm="717204" />
+        <model path="mxnet/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="GPU" vmsize="1866176" vmpeak="2002847" vmrss="1528664" vmhwm="1664577" />
+        <model path="mxnet/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="CPU" vmsize="1457669" vmpeak="1457669" vmrss="523811" vmhwm="715837" />
+        <model path="mxnet/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="GPU" vmsize="1606243" vmpeak="1691440" vmrss="1129185" vmhwm="1262534" />
+        <model path="mxnet/FP32/location_net/location_net.xml" test="create_exenetwork" device="CPU" vmsize="1521920" vmpeak="1894167" vmrss="814210" vmhwm="1185704" />
+        <model path="mxnet/FP32/location_net/location_net.xml" test="create_exenetwork" device="GPU" vmsize="1961772" vmpeak="2317998" vmrss="1623268" vmhwm="1979062" />
+        <model path="mxnet/FP32/location_net/location_net.xml" test="infer_request_inference" device="CPU" vmsize="1789325" vmpeak="1894157" vmrss="828328" vmhwm="1185480" />
+        <model path="mxnet/FP32/location_net/location_net.xml" test="infer_request_inference" device="GPU" vmsize="1951877" vmpeak="2240295" vmrss="1479337" vmhwm="1843041" />
+        <model path="mxnet/FP32/lresnet100e/lresnet100e.xml" test="create_exenetwork" device="CPU" vmsize="1427384" vmpeak="1755920" vmrss="719097" vmhwm="1047295" />
+        <model path="mxnet/FP32/lresnet100e/lresnet100e.xml" test="create_exenetwork" device="GPU" vmsize="2059070" vmpeak="2371101" vmrss="1721616" vmhwm="2033194" />
+        <model path="mxnet/FP32/lresnet100e/lresnet100e.xml" test="infer_request_inference" device="CPU" vmsize="1694035" vmpeak="1779232" vmrss="732596" vmhwm="1046208" />
+        <model path="mxnet/FP32/lresnet100e/lresnet100e.xml" test="infer_request_inference" device="GPU" vmsize="1863825" vmpeak="2084664" vmrss="1386002" vmhwm="1691248" />
+        <model path="mxnet/FP32/mobilenet/mobilenet.xml" test="create_exenetwork" device="CPU" vmsize="720959" vmpeak="795839" vmrss="98898" vmhwm="98898" />
+        <model path="mxnet/FP32/mobilenet/mobilenet.xml" test="create_exenetwork" device="GPU" vmsize="749106" vmpeak="749106" vmrss="411049" vmhwm="411049" />
+        <model path="mxnet/FP32/mobilenet/mobilenet.xml" test="infer_request_inference" device="CPU" vmsize="806941" vmpeak="806941" vmrss="104702" vmhwm="104702" />
+        <model path="mxnet/FP32/mobilenet/mobilenet.xml" test="infer_request_inference" device="GPU" vmsize="727818" vmpeak="813014" vmrss="252787" vmhwm="252787" />
+        <model path="mxnet/FP32/mobilenet_v2/mobilenet_v2.xml" test="create_exenetwork" device="CPU" vmsize="727116" vmpeak="793010" vmrss="92508" vmhwm="92508" />
+        <model path="mxnet/FP32/mobilenet_v2/mobilenet_v2.xml" test="create_exenetwork" device="GPU" vmsize="817554" vmpeak="817554" vmrss="479762" vmhwm="479762" />
+        <model path="mxnet/FP32/mobilenet_v2/mobilenet_v2.xml" test="infer_request_inference" device="CPU" vmsize="813108" vmpeak="898305" vmrss="99481" vmhwm="99481" />
+        <model path="mxnet/FP32/mobilenet_v2/mobilenet_v2.xml" test="infer_request_inference" device="GPU" vmsize="765070" vmpeak="850267" vmrss="290040" vmhwm="290040" />
+        <model path="mxnet/FP32/mtcnn_o/mtcnn_o.xml" test="create_exenetwork" device="CPU" vmsize="694023" vmpeak="694023" vmrss="34377" vmhwm="34377" />
+        <model path="mxnet/FP32/mtcnn_o/mtcnn_o.xml" test="create_exenetwork" device="GPU" vmsize="631919" vmpeak="631919" vmrss="294070" vmhwm="294070" />
+        <model path="mxnet/FP32/mtcnn_o/mtcnn_o.xml" test="infer_request_inference" device="CPU" vmsize="779532" vmpeak="864728" vmrss="36524" vmhwm="36524" />
+        <model path="mxnet/FP32/mtcnn_o/mtcnn_o.xml" test="infer_request_inference" device="GPU" vmsize="618586" vmpeak="703783" vmrss="140582" vmhwm="140582" />
+        <model path="mxnet/FP32/mtcnn_p/mtcnn_p.xml" test="create_exenetwork" device="CPU" vmsize="783447" vmpeak="783447" vmrss="42936" vmhwm="42936" />
+        <model path="mxnet/FP32/mtcnn_p/mtcnn_p.xml" test="create_exenetwork" device="GPU" vmsize="724302" vmpeak="724302" vmrss="386261" vmhwm="386339" />
+        <model path="mxnet/FP32/mtcnn_p/mtcnn_p.xml" test="infer_request_inference" device="CPU" vmsize="1070524" vmpeak="1155720" vmrss="129376" vmhwm="129376" />
+        <model path="mxnet/FP32/mtcnn_p/mtcnn_p.xml" test="infer_request_inference" device="GPU" vmsize="762933" vmpeak="848130" vmrss="284216" vmhwm="284216" />
+        <model path="mxnet/FP32/mtcnn_r/mtcnn_r.xml" test="create_exenetwork" device="CPU" vmsize="691485" vmpeak="691485" vmrss="30700" vmhwm="30700" />
+        <model path="mxnet/FP32/mtcnn_r/mtcnn_r.xml" test="create_exenetwork" device="GPU" vmsize="588270" vmpeak="610240" vmrss="250692" vmhwm="269453" />
+        <model path="mxnet/FP32/mtcnn_r/mtcnn_r.xml" test="infer_request_inference" device="CPU" vmsize="958042" vmpeak="958042" vmrss="30908" vmhwm="30908" />
+        <model path="mxnet/FP32/mtcnn_r/mtcnn_r.xml" test="infer_request_inference" device="GPU" vmsize="605176" vmpeak="690372" vmrss="127602" vmhwm="129365" />
+        <model path="mxnet/FP32/nin/nin.xml" test="create_exenetwork" device="CPU" vmsize="732747" vmpeak="732747" vmrss="146874" vmhwm="146874" />
+        <model path="mxnet/FP32/nin/nin.xml" test="create_exenetwork" device="GPU" vmsize="778096" vmpeak="778096" vmrss="439654" vmhwm="439654" />
+        <model path="mxnet/FP32/nin/nin.xml" test="infer_request_inference" device="CPU" vmsize="818864" vmpeak="904061" vmrss="148220" vmhwm="148220" />
+        <model path="mxnet/FP32/nin/nin.xml" test="infer_request_inference" device="GPU" vmsize="781279" vmpeak="866476" vmrss="323528" vmhwm="323528" />
+        <model path="mxnet/FP32/nst_vgg19/nst_vgg19.xml" test="create_exenetwork" device="CPU" vmsize="739559" vmpeak="739559" vmrss="67152" vmhwm="67152" />
+        <model path="mxnet/FP32/nst_vgg19/nst_vgg19.xml" test="create_exenetwork" device="GPU" vmsize="769938" vmpeak="769938" vmrss="431922" vmhwm="431922" />
+        <model path="mxnet/FP32/nst_vgg19/nst_vgg19.xml" test="infer_request_inference" device="CPU" vmsize="1007323" vmpeak="1007323" vmrss="99127" vmhwm="99127" />
+        <model path="mxnet/FP32/nst_vgg19/nst_vgg19.xml" test="infer_request_inference" device="GPU" vmsize="760047" vmpeak="845244" vmrss="281866" vmhwm="281866" />
+        <model path="mxnet/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="CPU" vmsize="1219296" vmpeak="1440462" vmrss="513271" vmhwm="733850" />
+        <model path="mxnet/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="GPU" vmsize="1693062" vmpeak="1898192" vmrss="1355270" vmhwm="1559838" />
+        <model path="mxnet/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="CPU" vmsize="1305881" vmpeak="1440556" vmrss="527399" vmhwm="732924" />
+        <model path="mxnet/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="GPU" vmsize="1500881" vmpeak="1620819" vmrss="1022845" vmhwm="1226721" />
+        <model path="mxnet/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="CPU" vmsize="1406802" vmpeak="1704736" vmrss="687445" vmhwm="984760" />
+        <model path="mxnet/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="GPU" vmsize="2147516" vmpeak="2429642" vmrss="1810073" vmhwm="2091382" />
+        <model path="mxnet/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="CPU" vmsize="1674363" vmpeak="1759560" vmrss="702972" vmhwm="984744" />
+        <model path="mxnet/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="GPU" vmsize="1849614" vmpeak="2046543" vmrss="1371458" vmhwm="1652222" />
+        <model path="mxnet/FP32/resnet_v2_101/resnet_v2_101.xml" test="create_exenetwork" device="CPU" vmsize="1218568" vmpeak="1439734" vmrss="513505" vmhwm="734136" />
+        <model path="mxnet/FP32/resnet_v2_101/resnet_v2_101.xml" test="create_exenetwork" device="GPU" vmsize="1688476" vmpeak="1897693" vmrss="1350502" vmhwm="1559168" />
+        <model path="mxnet/FP32/resnet_v2_101/resnet_v2_101.xml" test="infer_request_inference" device="CPU" vmsize="1305106" vmpeak="1439828" vmrss="526188" vmhwm="732721" />
+        <model path="mxnet/FP32/resnet_v2_101/resnet_v2_101.xml" test="infer_request_inference" device="GPU" vmsize="1498400" vmpeak="1619649" vmrss="1021170" vmhwm="1226201" />
+        <model path="mxnet/FP32/resnet_v2_152/resnet_v2_152.xml" test="create_exenetwork" device="CPU" vmsize="1406007" vmpeak="1703941" vmrss="687798" vmhwm="985082" />
+        <model path="mxnet/FP32/resnet_v2_152/resnet_v2_152.xml" test="create_exenetwork" device="GPU" vmsize="2132431" vmpeak="2419976" vmrss="1795331" vmhwm="2082298" />
+        <model path="mxnet/FP32/resnet_v2_152/resnet_v2_152.xml" test="infer_request_inference" device="CPU" vmsize="1673562" vmpeak="1758759" vmrss="702202" vmhwm="984557" />
+        <model path="mxnet/FP32/resnet_v2_152/resnet_v2_152.xml" test="infer_request_inference" device="GPU" vmsize="1852832" vmpeak="2055175" vmrss="1375025" vmhwm="1661046" />
+        <model path="mxnet/FP32/resnext_101/resnext_101.xml" test="create_exenetwork" device="CPU" vmsize="1214486" vmpeak="1422704" vmrss="531008" vmhwm="738576" />
+        <model path="mxnet/FP32/resnext_101/resnext_101.xml" test="create_exenetwork" device="GPU" vmsize="1653386" vmpeak="1850721" vmrss="1316047" vmhwm="1513090" />
+        <model path="mxnet/FP32/resnext_101/resnext_101.xml" test="infer_request_inference" device="CPU" vmsize="1307545" vmpeak="1422720" vmrss="553290" vmhwm="739018" />
+        <model path="mxnet/FP32/resnext_101/resnext_101.xml" test="infer_request_inference" device="GPU" vmsize="1505826" vmpeak="1597455" vmrss="1028154" vmhwm="1203888" />
+        <model path="mxnet/FP32/resnext_101_64x4d/resnext_101_64x4d.xml" test="create_exenetwork" device="CPU" vmsize="1639840" vmpeak="2058960" vmrss="933025" vmhwm="1351495" />
+        <model path="mxnet/FP32/resnext_101_64x4d/resnext_101_64x4d.xml" test="create_exenetwork" device="GPU" vmsize="2290340" vmpeak="2674006" vmrss="1952048" vmhwm="2335455" />
+        <model path="mxnet/FP32/resnext_101_64x4d/resnext_101_64x4d.xml" test="infer_request_inference" device="CPU" vmsize="1914021" vmpeak="2149482" vmrss="959363" vmhwm="1351006" />
+        <model path="mxnet/FP32/resnext_101_64x4d/resnext_101_64x4d.xml" test="infer_request_inference" device="GPU" vmsize="2119436" vmpeak="2416320" vmrss="1662554" vmhwm="2022462" />
+        <model path="mxnet/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="CPU" vmsize="705806" vmpeak="780353" vmrss="52806" vmhwm="52806" />
+        <model path="mxnet/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="GPU" vmsize="700835" vmpeak="700835" vmrss="362949" vmhwm="362949" />
+        <model path="mxnet/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="CPU" vmsize="791934" vmpeak="791934" vmrss="56794" vmhwm="56794" />
+        <model path="mxnet/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="GPU" vmsize="674611" vmpeak="759808" vmrss="198120" vmhwm="198120" />
+        <model path="mxnet/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="create_exenetwork" device="CPU" vmsize="1046858" vmpeak="1178912" vmrss="308542" vmhwm="439483" />
+        <model path="mxnet/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="create_exenetwork" device="GPU" vmsize="1226248" vmpeak="1247022" vmrss="889018" vmhwm="909454" />
+        <model path="mxnet/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="infer_request_inference" device="CPU" vmsize="1140729" vmpeak="1225926" vmrss="372574" vmhwm="439826" />
+        <model path="mxnet/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="infer_request_inference" device="GPU" vmsize="1199894" vmpeak="1285091" vmrss="724178" vmhwm="734505" />
+        <model path="mxnet/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="CPU" vmsize="2134158" vmpeak="2836756" vmrss="1438309" vmhwm="2140715" />
+        <model path="mxnet/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="GPU" vmsize="2781932" vmpeak="3912818" vmrss="2443178" vmhwm="3574105" />
+        <model path="mxnet/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="CPU" vmsize="2220634" vmpeak="2836865" vmrss="1468797" vmhwm="2139722" />
+        <model path="mxnet/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="GPU" vmsize="2790174" vmpeak="3834277" vmrss="2311826" vmhwm="3439888" />
+        <model path="mxnet/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="CPU" vmsize="2189153" vmpeak="2918723" vmrss="1491048" vmhwm="2220868" />
+        <model path="mxnet/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="GPU" vmsize="2869105" vmpeak="4001228" vmrss="2531100" vmhwm="3662869" />
+        <model path="mxnet/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="CPU" vmsize="2366254" vmpeak="2918817" vmrss="1523605" vmhwm="2221388" />
+        <model path="mxnet/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="GPU" vmsize="2877716" vmpeak="3944751" vmrss="2400091" vmhwm="3551449" />
+        <model path="mxnet/FP32/yolo_v1_full/yolo_v1_full.xml" test="create_exenetwork" device="CPU" vmsize="3569482" vmpeak="4949084" vmrss="2797106" vmhwm="4176364" />
+        <model path="mxnet/FP32/yolo_v1_full/yolo_v1_full.xml" test="create_exenetwork" device="GPU" vmsize="4819713" vmpeak="6984764" vmrss="4481042" vmhwm="6645126" />
+        <model path="mxnet/FP32/yolo_v1_full/yolo_v1_full.xml" test="infer_request_inference" device="CPU" vmsize="3929790" vmpeak="4858536" vmrss="2814931" vmhwm="4176198" />
+        <model path="mxnet/FP32/yolo_v1_full/yolo_v1_full.xml" test="infer_request_inference" device="GPU" vmsize="4816962" vmpeak="6932770" vmrss="4337715" vmhwm="6538006" />
+        <model path="mxnet/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="create_exenetwork" device="CPU" vmsize="1172662" vmpeak="1401509" vmrss="491966" vmhwm="720564" />
+        <model path="mxnet/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="create_exenetwork" device="GPU" vmsize="1345822" vmpeak="1585391" vmrss="1008384" vmhwm="1247916" />
+        <model path="mxnet/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="infer_request_inference" device="CPU" vmsize="1442381" vmpeak="1442381" vmrss="510697" vmhwm="720267" />
+        <model path="mxnet/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="infer_request_inference" device="GPU" vmsize="1348219" vmpeak="1513917" vmrss="870485" vmhwm="1120215" />
+        <model path="onnx/FP32/ssd_resnet34/ssd_resnet34.xml" test="create_exenetwork" device="CPU" vmsize="1106159" vmpeak="1204460" vmrss="268408" vmhwm="366470" />
+        <model path="onnx/FP32/ssd_resnet34/ssd_resnet34.xml" test="create_exenetwork" device="GPU" vmsize="1568190" vmpeak="1568190" vmrss="1230538" vmhwm="1230538" />
+        <model path="onnx/FP32/ssd_resnet34/ssd_resnet34.xml" test="infer_request_inference" device="CPU" vmsize="1395617" vmpeak="1395617" vmrss="399692" vmhwm="399692" />
+        <model path="onnx/FP32/ssd_resnet34/ssd_resnet34.xml" test="infer_request_inference" device="GPU" vmsize="1513621" vmpeak="1598818" vmrss="1035897" vmhwm="1035897" />
+        <model path="onnx/FP32/ssd_resnet34_new/ssd_resnet34_new.xml" test="create_exenetwork" device="CPU" vmsize="1108187" vmpeak="1206488" vmrss="271648" vmhwm="369590" />
+        <model path="onnx/FP32/ssd_resnet34_new/ssd_resnet34_new.xml" test="create_exenetwork" device="GPU" vmsize="2870816" vmpeak="2870816" vmrss="1290972" vmhwm="1290972" />
+        <model path="onnx/FP32/ssd_resnet34_new/ssd_resnet34_new.xml" test="infer_request_inference" device="CPU" vmsize="1396408" vmpeak="1396408" vmrss="396172" vmhwm="396172" />
+        <model path="onnx/FP32/ssd_resnet34_new/ssd_resnet34_new.xml" test="infer_request_inference" device="GPU" vmsize="2778490" vmpeak="2863686" vmrss="2307058" vmhwm="2307058" />
+        <model path="pytorch/FP32/inceptionv3_pretrained/inceptionv3_pretrained.xml" test="create_exenetwork" device="CPU" vmsize="979706" vmpeak="1098692" vmrss="295682" vmhwm="414247" />
+        <model path="pytorch/FP32/inceptionv3_pretrained/inceptionv3_pretrained.xml" test="create_exenetwork" device="GPU" vmsize="1303499" vmpeak="1390069" vmrss="965224" vmhwm="1051580" />
+        <model path="pytorch/FP32/inceptionv3_pretrained/inceptionv3_pretrained.xml" test="infer_request_inference" device="CPU" vmsize="1247750" vmpeak="1247750" vmrss="307928" vmhwm="415266" />
+        <model path="pytorch/FP32/inceptionv3_pretrained/inceptionv3_pretrained.xml" test="infer_request_inference" device="GPU" vmsize="1160265" vmpeak="1245462" vmrss="682354" vmhwm="766100" />
+        <model path="pytorch/FP32/resnet50_pretrained/resnet50_pretrained.xml" test="create_exenetwork" device="CPU" vmsize="985660" vmpeak="1111723" vmrss="304610" vmhwm="430336" />
+        <model path="pytorch/FP32/resnet50_pretrained/resnet50_pretrained.xml" test="create_exenetwork" device="GPU" vmsize="1170265" vmpeak="1281675" vmrss="833180" vmhwm="944299" />
+        <model path="pytorch/FP32/resnet50_pretrained/resnet50_pretrained.xml" test="infer_request_inference" device="CPU" vmsize="1253189" vmpeak="1253189" vmrss="316373" vmhwm="429618" />
+        <model path="pytorch/FP32/resnet50_pretrained/resnet50_pretrained.xml" test="infer_request_inference" device="GPU" vmsize="1091214" vmpeak="1176411" vmrss="613095" vmhwm="724110" />
+        <model path="pytorch/FP32/resnet50_torchvision/resnet50_torchvision.xml" test="create_exenetwork" device="CPU" vmsize="985660" vmpeak="1111723" vmrss="304772" vmhwm="430414" />
+        <model path="pytorch/FP32/resnet50_torchvision/resnet50_torchvision.xml" test="create_exenetwork" device="GPU" vmsize="1150806" vmpeak="1261878" vmrss="813394" vmhwm="924123" />
+        <model path="pytorch/FP32/resnet50_torchvision/resnet50_torchvision.xml" test="infer_request_inference" device="CPU" vmsize="1253194" vmpeak="1253194" vmrss="315463" vmhwm="428974" />
+        <model path="pytorch/FP32/resnet50_torchvision/resnet50_torchvision.xml" test="infer_request_inference" device="GPU" vmsize="1090070" vmpeak="1175267" vmrss="612274" vmhwm="722924" />
+        <model path="pytorch/FP32/squeezenet_v1.1_pretrained/squeezenet_v1.1_pretrained.xml" test="create_exenetwork" device="CPU" vmsize="705577" vmpeak="780457" vmrss="53320" vmhwm="53320" />
+        <model path="pytorch/FP32/squeezenet_v1.1_pretrained/squeezenet_v1.1_pretrained.xml" test="create_exenetwork" device="GPU" vmsize="716476" vmpeak="716476" vmrss="378487" vmhwm="378487" />
+        <model path="pytorch/FP32/squeezenet_v1.1_pretrained/squeezenet_v1.1_pretrained.xml" test="infer_request_inference" device="CPU" vmsize="972613" vmpeak="1057810" vmrss="57033" vmhwm="57033" />
+        <model path="pytorch/FP32/squeezenet_v1.1_pretrained/squeezenet_v1.1_pretrained.xml" test="infer_request_inference" device="GPU" vmsize="672594" vmpeak="757790" vmrss="194183" vmhwm="194183" />
+        <model path="tf/1.14.0/FP32/bert_base_uncased/bert_base_uncased.xml" test="create_exenetwork" device="CPU" vmsize="1863586" vmpeak="2298270" vmrss="1166578" vmhwm="1601236" />
+        <model path="tf/1.14.0/FP32/bert_base_uncased/bert_base_uncased.xml" test="create_exenetwork" device="GPU" vmsize="3438385" vmpeak="3992487" vmrss="3100890" vmhwm="3654268" />
+        <model path="tf/1.14.0/FP32/bert_base_uncased/bert_base_uncased.xml" test="infer_request_inference" device="CPU" vmsize="2136893" vmpeak="2298270" vmrss="1177888" vmhwm="1601350" />
+        <model path="tf/1.14.0/FP32/bert_base_uncased/bert_base_uncased.xml" test="infer_request_inference" device="GPU" vmsize="2866156" vmpeak="3332056" vmrss="2390778" vmhwm="2939315" />
+        <model path="tf/1.14.0/FP32/bert_xnli/bert_xnli.xml" test="create_exenetwork" device="CPU" vmsize="1795970" vmpeak="2230654" vmrss="1095978" vmhwm="1530557" />
+        <model path="tf/1.14.0/FP32/bert_xnli/bert_xnli.xml" test="create_exenetwork" device="GPU" vmsize="3373229" vmpeak="3883687" vmrss="3035104" vmhwm="3545068" />
+        <model path="tf/1.14.0/FP32/bert_xnli/bert_xnli.xml" test="infer_request_inference" device="CPU" vmsize="2069298" vmpeak="2230675" vmrss="1108967" vmhwm="1530178" />
+        <model path="tf/1.14.0/FP32/bert_xnli/bert_xnli.xml" test="infer_request_inference" device="GPU" vmsize="2783367" vmpeak="3206626" vmrss="2308222" vmhwm="2813283" />
+        <model path="tf/1.14.0/FP32/cmu/cmu.xml" test="create_exenetwork" device="CPU" vmsize="1389767" vmpeak="1653657" vmrss="587459" vmhwm="851136" />
+        <model path="tf/1.14.0/FP32/cmu/cmu.xml" test="create_exenetwork" device="GPU" vmsize="1997091" vmpeak="1999374" vmrss="1659538" vmhwm="1661498" />
+        <model path="tf/1.14.0/FP32/cmu/cmu.xml" test="infer_request_inference" device="CPU" vmsize="1660250" vmpeak="1660250" vmrss="717350" vmhwm="850948" />
+        <model path="tf/1.14.0/FP32/cmu/cmu.xml" test="infer_request_inference" device="GPU" vmsize="1842703" vmpeak="1927900" vmrss="1363991" vmhwm="1363991" />
+        <model path="tf/1.14.0/FP32/deeplab_v3/deeplab_v3.xml" test="create_exenetwork" device="CPU" vmsize="783562" vmpeak="783562" vmrss="74089" vmhwm="74089" />
+        <model path="tf/1.14.0/FP32/deeplab_v3/deeplab_v3.xml" test="create_exenetwork" device="GPU" vmsize="976300" vmpeak="976300" vmrss="639132" vmhwm="639132" />
+        <model path="tf/1.14.0/FP32/deeplab_v3/deeplab_v3.xml" test="infer_request_inference" device="CPU" vmsize="1055204" vmpeak="1140401" vmrss="135018" vmhwm="135018" />
+        <model path="tf/1.14.0/FP32/deeplab_v3/deeplab_v3.xml" test="infer_request_inference" device="GPU" vmsize="895616" vmpeak="980813" vmrss="418631" vmhwm="418631" />
+        <model path="tf/1.14.0/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="CPU" vmsize="903520" vmpeak="903520" vmrss="182405" vmhwm="182405" />
+        <model path="tf/1.14.0/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="GPU" vmsize="1300780" vmpeak="1300780" vmrss="963144" vmhwm="963144" />
+        <model path="tf/1.14.0/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="CPU" vmsize="1261171" vmpeak="1346368" vmrss="191354" vmhwm="191354" />
+        <model path="tf/1.14.0/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="GPU" vmsize="1066088" vmpeak="1151285" vmrss="588608" vmhwm="588608" />
+        <model path="tf/1.14.0/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="CPU" vmsize="992097" vmpeak="1004718" vmrss="276021" vmhwm="288532" />
+        <model path="tf/1.14.0/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="GPU" vmsize="1673510" vmpeak="1686178" vmrss="1335256" vmhwm="1346415" />
+        <model path="tf/1.14.0/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="CPU" vmsize="1259304" vmpeak="1259304" vmrss="285667" vmhwm="288584" />
+        <model path="tf/1.14.0/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="GPU" vmsize="1318803" vmpeak="1404000" vmrss="840652" vmhwm="840652" />
+        <model path="tf/1.14.0/FP32/dssd_avigilon/dssd_avigilon.xml" test="create_exenetwork" device="CPU" vmsize="742190" vmpeak="801429" vmrss="120036" vmhwm="120036" />
+        <model path="tf/1.14.0/FP32/dssd_avigilon/dssd_avigilon.xml" test="create_exenetwork" device="GPU" vmsize="917155" vmpeak="917155" vmrss="580470" vmhwm="580470" />
+        <model path="tf/1.14.0/FP32/dssd_avigilon/dssd_avigilon.xml" test="infer_request_inference" device="CPU" vmsize="828079" vmpeak="828079" vmrss="124950" vmhwm="124950" />
+        <model path="tf/1.14.0/FP32/dssd_avigilon/dssd_avigilon.xml" test="infer_request_inference" device="GPU" vmsize="798803" vmpeak="884000" vmrss="322223" vmhwm="322223" />
+        <model path="tf/1.14.0/FP32/facenet/facenet.xml" test="create_exenetwork" device="CPU" vmsize="1036542" vmpeak="1123340" vmrss="332675" vmhwm="418984" />
+        <model path="tf/1.14.0/FP32/facenet/facenet.xml" test="create_exenetwork" device="GPU" vmsize="1419095" vmpeak="1503018" vmrss="1081142" vmhwm="1164966" />
+        <model path="tf/1.14.0/FP32/facenet/facenet.xml" test="infer_request_inference" device="CPU" vmsize="1122513" vmpeak="1207710" vmrss="333564" vmhwm="417877" />
+        <model path="tf/1.14.0/FP32/facenet/facenet.xml" test="infer_request_inference" device="GPU" vmsize="1206654" vmpeak="1291851" vmrss="729799" vmhwm="812141" />
+        <model path="tf/1.14.0/FP32/faster_rcnn_inception_resnet_v2_atrous_coco/faster_rcnn_inception_resnet_v2_atrous_coco.xml" test="create_exenetwork" device="CPU" vmsize="2502557" vmpeak="2710479" vmrss="803394" vmhwm="1011098" />
+        <model path="tf/1.14.0/FP32/faster_rcnn_inception_resnet_v2_atrous_coco/faster_rcnn_inception_resnet_v2_atrous_coco.xml" test="create_exenetwork" device="GPU" vmsize="4844647" vmpeak="4844647" vmrss="4505820" vmhwm="4505820" />
+        <model path="tf/1.14.0/FP32/faster_rcnn_inception_v2_coco/faster_rcnn_inception_v2_coco.xml" test="create_exenetwork" device="CPU" vmsize="927518" vmpeak="990735" vmrss="192327" vmhwm="255424" />
+        <model path="tf/1.14.0/FP32/faster_rcnn_inception_v2_coco/faster_rcnn_inception_v2_coco.xml" test="create_exenetwork" device="GPU" vmsize="1410156" vmpeak="1410156" vmrss="1071818" vmhwm="1071818" />
+        <model path="tf/1.14.0/FP32/faster_rcnn_resnet101_coco/faster_rcnn_resnet101_coco.xml" test="create_exenetwork" device="CPU" vmsize="1348308" vmpeak="1587736" vmrss="555162" vmhwm="794456" />
+        <model path="tf/1.14.0/FP32/faster_rcnn_resnet101_coco/faster_rcnn_resnet101_coco.xml" test="create_exenetwork" device="GPU" vmsize="2073328" vmpeak="2139914" vmrss="1735650" vmhwm="1801794" />
+        <model path="tf/1.14.0/FP32/faster_rcnn_resnet50_coco/faster_rcnn_resnet50_coco.xml" test="create_exenetwork" device="CPU" vmsize="1137926" vmpeak="1282252" vmrss="347172" vmhwm="491384" />
+        <model path="tf/1.14.0/FP32/faster_rcnn_resnet50_coco/faster_rcnn_resnet50_coco.xml" test="create_exenetwork" device="GPU" vmsize="1528581" vmpeak="1558133" vmrss="1191273" vmhwm="1220918" />
+        <model path="tf/1.14.0/FP32/i3d_rgb/i3d_rgb.xml" test="create_exenetwork" device="CPU" vmsize="1064445" vmpeak="1124276" vmrss="233131" vmhwm="292728" />
+        <model path="tf/1.14.0/FP32/i3d_rgb/i3d_rgb.xml" test="create_exenetwork" device="GPU" vmsize="1608666" vmpeak="1608666" vmrss="1270744" vmhwm="1270744" />
+        <model path="tf/1.14.0/FP32/i3d_rgb/i3d_rgb.xml" test="infer_request_inference" device="CPU" vmsize="1209941" vmpeak="1295138" vmrss="396422" vmhwm="396422" />
+        <model path="tf/1.14.0/FP32/i3d_rgb/i3d_rgb.xml" test="infer_request_inference" device="GPU" vmsize="1593238" vmpeak="1678435" vmrss="1137583" vmhwm="1257484" />
+        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.0/icv_squeezenet_v1.0.xml" test="create_exenetwork" device="CPU" vmsize="713814" vmpeak="788028" vmrss="53034" vmhwm="53034" />
+        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.0/icv_squeezenet_v1.0.xml" test="create_exenetwork" device="GPU" vmsize="701729" vmpeak="701729" vmrss="363578" vmhwm="363578" />
+        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.0/icv_squeezenet_v1.0.xml" test="infer_request_inference" device="CPU" vmsize="799869" vmpeak="885066" vmrss="59810" vmhwm="59810" />
+        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.0/icv_squeezenet_v1.0.xml" test="infer_request_inference" device="GPU" vmsize="687694" vmpeak="772891" vmrss="209248" vmhwm="209248" />
+        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.1/icv_squeezenet_v1.1.xml" test="create_exenetwork" device="CPU" vmsize="706258" vmpeak="780140" vmrss="52884" vmhwm="52884" />
+        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.1/icv_squeezenet_v1.1.xml" test="create_exenetwork" device="GPU" vmsize="705052" vmpeak="705052" vmrss="367395" vmhwm="367395" />
+        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.1/icv_squeezenet_v1.1.xml" test="infer_request_inference" device="CPU" vmsize="973367" vmpeak="1058564" vmrss="56414" vmhwm="56414" />
+        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.1/icv_squeezenet_v1.1.xml" test="infer_request_inference" device="GPU" vmsize="677320" vmpeak="762517" vmrss="198619" vmhwm="198619" />
+        <model path="tf/1.14.0/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="CPU" vmsize="1437061" vmpeak="1624516" vmrss="755024" vmhwm="942141" />
+        <model path="tf/1.14.0/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="GPU" vmsize="2478034" vmpeak="2597150" vmrss="2139680" vmhwm="2258219" />
+        <model path="tf/1.14.0/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="CPU" vmsize="1524120" vmpeak="1624521" vmrss="762559" vmhwm="940914" />
+        <model path="tf/1.14.0/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="GPU" vmsize="2100274" vmpeak="2185471" vmrss="1622847" vmhwm="1739566" />
+        <model path="tf/1.14.0/FP32/inception_v1/inception_v1.xml" test="create_exenetwork" device="CPU" vmsize="748534" vmpeak="809437" vmrss="143514" vmhwm="143514" />
+        <model path="tf/1.14.0/FP32/inception_v1/inception_v1.xml" test="create_exenetwork" device="GPU" vmsize="943758" vmpeak="943758" vmrss="606392" vmhwm="606392" />
+        <model path="tf/1.14.0/FP32/inception_v1/inception_v1.xml" test="infer_request_inference" device="CPU" vmsize="1015783" vmpeak="1015783" vmrss="147118" vmhwm="147118" />
+        <model path="tf/1.14.0/FP32/inception_v1/inception_v1.xml" test="infer_request_inference" device="GPU" vmsize="835073" vmpeak="920270" vmrss="357146" vmhwm="357146" />
+        <model path="tf/1.14.0/FP32/inception_v2/inception_v2.xml" test="create_exenetwork" device="CPU" vmsize="834953" vmpeak="887541" vmrss="164626" vmhwm="217001" />
+        <model path="tf/1.14.0/FP32/inception_v2/inception_v2.xml" test="create_exenetwork" device="GPU" vmsize="1034649" vmpeak="1064835" vmrss="696592" vmhwm="726694" />
+        <model path="tf/1.14.0/FP32/inception_v2/inception_v2.xml" test="infer_request_inference" device="CPU" vmsize="921081" vmpeak="1006278" vmrss="167502" vmhwm="215597" />
+        <model path="tf/1.14.0/FP32/inception_v2/inception_v2.xml" test="infer_request_inference" device="GPU" vmsize="911310" vmpeak="996507" vmrss="433617" vmhwm="464682" />
+        <model path="tf/1.14.0/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="CPU" vmsize="971453" vmpeak="1081683" vmrss="305390" vmhwm="415204" />
+        <model path="tf/1.14.0/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="GPU" vmsize="1332598" vmpeak="1413375" vmrss="995165" vmhwm="1075859" />
+        <model path="tf/1.14.0/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="CPU" vmsize="1148685" vmpeak="1233882" vmrss="314220" vmhwm="414882" />
+        <model path="tf/1.14.0/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="GPU" vmsize="1167634" vmpeak="1252830" vmrss="689416" vmhwm="769002" />
+        <model path="tf/1.14.0/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="CPU" vmsize="1189630" vmpeak="1393740" vmrss="511908" vmhwm="715540" />
+        <model path="tf/1.14.0/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="GPU" vmsize="1867418" vmpeak="2007080" vmrss="1529990" vmhwm="1668929" />
+        <model path="tf/1.14.0/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="CPU" vmsize="1367256" vmpeak="1452453" vmrss="523946" vmhwm="715577" />
+        <model path="tf/1.14.0/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="GPU" vmsize="1611350" vmpeak="1696546" vmrss="1133615" vmhwm="1270427" />
+        <model path="tf/1.14.0/FP32/mask_rcnn_resnet101_atrous_coco/mask_rcnn_resnet101_atrous_coco.xml" test="create_exenetwork" device="CPU" vmsize="2715268" vmpeak="3061650" vmrss="776375" vmhwm="1122695" />
+        <model path="tf/1.14.0/FP32/mask_rcnn_resnet101_atrous_coco/mask_rcnn_resnet101_atrous_coco.xml" test="create_exenetwork" device="GPU" vmsize="4160156" vmpeak="4971210" vmrss="3823164" vmhwm="4634151" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_0.25_128/mobilenet_v1_0.25_128.xml" test="create_exenetwork" device="CPU" vmsize="701350" vmpeak="776562" vmrss="42281" vmhwm="42281" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_0.25_128/mobilenet_v1_0.25_128.xml" test="create_exenetwork" device="GPU" vmsize="717771" vmpeak="717771" vmrss="379501" vmhwm="379501" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_0.25_128/mobilenet_v1_0.25_128.xml" test="infer_request_inference" device="CPU" vmsize="786552" vmpeak="786552" vmrss="42406" vmhwm="42406" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_0.25_128/mobilenet_v1_0.25_128.xml" test="infer_request_inference" device="GPU" vmsize="656084" vmpeak="741280" vmrss="177543" vmhwm="177543" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_0.5_160/mobilenet_v1_0.5_160.xml" test="create_exenetwork" device="CPU" vmsize="705936" vmpeak="781149" vmrss="55619" vmhwm="55619" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_0.5_160/mobilenet_v1_0.5_160.xml" test="create_exenetwork" device="GPU" vmsize="724765" vmpeak="724765" vmrss="386458" vmhwm="386458" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_0.5_160/mobilenet_v1_0.5_160.xml" test="infer_request_inference" device="CPU" vmsize="791554" vmpeak="791554" vmrss="55582" vmhwm="55582" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_0.5_160/mobilenet_v1_0.5_160.xml" test="infer_request_inference" device="GPU" vmsize="670987" vmpeak="756184" vmrss="193029" vmhwm="193029" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_1.0_224/mobilenet_v1_1.0_224.xml" test="create_exenetwork" device="CPU" vmsize="720673" vmpeak="720673" vmrss="99512" vmhwm="99512" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_1.0_224/mobilenet_v1_1.0_224.xml" test="create_exenetwork" device="GPU" vmsize="771253" vmpeak="771253" vmrss="433087" vmhwm="433087" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_1.0_224/mobilenet_v1_1.0_224.xml" test="infer_request_inference" device="CPU" vmsize="987828" vmpeak="1073025" vmrss="104005" vmhwm="104005" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_1.0_224/mobilenet_v1_1.0_224.xml" test="infer_request_inference" device="GPU" vmsize="726986" vmpeak="812182" vmrss="248450" vmhwm="248450" />
+        <model path="tf/1.14.0/FP32/mobilenet_v2_1.0_224/mobilenet_v2_1.0_224.xml" test="create_exenetwork" device="CPU" vmsize="726554" vmpeak="793447" vmrss="91452" vmhwm="91452" />
+        <model path="tf/1.14.0/FP32/mobilenet_v2_1.0_224/mobilenet_v2_1.0_224.xml" test="create_exenetwork" device="GPU" vmsize="857027" vmpeak="857027" vmrss="519630" vmhwm="519630" />
+        <model path="tf/1.14.0/FP32/mobilenet_v2_1.0_224/mobilenet_v2_1.0_224.xml" test="infer_request_inference" device="CPU" vmsize="812619" vmpeak="897816" vmrss="100895" vmhwm="100895" />
+        <model path="tf/1.14.0/FP32/mobilenet_v2_1.0_224/mobilenet_v2_1.0_224.xml" test="infer_request_inference" device="GPU" vmsize="764800" vmpeak="849997" vmrss="287019" vmhwm="287019" />
+        <model path="tf/1.14.0/FP32/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224.xml" test="create_exenetwork" device="CPU" vmsize="739960" vmpeak="739960" vmrss="134924" vmhwm="134924" />
+        <model path="tf/1.14.0/FP32/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224.xml" test="create_exenetwork" device="GPU" vmsize="905439" vmpeak="905439" vmrss="567876" vmhwm="567876" />
+        <model path="tf/1.14.0/FP32/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224.xml" test="infer_request_inference" device="CPU" vmsize="825988" vmpeak="891722" vmrss="144684" vmhwm="144684" />
+        <model path="tf/1.14.0/FP32/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224.xml" test="infer_request_inference" device="GPU" vmsize="821251" vmpeak="906448" vmrss="343085" vmhwm="343085" />
+        <model path="tf/1.14.0/FP32/ncf/ncf.xml" test="create_exenetwork" device="CPU" vmsize="1026407" vmpeak="1026407" vmrss="351535" vmhwm="351535" />
+        <model path="tf/1.14.0/FP32/ncf/ncf.xml" test="create_exenetwork" device="GPU" vmsize="1104485" vmpeak="1149496" vmrss="766740" vmhwm="811642" />
+        <model path="tf/1.14.0/FP32/ncf/ncf.xml" test="infer_request_inference" device="CPU" vmsize="1209280" vmpeak="1209280" vmrss="362325" vmhwm="362325" />
+        <model path="tf/1.14.0/FP32/ncf/ncf.xml" test="infer_request_inference" device="GPU" vmsize="1105275" vmpeak="1190472" vmrss="627822" vmhwm="671450" />
+        <model path="tf/1.14.0/FP32/resnet_v1.5_50/resnet_v1.5_50.xml" test="create_exenetwork" device="CPU" vmsize="988072" vmpeak="1114146" vmrss="304798" vmhwm="430279" />
+        <model path="tf/1.14.0/FP32/resnet_v1.5_50/resnet_v1.5_50.xml" test="create_exenetwork" device="GPU" vmsize="1171383" vmpeak="1282325" vmrss="833705" vmhwm="944476" />
+        <model path="tf/1.14.0/FP32/resnet_v1.5_50/resnet_v1.5_50.xml" test="infer_request_inference" device="CPU" vmsize="1164982" vmpeak="1250178" vmrss="319394" vmhwm="429904" />
+        <model path="tf/1.14.0/FP32/resnet_v1.5_50/resnet_v1.5_50.xml" test="infer_request_inference" device="GPU" vmsize="1090481" vmpeak="1115056" vmrss="613485" vmhwm="722176" />
+        <model path="tf/1.14.0/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="CPU" vmsize="1185163" vmpeak="1406329" vmrss="511669" vmhwm="732674" />
+        <model path="tf/1.14.0/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="GPU" vmsize="1646897" vmpeak="1857653" vmrss="1308538" vmhwm="1518940" />
+        <model path="tf/1.14.0/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="CPU" vmsize="1361906" vmpeak="1447102" vmrss="515138" vmhwm="731073" />
+        <model path="tf/1.14.0/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="GPU" vmsize="1486612" vmpeak="1612171" vmrss="1008602" vmhwm="1218973" />
+        <model path="tf/1.14.0/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="CPU" vmsize="1361328" vmpeak="1659262" vmrss="685287" vmhwm="983091" />
+        <model path="tf/1.14.0/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="GPU" vmsize="2053204" vmpeak="2340951" vmrss="1714788" vmhwm="2002072" />
+        <model path="tf/1.14.0/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="CPU" vmsize="1628504" vmpeak="1713701" vmrss="690892" vmhwm="983257" />
+        <model path="tf/1.14.0/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="GPU" vmsize="1817290" vmpeak="2019841" vmrss="1338792" vmhwm="1625405" />
+        <model path="tf/1.14.0/FP32/resnet_v1_50/resnet_v1_50.xml" test="create_exenetwork" device="CPU" vmsize="980148" vmpeak="1106211" vmrss="304340" vmhwm="430242" />
+        <model path="tf/1.14.0/FP32/resnet_v1_50/resnet_v1_50.xml" test="create_exenetwork" device="GPU" vmsize="1177410" vmpeak="1291040" vmrss="839217" vmhwm="952868" />
+        <model path="tf/1.14.0/FP32/resnet_v1_50/resnet_v1_50.xml" test="infer_request_inference" device="CPU" vmsize="1060997" vmpeak="1146194" vmrss="308906" vmhwm="429811" />
+        <model path="tf/1.14.0/FP32/resnet_v1_50/resnet_v1_50.xml" test="infer_request_inference" device="GPU" vmsize="1094189" vmpeak="1123038" vmrss="616548" vmhwm="730298" />
+        <model path="tf/1.14.0/FP32/resnet_v2_101/resnet_v2_101.xml" test="create_exenetwork" device="CPU" vmsize="1217086" vmpeak="1438262" vmrss="515611" vmhwm="736502" />
+        <model path="tf/1.14.0/FP32/resnet_v2_101/resnet_v2_101.xml" test="create_exenetwork" device="GPU" vmsize="1721532" vmpeak="1922648" vmrss="1383304" vmhwm="1584195" />
+        <model path="tf/1.14.0/FP32/resnet_v2_101/resnet_v2_101.xml" test="infer_request_inference" device="CPU" vmsize="1394296" vmpeak="1479493" vmrss="530197" vmhwm="735883" />
+        <model path="tf/1.14.0/FP32/resnet_v2_101/resnet_v2_101.xml" test="infer_request_inference" device="GPU" vmsize="1533625" vmpeak="1649492" vmrss="1055813" vmhwm="1256236" />
+        <model path="tf/1.14.0/FP32/resnet_v2_152/resnet_v2_152.xml" test="create_exenetwork" device="CPU" vmsize="1664005" vmpeak="1929070" vmrss="791611" vmhwm="988280" />
+        <model path="tf/1.14.0/FP32/resnet_v2_152/resnet_v2_152.xml" test="create_exenetwork" device="GPU" vmsize="2054062" vmpeak="2324472" vmrss="1715776" vmhwm="1985344" />
+        <model path="tf/1.14.0/FP32/resnet_v2_152/resnet_v2_152.xml" test="infer_request_inference" device="CPU" vmsize="1750642" vmpeak="1750642" vmrss="806811" vmhwm="988041" />
+        <model path="tf/1.14.0/FP32/resnet_v2_152/resnet_v2_152.xml" test="infer_request_inference" device="GPU" vmsize="1905020" vmpeak="2088814" vmrss="1426682" vmhwm="1694347" />
+        <model path="tf/1.14.0/FP32/resnet_v2_50/resnet_v2_50.xml" test="create_exenetwork" device="CPU" vmsize="994541" vmpeak="1120615" vmrss="307034" vmhwm="432806" />
+        <model path="tf/1.14.0/FP32/resnet_v2_50/resnet_v2_50.xml" test="create_exenetwork" device="GPU" vmsize="1212042" vmpeak="1312194" vmrss="874780" vmhwm="974438" />
+        <model path="tf/1.14.0/FP32/resnet_v2_50/resnet_v2_50.xml" test="infer_request_inference" device="CPU" vmsize="1081334" vmpeak="1166531" vmrss="322436" vmhwm="432702" />
+        <model path="tf/1.14.0/FP32/resnet_v2_50/resnet_v2_50.xml" test="infer_request_inference" device="GPU" vmsize="1116720" vmpeak="1132315" vmrss="638097" vmhwm="738348" />
+        <model path="tf/1.14.0/FP32/rfcn_resnet101_coco/rfcn_resnet101_coco.xml" test="create_exenetwork" device="CPU" vmsize="1467762" vmpeak="1671108" vmrss="691412" vmhwm="894509" />
+        <model path="tf/1.14.0/FP32/rfcn_resnet101_coco/rfcn_resnet101_coco.xml" test="create_exenetwork" device="GPU" vmsize="2625381" vmpeak="2732168" vmrss="2288915" vmhwm="2392494" />
+        <model path="tf/1.14.0/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="CPU" vmsize="713590" vmpeak="788138" vmrss="53216" vmhwm="53216" />
+        <model path="tf/1.14.0/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="GPU" vmsize="724427" vmpeak="724427" vmrss="386354" vmhwm="386354" />
+        <model path="tf/1.14.0/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="CPU" vmsize="799604" vmpeak="799604" vmrss="59534" vmhwm="59534" />
+        <model path="tf/1.14.0/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="GPU" vmsize="685677" vmpeak="770874" vmrss="206845" vmhwm="206845" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_coco/ssd_mobilenet_v1_coco.xml" test="create_exenetwork" device="CPU" vmsize="832010" vmpeak="832010" vmrss="144367" vmhwm="144367" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_coco/ssd_mobilenet_v1_coco.xml" test="create_exenetwork" device="GPU" vmsize="920249" vmpeak="920249" vmrss="582769" vmhwm="582769" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_coco/ssd_mobilenet_v1_coco.xml" test="infer_request_inference" device="CPU" vmsize="1009200" vmpeak="1094397" vmrss="156052" vmhwm="156052" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_coco/ssd_mobilenet_v1_coco.xml" test="infer_request_inference" device="GPU" vmsize="851666" vmpeak="936863" vmrss="374660" vmhwm="374660" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco/ssd_mobilenet_v1_fpn_coco.xml" test="create_exenetwork" device="CPU" vmsize="1357855" vmpeak="1537842" vmrss="428038" vmhwm="602841" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco/ssd_mobilenet_v1_fpn_coco.xml" test="create_exenetwork" device="GPU" vmsize="1748255" vmpeak="1748255" vmrss="1410474" vmhwm="1410474" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco/ssd_mobilenet_v1_fpn_coco.xml" test="infer_request_inference" device="CPU" vmsize="1539933" vmpeak="1625130" vmrss="506157" vmhwm="602326" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco/ssd_mobilenet_v1_fpn_coco.xml" test="infer_request_inference" device="GPU" vmsize="1597762" vmpeak="1597762" vmrss="1125956" vmhwm="1125956" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco_602x602/ssd_mobilenet_v1_fpn_coco_602x602.xml" test="create_exenetwork" device="CPU" vmsize="1508566" vmpeak="1688554" vmrss="427086" vmhwm="602414" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco_602x602/ssd_mobilenet_v1_fpn_coco_602x602.xml" test="create_exenetwork" device="GPU" vmsize="1694071" vmpeak="1694071" vmrss="1356300" vmhwm="1356300" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco_602x602/ssd_mobilenet_v1_fpn_coco_602x602.xml" test="infer_request_inference" device="CPU" vmsize="1418346" vmpeak="1507495" vmrss="498206" vmhwm="602238" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco_602x602/ssd_mobilenet_v1_fpn_coco_602x602.xml" test="infer_request_inference" device="GPU" vmsize="1533370" vmpeak="1618567" vmrss="1062006" vmhwm="1062006" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v2_coco/ssd_mobilenet_v2_coco.xml" test="create_exenetwork" device="CPU" vmsize="912147" vmpeak="990698" vmrss="224068" vmhwm="302484" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v2_coco/ssd_mobilenet_v2_coco.xml" test="create_exenetwork" device="GPU" vmsize="1144707" vmpeak="1222395" vmrss="807570" vmhwm="885076" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v2_coco/ssd_mobilenet_v2_coco.xml" test="infer_request_inference" device="CPU" vmsize="998842" vmpeak="1048663" vmrss="239059" vmhwm="302291" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v2_coco/ssd_mobilenet_v2_coco.xml" test="infer_request_inference" device="GPU" vmsize="1054336" vmpeak="1139533" vmrss="577106" vmhwm="651913" />
+        <model path="tf/1.14.0/FP32/unet2d/unet2d.xml" test="create_exenetwork" device="CPU" vmsize="1046905" vmpeak="1206301" vmrss="351400" vmhwm="510603" />
+        <model path="tf/1.14.0/FP32/unet2d/unet2d.xml" test="create_exenetwork" device="GPU" vmsize="1199005" vmpeak="1333363" vmrss="861400" vmhwm="995815" />
+        <model path="tf/1.14.0/FP32/unet2d/unet2d.xml" test="infer_request_inference" device="CPU" vmsize="1132003" vmpeak="1217200" vmrss="380998" vmhwm="509615" />
+        <model path="tf/1.14.0/FP32/unet2d/unet2d.xml" test="infer_request_inference" device="GPU" vmsize="1174336" vmpeak="1259533" vmrss="696300" vmhwm="857849" />
+        <model path="tf/1.14.0/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="CPU" vmsize="2133768" vmpeak="2836366" vmrss="1437966" vmhwm="2140403" />
+        <model path="tf/1.14.0/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="GPU" vmsize="2803710" vmpeak="3934762" vmrss="2464961" vmhwm="3596054" />
+        <model path="tf/1.14.0/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="CPU" vmsize="2400741" vmpeak="2836230" vmrss="1468438" vmhwm="2139410" />
+        <model path="tf/1.14.0/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="GPU" vmsize="2793221" vmpeak="3855737" vmrss="2313766" vmhwm="3461135" />
+        <model path="tf/1.14.0/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="CPU" vmsize="2188924" vmpeak="2918494" vmrss="1491630" vmhwm="2221008" />
+        <model path="tf/1.14.0/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="GPU" vmsize="2899624" vmpeak="4031731" vmrss="2561410" vmhwm="3693086" />
+        <model path="tf/1.14.0/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="CPU" vmsize="2274792" vmpeak="2918401" vmrss="1523438" vmhwm="2221039" />
+        <model path="tf/1.14.0/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="GPU" vmsize="2877160" vmpeak="3966222" vmrss="2398546" vmhwm="3572186" />
+        <model path="tf/1.14.0/FP32/yolo_v2/yolo_v2.xml" test="create_exenetwork" device="CPU" vmsize="1252357" vmpeak="1511010" vmrss="552931" vmhwm="811361" />
+        <model path="tf/1.14.0/FP32/yolo_v2/yolo_v2.xml" test="create_exenetwork" device="GPU" vmsize="1481464" vmpeak="1701512" vmrss="1144072" vmhwm="1363939" />
+        <model path="tf/1.14.0/FP32/yolo_v2/yolo_v2.xml" test="infer_request_inference" device="CPU" vmsize="1340471" vmpeak="1510438" vmrss="585192" vmhwm="810186" />
+        <model path="tf/1.14.0/FP32/yolo_v2/yolo_v2.xml" test="infer_request_inference" device="GPU" vmsize="1465339" vmpeak="1601189" vmrss="987604" vmhwm="1207902" />
+        <model path="tf/1.14.0/FP32/yolo_v2_tiny_voc/yolo_v2_tiny_voc.xml" test="create_exenetwork" device="CPU" vmsize="872019" vmpeak="952447" vmrss="192904" vmhwm="272953" />
+        <model path="tf/1.14.0/FP32/yolo_v2_tiny_voc/yolo_v2_tiny_voc.xml" test="create_exenetwork" device="GPU" vmsize="876340" vmpeak="970054" vmrss="538460" vmhwm="632299" />
+        <model path="tf/1.14.0/FP32/yolo_v2_tiny_voc/yolo_v2_tiny_voc.xml" test="infer_request_inference" device="CPU" vmsize="959992" vmpeak="1045189" vmrss="207662" vmhwm="273093" />
+        <model path="tf/1.14.0/FP32/yolo_v2_tiny_voc/yolo_v2_tiny_voc.xml" test="infer_request_inference" device="GPU" vmsize="883292" vmpeak="968489" vmrss="405891" vmhwm="476907" />
+        <model path="tf/1.14.0/FP32/yolo_v2_voc/yolo_v2_voc.xml" test="create_exenetwork" device="CPU" vmsize="1248988" vmpeak="1505738" vmrss="549031" vmhwm="805745" />
+        <model path="tf/1.14.0/FP32/yolo_v2_voc/yolo_v2_voc.xml" test="create_exenetwork" device="GPU" vmsize="1459816" vmpeak="1681716" vmrss="1121952" vmhwm="1343638" />
+        <model path="tf/1.14.0/FP32/yolo_v2_voc/yolo_v2_voc.xml" test="infer_request_inference" device="CPU" vmsize="1337055" vmpeak="1506221" vmrss="582212" vmhwm="806447" />
+        <model path="tf/1.14.0/FP32/yolo_v2_voc/yolo_v2_voc.xml" test="infer_request_inference" device="GPU" vmsize="1456322" vmpeak="1589104" vmrss="977688" vmhwm="1194798" />
+        <model path="tf/1.14.0/FP32/yolo_v3/yolo_v3.xml" test="create_exenetwork" device="CPU" vmsize="1388498" vmpeak="1700405" vmrss="680981" vmhwm="992706" />
+        <model path="tf/1.14.0/FP32/yolo_v3/yolo_v3.xml" test="create_exenetwork" device="GPU" vmsize="1904952" vmpeak="2102276" vmrss="1567898" vmhwm="1764921" />
+        <model path="tf/1.14.0/FP32/yolo_v3/yolo_v3.xml" test="infer_request_inference" device="CPU" vmsize="1486066" vmpeak="1705636" vmrss="724443" vmhwm="992409" />
+        <model path="tf/1.14.0/FP32/yolo_v3/yolo_v3.xml" test="infer_request_inference" device="GPU" vmsize="1809121" vmpeak="1916995" vmrss="1331512" vmhwm="1523137" />
+        <model path="tf/1.14.0/FP32/yolo_v3_tiny/yolo_v3_tiny.xml" test="create_exenetwork" device="CPU" vmsize="803400" vmpeak="848244" vmrss="123765" vmhwm="168360" />
+        <model path="tf/1.14.0/FP32/yolo_v3_tiny/yolo_v3_tiny.xml" test="create_exenetwork" device="GPU" vmsize="795683" vmpeak="825796" vmrss="458718" vmhwm="488498" />
+        <model path="tf/1.14.0/FP32/yolo_v3_tiny/yolo_v3_tiny.xml" test="infer_request_inference" device="CPU" vmsize="892273" vmpeak="977470" vmrss="139048" vmhwm="168292" />
+        <model path="tf/1.14.0/FP32/yolo_v3_tiny/yolo_v3_tiny.xml" test="infer_request_inference" device="GPU" vmsize="789438" vmpeak="874634" vmrss="312400" vmhwm="338832" />
     </models>
 </attributes>
\ No newline at end of file
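Note on the metrics in these reference entries: the vmsize, vmpeak, vmrss, and vmhwm attributes mirror the VmSize, VmPeak, VmRSS, and VmHWM fields that Linux exposes in /proc/<pid>/status, which the kernel reports in kilobytes. A minimal sketch of sampling those fields (the memory_kb helper is illustrative, not part of the test suite):

    # Sketch: sample VmSize/VmPeak/VmRSS/VmHWM (kB) from /proc/self/status.
    # The field names are the kernel's; memory_kb itself is hypothetical.
    def memory_kb():
        metrics = {}
        with open("/proc/self/status") as status:
            for line in status:
                key, _, value = line.partition(":")
                if key in ("VmSize", "VmPeak", "VmRSS", "VmHWM"):
                    # Each matching line looks like "VmRSS:   123456 kB".
                    metrics[key.lower()] = int(value.split()[0])
        return metrics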
diff --git a/tests/stress_tests/.automation/memcheck_tests/precommit_configs/desktop_env_config.xml b/tests/stress_tests/.automation/memcheck_tests/precommit_configs/desktop_env_config.xml
new file mode 100644 (file)
index 0000000..7e137c7
--- /dev/null
@@ -0,0 +1,6 @@
+<?xml version="1.0"?>
+<attributes>
+    <irs_path>
+        <value>${STRESS_IRS_PATH}</value>
+    </irs_path>
+</attributes>
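The desktop_env_config.xml added above carries a ${STRESS_IRS_PATH} placeholder rather than a literal path, so the value has to be resolved from the environment when the config is consumed. One way such substitution can be done before parsing, as a sketch (load_env_config is a hypothetical helper; the harness's actual mechanism may differ):

    # Sketch: expand ${VAR} placeholders from the environment, then parse.
    # load_env_config is illustrative, not the harness's API.
    import os
    import string
    from xml.etree import ElementTree

    def load_env_config(path):
        with open(path) as f:
            text = string.Template(f.read()).safe_substitute(os.environ)
        return ElementTree.fromstring(text)

    root = load_env_config("desktop_env_config.xml")
    irs_path = root.findtext("irs_path/value")  # resolved STRESS_IRS_PATH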
diff --git a/tests/stress_tests/.automation/memcheck_tests/precommit_configs/desktop_references_config.xml b/tests/stress_tests/.automation/memcheck_tests/precommit_configs/desktop_references_config.xml
new file mode 100644 (file)
index 0000000..35b701b
--- /dev/null
@@ -0,0 +1,21 @@
+<?xml version="1.0"?>
+<attributes>
+    <models>
+<model path="public/mobilenet-ssd/FP32/mobilenet-ssd.xml" test="create_exenetwork" device="CPU" vmsize="757218" vmpeak="901683" vmrss="73920" vmhwm="107866" />
+<model path="public/mobilenet-ssd/FP32/mobilenet-ssd.xml" test="create_exenetwork" device="GPU" vmsize="747815" vmpeak="860978" vmrss="401808" vmhwm="435358" />
+<model path="public/mobilenet-ssd/FP32/mobilenet-ssd.xml" test="infer_request_inference" device="CPU" vmsize="1001189" vmpeak="1001189" vmrss="116080" vmhwm="116080" />
+<model path="public/mobilenet-ssd/FP32/mobilenet-ssd.xml" test="infer_request_inference" device="GPU" vmsize="788752" vmpeak="860842" vmrss="435283" vmhwm="435283" />
+<model path="public/mtcnn-r/FP32/mtcnn-r.xml" test="create_exenetwork" device="CPU" vmsize="754806" vmpeak="803184" vmrss="15206" vmhwm="26532" />
+<model path="public/mtcnn-r/FP32/mtcnn-r.xml" test="create_exenetwork" device="GPU" vmsize="554650" vmpeak="644666" vmrss="207592" vmhwm="217720" />
+<model path="public/mtcnn-r/FP32/mtcnn-r.xml" test="infer_request_inference" device="CPU" vmsize="959257" vmpeak="959257" vmrss="26690" vmhwm="26690" />
+<model path="public/mtcnn-r/FP32/mtcnn-r.xml" test="infer_request_inference" device="GPU" vmsize="572576" vmpeak="644666" vmrss="215230" vmhwm="215230" />
+<model path="public/ssd300/FP32/ssd300.xml" test="create_exenetwork" device="CPU" vmsize="755224" vmpeak="1146142" vmrss="22246" vmhwm="370770" />
+<model path="public/ssd300/FP32/ssd300.xml" test="create_exenetwork" device="GPU" vmsize="747709" vmpeak="1031694" vmrss="401746" vmhwm="749962" />
+<model path="public/ssd300/FP32/ssd300.xml" test="infer_request_inference" device="CPU" vmsize="1343474" vmpeak="1415563" vmrss="314204" vmhwm="371131" />
+<model path="public/ssd300/FP32/ssd300.xml" test="infer_request_inference" device="GPU" vmsize="1088700" vmpeak="1160790" vmrss="739626" vmhwm="748008" />
+<model path="public/vgg16/FP32/vgg16.xml" test="create_exenetwork" device="CPU" vmsize="754050" vmpeak="2548532" vmrss="15593" vmhwm="1808765" />
+<model path="public/vgg16/FP32/vgg16.xml" test="create_exenetwork" device="GPU" vmsize="648912" vmpeak="3289101" vmrss="299327" vmhwm="3003457" />
+<model path="public/vgg16/FP32/vgg16.xml" test="infer_request_inference" device="CPU" vmsize="2257006" vmpeak="2548532" vmrss="1243448" vmhwm="1809143" />
+<model path="public/vgg16/FP32/vgg16.xml" test="infer_request_inference" device="GPU" vmsize="2413290" vmpeak="3289101" vmrss="2059780" vmhwm="3006845" />
+    </models>
+</attributes>
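Each entry in this precommit references config is a per-model, per-device memory baseline for one test step. A measured run would typically be checked against the matching reference with some headroom; the comparison below is a sketch only (within_reference and the 30% margin are illustrative, not the harness's actual threshold):

    # Sketch: pass if the measured metric stays within the reference plus
    # a margin. within_reference and the 30% margin are illustrative.
    def within_reference(measured_kb, reference_kb, margin=0.3):
        return measured_kb <= reference_kb * (1.0 + margin)

    # e.g. mobilenet-ssd infer_request_inference on CPU, vmrss reference 116080
    assert within_reference(measured_kb=120000, reference_kb=116080)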
diff --git a/tests/stress_tests/.automation/memcheck_tests/precommit_configs/desktop_test_config.xml b/tests/stress_tests/.automation/memcheck_tests/precommit_configs/desktop_test_config.xml
new file mode 100644 (file)
index 0000000..9944819
--- /dev/null
@@ -0,0 +1,13 @@
+<?xml version="1.0"?>
+<attributes>
+    <devices>
+        <value>CPU</value>
+        <value>GPU</value>
+    </devices>
+    <models>
+        <value>public/vgg16/FP32/vgg16.xml</value>
+        <value>public/mtcnn-r/FP32/mtcnn-r.xml</value>
+        <value>public/mobilenet-ssd/FP32/mobilenet-ssd.xml</value>
+        <value>public/ssd300/FP32/ssd300.xml</value>
+    </models>
+</attributes>
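desktop_test_config.xml simply lists devices and model IRs; the precommit run presumably covers their cross-product. A sketch of deriving that matrix (the local file name and parsing below are assumptions about how the config is consumed):

    # Sketch: derive the (device, model) test matrix from the config above.
    from itertools import product
    from xml.etree import ElementTree

    root = ElementTree.parse("desktop_test_config.xml").getroot()
    devices = [v.text for v in root.iterfind("devices/value")]
    models = [v.text for v in root.iterfind("models/value")]
    for device, model in product(devices, models):
        print(device, model)  # 2 devices x 4 models = 8 combinations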
index 82a6c6c..32ef748 100644 (file)
 <?xml version="1.0"?>
 <attributes>
     <models>
-        <model path="caffe/FP32/alexnet/alexnet.xml" test="create_exenetwork" device="CPU" vmsize="753847" vmpeak="1528832" vmrss="14005" vmhwm="814655" />
-        <model path="caffe/FP32/alexnet/alexnet.xml" test="create_exenetwork" device="GPU" vmsize="580025" vmpeak="1743759" vmrss="234704" vmhwm="1462062" />
-        <model path="caffe/FP32/alexnet/alexnet.xml" test="infer_request_inference" device="CPU" vmsize="1339971" vmpeak="1528828" vmrss="555262" vmhwm="814805" />
-        <model path="caffe/FP32/alexnet/alexnet.xml" test="infer_request_inference" device="GPU" vmsize="1389159" vmpeak="1741154" vmrss="1036169" vmhwm="1460052" />
-        <model path="caffe/FP32/caffenet/caffenet.xml" test="create_exenetwork" device="CPU" vmsize="753843" vmpeak="1545451" vmrss="14234" vmhwm="821334" />
-        <model path="caffe/FP32/caffenet/caffenet.xml" test="create_exenetwork" device="GPU" vmsize="602206" vmpeak="1511325" vmrss="257501" vmhwm="1230284" />
-        <model path="caffe/FP32/caffenet/caffenet.xml" test="infer_request_inference" device="CPU" vmsize="1368206" vmpeak="1545456" vmrss="576774" vmhwm="821739" />
-        <model path="caffe/FP32/caffenet/caffenet.xml" test="infer_request_inference" device="GPU" vmsize="1423096" vmpeak="1511373" vmrss="1074752" vmhwm="1230732" />
-        <model path="caffe/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="CPU" vmsize="772626" vmpeak="985754" vmrss="95260" vmhwm="151496" />
-        <model path="caffe/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="GPU" vmsize="1044604" vmpeak="1154709" vmrss="699168" vmhwm="811104" />
-        <model path="caffe/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="CPU" vmsize="985525" vmpeak="1057614" vmrss="159306" vmhwm="159306" />
-        <model path="caffe/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="GPU" vmsize="1163289" vmpeak="1235379" vmrss="812961" vmhwm="812961" />
-        <model path="caffe/FP32/densenet_161/densenet_161.xml" test="create_exenetwork" device="CPU" vmsize="762770" vmpeak="1212248" vmrss="93570" vmhwm="426817" />
-        <model path="caffe/FP32/densenet_161/densenet_161.xml" test="create_exenetwork" device="GPU" vmsize="1127847" vmpeak="1586310" vmrss="782029" vmhwm="1304679" />
-        <model path="caffe/FP32/densenet_161/densenet_161.xml" test="infer_request_inference" device="CPU" vmsize="1351816" vmpeak="1423906" vmrss="353738" vmhwm="427644" />
-        <model path="caffe/FP32/densenet_161/densenet_161.xml" test="infer_request_inference" device="GPU" vmsize="1660304" vmpeak="1660304" vmrss="1309215" vmhwm="1309215" />
-        <model path="caffe/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="CPU" vmsize="791863" vmpeak="998329" vmrss="123059" vmhwm="240160" />
-        <model path="caffe/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="GPU" vmsize="1309598" vmpeak="1428944" vmrss="964066" vmhwm="1086751" />
-        <model path="caffe/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="CPU" vmsize="1060303" vmpeak="1132392" vmrss="238924" vmhwm="240416" />
-        <model path="caffe/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="GPU" vmsize="1435214" vmpeak="1507303" vmrss="1084969" vmhwm="1084969" />
-        <model path="caffe/FP32/densenet_201/densenet_201.xml" test="create_exenetwork" device="CPU" vmsize="864639" vmpeak="1153900" vmrss="147906" vmhwm="322590" />
-        <model path="caffe/FP32/densenet_201/densenet_201.xml" test="create_exenetwork" device="GPU" vmsize="1541161" vmpeak="1686282" vmrss="1195972" vmhwm="1337595" />
-        <model path="caffe/FP32/densenet_201/densenet_201.xml" test="infer_request_inference" device="CPU" vmsize="1181479" vmpeak="1253568" vmrss="315581" vmhwm="322700" />
-        <model path="caffe/FP32/densenet_201/densenet_201.xml" test="infer_request_inference" device="GPU" vmsize="1706760" vmpeak="1778849" vmrss="1356533" vmhwm="1356533" />
-        <model path="caffe/FP32/dilation/dilation.xml" test="create_exenetwork" device="CPU" vmsize="754428" vmpeak="3004311" vmrss="17613" vmhwm="1856210" />
-        <model path="caffe/FP32/dilation/dilation.xml" test="create_exenetwork" device="GPU" vmsize="710569" vmpeak="3363879" vmrss="365380" vmhwm="3081751" />
-        <model path="caffe/FP32/dilation/dilation.xml" test="infer_request_inference" device="CPU" vmsize="2487130" vmpeak="3004311" vmrss="1687936" vmhwm="1856448" />
-        <model path="caffe/FP32/dilation/dilation.xml" test="infer_request_inference" device="GPU" vmsize="2951748" vmpeak="3363804" vmrss="2597940" vmhwm="3080968" />
-        <model path="caffe/FP32/dpn_92/dpn_92.xml" test="create_exenetwork" device="CPU" vmsize="767157" vmpeak="1369376" vmrss="63338" vmhwm="540166" />
-        <model path="caffe/FP32/dpn_92/dpn_92.xml" test="create_exenetwork" device="GPU" vmsize="1155101" vmpeak="1701180" vmrss="809938" vmhwm="1420152" />
-        <model path="caffe/FP32/dpn_92/dpn_92.xml" test="infer_request_inference" device="CPU" vmsize="1299262" vmpeak="1373882" vmrss="431758" vmhwm="540214" />
-        <model path="caffe/FP32/dpn_92/dpn_92.xml" test="infer_request_inference" device="GPU" vmsize="1647738" vmpeak="1719828" vmrss="1296350" vmhwm="1419092" />
-        <model path="caffe/FP32/fcn_alexnet/fcn_alexnet.xml" test="create_exenetwork" device="CPU" vmsize="753711" vmpeak="1642832" vmrss="14014" vmhwm="789109" />
-        <model path="caffe/FP32/fcn_alexnet/fcn_alexnet.xml" test="create_exenetwork" device="GPU" vmsize="595430" vmpeak="1690484" vmrss="250496" vmhwm="1409205" />
-        <model path="caffe/FP32/fcn_alexnet/fcn_alexnet.xml" test="infer_request_inference" device="CPU" vmsize="1494464" vmpeak="1642832" vmrss="679214" vmhwm="789412" />
-        <model path="caffe/FP32/fcn_alexnet/fcn_alexnet.xml" test="infer_request_inference" device="GPU" vmsize="1450746" vmpeak="1693172" vmrss="1097681" vmhwm="1412254" />
-        <model path="caffe/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="CPU" vmsize="919740" vmpeak="1521955" vmrss="234520" vmhwm="792022" />
-        <model path="caffe/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="GPU" vmsize="1666363" vmpeak="2175012" vmrss="1321245" vmhwm="1893936" />
-        <model path="caffe/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="CPU" vmsize="1436982" vmpeak="1521955" vmrss="643614" vmhwm="793218" />
-        <model path="caffe/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="GPU" vmsize="2138818" vmpeak="2210907" vmrss="1786162" vmhwm="1893760" />
-        <model path="caffe/FP32/inception_v1/inception_v1.xml" test="create_exenetwork" device="CPU" vmsize="757262" vmpeak="978832" vmrss="81408" vmhwm="124238" />
-        <model path="caffe/FP32/inception_v1/inception_v1.xml" test="create_exenetwork" device="GPU" vmsize="810590" vmpeak="929139" vmrss="464868" vmhwm="503813" />
-        <model path="caffe/FP32/inception_v1/inception_v1.xml" test="infer_request_inference" device="CPU" vmsize="928637" vmpeak="1000727" vmrss="130719" vmhwm="130719" />
-        <model path="caffe/FP32/inception_v1/inception_v1.xml" test="infer_request_inference" device="GPU" vmsize="859478" vmpeak="931568" vmrss="507540" vmhwm="507540" />
-        <model path="caffe/FP32/inception_v2/inception_v2.xml" test="create_exenetwork" device="CPU" vmsize="766726" vmpeak="925245" vmrss="33382" vmhwm="180268" />
-        <model path="caffe/FP32/inception_v2/inception_v2.xml" test="create_exenetwork" device="GPU" vmsize="775117" vmpeak="913347" vmrss="430157" vmhwm="605598" />
-        <model path="caffe/FP32/inception_v2/inception_v2.xml" test="infer_request_inference" device="CPU" vmsize="927163" vmpeak="999253" vmrss="141869" vmhwm="181156" />
-        <model path="caffe/FP32/inception_v2/inception_v2.xml" test="infer_request_inference" device="GPU" vmsize="924752" vmpeak="996842" vmrss="571590" vmhwm="602839" />
-        <model path="caffe/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="CPU" vmsize="767003" vmpeak="1090526" vmrss="34900" vmhwm="348172" />
-        <model path="caffe/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="GPU" vmsize="948046" vmpeak="1182082" vmrss="602624" vmhwm="900169" />
-        <model path="caffe/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="CPU" vmsize="1051481" vmpeak="1123570" vmrss="257219" vmhwm="348541" />
-        <model path="caffe/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="GPU" vmsize="1187106" vmpeak="1259196" vmrss="834438" vmhwm="902800" />
-        <model path="caffe/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="CPU" vmsize="764315" vmpeak="1326938" vmrss="63725" vmhwm="603213" />
-        <model path="caffe/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="GPU" vmsize="1183410" vmpeak="1680448" vmrss="837953" vmhwm="1398870" />
-        <model path="caffe/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="CPU" vmsize="1227798" vmpeak="1326908" vmrss="438160" vmhwm="602434" />
-        <model path="caffe/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="GPU" vmsize="1633997" vmpeak="1706086" vmrss="1281693" vmhwm="1395878" />
-        <model path="caffe/FP32/lenet/lenet.xml" test="create_exenetwork" device="CPU" vmsize="753605" vmpeak="876330" vmrss="15571" vmhwm="29106" />
-        <model path="caffe/FP32/lenet/lenet.xml" test="create_exenetwork" device="GPU" vmsize="566693" vmpeak="658486" vmrss="220783" vmhwm="232452" />
-        <model path="caffe/FP32/lenet/lenet.xml" test="infer_request_inference" device="CPU" vmsize="808486" vmpeak="880576" vmrss="29084" vmhwm="29084" />
-        <model path="caffe/FP32/lenet/lenet.xml" test="infer_request_inference" device="GPU" vmsize="586401" vmpeak="658490" vmrss="232764" vmhwm="232764" />
-        <model path="caffe/FP32/mobilenet/mobilenet.xml" test="create_exenetwork" device="CPU" vmsize="754864" vmpeak="893692" vmrss="54617" vmhwm="81584" />
-        <model path="caffe/FP32/mobilenet/mobilenet.xml" test="create_exenetwork" device="GPU" vmsize="642527" vmpeak="750424" vmrss="296678" vmhwm="362300" />
-        <model path="caffe/FP32/mobilenet/mobilenet.xml" test="infer_request_inference" device="CPU" vmsize="831336" vmpeak="903425" vmrss="85654" vmhwm="85654" />
-        <model path="caffe/FP32/mobilenet/mobilenet.xml" test="infer_request_inference" device="GPU" vmsize="716047" vmpeak="788136" vmrss="364434" vmhwm="364434" />
-        <model path="caffe/FP32/mobilenet_v2/mobilenet_v2.xml" test="create_exenetwork" device="CPU" vmsize="756813" vmpeak="819698" vmrss="54410" vmhwm="78289" />
-        <model path="caffe/FP32/mobilenet_v2/mobilenet_v2.xml" test="create_exenetwork" device="GPU" vmsize="758705" vmpeak="862466" vmrss="412966" vmhwm="437131" />
-        <model path="caffe/FP32/mobilenet_v2/mobilenet_v2.xml" test="infer_request_inference" device="CPU" vmsize="840967" vmpeak="840967" vmrss="82860" vmhwm="82860" />
-        <model path="caffe/FP32/mobilenet_v2/mobilenet_v2.xml" test="infer_request_inference" device="GPU" vmsize="787182" vmpeak="859271" vmrss="436801" vmhwm="436801" />
-        <model path="caffe/FP32/mtcnn_o/mtcnn_o.xml" test="create_exenetwork" device="CPU" vmsize="753715" vmpeak="876299" vmrss="17512" vmhwm="28402" />
-        <model path="caffe/FP32/mtcnn_o/mtcnn_o.xml" test="create_exenetwork" device="GPU" vmsize="583092" vmpeak="674744" vmrss="238220" vmhwm="249722" />
-        <model path="caffe/FP32/mtcnn_o/mtcnn_o.xml" test="infer_request_inference" device="CPU" vmsize="808209" vmpeak="808209" vmrss="27865" vmhwm="27865" />
-        <model path="caffe/FP32/mtcnn_o/mtcnn_o.xml" test="infer_request_inference" device="GPU" vmsize="600714" vmpeak="672804" vmrss="246967" vmhwm="246967" />
-        <model path="caffe/FP32/mtcnn_p/mtcnn_p.xml" test="create_exenetwork" device="CPU" vmsize="763677" vmpeak="874535" vmrss="13318" vmhwm="35327" />
-        <model path="caffe/FP32/mtcnn_p/mtcnn_p.xml" test="create_exenetwork" device="GPU" vmsize="570521" vmpeak="662182" vmrss="224774" vmhwm="351410" />
-        <model path="caffe/FP32/mtcnn_p/mtcnn_p.xml" test="infer_request_inference" device="CPU" vmsize="901260" vmpeak="973350" vmrss="108037" vmhwm="108037" />
-        <model path="caffe/FP32/mtcnn_p/mtcnn_p.xml" test="infer_request_inference" device="GPU" vmsize="685115" vmpeak="757204" vmrss="331421" vmhwm="351529" />
-        <model path="caffe/FP32/mtcnn_r/mtcnn_r.xml" test="create_exenetwork" device="CPU" vmsize="753711" vmpeak="803228" vmrss="14806" vmhwm="25911" />
-        <model path="caffe/FP32/mtcnn_r/mtcnn_r.xml" test="create_exenetwork" device="GPU" vmsize="577280" vmpeak="667673" vmrss="232029" vmhwm="242580" />
-        <model path="caffe/FP32/mtcnn_r/mtcnn_r.xml" test="infer_request_inference" device="CPU" vmsize="806102" vmpeak="806102" vmrss="25352" vmhwm="25352" />
-        <model path="caffe/FP32/mtcnn_r/mtcnn_r.xml" test="infer_request_inference" device="GPU" vmsize="593340" vmpeak="665429" vmrss="240200" vmhwm="240200" />
-        <model path="caffe/FP32/openpose_face/openpose_face.xml" test="create_exenetwork" device="CPU" vmsize="764711" vmpeak="1279238" vmrss="23544" vmhwm="528431" />
-        <model path="caffe/FP32/openpose_face/openpose_face.xml" test="create_exenetwork" device="GPU" vmsize="890428" vmpeak="1316884" vmrss="544882" vmhwm="1035192" />
-        <model path="caffe/FP32/openpose_face/openpose_face.xml" test="infer_request_inference" device="CPU" vmsize="1187529" vmpeak="1279207" vmrss="398512" vmhwm="528730" />
-        <model path="caffe/FP32/openpose_face/openpose_face.xml" test="infer_request_inference" device="GPU" vmsize="1288707" vmpeak="1360796" vmrss="935778" vmhwm="1038888" />
-        <model path="caffe/FP32/openpose_hand/openpose_hand.xml" test="create_exenetwork" device="CPU" vmsize="755634" vmpeak="1259024" vmrss="23342" vmhwm="507980" />
-        <model path="caffe/FP32/openpose_hand/openpose_hand.xml" test="create_exenetwork" device="GPU" vmsize="845886" vmpeak="1297898" vmrss="500957" vmhwm="1016822" />
-        <model path="caffe/FP32/openpose_hand/openpose_hand.xml" test="infer_request_inference" device="CPU" vmsize="1327246" vmpeak="1327246" vmrss="384634" vmhwm="507522" />
-        <model path="caffe/FP32/openpose_hand/openpose_hand.xml" test="infer_request_inference" device="GPU" vmsize="1277117" vmpeak="1300490" vmrss="923674" vmhwm="1018956" />
-        <model path="caffe/FP32/openpose_pose_coco/openpose_pose_coco.xml" test="create_exenetwork" device="CPU" vmsize="757556" vmpeak="1471373" vmrss="32780" vmhwm="716861" />
-        <model path="caffe/FP32/openpose_pose_coco/openpose_pose_coco.xml" test="create_exenetwork" device="GPU" vmsize="1153103" vmpeak="1684306" vmrss="807426" vmhwm="1402513" />
-        <model path="caffe/FP32/openpose_pose_coco/openpose_pose_coco.xml" test="infer_request_inference" device="CPU" vmsize="1397686" vmpeak="1471373" vmrss="528620" vmhwm="717728" />
-        <model path="caffe/FP32/openpose_pose_coco/openpose_pose_coco.xml" test="infer_request_inference" device="GPU" vmsize="1597785" vmpeak="1680465" vmrss="1244672" vmhwm="1399217" />
-        <model path="caffe/FP32/places205_alexnet/places205_alexnet.xml" test="create_exenetwork" device="CPU" vmsize="753711" vmpeak="1485853" vmrss="14330" vmhwm="773766" />
-        <model path="caffe/FP32/places205_alexnet/places205_alexnet.xml" test="create_exenetwork" device="GPU" vmsize="604573" vmpeak="1684861" vmrss="259556" vmhwm="1403600" />
-        <model path="caffe/FP32/places205_alexnet/places205_alexnet.xml" test="infer_request_inference" device="CPU" vmsize="1311107" vmpeak="1485862" vmrss="528448" vmhwm="773656" />
-        <model path="caffe/FP32/places205_alexnet/places205_alexnet.xml" test="infer_request_inference" device="GPU" vmsize="1346840" vmpeak="1684896" vmrss="993942" vmhwm="1403886" />
-        <model path="caffe/FP32/places205_googlenet/places205_googlenet.xml" test="create_exenetwork" device="CPU" vmsize="757187" vmpeak="831362" vmrss="78795" vmhwm="113814" />
-        <model path="caffe/FP32/places205_googlenet/places205_googlenet.xml" test="create_exenetwork" device="GPU" vmsize="805270" vmpeak="920321" vmrss="460319" vmhwm="495638" />
-        <model path="caffe/FP32/places205_googlenet/places205_googlenet.xml" test="infer_request_inference" device="CPU" vmsize="852781" vmpeak="852781" vmrss="119033" vmhwm="119033" />
-        <model path="caffe/FP32/places205_googlenet/places205_googlenet.xml" test="infer_request_inference" device="GPU" vmsize="847052" vmpeak="919142" vmrss="494916" vmhwm="494916" />
-        <model path="caffe/FP32/resnet_18/resnet_18.xml" test="create_exenetwork" device="CPU" vmsize="754248" vmpeak="925443" vmrss="16878" vmhwm="177663" />
-        <model path="caffe/FP32/resnet_18/resnet_18.xml" test="create_exenetwork" device="GPU" vmsize="657659" vmpeak="799510" vmrss="312070" vmhwm="466153" />
-        <model path="caffe/FP32/resnet_18/resnet_18.xml" test="infer_request_inference" device="CPU" vmsize="920163" vmpeak="920163" vmrss="131859" vmhwm="176726" />
-        <model path="caffe/FP32/resnet_18/resnet_18.xml" test="infer_request_inference" device="GPU" vmsize="775350" vmpeak="847440" vmrss="422919" vmhwm="467610" />
-        <model path="caffe/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="CPU" vmsize="760584" vmpeak="1338202" vmrss="43243" vmhwm="616928" />
-        <model path="caffe/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="GPU" vmsize="1104862" vmpeak="1557006" vmrss="759030" vmhwm="1275071" />
-        <model path="caffe/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="CPU" vmsize="1224172" vmpeak="1338172" vmrss="434944" vmhwm="616849" />
-        <model path="caffe/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="GPU" vmsize="1452145" vmpeak="1558106" vmrss="1099428" vmhwm="1276787" />
-        <model path="caffe/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="CPU" vmsize="764878" vmpeak="1551919" vmrss="58638" vmhwm="828383" />
-        <model path="caffe/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="GPU" vmsize="1315120" vmpeak="1977250" vmrss="968858" vmhwm="1694796" />
-        <model path="caffe/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="CPU" vmsize="1526166" vmpeak="1598256" vmrss="582401" vmhwm="829598" />
-        <model path="caffe/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="GPU" vmsize="1804748" vmpeak="1975855" vmrss="1451397" vmhwm="1693419" />
-        <model path="caffe/FP32/resnet_v1_269/resnet_v1_269.xml" test="create_exenetwork" device="CPU" vmsize="927665" vmpeak="2236845" vmrss="224034" vmhwm="1396458" />
-        <model path="caffe/FP32/resnet_v1_269/resnet_v1_269.xml" test="create_exenetwork" device="GPU" vmsize="1988676" vmpeak="3156291" vmrss="1643919" vmhwm="2874946" />
-        <model path="caffe/FP32/resnet_v1_269/resnet_v1_269.xml" test="infer_request_inference" device="CPU" vmsize="2016999" vmpeak="2236955" vmrss="1117754" vmhwm="1396128" />
-        <model path="caffe/FP32/resnet_v1_269/resnet_v1_269.xml" test="infer_request_inference" device="GPU" vmsize="2845849" vmpeak="3165219" vmrss="2493550" vmhwm="2883091" />
-        <model path="caffe/FP32/resnet_v1_50/resnet_v1_50.xml" test="create_exenetwork" device="CPU" vmsize="766101" vmpeak="1079971" vmrss="27359" vmhwm="362142" />
-        <model path="caffe/FP32/resnet_v1_50/resnet_v1_50.xml" test="create_exenetwork" device="GPU" vmsize="834856" vmpeak="1080094" vmrss="490089" vmhwm="799312" />
-        <model path="caffe/FP32/resnet_v1_50/resnet_v1_50.xml" test="infer_request_inference" device="CPU" vmsize="1046381" vmpeak="1118471" vmrss="260528" vmhwm="362203" />
-        <model path="caffe/FP32/resnet_v1_50/resnet_v1_50.xml" test="infer_request_inference" device="GPU" vmsize="1060109" vmpeak="1132199" vmrss="707876" vmhwm="804108" />
-        <model path="caffe/FP32/se_bn_inception/se_bn_inception.xml" test="create_exenetwork" device="CPU" vmsize="758516" vmpeak="930397" vmrss="40572" vmhwm="194062" />
-        <model path="caffe/FP32/se_bn_inception/se_bn_inception.xml" test="create_exenetwork" device="GPU" vmsize="873061" vmpeak="1013430" vmrss="528167" vmhwm="692564" />
-        <model path="caffe/FP32/se_bn_inception/se_bn_inception.xml" test="infer_request_inference" device="CPU" vmsize="957620" vmpeak="1029710" vmrss="152754" vmhwm="194656" />
-        <model path="caffe/FP32/se_bn_inception/se_bn_inception.xml" test="infer_request_inference" device="GPU" vmsize="1014305" vmpeak="1086395" vmrss="662525" vmhwm="694821" />
-        <model path="caffe/FP32/se_resnext_50/se_resnext_50.xml" test="create_exenetwork" device="CPU" vmsize="759382" vmpeak="1174707" vmrss="39265" vmhwm="401856" />
-        <model path="caffe/FP32/se_resnext_50/se_resnext_50.xml" test="create_exenetwork" device="GPU" vmsize="983083" vmpeak="1257471" vmrss="637335" vmhwm="975444" />
-        <model path="caffe/FP32/se_resnext_50/se_resnext_50.xml" test="infer_request_inference" device="CPU" vmsize="1140730" vmpeak="1174672" vmrss="315977" vmhwm="401508" />
-        <model path="caffe/FP32/se_resnext_50/se_resnext_50.xml" test="infer_request_inference" device="GPU" vmsize="1251214" vmpeak="1323304" vmrss="899034" vmhwm="976474" />
-        <model path="caffe/FP32/squeezenet_v1.0/squeezenet_v1.0.xml" test="create_exenetwork" device="CPU" vmsize="754890" vmpeak="815095" vmrss="28833" vmhwm="43881" />
-        <model path="caffe/FP32/squeezenet_v1.0/squeezenet_v1.0.xml" test="create_exenetwork" device="GPU" vmsize="651974" vmpeak="746719" vmrss="306455" vmhwm="321345" />
-        <model path="caffe/FP32/squeezenet_v1.0/squeezenet_v1.0.xml" test="infer_request_inference" device="CPU" vmsize="824942" vmpeak="897032" vmrss="48567" vmhwm="48567" />
-        <model path="caffe/FP32/squeezenet_v1.0/squeezenet_v1.0.xml" test="infer_request_inference" device="GPU" vmsize="676328" vmpeak="748418" vmrss="324860" vmhwm="324860" />
-        <model path="caffe/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="CPU" vmsize="758212" vmpeak="813208" vmrss="29691" vmhwm="44220" />
-        <model path="caffe/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="GPU" vmsize="611789" vmpeak="706534" vmrss="266244" vmhwm="324007" />
-        <model path="caffe/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="CPU" vmsize="818549" vmpeak="890639" vmrss="47141" vmhwm="47141" />
-        <model path="caffe/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="GPU" vmsize="677705" vmpeak="749795" vmrss="326163" vmhwm="326163" />
-        <model path="caffe/FP32/ssd_googlenet/ssd_googlenet.xml" test="create_exenetwork" device="CPU" vmsize="757534" vmpeak="911495" vmrss="36445" vmhwm="182050" />
-        <model path="caffe/FP32/ssd_googlenet/ssd_googlenet.xml" test="create_exenetwork" device="GPU" vmsize="835683" vmpeak="973280" vmrss="490613" vmhwm="658640" />
-        <model path="caffe/FP32/ssd_googlenet/ssd_googlenet.xml" test="infer_request_inference" device="CPU" vmsize="941076" vmpeak="1013166" vmrss="148222" vmhwm="183185" />
-        <model path="caffe/FP32/ssd_googlenet/ssd_googlenet.xml" test="infer_request_inference" device="GPU" vmsize="989608" vmpeak="1061698" vmrss="637709" vmhwm="661746" />
-        <model path="caffe/FP32/ssd_mobilenet/ssd_mobilenet.xml" test="create_exenetwork" device="CPU" vmsize="757174" vmpeak="901648" vmrss="73409" vmhwm="106537" />
-        <model path="caffe/FP32/ssd_mobilenet/ssd_mobilenet.xml" test="create_exenetwork" device="GPU" vmsize="801644" vmpeak="915186" vmrss="456517" vmhwm="490520" />
-        <model path="caffe/FP32/ssd_mobilenet/ssd_mobilenet.xml" test="infer_request_inference" device="CPU" vmsize="847932" vmpeak="847932" vmrss="116410" vmhwm="116410" />
-        <model path="caffe/FP32/ssd_mobilenet/ssd_mobilenet.xml" test="infer_request_inference" device="GPU" vmsize="843022" vmpeak="915112" vmrss="490864" vmhwm="490864" />
-        <model path="caffe/FP32/ssd_squeezenet/ssd_squeezenet.xml" test="create_exenetwork" device="CPU" vmsize="765393" vmpeak="900402" vmrss="71544" vmhwm="105032" />
-        <model path="caffe/FP32/ssd_squeezenet/ssd_squeezenet.xml" test="create_exenetwork" device="GPU" vmsize="759668" vmpeak="872762" vmrss="414493" vmhwm="497701" />
-        <model path="caffe/FP32/ssd_squeezenet/ssd_squeezenet.xml" test="infer_request_inference" device="CPU" vmsize="848438" vmpeak="900754" vmrss="113590" vmhwm="113590" />
-        <model path="caffe/FP32/ssd_squeezenet/ssd_squeezenet.xml" test="infer_request_inference" device="GPU" vmsize="847620" vmpeak="919710" vmrss="495730" vmhwm="495730" />
-        <model path="caffe/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="create_exenetwork" device="CPU" vmsize="755374" vmpeak="1146156" vmrss="22026" vmhwm="370176" />
-        <model path="caffe/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="create_exenetwork" device="GPU" vmsize="768451" vmpeak="1074730" vmrss="423662" vmhwm="794266" />
-        <model path="caffe/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="infer_request_inference" device="CPU" vmsize="1113609" vmpeak="1185698" vmrss="313513" vmhwm="370035" />
-        <model path="caffe/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="infer_request_inference" device="GPU" vmsize="1134227" vmpeak="1206317" vmrss="783006" vmhwm="795000" />
-        <model path="caffe/FP32/ssd_vgg16_512/ssd_vgg16_512.xml" test="create_exenetwork" device="CPU" vmsize="755796" vmpeak="1267802" vmrss="23746" vmhwm="383983" />
-        <model path="caffe/FP32/ssd_vgg16_512/ssd_vgg16_512.xml" test="create_exenetwork" device="GPU" vmsize="794565" vmpeak="1272634" vmrss="449394" vmhwm="991632" />
-        <model path="caffe/FP32/ssd_vgg16_512/ssd_vgg16_512.xml" test="infer_request_inference" device="CPU" vmsize="1234050" vmpeak="1306140" vmrss="421194" vmhwm="421194" />
-        <model path="caffe/FP32/ssd_vgg16_512/ssd_vgg16_512.xml" test="infer_request_inference" device="GPU" vmsize="1348960" vmpeak="1421050" vmrss="999050" vmhwm="999050" />
-        <model path="caffe/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="CPU" vmsize="754006" vmpeak="2548497" vmrss="15598" vmhwm="1808624" />
-        <model path="caffe/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="GPU" vmsize="668602" vmpeak="3326708" vmrss="323791" vmhwm="3045328" />
-        <model path="caffe/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="CPU" vmsize="2027181" vmpeak="2548497" vmrss="1242560" vmhwm="1808730" />
-        <model path="caffe/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="GPU" vmsize="2441076" vmpeak="3326708" vmrss="2088055" vmhwm="3045050" />
-        <model path="caffe/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="CPU" vmsize="754212" vmpeak="2618030" vmrss="15510" vmhwm="1877383" />
-        <model path="caffe/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="GPU" vmsize="739222" vmpeak="3397112" vmrss="393866" vmhwm="3115085" />
-        <model path="caffe/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="CPU" vmsize="2073794" vmpeak="2618030" vmrss="1289741" vmhwm="1878289" />
-        <model path="caffe/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="GPU" vmsize="2518340" vmpeak="3397081" vmrss="2165196" vmhwm="3114975" />
-        <model path="caffe/FP32/vnect/vnect.xml" test="create_exenetwork" device="CPU" vmsize="764940" vmpeak="947157" vmrss="27988" vmhwm="223726" />
-        <model path="caffe/FP32/vnect/vnect.xml" test="create_exenetwork" device="GPU" vmsize="789223" vmpeak="941683" vmrss="443788" vmhwm="641476" />
-        <model path="caffe/FP32/vnect/vnect.xml" test="infer_request_inference" device="CPU" vmsize="962187" vmpeak="1034277" vmrss="177848" vmhwm="224180" />
-        <model path="caffe/FP32/vnect/vnect.xml" test="infer_request_inference" device="GPU" vmsize="969069" vmpeak="1041158" vmrss="616990" vmhwm="641977" />
-        <model path="caffe/FP32/wrn_50_2/wrn_50_2.xml" test="create_exenetwork" device="CPU" vmsize="755651" vmpeak="1654985" vmrss="24921" vmhwm="920400" />
-        <model path="caffe/FP32/wrn_50_2/wrn_50_2.xml" test="create_exenetwork" device="GPU" vmsize="936892" vmpeak="1838610" vmrss="590994" vmhwm="1556526" />
-        <model path="caffe/FP32/wrn_50_2/wrn_50_2.xml" test="infer_request_inference" device="CPU" vmsize="1433352" vmpeak="1654989" vmrss="639456" vmhwm="918693" />
-        <model path="caffe/FP32/wrn_50_2/wrn_50_2.xml" test="infer_request_inference" device="GPU" vmsize="1613176" vmpeak="1824922" vmrss="1259940" vmhwm="1543031" />
-        <model path="caffe/FP32/yolo_v1_full/yolo_v1_full.xml" test="create_exenetwork" device="CPU" vmsize="754692" vmpeak="4259393" vmrss="18013" vmhwm="3532412" />
-        <model path="caffe/FP32/yolo_v1_full/yolo_v1_full.xml" test="create_exenetwork" device="GPU" vmsize="719105" vmpeak="5906194" vmrss="373648" vmhwm="5623600" />
-        <model path="caffe/FP32/yolo_v1_full/yolo_v1_full.xml" test="infer_request_inference" device="CPU" vmsize="3167040" vmpeak="4259380" vmrss="2378362" vmhwm="3531237" />
-        <model path="caffe/FP32/yolo_v1_full/yolo_v1_full.xml" test="infer_request_inference" device="GPU" vmsize="4165801" vmpeak="5903801" vmrss="3812393" vmhwm="5621585" />
-        <model path="caffe/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="create_exenetwork" device="CPU" vmsize="753860" vmpeak="1101161" vmrss="14599" vmhwm="375399" />
-        <model path="caffe/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="create_exenetwork" device="GPU" vmsize="577640" vmpeak="1037480" vmrss="232443" vmhwm="755972" />
-        <model path="caffe/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="infer_request_inference" device="CPU" vmsize="1059828" vmpeak="1131917" vmrss="272879" vmhwm="374721" />
-        <model path="caffe/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="infer_request_inference" device="GPU" vmsize="957453" vmpeak="1037445" vmrss="605026" vmhwm="756606" />
-        <model path="caffe/FP32/yolo_v2/yolo_v2.xml" test="create_exenetwork" device="CPU" vmsize="754344" vmpeak="1422647" vmrss="16790" vmhwm="680072" />
-        <model path="caffe/FP32/yolo_v2/yolo_v2.xml" test="create_exenetwork" device="GPU" vmsize="678964" vmpeak="1435790" vmrss="334017" vmhwm="1154573" />
-        <model path="caffe/FP32/yolo_v2/yolo_v2.xml" test="infer_request_inference" device="CPU" vmsize="1279823" vmpeak="1422647" vmrss="490692" vmhwm="680526" />
-        <model path="caffe/FP32/yolo_v2/yolo_v2.xml" test="infer_request_inference" device="GPU" vmsize="1325156" vmpeak="1438571" vmrss="972140" vmhwm="1157138" />
-        <model path="caffe/FP32/yolo_v2_tiny/yolo_v2_tiny.xml" test="create_exenetwork" device="CPU" vmsize="753733" vmpeak="954430" vmrss="14278" vmhwm="229913" />
-        <model path="caffe/FP32/yolo_v2_tiny/yolo_v2_tiny.xml" test="create_exenetwork" device="GPU" vmsize="568880" vmpeak="814976" vmrss="223907" vmhwm="533808" />
-        <model path="caffe/FP32/yolo_v2_tiny/yolo_v2_tiny.xml" test="infer_request_inference" device="CPU" vmsize="1032882" vmpeak="1032882" vmrss="174631" vmhwm="230243" />
-        <model path="caffe/FP32/yolo_v2_tiny/yolo_v2_tiny.xml" test="infer_request_inference" device="GPU" vmsize="810031" vmpeak="816178" vmrss="456856" vmhwm="534503" />
-        <model path="caffe/FP32/yolo_v3/yolo_v3.xml" test="create_exenetwork" device="CPU" vmsize="756852" vmpeak="1587154" vmrss="31460" vmhwm="837570" />
-        <model path="caffe/FP32/yolo_v3/yolo_v3.xml" test="create_exenetwork" device="GPU" vmsize="1159840" vmpeak="1822444" vmrss="813969" vmhwm="1540343" />
-        <model path="caffe/FP32/yolo_v3/yolo_v3.xml" test="infer_request_inference" device="CPU" vmsize="1554462" vmpeak="1626552" vmrss="609677" vmhwm="836655" />
-        <model path="caffe/FP32/yolo_v3/yolo_v3.xml" test="infer_request_inference" device="GPU" vmsize="1735610" vmpeak="1821749" vmrss="1383285" vmhwm="1540598" />
-        <model path="mxnet/FP32/caffenet/caffenet.xml" test="create_exenetwork" device="CPU" vmsize="753856" vmpeak="1528538" vmrss="14414" vmhwm="815491" />
-        <model path="mxnet/FP32/caffenet/caffenet.xml" test="create_exenetwork" device="GPU" vmsize="580030" vmpeak="1741062" vmrss="235624" vmhwm="1460386" />
-        <model path="mxnet/FP32/caffenet/caffenet.xml" test="infer_request_inference" device="CPU" vmsize="1339681" vmpeak="1528538" vmrss="556146" vmhwm="815262" />
-        <model path="mxnet/FP32/caffenet/caffenet.xml" test="infer_request_inference" device="GPU" vmsize="1389097" vmpeak="1741093" vmrss="1036178" vmhwm="1460060" />
-        <model path="mxnet/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="CPU" vmsize="772622" vmpeak="985749" vmrss="95431" vmhwm="151087" />
-        <model path="mxnet/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="GPU" vmsize="1141962" vmpeak="1252068" vmrss="796734" vmhwm="827217" />
-        <model path="mxnet/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="CPU" vmsize="985239" vmpeak="1057328" vmrss="158532" vmhwm="158532" />
-        <model path="mxnet/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="GPU" vmsize="1171425" vmpeak="1243514" vmrss="818624" vmhwm="818624" />
-        <model path="mxnet/FP32/densenet_161/densenet_161.xml" test="create_exenetwork" device="CPU" vmsize="762731" vmpeak="1211720" vmrss="93486" vmhwm="426896" />
-        <model path="mxnet/FP32/densenet_161/densenet_161.xml" test="create_exenetwork" device="GPU" vmsize="1312801" vmpeak="1592839" vmrss="967252" vmhwm="1311569" />
-        <model path="mxnet/FP32/densenet_161/densenet_161.xml" test="infer_request_inference" device="CPU" vmsize="1198124" vmpeak="1270214" vmrss="353051" vmhwm="427319" />
-        <model path="mxnet/FP32/densenet_161/densenet_161.xml" test="infer_request_inference" device="GPU" vmsize="1657339" vmpeak="1729428" vmrss="1304820" vmhwm="1304820" />
-        <model path="mxnet/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="CPU" vmsize="796360" vmpeak="1002408" vmrss="123094" vmhwm="239945" />
-        <model path="mxnet/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="GPU" vmsize="1352916" vmpeak="1472262" vmrss="1007630" vmhwm="1084727" />
-        <model path="mxnet/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="CPU" vmsize="1059880" vmpeak="1059880" vmrss="239307" vmhwm="241753" />
-        <model path="mxnet/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="GPU" vmsize="1437656" vmpeak="1509745" vmrss="1084828" vmhwm="1084828" />
-        <model path="mxnet/FP32/densenet_201/densenet_201.xml" test="create_exenetwork" device="CPU" vmsize="864635" vmpeak="1154040" vmrss="148830" vmhwm="322528" />
-        <model path="mxnet/FP32/densenet_201/densenet_201.xml" test="create_exenetwork" device="GPU" vmsize="1505042" vmpeak="1650162" vmrss="1159906" vmhwm="1343711" />
-        <model path="mxnet/FP32/densenet_201/densenet_201.xml" test="infer_request_inference" device="CPU" vmsize="1181056" vmpeak="1253146" vmrss="315048" vmhwm="322282" />
-        <model path="mxnet/FP32/densenet_201/densenet_201.xml" test="infer_request_inference" device="GPU" vmsize="1719256" vmpeak="1791345" vmrss="1366767" vmhwm="1366767" />
-        <model path="mxnet/FP32/dpn_92/dpn_92.xml" test="create_exenetwork" device="CPU" vmsize="767976" vmpeak="1370195" vmrss="63456" vmhwm="539897" />
-        <model path="mxnet/FP32/dpn_92/dpn_92.xml" test="create_exenetwork" device="GPU" vmsize="1313452" vmpeak="1701664" vmrss="968145" vmhwm="1420434" />
-        <model path="mxnet/FP32/dpn_92/dpn_92.xml" test="infer_request_inference" device="CPU" vmsize="1295571" vmpeak="1370195" vmrss="430610" vmhwm="539536" />
-        <model path="mxnet/FP32/dpn_92/dpn_92.xml" test="infer_request_inference" device="GPU" vmsize="1651421" vmpeak="1723510" vmrss="1299738" vmhwm="1422326" />
-        <model path="mxnet/FP32/fcn8s_vgg16/fcn8s_vgg16.xml" test="create_exenetwork" device="CPU" vmsize="754212" vmpeak="3124338" vmrss="17362" vmhwm="1770388" />
-        <model path="mxnet/FP32/fcn8s_vgg16/fcn8s_vgg16.xml" test="create_exenetwork" device="GPU" vmsize="669583" vmpeak="3628222" vmrss="324363" vmhwm="3347071" />
-        <model path="mxnet/FP32/fcn8s_vgg16/fcn8s_vgg16.xml" test="infer_request_inference" device="CPU" vmsize="2705824" vmpeak="3124338" vmrss="1906933" vmhwm="1906933" />
-        <model path="mxnet/FP32/fcn8s_vgg16/fcn8s_vgg16.xml" test="infer_request_inference" device="GPU" vmsize="3710449" vmpeak="3782539" vmrss="3356861" vmhwm="3356861" />
-        <model path="mxnet/FP32/full_imagenet_network/full_imagenet_network.xml" test="create_exenetwork" device="CPU" vmsize="756870" vmpeak="1192276" vmrss="32300" vmhwm="470417" />
-        <model path="mxnet/FP32/full_imagenet_network/full_imagenet_network.xml" test="create_exenetwork" device="GPU" vmsize="772970" vmpeak="1363872" vmrss="428054" vmhwm="1079412" />
-        <model path="mxnet/FP32/full_imagenet_network/full_imagenet_network.xml" test="infer_request_inference" device="CPU" vmsize="1123746" vmpeak="1195836" vmrss="335288" vmhwm="470162" />
-        <model path="mxnet/FP32/full_imagenet_network/full_imagenet_network.xml" test="infer_request_inference" device="GPU" vmsize="1219618" vmpeak="1362376" vmrss="875415" vmhwm="1077560" />
-        <model path="mxnet/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="CPU" vmsize="848157" vmpeak="1522730" vmrss="178424" vmhwm="792470" />
-        <model path="mxnet/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="GPU" vmsize="1549574" vmpeak="2182501" vmrss="1203804" vmhwm="1900742" />
-        <model path="mxnet/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="CPU" vmsize="1437730" vmpeak="1522730" vmrss="644402" vmhwm="794024" />
-        <model path="mxnet/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="GPU" vmsize="2145426" vmpeak="2217516" vmrss="1793162" vmhwm="1899854" />
-        <model path="mxnet/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="CPU" vmsize="756584" vmpeak="925636" vmrss="32982" vmhwm="182529" />
-        <model path="mxnet/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="GPU" vmsize="769230" vmpeak="907847" vmrss="423874" vmhwm="604982" />
-        <model path="mxnet/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="CPU" vmsize="928659" vmpeak="928659" vmrss="142304" vmhwm="182353" />
-        <model path="mxnet/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="GPU" vmsize="926103" vmpeak="998192" vmrss="572985" vmhwm="603592" />
-        <model path="mxnet/FP32/inception_v3_no_batchnorm/inception_v3_no_batchnorm.xml" test="create_exenetwork" device="CPU" vmsize="757851" vmpeak="1078682" vmrss="34751" vmhwm="348154" />
-        <model path="mxnet/FP32/inception_v3_no_batchnorm/inception_v3_no_batchnorm.xml" test="create_exenetwork" device="GPU" vmsize="911473" vmpeak="1183102" vmrss="565549" vmhwm="900992" />
-        <model path="mxnet/FP32/inception_v3_no_batchnorm/inception_v3_no_batchnorm.xml" test="infer_request_inference" device="CPU" vmsize="1051652" vmpeak="1123742" vmrss="258231" vmhwm="349131" />
-        <model path="mxnet/FP32/inception_v3_no_batchnorm/inception_v3_no_batchnorm.xml" test="infer_request_inference" device="GPU" vmsize="1182570" vmpeak="1254660" vmrss="829659" vmhwm="899540" />
-        <model path="mxnet/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="CPU" vmsize="764319" vmpeak="1327506" vmrss="61375" vmhwm="601048" />
-        <model path="mxnet/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="GPU" vmsize="1206559" vmpeak="1676272" vmrss="860362" vmhwm="1393906" />
-        <model path="mxnet/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="CPU" vmsize="1228396" vmpeak="1327475" vmrss="441135" vmhwm="603394" />
-        <model path="mxnet/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="GPU" vmsize="1637486" vmpeak="1709576" vmrss="1285376" vmhwm="1398377" />
-        <model path="mxnet/FP32/location_net/location_net.xml" test="create_exenetwork" device="CPU" vmsize="761046" vmpeak="1754029" vmrss="43916" vmhwm="1002368" />
-        <model path="mxnet/FP32/location_net/location_net.xml" test="create_exenetwork" device="GPU" vmsize="1026110" vmpeak="2108686" vmrss="680191" vmhwm="1826792" />
-        <model path="mxnet/FP32/location_net/location_net.xml" test="infer_request_inference" device="CPU" vmsize="1512095" vmpeak="1753998" vmrss="701483" vmhwm="1002333" />
-        <model path="mxnet/FP32/location_net/location_net.xml" test="infer_request_inference" device="GPU" vmsize="1880973" vmpeak="2110306" vmrss="1532348" vmhwm="1828952" />
-        <model path="mxnet/FP32/lresnet100e/lresnet100e.xml" test="create_exenetwork" device="CPU" vmsize="759695" vmpeak="1636430" vmrss="38011" vmhwm="883225" />
-        <model path="mxnet/FP32/lresnet100e/lresnet100e.xml" test="create_exenetwork" device="GPU" vmsize="1118880" vmpeak="1994964" vmrss="773102" vmhwm="1713034" />
-        <model path="mxnet/FP32/lresnet100e/lresnet100e.xml" test="infer_request_inference" device="CPU" vmsize="1430871" vmpeak="1636434" vmrss="617078" vmhwm="882886" />
-        <model path="mxnet/FP32/lresnet100e/lresnet100e.xml" test="infer_request_inference" device="GPU" vmsize="1804484" vmpeak="1993530" vmrss="1450724" vmhwm="1711340" />
-        <model path="mxnet/FP32/mobilenet/mobilenet.xml" test="create_exenetwork" device="CPU" vmsize="754872" vmpeak="821893" vmrss="55070" vmhwm="82354" />
-        <model path="mxnet/FP32/mobilenet/mobilenet.xml" test="create_exenetwork" device="GPU" vmsize="626304" vmpeak="734201" vmrss="280918" vmhwm="362925" />
-        <model path="mxnet/FP32/mobilenet/mobilenet.xml" test="infer_request_inference" device="CPU" vmsize="831344" vmpeak="903434" vmrss="86495" vmhwm="86495" />
-        <model path="mxnet/FP32/mobilenet/mobilenet.xml" test="infer_request_inference" device="GPU" vmsize="718357" vmpeak="790446" vmrss="367096" vmhwm="367096" />
-        <model path="mxnet/FP32/mobilenet_v2/mobilenet_v2.xml" test="create_exenetwork" device="CPU" vmsize="756826" vmpeak="819711" vmrss="53961" vmhwm="77206" />
-        <model path="mxnet/FP32/mobilenet_v2/mobilenet_v2.xml" test="create_exenetwork" device="GPU" vmsize="758023" vmpeak="861784" vmrss="412702" vmhwm="436805" />
-        <model path="mxnet/FP32/mobilenet_v2/mobilenet_v2.xml" test="infer_request_inference" device="CPU" vmsize="836470" vmpeak="891765" vmrss="83050" vmhwm="83050" />
-        <model path="mxnet/FP32/mobilenet_v2/mobilenet_v2.xml" test="infer_request_inference" device="GPU" vmsize="788986" vmpeak="861075" vmrss="437646" vmhwm="437646" />
-        <model path="mxnet/FP32/mtcnn_o/mtcnn_o.xml" test="create_exenetwork" device="CPU" vmsize="762731" vmpeak="804491" vmrss="17490" vmhwm="28454" />
-        <model path="mxnet/FP32/mtcnn_o/mtcnn_o.xml" test="create_exenetwork" device="GPU" vmsize="578894" vmpeak="670546" vmrss="233547" vmhwm="245172" />
-        <model path="mxnet/FP32/mtcnn_o/mtcnn_o.xml" test="infer_request_inference" device="CPU" vmsize="808209" vmpeak="808209" vmrss="28314" vmhwm="28314" />
-        <model path="mxnet/FP32/mtcnn_o/mtcnn_o.xml" test="infer_request_inference" device="GPU" vmsize="600507" vmpeak="672597" vmrss="247596" vmhwm="247596" />
-        <model path="mxnet/FP32/mtcnn_p/mtcnn_p.xml" test="create_exenetwork" device="CPU" vmsize="753530" vmpeak="881588" vmrss="13208" vmhwm="35261" />
-        <model path="mxnet/FP32/mtcnn_p/mtcnn_p.xml" test="create_exenetwork" device="GPU" vmsize="570042" vmpeak="661702" vmrss="224870" vmhwm="353003" />
-        <model path="mxnet/FP32/mtcnn_p/mtcnn_p.xml" test="infer_request_inference" device="CPU" vmsize="901260" vmpeak="901260" vmrss="107390" vmhwm="107390" />
-        <model path="mxnet/FP32/mtcnn_p/mtcnn_p.xml" test="infer_request_inference" device="GPU" vmsize="686408" vmpeak="758498" vmrss="332895" vmhwm="351907" />
-        <model path="mxnet/FP32/mtcnn_r/mtcnn_r.xml" test="create_exenetwork" device="CPU" vmsize="753711" vmpeak="803228" vmrss="14546" vmhwm="25586" />
-        <model path="mxnet/FP32/mtcnn_r/mtcnn_r.xml" test="create_exenetwork" device="GPU" vmsize="577288" vmpeak="667682" vmrss="231642" vmhwm="242167" />
-        <model path="mxnet/FP32/mtcnn_r/mtcnn_r.xml" test="infer_request_inference" device="CPU" vmsize="806102" vmpeak="806102" vmrss="24468" vmhwm="24468" />
-        <model path="mxnet/FP32/mtcnn_r/mtcnn_r.xml" test="infer_request_inference" device="GPU" vmsize="595588" vmpeak="667678" vmrss="242246" vmhwm="242246" />
-        <model path="mxnet/FP32/nin/nin.xml" test="create_exenetwork" device="CPU" vmsize="753838" vmpeak="907420" vmrss="80674" vmhwm="122086" />
-        <model path="mxnet/FP32/nin/nin.xml" test="create_exenetwork" device="GPU" vmsize="675633" vmpeak="798283" vmrss="330184" vmhwm="372754" />
-        <model path="mxnet/FP32/nin/nin.xml" test="infer_request_inference" device="CPU" vmsize="841390" vmpeak="913479" vmrss="123776" vmhwm="123776" />
-        <model path="mxnet/FP32/nin/nin.xml" test="infer_request_inference" device="GPU" vmsize="726066" vmpeak="798155" vmrss="390764" vmhwm="390764" />
-        <model path="mxnet/FP32/nst_vgg19/nst_vgg19.xml" test="create_exenetwork" device="CPU" vmsize="754080" vmpeak="884950" vmrss="35930" vmhwm="56368" />
-        <model path="mxnet/FP32/nst_vgg19/nst_vgg19.xml" test="create_exenetwork" device="GPU" vmsize="613082" vmpeak="713020" vmrss="267753" vmhwm="358019" />
-        <model path="mxnet/FP32/nst_vgg19/nst_vgg19.xml" test="infer_request_inference" device="CPU" vmsize="847726" vmpeak="919815" vmrss="83300" vmhwm="83300" />
-        <model path="mxnet/FP32/nst_vgg19/nst_vgg19.xml" test="infer_request_inference" device="GPU" vmsize="710754" vmpeak="782843" vmrss="357442" vmhwm="357442" />
-        <model path="mxnet/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="CPU" vmsize="760821" vmpeak="1370292" vmrss="44242" vmhwm="618965" />
-        <model path="mxnet/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="GPU" vmsize="1077643" vmpeak="1594964" vmrss="731733" vmhwm="1313127" />
-        <model path="mxnet/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="CPU" vmsize="1256200" vmpeak="1370261" vmrss="444043" vmhwm="617852" />
-        <model path="mxnet/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="GPU" vmsize="1494732" vmpeak="1596218" vmrss="1141690" vmhwm="1314187" />
-        <model path="mxnet/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="CPU" vmsize="765322" vmpeak="1593790" vmrss="61120" vmhwm="831661" />
-        <model path="mxnet/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="GPU" vmsize="1339184" vmpeak="2040148" vmrss="993968" vmhwm="1758746" />
-        <model path="mxnet/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="CPU" vmsize="1414652" vmpeak="1593754" vmrss="594426" vmhwm="832220" />
-        <model path="mxnet/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="GPU" vmsize="1871271" vmpeak="2037904" vmrss="1518501" vmhwm="1756343" />
-        <model path="mxnet/FP32/resnet_v2_101/resnet_v2_101.xml" test="create_exenetwork" device="CPU" vmsize="760650" vmpeak="1369557" vmrss="43384" vmhwm="618015" />
-        <model path="mxnet/FP32/resnet_v2_101/resnet_v2_101.xml" test="create_exenetwork" device="GPU" vmsize="1022863" vmpeak="1592206" vmrss="676698" vmhwm="1309880" />
-        <model path="mxnet/FP32/resnet_v2_101/resnet_v2_101.xml" test="infer_request_inference" device="CPU" vmsize="1255557" vmpeak="1369522" vmrss="445350" vmhwm="618750" />
-        <model path="mxnet/FP32/resnet_v2_101/resnet_v2_101.xml" test="infer_request_inference" device="GPU" vmsize="1490077" vmpeak="1591563" vmrss="1137444" vmhwm="1309910" />
-        <model path="mxnet/FP32/resnet_v2_152/resnet_v2_152.xml" test="create_exenetwork" device="CPU" vmsize="765204" vmpeak="1593108" vmrss="61124" vmhwm="831353" />
-        <model path="mxnet/FP32/resnet_v2_152/resnet_v2_152.xml" test="create_exenetwork" device="GPU" vmsize="1340754" vmpeak="2034586" vmrss="995636" vmhwm="1753100" />
-        <model path="mxnet/FP32/resnet_v2_152/resnet_v2_152.xml" test="infer_request_inference" device="CPU" vmsize="1413992" vmpeak="1593077" vmrss="592710" vmhwm="831098" />
-        <model path="mxnet/FP32/resnet_v2_152/resnet_v2_152.xml" test="infer_request_inference" device="GPU" vmsize="1867096" vmpeak="2036610" vmrss="1514532" vmhwm="1755089" />
-        <model path="mxnet/FP32/resnext_101/resnext_101.xml" test="create_exenetwork" device="CPU" vmsize="766911" vmpeak="1356080" vmrss="64389" vmhwm="623026" />
-        <model path="mxnet/FP32/resnext_101/resnext_101.xml" test="create_exenetwork" device="GPU" vmsize="1105068" vmpeak="1552320" vmrss="759990" vmhwm="1271340" />
-        <model path="mxnet/FP32/resnext_101/resnext_101.xml" test="infer_request_inference" device="CPU" vmsize="1258699" vmpeak="1356084" vmrss="468780" vmhwm="623788" />
-        <model path="mxnet/FP32/resnext_101/resnext_101.xml" test="infer_request_inference" device="GPU" vmsize="1478730" vmpeak="1553591" vmrss="1126364" vmhwm="1272167" />
-        <model path="mxnet/FP32/resnext_101_64x4d/resnext_101_64x4d.xml" test="create_exenetwork" device="CPU" vmsize="761239" vmpeak="1894468" vmrss="40691" vmhwm="1139410" />
-        <model path="mxnet/FP32/resnext_101_64x4d/resnext_101_64x4d.xml" test="create_exenetwork" device="GPU" vmsize="1418938" vmpeak="2248351" vmrss="1073886" vmhwm="1967262" />
-        <model path="mxnet/FP32/resnext_101_64x4d/resnext_101_64x4d.xml" test="infer_request_inference" device="CPU" vmsize="1618592" vmpeak="1894499" vmrss="810946" vmhwm="1140422" />
-        <model path="mxnet/FP32/resnext_101_64x4d/resnext_101_64x4d.xml" test="infer_request_inference" device="GPU" vmsize="1996112" vmpeak="2247322" vmrss="1660700" vmhwm="1965405" />
-        <model path="mxnet/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="CPU" vmsize="754987" vmpeak="880664" vmrss="29475" vmhwm="43832" />
-        <model path="mxnet/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="GPU" vmsize="616360" vmpeak="711106" vmrss="270859" vmhwm="322498" />
-        <model path="mxnet/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="CPU" vmsize="818562" vmpeak="818562" vmrss="47141" vmhwm="47141" />
-        <model path="mxnet/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="GPU" vmsize="674124" vmpeak="746213" vmrss="322731" vmhwm="322731" />
-        <model path="mxnet/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="create_exenetwork" device="CPU" vmsize="755224" vmpeak="1146433" vmrss="21806" vmhwm="370044" />
-        <model path="mxnet/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="create_exenetwork" device="GPU" vmsize="775324" vmpeak="1077709" vmrss="430342" vmhwm="796857" />
-        <model path="mxnet/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="infer_request_inference" device="CPU" vmsize="1113904" vmpeak="1185993" vmrss="312527" vmhwm="370946" />
-        <model path="mxnet/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="infer_request_inference" device="GPU" vmsize="1137391" vmpeak="1137391" vmrss="785391" vmhwm="793201" />
-        <model path="mxnet/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="CPU" vmsize="754133" vmpeak="2548906" vmrss="14955" vmhwm="1807044" />
-        <model path="mxnet/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="GPU" vmsize="668619" vmpeak="3326725" vmrss="322691" vmhwm="3044404" />
-        <model path="mxnet/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="CPU" vmsize="2027476" vmpeak="2548906" vmrss="1242678" vmhwm="1808470" />
-        <model path="mxnet/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="GPU" vmsize="2438563" vmpeak="3326725" vmrss="2085028" vmhwm="3044505" />
-        <model path="mxnet/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="CPU" vmsize="754226" vmpeak="2618325" vmrss="15708" vmhwm="1877977" />
-        <model path="mxnet/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="GPU" vmsize="741092" vmpeak="3397116" vmrss="396074" vmhwm="3115345" />
-        <model path="mxnet/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="CPU" vmsize="2074089" vmpeak="2618325" vmrss="1290049" vmhwm="1878672" />
-        <model path="mxnet/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="GPU" vmsize="2518436" vmpeak="3397178" vmrss="2165728" vmhwm="3115459" />
-        <model path="mxnet/FP32/yolo_v1_full/yolo_v1_full.xml" test="create_exenetwork" device="CPU" vmsize="754701" vmpeak="4259684" vmrss="17626" vmhwm="3531853" />
-        <model path="mxnet/FP32/yolo_v1_full/yolo_v1_full.xml" test="create_exenetwork" device="GPU" vmsize="747582" vmpeak="5921322" vmrss="402490" vmhwm="5639084" />
-        <model path="mxnet/FP32/yolo_v1_full/yolo_v1_full.xml" test="infer_request_inference" device="CPU" vmsize="3095241" vmpeak="4259670" vmrss="2379062" vmhwm="3530652" />
-        <model path="mxnet/FP32/yolo_v1_full/yolo_v1_full.xml" test="infer_request_inference" device="GPU" vmsize="4163667" vmpeak="5923566" vmrss="3810193" vmhwm="5640967" />
-        <model path="mxnet/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="create_exenetwork" device="CPU" vmsize="754023" vmpeak="1334414" vmrss="15254" vmhwm="608322" />
-        <model path="mxnet/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="create_exenetwork" device="GPU" vmsize="600701" vmpeak="1330978" vmrss="255912" vmhwm="1049844" />
-        <model path="mxnet/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="infer_request_inference" device="CPU" vmsize="1215838" vmpeak="1334383" vmrss="428331" vmhwm="607442" />
-        <model path="mxnet/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="infer_request_inference" device="GPU" vmsize="1199972" vmpeak="1330384" vmrss="847391" vmhwm="1049228" />
-        <model path="onnx/FP32/ssd_resnet34/ssd_resnet34.xml" test="create_exenetwork" device="CPU" vmsize="755387" vmpeak="1175570" vmrss="25374" vmhwm="306904" />
-        <model path="onnx/FP32/ssd_resnet34/ssd_resnet34.xml" test="create_exenetwork" device="GPU" vmsize="805222" vmpeak="1346307" vmrss="460781" vmhwm="1065873" />
-        <model path="onnx/FP32/ssd_resnet34/ssd_resnet34.xml" test="infer_request_inference" device="CPU" vmsize="1188580" vmpeak="1260670" vmrss="336036" vmhwm="336036" />
-        <model path="onnx/FP32/ssd_resnet34/ssd_resnet34.xml" test="infer_request_inference" device="GPU" vmsize="1449408" vmpeak="1521498" vmrss="1096792" vmhwm="1096792" />
-        <model path="onnx/FP32/ssd_resnet34_new/ssd_resnet34_new.xml" test="create_exenetwork" device="CPU" vmsize="756822" vmpeak="1181615" vmrss="28468" vmhwm="309716" />
-        <model path="onnx/FP32/ssd_resnet34_new/ssd_resnet34_new.xml" test="create_exenetwork" device="GPU" vmsize="819271" vmpeak="2432738" vmrss="474764" vmhwm="1101047" />
-        <model path="onnx/FP32/ssd_resnet34_new/ssd_resnet34_new.xml" test="infer_request_inference" device="CPU" vmsize="1189117" vmpeak="1261207" vmrss="333788" vmhwm="333788" />
-        <model path="onnx/FP32/ssd_resnet34_new/ssd_resnet34_new.xml" test="infer_request_inference" device="GPU" vmsize="2539222" vmpeak="2611312" vmrss="2191604" vmhwm="2191604" />
-        <model path="pytorch/FP32/inceptionv3_pretrained/inceptionv3_pretrained.xml" test="create_exenetwork" device="CPU" vmsize="757878" vmpeak="1077934" vmrss="35261" vmhwm="348964" />
-        <model path="pytorch/FP32/inceptionv3_pretrained/inceptionv3_pretrained.xml" test="create_exenetwork" device="GPU" vmsize="899610" vmpeak="1179116" vmrss="553863" vmhwm="896997" />
-        <model path="pytorch/FP32/inceptionv3_pretrained/inceptionv3_pretrained.xml" test="infer_request_inference" device="CPU" vmsize="1050878" vmpeak="1077876" vmrss="256506" vmhwm="347974" />
-        <model path="pytorch/FP32/inceptionv3_pretrained/inceptionv3_pretrained.xml" test="infer_request_inference" device="GPU" vmsize="1179239" vmpeak="1251329" vmrss="826553" vmhwm="897714" />
-        <model path="pytorch/FP32/resnet50_pretrained/resnet50_pretrained.xml" test="create_exenetwork" device="CPU" vmsize="760456" vmpeak="1096708" vmrss="27315" vmhwm="361944" />
-        <model path="pytorch/FP32/resnet50_pretrained/resnet50_pretrained.xml" test="create_exenetwork" device="GPU" vmsize="834275" vmpeak="1073569" vmrss="489086" vmhwm="792343" />
-        <model path="pytorch/FP32/resnet50_pretrained/resnet50_pretrained.xml" test="infer_request_inference" device="CPU" vmsize="1058622" vmpeak="1130712" vmrss="267682" vmhwm="362749" />
-        <model path="pytorch/FP32/resnet50_pretrained/resnet50_pretrained.xml" test="infer_request_inference" device="GPU" vmsize="1050852" vmpeak="1122941" vmrss="697576" vmhwm="791040" />
-        <model path="pytorch/FP32/resnet50_torchvision/resnet50_torchvision.xml" test="create_exenetwork" device="CPU" vmsize="755950" vmpeak="1092203" vmrss="27640" vmhwm="362740" />
-        <model path="pytorch/FP32/resnet50_torchvision/resnet50_torchvision.xml" test="create_exenetwork" device="GPU" vmsize="835951" vmpeak="1073516" vmrss="490674" vmhwm="792224" />
-        <model path="pytorch/FP32/resnet50_torchvision/resnet50_torchvision.xml" test="infer_request_inference" device="CPU" vmsize="1058626" vmpeak="1130716" vmrss="266516" vmhwm="361992" />
-        <model path="pytorch/FP32/resnet50_torchvision/resnet50_torchvision.xml" test="infer_request_inference" device="GPU" vmsize="1050218" vmpeak="1071435" vmrss="696669" vmhwm="789848" />
-        <model path="pytorch/FP32/squeezenet_v1.1_pretrained/squeezenet_v1.1_pretrained.xml" test="create_exenetwork" device="CPU" vmsize="754872" vmpeak="880550" vmrss="29603" vmhwm="43212" />
-        <model path="pytorch/FP32/squeezenet_v1.1_pretrained/squeezenet_v1.1_pretrained.xml" test="create_exenetwork" device="GPU" vmsize="648881" vmpeak="743626" vmrss="303424" vmhwm="318348" />
-        <model path="pytorch/FP32/squeezenet_v1.1_pretrained/squeezenet_v1.1_pretrained.xml" test="infer_request_inference" device="CPU" vmsize="818246" vmpeak="818246" vmrss="46534" vmhwm="46534" />
-        <model path="pytorch/FP32/squeezenet_v1.1_pretrained/squeezenet_v1.1_pretrained.xml" test="infer_request_inference" device="GPU" vmsize="674146" vmpeak="746235" vmrss="320315" vmhwm="320315" />
-        <model path="tf/1.14.0/FP32/bert_base_uncased/bert_base_uncased.xml" test="create_exenetwork" device="CPU" vmsize="764755" vmpeak="2092574" vmrss="38016" vmhwm="1352450" />
-        <model path="tf/1.14.0/FP32/bert_base_uncased/bert_base_uncased.xml" test="create_exenetwork" device="GPU" vmsize="1578328" vmpeak="3355976" vmrss="1233474" vmhwm="3074953" />
-        <model path="tf/1.14.0/FP32/bert_base_uncased/bert_base_uncased.xml" test="infer_request_inference" device="CPU" vmsize="1802838" vmpeak="2092587" vmrss="994188" vmhwm="1352709" />
-        <model path="tf/1.14.0/FP32/bert_base_uncased/bert_base_uncased.xml" test="infer_request_inference" device="GPU" vmsize="2958472" vmpeak="3352694" vmrss="2607677" vmhwm="3072185" />
-        <model path="tf/1.14.0/FP32/bert_xnli/bert_xnli.xml" test="create_exenetwork" device="CPU" vmsize="765124" vmpeak="2035453" vmrss="39745" vmhwm="1292420" />
-        <model path="tf/1.14.0/FP32/bert_xnli/bert_xnli.xml" test="create_exenetwork" device="GPU" vmsize="1939801" vmpeak="3261715" vmrss="1594617" vmhwm="2980577" />
-        <model path="tf/1.14.0/FP32/bert_xnli/bert_xnli.xml" test="infer_request_inference" device="CPU" vmsize="1750196" vmpeak="2039945" vmrss="935774" vmhwm="1291963" />
-        <model path="tf/1.14.0/FP32/bert_xnli/bert_xnli.xml" test="infer_request_inference" device="GPU" vmsize="2902235" vmpeak="3265460" vmrss="2551727" vmhwm="2984352" />
-        <model path="tf/1.14.0/FP32/cmu/cmu.xml" test="create_exenetwork" device="CPU" vmsize="757587" vmpeak="1547678" vmrss="33004" vmhwm="718973" />
-        <model path="tf/1.14.0/FP32/cmu/cmu.xml" test="create_exenetwork" device="GPU" vmsize="1154670" vmpeak="1678943" vmrss="809811" vmhwm="1398284" />
-        <model path="tf/1.14.0/FP32/cmu/cmu.xml" test="infer_request_inference" device="CPU" vmsize="1553134" vmpeak="1553134" vmrss="606232" vmhwm="719791" />
-        <model path="tf/1.14.0/FP32/cmu/cmu.xml" test="infer_request_inference" device="GPU" vmsize="1753910" vmpeak="1826000" vmrss="1400234" vmhwm="1400234" />
-        <model path="tf/1.14.0/FP32/deeplab_v3/deeplab_v3.xml" test="create_exenetwork" device="CPU" vmsize="757160" vmpeak="867486" vmrss="41307" vmhwm="62678" />
-        <model path="tf/1.14.0/FP32/deeplab_v3/deeplab_v3.xml" test="create_exenetwork" device="GPU" vmsize="743283" vmpeak="841055" vmrss="398604" vmhwm="537209" />
-        <model path="tf/1.14.0/FP32/deeplab_v3/deeplab_v3.xml" test="infer_request_inference" device="CPU" vmsize="888087" vmpeak="960176" vmrss="114166" vmhwm="114166" />
-        <model path="tf/1.14.0/FP32/deeplab_v3/deeplab_v3.xml" test="infer_request_inference" device="GPU" vmsize="894339" vmpeak="966429" vmrss="541912" vmhwm="541912" />
-        <model path="tf/1.14.0/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="CPU" vmsize="772728" vmpeak="951218" vmrss="95840" vmhwm="151676" />
-        <model path="tf/1.14.0/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="GPU" vmsize="1135195" vmpeak="1245301" vmrss="789848" vmhwm="820410" />
-        <model path="tf/1.14.0/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="CPU" vmsize="985450" vmpeak="1057540" vmrss="159046" vmhwm="159046" />
-        <model path="tf/1.14.0/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="GPU" vmsize="1171152" vmpeak="1243242" vmrss="818598" vmhwm="818598" />
-        <model path="tf/1.14.0/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="CPU" vmsize="864168" vmpeak="998263" vmrss="126266" vmhwm="241604" />
-        <model path="tf/1.14.0/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="GPU" vmsize="1353237" vmpeak="1472583" vmrss="1007978" vmhwm="1094614" />
-        <model path="tf/1.14.0/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="CPU" vmsize="1060316" vmpeak="1132406" vmrss="238326" vmhwm="240724" />
-        <model path="tf/1.14.0/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="GPU" vmsize="1447146" vmpeak="1519236" vmrss="1094759" vmhwm="1097835" />
-        <model path="tf/1.14.0/FP32/dssd_avigilon/dssd_avigilon.xml" test="create_exenetwork" device="CPU" vmsize="757156" vmpeak="826843" vmrss="69031" vmhwm="100887" />
-        <model path="tf/1.14.0/FP32/dssd_avigilon/dssd_avigilon.xml" test="create_exenetwork" device="GPU" vmsize="796250" vmpeak="906813" vmrss="451171" vmhwm="482077" />
-        <model path="tf/1.14.0/FP32/dssd_avigilon/dssd_avigilon.xml" test="infer_request_inference" device="CPU" vmsize="849041" vmpeak="849041" vmrss="104464" vmhwm="104464" />
-        <model path="tf/1.14.0/FP32/dssd_avigilon/dssd_avigilon.xml" test="infer_request_inference" device="GPU" vmsize="833984" vmpeak="906074" vmrss="481786" vmhwm="481786" />
-        <model path="tf/1.14.0/FP32/facenet/facenet.xml" test="create_exenetwork" device="CPU" vmsize="760786" vmpeak="1139173" vmrss="66413" vmhwm="353346" />
-        <model path="tf/1.14.0/FP32/facenet/facenet.xml" test="create_exenetwork" device="GPU" vmsize="1055560" vmpeak="1255601" vmrss="710595" vmhwm="974815" />
-        <model path="tf/1.14.0/FP32/facenet/facenet.xml" test="infer_request_inference" device="CPU" vmsize="1097984" vmpeak="1170074" vmrss="281050" vmhwm="352228" />
-        <model path="tf/1.14.0/FP32/facenet/facenet.xml" test="infer_request_inference" device="GPU" vmsize="1259253" vmpeak="1331343" vmrss="906562" vmhwm="976483" />
-        <model path="tf/1.14.0/FP32/faster_rcnn_inception_resnet_v2_atrous_coco/faster_rcnn_inception_resnet_v2_atrous_coco.xml" test="create_exenetwork" device="CPU" vmsize="920884" vmpeak="2443892" vmrss="237186" vmhwm="851215" />
-        <model path="tf/1.14.0/FP32/faster_rcnn_inception_resnet_v2_atrous_coco/faster_rcnn_inception_resnet_v2_atrous_coco.xml" test="create_exenetwork" device="GPU" vmsize="1751376" vmpeak="4164239" vmrss="1406411" vmhwm="3883422" />
-        <model path="tf/1.14.0/FP32/faster_rcnn_inception_v2_coco/faster_rcnn_inception_v2_coco.xml" test="create_exenetwork" device="CPU" vmsize="757323" vmpeak="986519" vmrss="35006" vmhwm="212911" />
-        <model path="tf/1.14.0/FP32/faster_rcnn_inception_v2_coco/faster_rcnn_inception_v2_coco.xml" test="create_exenetwork" device="GPU" vmsize="862219" vmpeak="1179283" vmrss="516881" vmhwm="897930" />
-        <model path="tf/1.14.0/FP32/faster_rcnn_resnet101_coco/faster_rcnn_resnet101_coco.xml" test="create_exenetwork" device="CPU" vmsize="761538" vmpeak="1491811" vmrss="45667" vmhwm="671554" />
-        <model path="tf/1.14.0/FP32/faster_rcnn_resnet101_coco/faster_rcnn_resnet101_coco.xml" test="create_exenetwork" device="GPU" vmsize="1126884" vmpeak="1800550" vmrss="781739" vmhwm="1519302" />
-        <model path="tf/1.14.0/FP32/faster_rcnn_resnet50_coco/faster_rcnn_resnet50_coco.xml" test="create_exenetwork" device="CPU" vmsize="766964" vmpeak="1233342" vmrss="29568" vmhwm="415509" />
-        <model path="tf/1.14.0/FP32/faster_rcnn_resnet50_coco/faster_rcnn_resnet50_coco.xml" test="create_exenetwork" device="GPU" vmsize="897432" vmpeak="1347007" vmrss="553357" vmhwm="1067290" />
-        <model path="tf/1.14.0/FP32/i3d_rgb/i3d_rgb.xml" test="create_exenetwork" device="CPU" vmsize="756562" vmpeak="1099533" vmrss="30078" vmhwm="245590" />
-        <model path="tf/1.14.0/FP32/i3d_rgb/i3d_rgb.xml" test="create_exenetwork" device="GPU" vmsize="764170" vmpeak="1353149" vmrss="419267" vmhwm="1072244" />
-        <model path="tf/1.14.0/FP32/i3d_rgb/i3d_rgb.xml" test="infer_request_inference" device="CPU" vmsize="1478496" vmpeak="1478496" vmrss="332820" vmhwm="332820" />
-        <model path="tf/1.14.0/FP32/i3d_rgb/i3d_rgb.xml" test="infer_request_inference" device="GPU" vmsize="1423364" vmpeak="1495454" vmrss="1070973" vmhwm="1172441" />
-        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.0/icv_squeezenet_v1.0.xml" test="create_exenetwork" device="CPU" vmsize="755092" vmpeak="815298" vmrss="28811" vmhwm="43687" />
-        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.0/icv_squeezenet_v1.0.xml" test="create_exenetwork" device="GPU" vmsize="620734" vmpeak="715479" vmrss="274991" vmhwm="324935" />
-        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.0/icv_squeezenet_v1.0.xml" test="infer_request_inference" device="CPU" vmsize="825268" vmpeak="825268" vmrss="48439" vmhwm="48439" />
-        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.0/icv_squeezenet_v1.0.xml" test="infer_request_inference" device="GPU" vmsize="680592" vmpeak="752681" vmrss="326972" vmhwm="326972" />
-        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.1/icv_squeezenet_v1.1.xml" test="create_exenetwork" device="CPU" vmsize="765182" vmpeak="880712" vmrss="29827" vmhwm="44149" />
-        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.1/icv_squeezenet_v1.1.xml" test="create_exenetwork" device="GPU" vmsize="612620" vmpeak="707366" vmrss="266855" vmhwm="323734" />
-        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.1/icv_squeezenet_v1.1.xml" test="infer_request_inference" device="CPU" vmsize="818879" vmpeak="818879" vmrss="46534" vmhwm="46534" />
-        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.1/icv_squeezenet_v1.1.xml" test="infer_request_inference" device="GPU" vmsize="681010" vmpeak="753099" vmrss="326902" vmhwm="326902" />
-        <model path="tf/1.14.0/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="CPU" vmsize="848056" vmpeak="1522360" vmrss="147382" vmhwm="794481" />
-        <model path="tf/1.14.0/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="GPU" vmsize="1699992" vmpeak="2187231" vmrss="1354892" vmhwm="1906344" />
-        <model path="tf/1.14.0/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="CPU" vmsize="1437365" vmpeak="1522364" vmrss="643724" vmhwm="793755" />
-        <model path="tf/1.14.0/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="GPU" vmsize="2152515" vmpeak="2224604" vmrss="1800026" vmhwm="1900395" />
-        <model path="tf/1.14.0/FP32/inception_v1/inception_v1.xml" test="create_exenetwork" device="CPU" vmsize="757526" vmpeak="905132" vmrss="83195" vmhwm="119653" />
-        <model path="tf/1.14.0/FP32/inception_v1/inception_v1.xml" test="create_exenetwork" device="GPU" vmsize="815988" vmpeak="932663" vmrss="470742" vmhwm="507760" />
-        <model path="tf/1.14.0/FP32/inception_v1/inception_v1.xml" test="infer_request_inference" device="CPU" vmsize="1007820" vmpeak="1007820" vmrss="123926" vmhwm="123926" />
-        <model path="tf/1.14.0/FP32/inception_v1/inception_v1.xml" test="infer_request_inference" device="GPU" vmsize="861520" vmpeak="933609" vmrss="507870" vmhwm="507870" />
-        <model path="tf/1.14.0/FP32/inception_v2/inception_v2.xml" test="create_exenetwork" device="CPU" vmsize="756756" vmpeak="925425" vmrss="34007" vmhwm="180769" />
-        <model path="tf/1.14.0/FP32/inception_v2/inception_v2.xml" test="create_exenetwork" device="GPU" vmsize="824168" vmpeak="962403" vmrss="478737" vmhwm="610280" />
-        <model path="tf/1.14.0/FP32/inception_v2/inception_v2.xml" test="infer_request_inference" device="CPU" vmsize="927669" vmpeak="999759" vmrss="141772" vmhwm="181966" />
-        <model path="tf/1.14.0/FP32/inception_v2/inception_v2.xml" test="infer_request_inference" device="GPU" vmsize="936755" vmpeak="1008845" vmrss="583963" vmhwm="611516" />
-        <model path="tf/1.14.0/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="CPU" vmsize="759013" vmpeak="1063559" vmrss="51255" vmhwm="349113" />
-        <model path="tf/1.14.0/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="GPU" vmsize="925958" vmpeak="1184101" vmrss="580056" vmhwm="902325" />
-        <model path="tf/1.14.0/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="CPU" vmsize="1043583" vmpeak="1115672" vmrss="263520" vmhwm="349034" />
-        <model path="tf/1.14.0/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="GPU" vmsize="1189548" vmpeak="1261638" vmrss="836646" vmhwm="903676" />
-        <model path="tf/1.14.0/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="CPU" vmsize="764574" vmpeak="1327493" vmrss="64108" vmhwm="603842" />
-        <model path="tf/1.14.0/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="GPU" vmsize="1221717" vmpeak="1686643" vmrss="875617" vmhwm="1404475" />
-        <model path="tf/1.14.0/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="CPU" vmsize="1381556" vmpeak="1403402" vmrss="440356" vmhwm="602751" />
-        <model path="tf/1.14.0/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="GPU" vmsize="1641921" vmpeak="1714011" vmrss="1289340" vmhwm="1405430" />
-        <model path="tf/1.14.0/FP32/mask_rcnn_resnet101_atrous_coco/mask_rcnn_resnet101_atrous_coco.xml" test="create_exenetwork" device="CPU" vmsize="762119" vmpeak="2738828" vmrss="47203" vmhwm="947557" />
-        <model path="tf/1.14.0/FP32/mask_rcnn_resnet101_atrous_coco/mask_rcnn_resnet101_atrous_coco.xml" test="create_exenetwork" device="GPU" vmsize="1295483" vmpeak="4189812" vmrss="949788" vmhwm="3908550" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_0.25_128/mobilenet_v1_0.25_128.xml" test="create_exenetwork" device="CPU" vmsize="763840" vmpeak="805556" vmrss="21938" vmhwm="33264" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_0.25_128/mobilenet_v1_0.25_128.xml" test="create_exenetwork" device="GPU" vmsize="652572" vmpeak="744180" vmrss="306754" vmhwm="318432" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_0.25_128/mobilenet_v1_0.25_128.xml" test="infer_request_inference" device="CPU" vmsize="814000" vmpeak="814000" vmrss="33391" vmhwm="33391" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_0.25_128/mobilenet_v1_0.25_128.xml" test="infer_request_inference" device="GPU" vmsize="672144" vmpeak="744233" vmrss="319026" vmhwm="319026" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_0.5_160/mobilenet_v1_0.5_160.xml" test="create_exenetwork" device="CPU" vmsize="754705" vmpeak="881188" vmrss="29282" vmhwm="44836" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_0.5_160/mobilenet_v1_0.5_160.xml" test="create_exenetwork" device="GPU" vmsize="614209" vmpeak="709759" vmrss="268778" vmhwm="326845" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_0.5_160/mobilenet_v1_0.5_160.xml" test="infer_request_inference" device="CPU" vmsize="818228" vmpeak="890318" vmrss="45513" vmhwm="45513" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_0.5_160/mobilenet_v1_0.5_160.xml" test="infer_request_inference" device="GPU" vmsize="682484" vmpeak="754573" vmrss="328966" vmhwm="328966" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_1.0_224/mobilenet_v1_1.0_224.xml" test="create_exenetwork" device="CPU" vmsize="754903" vmpeak="821928" vmrss="55237" vmhwm="82768" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_1.0_224/mobilenet_v1_1.0_224.xml" test="create_exenetwork" device="GPU" vmsize="643887" vmpeak="751788" vmrss="298685" vmhwm="367602" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_1.0_224/mobilenet_v1_1.0_224.xml" test="infer_request_inference" device="CPU" vmsize="831111" vmpeak="831111" vmrss="86732" vmhwm="86732" />
-        <model path="tf/1.14.0/FP32/mobilenet_v1_1.0_224/mobilenet_v1_1.0_224.xml" test="infer_request_inference" device="GPU" vmsize="720979" vmpeak="793069" vmrss="367584" vmhwm="367584" />
-        <model path="tf/1.14.0/FP32/mobilenet_v2_1.0_224/mobilenet_v2_1.0_224.xml" test="create_exenetwork" device="CPU" vmsize="756870" vmpeak="819759" vmrss="54586" vmhwm="78570" />
-        <model path="tf/1.14.0/FP32/mobilenet_v2_1.0_224/mobilenet_v2_1.0_224.xml" test="create_exenetwork" device="GPU" vmsize="705724" vmpeak="809490" vmrss="360267" vmhwm="435512" />
-        <model path="tf/1.14.0/FP32/mobilenet_v2_1.0_224/mobilenet_v2_1.0_224.xml" test="infer_request_inference" device="CPU" vmsize="835978" vmpeak="835978" vmrss="82583" vmhwm="82583" />
-        <model path="tf/1.14.0/FP32/mobilenet_v2_1.0_224/mobilenet_v2_1.0_224.xml" test="infer_request_inference" device="GPU" vmsize="788902" vmpeak="860992" vmrss="435727" vmhwm="435727" />
-        <model path="tf/1.14.0/FP32/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224.xml" test="create_exenetwork" device="CPU" vmsize="756725" vmpeak="831080" vmrss="76414" vmhwm="111914" />
-        <model path="tf/1.14.0/FP32/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224.xml" test="create_exenetwork" device="GPU" vmsize="787058" vmpeak="902290" vmrss="441399" vmhwm="476911" />
-        <model path="tf/1.14.0/FP32/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224.xml" test="infer_request_inference" device="CPU" vmsize="847299" vmpeak="847299" vmrss="120969" vmhwm="120969" />
-        <model path="tf/1.14.0/FP32/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224.xml" test="infer_request_inference" device="GPU" vmsize="828920" vmpeak="901010" vmrss="475939" vmhwm="475939" />
-        <model path="tf/1.14.0/FP32/ncf/ncf.xml" test="create_exenetwork" device="CPU" vmsize="760988" vmpeak="1018754" vmrss="14484" vmhwm="296612" />
-        <model path="tf/1.14.0/FP32/ncf/ncf.xml" test="create_exenetwork" device="GPU" vmsize="600859" vmpeak="965967" vmrss="255569" vmhwm="685150" />
-        <model path="tf/1.14.0/FP32/ncf/ncf.xml" test="infer_request_inference" device="CPU" vmsize="1095155" vmpeak="1167245" vmrss="304607" vmhwm="304607" />
-        <model path="tf/1.14.0/FP32/ncf/ncf.xml" test="infer_request_inference" device="GPU" vmsize="1004577" vmpeak="1076666" vmrss="651943" vmhwm="689915" />
-        <model path="tf/1.14.0/FP32/resnet_v1.5_50/resnet_v1.5_50.xml" test="create_exenetwork" device="CPU" vmsize="756096" vmpeak="1100136" vmrss="27812" vmhwm="362344" />
-        <model path="tf/1.14.0/FP32/resnet_v1.5_50/resnet_v1.5_50.xml" test="create_exenetwork" device="GPU" vmsize="822830" vmpeak="1073947" vmrss="477193" vmhwm="792264" />
-        <model path="tf/1.14.0/FP32/resnet_v1.5_50/resnet_v1.5_50.xml" test="infer_request_inference" device="CPU" vmsize="1060571" vmpeak="1132661" vmrss="269808" vmhwm="362771" />
-        <model path="tf/1.14.0/FP32/resnet_v1.5_50/resnet_v1.5_50.xml" test="infer_request_inference" device="GPU" vmsize="1054684" vmpeak="1075272" vmrss="702310" vmhwm="794314" />
-        <model path="tf/1.14.0/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="CPU" vmsize="760764" vmpeak="1338383" vmrss="42706" vmhwm="617047" />
-        <model path="tf/1.14.0/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="GPU" vmsize="1108602" vmpeak="1561885" vmrss="762616" vmhwm="1279700" />
-        <model path="tf/1.14.0/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="CPU" vmsize="1279819" vmpeak="1338409" vmrss="435102" vmhwm="617865" />
-        <model path="tf/1.14.0/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="GPU" vmsize="1455146" vmpeak="1561388" vmrss="1101755" vmhwm="1279845" />
-        <model path="tf/1.14.0/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="CPU" vmsize="765221" vmpeak="1552262" vmrss="59875" vmhwm="829250" />
-        <model path="tf/1.14.0/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="GPU" vmsize="1322098" vmpeak="1985359" vmrss="976223" vmhwm="1703319" />
-        <model path="tf/1.14.0/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="CPU" vmsize="1373006" vmpeak="1552293" vmrss="581891" vmhwm="829848" />
-        <model path="tf/1.14.0/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="GPU" vmsize="1814348" vmpeak="1986380" vmrss="1461099" vmhwm="1704714" />
-        <model path="tf/1.14.0/FP32/resnet_v1_50/resnet_v1_50.xml" test="create_exenetwork" device="CPU" vmsize="766088" vmpeak="1079958" vmrss="27324" vmhwm="362155" />
-        <model path="tf/1.14.0/FP32/resnet_v1_50/resnet_v1_50.xml" test="create_exenetwork" device="GPU" vmsize="838965" vmpeak="1085884" vmrss="493407" vmhwm="804324" />
-        <model path="tf/1.14.0/FP32/resnet_v1_50/resnet_v1_50.xml" test="infer_request_inference" device="CPU" vmsize="1046157" vmpeak="1118246" vmrss="260515" vmhwm="362810" />
-        <model path="tf/1.14.0/FP32/resnet_v1_50/resnet_v1_50.xml" test="infer_request_inference" device="GPU" vmsize="1057223" vmpeak="1080772" vmrss="704066" vmhwm="799440" />
-        <model path="tf/1.14.0/FP32/resnet_v2_101/resnet_v2_101.xml" test="create_exenetwork" device="CPU" vmsize="761754" vmpeak="1365104" vmrss="45179" vmhwm="620879" />
-        <model path="tf/1.14.0/FP32/resnet_v2_101/resnet_v2_101.xml" test="create_exenetwork" device="GPU" vmsize="1120737" vmpeak="1613546" vmrss="774637" vmhwm="1331308" />
-        <model path="tf/1.14.0/FP32/resnet_v2_101/resnet_v2_101.xml" test="infer_request_inference" device="CPU" vmsize="1251346" vmpeak="1365135" vmrss="446415" vmhwm="620241" />
-        <model path="tf/1.14.0/FP32/resnet_v2_101/resnet_v2_101.xml" test="infer_request_inference" device="GPU" vmsize="1515817" vmpeak="1613858" vmrss="1162572" vmhwm="1331968" />
-        <model path="tf/1.14.0/FP32/resnet_v2_152/resnet_v2_152.xml" test="create_exenetwork" device="CPU" vmsize="839823" vmpeak="1569361" vmrss="155029" vmhwm="833157" />
-        <model path="tf/1.14.0/FP32/resnet_v2_152/resnet_v2_152.xml" test="create_exenetwork" device="GPU" vmsize="1363960" vmpeak="2068752" vmrss="1018507" vmhwm="1787042" />
-        <model path="tf/1.14.0/FP32/resnet_v2_152/resnet_v2_152.xml" test="infer_request_inference" device="CPU" vmsize="1476041" vmpeak="1569392" vmrss="679918" vmhwm="833914" />
-        <model path="tf/1.14.0/FP32/resnet_v2_152/resnet_v2_152.xml" test="infer_request_inference" device="GPU" vmsize="1904799" vmpeak="2060317" vmrss="1551756" vmhwm="1778167" />
-        <model path="tf/1.14.0/FP32/resnet_v2_50/resnet_v2_50.xml" test="create_exenetwork" device="CPU" vmsize="756602" vmpeak="1096774" vmrss="28393" vmhwm="363391" />
-        <model path="tf/1.14.0/FP32/resnet_v2_50/resnet_v2_50.xml" test="create_exenetwork" device="GPU" vmsize="845226" vmpeak="1103374" vmrss="500051" vmhwm="821986" />
-        <model path="tf/1.14.0/FP32/resnet_v2_50/resnet_v2_50.xml" test="infer_request_inference" device="CPU" vmsize="1063304" vmpeak="1135393" vmrss="271220" vmhwm="364399" />
-        <model path="tf/1.14.0/FP32/resnet_v2_50/resnet_v2_50.xml" test="infer_request_inference" device="GPU" vmsize="1092159" vmpeak="1105997" vmrss="738276" vmhwm="823983" />
-        <model path="tf/1.14.0/FP32/rfcn_resnet101_coco/rfcn_resnet101_coco.xml" test="create_exenetwork" device="CPU" vmsize="838816" vmpeak="1561762" vmrss="116930" vmhwm="752906" />
-        <model path="tf/1.14.0/FP32/rfcn_resnet101_coco/rfcn_resnet101_coco.xml" test="create_exenetwork" device="GPU" vmsize="1674490" vmpeak="2318250" vmrss="1329842" vmhwm="2034986" />
-        <model path="tf/1.14.0/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="CPU" vmsize="755062" vmpeak="880739" vmrss="28415" vmhwm="43480" />
-        <model path="tf/1.14.0/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="GPU" vmsize="609298" vmpeak="704044" vmrss="263868" vmhwm="323488" />
-        <model path="tf/1.14.0/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="CPU" vmsize="825048" vmpeak="897138" vmrss="49108" vmhwm="49108" />
-        <model path="tf/1.14.0/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="GPU" vmsize="675844" vmpeak="747934" vmrss="322753" vmhwm="322753" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_coco/ssd_mobilenet_v1_coco.xml" test="create_exenetwork" device="CPU" vmsize="756804" vmpeak="978252" vmrss="70514" vmhwm="120370" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_coco/ssd_mobilenet_v1_coco.xml" test="create_exenetwork" device="GPU" vmsize="831318" vmpeak="949744" vmrss="485619" vmhwm="524550" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_coco/ssd_mobilenet_v1_coco.xml" test="infer_request_inference" device="CPU" vmsize="925689" vmpeak="997779" vmrss="130244" vmhwm="130244" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_coco/ssd_mobilenet_v1_coco.xml" test="infer_request_inference" device="GPU" vmsize="878099" vmpeak="950188" vmrss="525395" vmhwm="525395" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco/ssd_mobilenet_v1_fpn_coco.xml" test="create_exenetwork" device="CPU" vmsize="759435" vmpeak="1442861" vmrss="34680" vmhwm="509454" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco/ssd_mobilenet_v1_fpn_coco.xml" test="create_exenetwork" device="GPU" vmsize="1012906" vmpeak="1460487" vmrss="667977" vmhwm="1179833" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco/ssd_mobilenet_v1_fpn_coco.xml" test="infer_request_inference" device="CPU" vmsize="1368043" vmpeak="1442861" vmrss="427737" vmhwm="509533" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco/ssd_mobilenet_v1_fpn_coco.xml" test="infer_request_inference" device="GPU" vmsize="1542648" vmpeak="1542648" vmrss="1195304" vmhwm="1195304" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco_602x602/ssd_mobilenet_v1_fpn_coco_602x602.xml" test="create_exenetwork" device="CPU" vmsize="759558" vmpeak="1426185" vmrss="33862" vmhwm="507768" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco_602x602/ssd_mobilenet_v1_fpn_coco_602x602.xml" test="create_exenetwork" device="GPU" vmsize="1010358" vmpeak="1414454" vmrss="665451" vmhwm="1133941" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco_602x602/ssd_mobilenet_v1_fpn_coco_602x602.xml" test="infer_request_inference" device="CPU" vmsize="1350650" vmpeak="1426185" vmrss="421828" vmhwm="509168" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco_602x602/ssd_mobilenet_v1_fpn_coco_602x602.xml" test="infer_request_inference" device="GPU" vmsize="1493681" vmpeak="1565770" vmrss="1145416" vmhwm="1145416" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v2_coco/ssd_mobilenet_v2_coco.xml" test="create_exenetwork" device="CPU" vmsize="761433" vmpeak="985784" vmrss="41514" vmhwm="254610" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v2_coco/ssd_mobilenet_v2_coco.xml" test="create_exenetwork" device="GPU" vmsize="876933" vmpeak="1078919" vmrss="531814" vmhwm="798001" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v2_coco/ssd_mobilenet_v2_coco.xml" test="infer_request_inference" device="CPU" vmsize="1028508" vmpeak="1064698" vmrss="201212" vmhwm="254390" />
-        <model path="tf/1.14.0/FP32/ssd_mobilenet_v2_coco/ssd_mobilenet_v2_coco.xml" test="infer_request_inference" device="GPU" vmsize="1091807" vmpeak="1163896" vmrss="739525" vmhwm="798023" />
-        <model path="tf/1.14.0/FP32/unet2d/unet2d.xml" test="create_exenetwork" device="CPU" vmsize="754067" vmpeak="1169247" vmrss="15686" vmhwm="429523" />
-        <model path="tf/1.14.0/FP32/unet2d/unet2d.xml" test="create_exenetwork" device="GPU" vmsize="682413" vmpeak="1130109" vmrss="337194" vmhwm="848733" />
-        <model path="tf/1.14.0/FP32/unet2d/unet2d.xml" test="infer_request_inference" device="CPU" vmsize="1106463" vmpeak="1178553" vmrss="321428" vmhwm="429871" />
-        <model path="tf/1.14.0/FP32/unet2d/unet2d.xml" test="infer_request_inference" device="GPU" vmsize="1083904" vmpeak="1155994" vmrss="730976" vmhwm="845882" />
-        <model path="tf/1.14.0/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="CPU" vmsize="754010" vmpeak="2548502" vmrss="15452" vmhwm="1807863" />
-        <model path="tf/1.14.0/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="GPU" vmsize="686602" vmpeak="3327385" vmrss="340982" vmhwm="3045398" />
-        <model path="tf/1.14.0/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="CPU" vmsize="2026776" vmpeak="2548502" vmrss="1241011" vmhwm="1808730" />
-        <model path="tf/1.14.0/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="GPU" vmsize="2438568" vmpeak="3312188" vmrss="2084328" vmhwm="3029980" />
-        <model path="tf/1.14.0/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="CPU" vmsize="754168" vmpeak="2617986" vmrss="16073" vmhwm="1877000" />
-        <model path="tf/1.14.0/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="GPU" vmsize="612194" vmpeak="3415310" vmrss="266732" vmhwm="3133363" />
-        <model path="tf/1.14.0/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="CPU" vmsize="2145479" vmpeak="2617885" vmrss="1287272" vmhwm="1877568" />
-        <model path="tf/1.14.0/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="GPU" vmsize="2521367" vmpeak="3415297" vmrss="2167426" vmhwm="3133059" />
-        <model path="tf/1.14.0/FP32/yolo_v2/yolo_v2.xml" test="create_exenetwork" device="CPU" vmsize="754344" vmpeak="1426625" vmrss="17173" vmhwm="684173" />
-        <model path="tf/1.14.0/FP32/yolo_v2/yolo_v2.xml" test="create_exenetwork" device="GPU" vmsize="684424" vmpeak="1460949" vmrss="339600" vmhwm="1180036" />
-        <model path="tf/1.14.0/FP32/yolo_v2/yolo_v2.xml" test="infer_request_inference" device="CPU" vmsize="1282802" vmpeak="1426625" vmrss="493737" vmhwm="684802" />
-        <model path="tf/1.14.0/FP32/yolo_v2/yolo_v2.xml" test="infer_request_inference" device="GPU" vmsize="1331783" vmpeak="1443006" vmrss="978560" vmhwm="1161124" />
-        <model path="tf/1.14.0/FP32/yolo_v2_tiny_voc/yolo_v2_tiny_voc.xml" test="create_exenetwork" device="CPU" vmsize="753724" vmpeak="954421" vmrss="14414" vmhwm="229578" />
-        <model path="tf/1.14.0/FP32/yolo_v2_tiny_voc/yolo_v2_tiny_voc.xml" test="create_exenetwork" device="GPU" vmsize="569179" vmpeak="816648" vmrss="224250" vmhwm="535449" />
-        <model path="tf/1.14.0/FP32/yolo_v2_tiny_voc/yolo_v2_tiny_voc.xml" test="infer_request_inference" device="CPU" vmsize="960810" vmpeak="960810" vmrss="174231" vmhwm="229807" />
-        <model path="tf/1.14.0/FP32/yolo_v2_tiny_voc/yolo_v2_tiny_voc.xml" test="infer_request_inference" device="GPU" vmsize="808627" vmpeak="880717" vmrss="455677" vmhwm="533002" />
-        <model path="tf/1.14.0/FP32/yolo_v2_voc/yolo_v2_voc.xml" test="create_exenetwork" device="CPU" vmsize="754344" vmpeak="1422647" vmrss="17437" vmhwm="680666" />
-        <model path="tf/1.14.0/FP32/yolo_v2_voc/yolo_v2_voc.xml" test="create_exenetwork" device="GPU" vmsize="686316" vmpeak="1436296" vmrss="340586" vmhwm="1154617" />
-        <model path="tf/1.14.0/FP32/yolo_v2_voc/yolo_v2_voc.xml" test="infer_request_inference" device="CPU" vmsize="1279797" vmpeak="1422616" vmrss="490982" vmhwm="680147" />
-        <model path="tf/1.14.0/FP32/yolo_v2_voc/yolo_v2_voc.xml" test="infer_request_inference" device="GPU" vmsize="1330780" vmpeak="1442570" vmrss="978392" vmhwm="1161490" />
-        <model path="tf/1.14.0/FP32/yolo_v3/yolo_v3.xml" test="create_exenetwork" device="CPU" vmsize="756958" vmpeak="1587260" vmrss="31108" vmhwm="836506" />
-        <model path="tf/1.14.0/FP32/yolo_v3/yolo_v3.xml" test="create_exenetwork" device="GPU" vmsize="1163712" vmpeak="1824596" vmrss="819011" vmhwm="1543559" />
-        <model path="tf/1.14.0/FP32/yolo_v3/yolo_v3.xml" test="infer_request_inference" device="CPU" vmsize="1405879" vmpeak="1591766" vmrss="610302" vmhwm="836594" />
-        <model path="tf/1.14.0/FP32/yolo_v3/yolo_v3.xml" test="infer_request_inference" device="GPU" vmsize="1734233" vmpeak="1823470" vmrss="1381925" vmhwm="1542178" />
-        <model path="tf/1.14.0/FP32/yolo_v3_tiny/yolo_v3_tiny.xml" test="create_exenetwork" device="CPU" vmsize="753975" vmpeak="895633" vmrss="15637" vmhwm="140927" />
-        <model path="tf/1.14.0/FP32/yolo_v3_tiny/yolo_v3_tiny.xml" test="create_exenetwork" device="GPU" vmsize="599332" vmpeak="728939" vmrss="254029" vmhwm="412566" />
-        <model path="tf/1.14.0/FP32/yolo_v3_tiny/yolo_v3_tiny.xml" test="infer_request_inference" device="CPU" vmsize="903469" vmpeak="975559" vmrss="116124" vmhwm="141182" />
-        <model path="tf/1.14.0/FP32/yolo_v3_tiny/yolo_v3_tiny.xml" test="infer_request_inference" device="GPU" vmsize="741738" vmpeak="813828" vmrss="389259" vmhwm="413476" />
+        <model path="caffe/FP32/alexnet/alexnet.xml" test="create_exenetwork" device="CPU" vmsize="1321668" vmpeak="1631245" vmrss="657919" vmhwm="967408" />
+        <model path="caffe/FP32/alexnet/alexnet.xml" test="create_exenetwork" device="GPU" vmsize="1563796" vmpeak="2064987" vmrss="1227532" vmhwm="1728485" />
+        <model path="caffe/FP32/alexnet/alexnet.xml" test="infer_request_inference" device="CPU" vmsize="1589073" vmpeak="1631151" vmrss="659287" vmhwm="966721" />
+        <model path="caffe/FP32/alexnet/alexnet.xml" test="infer_request_inference" device="GPU" vmsize="1557202" vmpeak="1973197" vmrss="1079972" vmhwm="1580035" />
+        <model path="caffe/FP32/caffenet/caffenet.xml" test="create_exenetwork" device="CPU" vmsize="1341314" vmpeak="1650890" vmrss="665329" vmhwm="974724" />
+        <model path="caffe/FP32/caffenet/caffenet.xml" test="create_exenetwork" device="GPU" vmsize="1591844" vmpeak="1793074" vmrss="1255238" vmhwm="1456566" />
+        <model path="caffe/FP32/caffenet/caffenet.xml" test="infer_request_inference" device="CPU" vmsize="1441388" vmpeak="1650797" vmrss="682999" vmhwm="973897" />
+        <model path="caffe/FP32/caffenet/caffenet.xml" test="infer_request_inference" device="GPU" vmsize="1605884" vmpeak="1696297" vmrss="1128160" vmhwm="1303270" />
+        <model path="caffe/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="CPU" vmsize="903562" vmpeak="903562" vmrss="180684" vmhwm="180684" />
+        <model path="caffe/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="GPU" vmsize="1301939" vmpeak="1301939" vmrss="964126" vmhwm="964126" />
+        <model path="caffe/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="CPU" vmsize="1170582" vmpeak="1255779" vmrss="189836" vmhwm="189836" />
+        <model path="caffe/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="GPU" vmsize="1057290" vmpeak="1142486" vmrss="582316" vmhwm="582316" />
+        <model path="caffe/FP32/densenet_161/densenet_161.xml" test="create_exenetwork" device="CPU" vmsize="1155512" vmpeak="1257531" vmrss="406551" vmhwm="508289" />
+        <model path="caffe/FP32/densenet_161/densenet_161.xml" test="create_exenetwork" device="GPU" vmsize="1884636" vmpeak="1884636" vmrss="1547655" vmhwm="1547655" />
+        <model path="caffe/FP32/densenet_161/densenet_161.xml" test="infer_request_inference" device="CPU" vmsize="1241500" vmpeak="1326696" vmrss="419666" vmhwm="506740" />
+        <model path="caffe/FP32/densenet_161/densenet_161.xml" test="infer_request_inference" device="GPU" vmsize="1583504" vmpeak="1668700" vmrss="1108941" vmhwm="1108941" />
+        <model path="caffe/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="CPU" vmsize="992170" vmpeak="1004790" vmrss="275704" vmhwm="288189" />
+        <model path="caffe/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="GPU" vmsize="1487241" vmpeak="1487241" vmrss="1150458" vmhwm="1150458" />
+        <model path="caffe/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="CPU" vmsize="1259122" vmpeak="1259122" vmrss="283545" vmhwm="286317" />
+        <model path="caffe/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="GPU" vmsize="1294259" vmpeak="1379456" vmrss="819712" vmhwm="819712" />
+        <model path="caffe/FP32/densenet_201/densenet_201.xml" test="create_exenetwork" device="CPU" vmsize="1135388" vmpeak="1188803" vmrss="366688" vmhwm="384436" />
+        <model path="caffe/FP32/densenet_201/densenet_201.xml" test="create_exenetwork" device="GPU" vmsize="1903132" vmpeak="1903132" vmrss="1341693" vmhwm="1509783" />
+        <model path="caffe/FP32/densenet_201/densenet_201.xml" test="infer_request_inference" device="CPU" vmsize="1221381" vmpeak="1306578" vmrss="376038" vmhwm="384514" />
+        <model path="caffe/FP32/densenet_201/densenet_201.xml" test="infer_request_inference" device="GPU" vmsize="1517360" vmpeak="1602556" vmrss="1041424" vmhwm="1041424" />
+        <model path="caffe/FP32/dilation/dilation.xml" test="create_exenetwork" device="CPU" vmsize="2658385" vmpeak="3374820" vmrss="1479264" vmhwm="2195507" />
+        <model path="caffe/FP32/dilation/dilation.xml" test="create_exenetwork" device="GPU" vmsize="3398751" vmpeak="3980990" vmrss="3009406" vmhwm="3589695" />
+        <model path="caffe/FP32/dilation/dilation.xml" test="infer_request_inference" device="CPU" vmsize="2763358" vmpeak="3374727" vmrss="1996228" vmhwm="2195658" />
+        <model path="caffe/FP32/dilation/dilation.xml" test="infer_request_inference" device="GPU" vmsize="3381653" vmpeak="3900676" vmrss="2904111" vmhwm="3506760" />
+        <model path="caffe/FP32/dpn_92/dpn_92.xml" test="create_exenetwork" device="CPU" vmsize="1254858" vmpeak="1436120" vmrss="461666" vmhwm="642226" />
+        <model path="caffe/FP32/dpn_92/dpn_92.xml" test="create_exenetwork" device="GPU" vmsize="1880288" vmpeak="2024947" vmrss="1544847" vmhwm="1688965" />
+        <model path="caffe/FP32/dpn_92/dpn_92.xml" test="infer_request_inference" device="CPU" vmsize="1529008" vmpeak="1529008" vmrss="505601" vmhwm="640972" />
+        <model path="caffe/FP32/dpn_92/dpn_92.xml" test="infer_request_inference" device="GPU" vmsize="1560561" vmpeak="1620039" vmrss="1084423" vmhwm="1227179" />
+        <model path="caffe/FP32/fcn_alexnet/fcn_alexnet.xml" test="create_exenetwork" device="CPU" vmsize="1467497" vmpeak="1765602" vmrss="637795" vmhwm="935719" />
+        <model path="caffe/FP32/fcn_alexnet/fcn_alexnet.xml" test="create_exenetwork" device="GPU" vmsize="1611261" vmpeak="2008177" vmrss="1219769" vmhwm="1615723" />
+        <model path="caffe/FP32/fcn_alexnet/fcn_alexnet.xml" test="infer_request_inference" device="CPU" vmsize="1771364" vmpeak="1771364" vmrss="805464" vmhwm="935511" />
+        <model path="caffe/FP32/fcn_alexnet/fcn_alexnet.xml" test="infer_request_inference" device="GPU" vmsize="1605936" vmpeak="1895415" vmrss="1127750" vmhwm="1502191" />
+        <model path="caffe/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="CPU" vmsize="1436468" vmpeak="1623923" vmrss="753001" vmhwm="940030" />
+        <model path="caffe/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="GPU" vmsize="2477649" vmpeak="2606604" vmrss="1727107" vmhwm="1917645" />
+        <model path="caffe/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="CPU" vmsize="1704596" vmpeak="1704596" vmrss="763807" vmhwm="939510" />
+        <model path="caffe/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="GPU" vmsize="2069168" vmpeak="2154365" vmrss="1592208" vmhwm="1718236" />
+        <model path="caffe/FP32/inception_v1/inception_v1.xml" test="create_exenetwork" device="CPU" vmsize="755742" vmpeak="920202" vmrss="149593" vmhwm="149593" />
+        <model path="caffe/FP32/inception_v1/inception_v1.xml" test="create_exenetwork" device="GPU" vmsize="941834" vmpeak="941834" vmrss="605690" vmhwm="605690" />
+        <model path="caffe/FP32/inception_v1/inception_v1.xml" test="infer_request_inference" device="CPU" vmsize="1012616" vmpeak="1012616" vmrss="154793" vmhwm="154793" />
+        <model path="caffe/FP32/inception_v1/inception_v1.xml" test="infer_request_inference" device="GPU" vmsize="827018" vmpeak="912215" vmrss="350012" vmhwm="350012" />
+        <model path="caffe/FP32/inception_v2/inception_v2.xml" test="create_exenetwork" device="CPU" vmsize="833872" vmpeak="886454" vmrss="162780" vmhwm="214853" />
+        <model path="caffe/FP32/inception_v2/inception_v2.xml" test="create_exenetwork" device="GPU" vmsize="1017369" vmpeak="1055308" vmrss="681980" vmhwm="719721" />
+        <model path="caffe/FP32/inception_v2/inception_v2.xml" test="infer_request_inference" device="CPU" vmsize="920363" vmpeak="1005560" vmrss="167133" vmhwm="214895" />
+        <model path="caffe/FP32/inception_v2/inception_v2.xml" test="infer_request_inference" device="GPU" vmsize="898206" vmpeak="983403" vmrss="419707" vmhwm="455660" />
+        <model path="caffe/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="CPU" vmsize="980382" vmpeak="1099368" vmrss="295952" vmhwm="414325" />
+        <model path="caffe/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="GPU" vmsize="1322427" vmpeak="1407354" vmrss="987646" vmhwm="1072141" />
+        <model path="caffe/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="CPU" vmsize="1248421" vmpeak="1248421" vmrss="307860" vmhwm="415298" />
+        <model path="caffe/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="GPU" vmsize="1158170" vmpeak="1243366" vmrss="680934" vmhwm="763703" />
+        <model path="caffe/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="CPU" vmsize="1188829" vmpeak="1392934" vmrss="513037" vmhwm="716632" />
+        <model path="caffe/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="GPU" vmsize="1859291" vmpeak="1997377" vmrss="1524088" vmhwm="1661504" />
+        <model path="caffe/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="CPU" vmsize="1456962" vmpeak="1456962" vmrss="521965" vmhwm="715650" />
+        <model path="caffe/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="GPU" vmsize="1605110" vmpeak="1690306" vmrss="1127874" vmhwm="1262539" />
+        <model path="caffe/FP32/lenet/lenet.xml" test="create_exenetwork" device="CPU" vmsize="694122" vmpeak="774706" vmrss="35958" vmhwm="35958" />
+        <model path="caffe/FP32/lenet/lenet.xml" test="create_exenetwork" device="GPU" vmsize="617312" vmpeak="617312" vmrss="281574" vmhwm="281574" />
+        <model path="caffe/FP32/lenet/lenet.xml" test="infer_request_inference" device="CPU" vmsize="961006" vmpeak="1046203" vmrss="35443" vmhwm="35443" />
+        <model path="caffe/FP32/lenet/lenet.xml" test="infer_request_inference" device="GPU" vmsize="610729" vmpeak="695926" vmrss="132324" vmhwm="132324" />
+        <model path="caffe/FP32/mobilenet/mobilenet.xml" test="create_exenetwork" device="CPU" vmsize="720948" vmpeak="795828" vmrss="98992" vmhwm="98992" />
+        <model path="caffe/FP32/mobilenet/mobilenet.xml" test="create_exenetwork" device="GPU" vmsize="770952" vmpeak="770952" vmrss="435333" vmhwm="435333" />
+        <model path="caffe/FP32/mobilenet/mobilenet.xml" test="infer_request_inference" device="CPU" vmsize="987984" vmpeak="1073181" vmrss="103136" vmhwm="103136" />
+        <model path="caffe/FP32/mobilenet/mobilenet.xml" test="infer_request_inference" device="GPU" vmsize="727896" vmpeak="813092" vmrss="252522" vmhwm="252522" />
+        <model path="caffe/FP32/mobilenet_v2/mobilenet_v2.xml" test="create_exenetwork" device="CPU" vmsize="727100" vmpeak="727100" vmrss="92372" vmhwm="92372" />
+        <model path="caffe/FP32/mobilenet_v2/mobilenet_v2.xml" test="create_exenetwork" device="GPU" vmsize="858800" vmpeak="858800" vmrss="523712" vmhwm="523712" />
+        <model path="caffe/FP32/mobilenet_v2/mobilenet_v2.xml" test="infer_request_inference" device="CPU" vmsize="994151" vmpeak="1079348" vmrss="100588" vmhwm="100588" />
+        <model path="caffe/FP32/mobilenet_v2/mobilenet_v2.xml" test="infer_request_inference" device="GPU" vmsize="763750" vmpeak="848946" vmrss="288984" vmhwm="288984" />
+        <model path="caffe/FP32/mtcnn_o/mtcnn_o.xml" test="create_exenetwork" device="CPU" vmsize="694023" vmpeak="774893" vmrss="34673" vmhwm="34673" />
+        <model path="caffe/FP32/mtcnn_o/mtcnn_o.xml" test="create_exenetwork" device="GPU" vmsize="631940" vmpeak="631940" vmrss="288189" vmhwm="288189" />
+        <model path="caffe/FP32/mtcnn_o/mtcnn_o.xml" test="infer_request_inference" device="CPU" vmsize="960580" vmpeak="1045777" vmrss="35604" vmhwm="35604" />
+        <model path="caffe/FP32/mtcnn_o/mtcnn_o.xml" test="infer_request_inference" device="GPU" vmsize="618436" vmpeak="703632" vmrss="140368" vmhwm="140368" />
+        <model path="caffe/FP32/mtcnn_p/mtcnn_p.xml" test="create_exenetwork" device="CPU" vmsize="783447" vmpeak="866314" vmrss="43825" vmhwm="43825" />
+        <model path="caffe/FP32/mtcnn_p/mtcnn_p.xml" test="create_exenetwork" device="GPU" vmsize="728395" vmpeak="756038" vmrss="383780" vmhwm="410545" />
+        <model path="caffe/FP32/mtcnn_p/mtcnn_p.xml" test="infer_request_inference" device="CPU" vmsize="979997" vmpeak="979997" vmrss="128320" vmhwm="128320" />
+        <model path="caffe/FP32/mtcnn_p/mtcnn_p.xml" test="infer_request_inference" device="GPU" vmsize="763287" vmpeak="848484" vmrss="284648" vmhwm="284648" />
+        <model path="caffe/FP32/mtcnn_r/mtcnn_r.xml" test="create_exenetwork" device="CPU" vmsize="691485" vmpeak="691485" vmrss="30253" vmhwm="30253" />
+        <model path="caffe/FP32/mtcnn_r/mtcnn_r.xml" test="create_exenetwork" device="GPU" vmsize="520577" vmpeak="523374" vmrss="126614" vmhwm="129084" />
+        <model path="caffe/FP32/mtcnn_r/mtcnn_r.xml" test="infer_request_inference" device="CPU" vmsize="963367" vmpeak="1048564" vmrss="33337" vmhwm="33337" />
+        <model path="caffe/FP32/mtcnn_r/mtcnn_r.xml" test="infer_request_inference" device="GPU" vmsize="605597" vmpeak="690794" vmrss="128091" vmhwm="129911" />
+        <model path="caffe/FP32/openpose_face/openpose_face.xml" test="create_exenetwork" device="CPU" vmsize="1141790" vmpeak="1336405" vmrss="431813" vmhwm="626236" />
+        <model path="caffe/FP32/openpose_face/openpose_face.xml" test="create_exenetwork" device="GPU" vmsize="1443811" vmpeak="1566063" vmrss="1055756" vmhwm="1177592" />
+        <model path="caffe/FP32/openpose_face/openpose_face.xml" test="infer_request_inference" device="CPU" vmsize="1409517" vmpeak="1409517" vmrss="472004" vmhwm="625461" />
+        <model path="caffe/FP32/openpose_face/openpose_face.xml" test="infer_request_inference" device="GPU" vmsize="1361157" vmpeak="1446354" vmrss="883168" vmhwm="1005030" />
+        <model path="caffe/FP32/openpose_hand/openpose_hand.xml" test="create_exenetwork" device="CPU" vmsize="1125716" vmpeak="1312344" vmrss="413764" vmhwm="600215" />
+        <model path="caffe/FP32/openpose_hand/openpose_hand.xml" test="create_exenetwork" device="GPU" vmsize="1426141" vmpeak="1538960" vmrss="1037488" vmhwm="1149792" />
+        <model path="caffe/FP32/openpose_hand/openpose_hand.xml" test="infer_request_inference" device="CPU" vmsize="1212156" vmpeak="1312438" vmrss="455239" vmhwm="601276" />
+        <model path="caffe/FP32/openpose_hand/openpose_hand.xml" test="infer_request_inference" device="GPU" vmsize="1337679" vmpeak="1365301" vmrss="859944" vmhwm="972233" />
+        <model path="caffe/FP32/openpose_pose_coco/openpose_pose_coco.xml" test="create_exenetwork" device="CPU" vmsize="1299688" vmpeak="1563577" vmrss="586242" vmhwm="849924" />
+        <model path="caffe/FP32/openpose_pose_coco/openpose_pose_coco.xml" test="create_exenetwork" device="GPU" vmsize="1812174" vmpeak="1997912" vmrss="1424103" vmhwm="1609166" />
+        <model path="caffe/FP32/openpose_pose_coco/openpose_pose_coco.xml" test="infer_request_inference" device="CPU" vmsize="1386018" vmpeak="1563577" vmrss="626147" vmhwm="849420" />
+        <model path="caffe/FP32/openpose_pose_coco/openpose_pose_coco.xml" test="infer_request_inference" device="GPU" vmsize="1652414" vmpeak="1755286" vmrss="1174087" vmhwm="1361599" />
+        <model path="caffe/FP32/places205_alexnet/places205_alexnet.xml" test="create_exenetwork" device="CPU" vmsize="1287572" vmpeak="1580612" vmrss="624582" vmhwm="917441" />
+        <model path="caffe/FP32/places205_alexnet/places205_alexnet.xml" test="create_exenetwork" device="GPU" vmsize="1513813" vmpeak="1998531" vmrss="1151737" vmhwm="1636216" />
+        <model path="caffe/FP32/places205_alexnet/places205_alexnet.xml" test="infer_request_inference" device="CPU" vmsize="1464517" vmpeak="1580597" vmrss="626922" vmhwm="916905" />
+        <model path="caffe/FP32/places205_alexnet/places205_alexnet.xml" test="infer_request_inference" device="GPU" vmsize="1498551" vmpeak="1889992" vmrss="1020489" vmhwm="1496653" />
+        <model path="caffe/FP32/places205_googlenet/places205_googlenet.xml" test="create_exenetwork" device="CPU" vmsize="746007" vmpeak="746007" vmrss="136240" vmhwm="136240" />
+        <model path="caffe/FP32/places205_googlenet/places205_googlenet.xml" test="create_exenetwork" device="GPU" vmsize="926957" vmpeak="926957" vmrss="577309" vmhwm="577309" />
+        <model path="caffe/FP32/places205_googlenet/places205_googlenet.xml" test="infer_request_inference" device="CPU" vmsize="1013547" vmpeak="1013547" vmrss="142885" vmhwm="142885" />
+        <model path="caffe/FP32/places205_googlenet/places205_googlenet.xml" test="infer_request_inference" device="GPU" vmsize="813794" vmpeak="898991" vmrss="336570" vmhwm="336570" />
+        <model path="caffe/FP32/resnet_18/resnet_18.xml" test="create_exenetwork" device="CPU" vmsize="824631" vmpeak="897722" vmrss="151590" vmhwm="210714" />
+        <model path="caffe/FP32/resnet_18/resnet_18.xml" test="create_exenetwork" device="GPU" vmsize="838567" vmpeak="891956" vmrss="503739" vmhwm="557273" />
+        <model path="caffe/FP32/resnet_18/resnet_18.xml" test="infer_request_inference" device="CPU" vmsize="910988" vmpeak="996184" vmrss="158886" vmhwm="211936" />
+        <model path="caffe/FP32/resnet_18/resnet_18.xml" test="infer_request_inference" device="GPU" vmsize="818776" vmpeak="903973" vmrss="341322" vmhwm="391955" />
+        <model path="caffe/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="CPU" vmsize="1184934" vmpeak="1406100" vmrss="511170" vmhwm="731827" />
+        <model path="caffe/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="GPU" vmsize="1640386" vmpeak="1850810" vmrss="1305855" vmhwm="1515966" />
+        <model path="caffe/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="CPU" vmsize="1452578" vmpeak="1452578" vmrss="518258" vmhwm="732508" />
+        <model path="caffe/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="GPU" vmsize="1479166" vmpeak="1604392" vmrss="1000901" vmhwm="1210248" />
+        <model path="caffe/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="CPU" vmsize="1360918" vmpeak="1658852" vmrss="684892" vmhwm="982316" />
+        <model path="caffe/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="GPU" vmsize="2023595" vmpeak="2311010" vmrss="1620923" vmhwm="1906216" />
+        <model path="caffe/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="CPU" vmsize="1628577" vmpeak="1713774" vmrss="691672" vmhwm="982930" />
+        <model path="caffe/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="GPU" vmsize="1814176" vmpeak="2016393" vmrss="1336238" vmhwm="1622244" />
+        <model path="caffe/FP32/resnet_v1_269/resnet_v1_269.xml" test="create_exenetwork" device="CPU" vmsize="2119015" vmpeak="2465268" vmrss="1307748" vmhwm="1653490" />
+        <model path="caffe/FP32/resnet_v1_269/resnet_v1_269.xml" test="create_exenetwork" device="GPU" vmsize="3063808" vmpeak="3522360" vmrss="2673543" vmhwm="3130623" />
+        <model path="caffe/FP32/resnet_v1_269/resnet_v1_269.xml" test="infer_request_inference" device="CPU" vmsize="2386618" vmpeak="2465538" vmrss="1321663" vmhwm="1652372" />
+        <model path="caffe/FP32/resnet_v1_269/resnet_v1_269.xml" test="infer_request_inference" device="GPU" vmsize="2799269" vmpeak="3172618" vmrss="2321664" vmhwm="2777736" />
+        <model path="caffe/FP32/resnet_v1_50/resnet_v1_50.xml" test="create_exenetwork" device="CPU" vmsize="974698" vmpeak="1100762" vmrss="304220" vmhwm="429774" />
+        <model path="caffe/FP32/resnet_v1_50/resnet_v1_50.xml" test="create_exenetwork" device="GPU" vmsize="1173671" vmpeak="1286625" vmrss="838682" vmhwm="951636" />
+        <model path="caffe/FP32/resnet_v1_50/resnet_v1_50.xml" test="infer_request_inference" device="CPU" vmsize="1242233" vmpeak="1242233" vmrss="310086" vmhwm="429150" />
+        <model path="caffe/FP32/resnet_v1_50/resnet_v1_50.xml" test="infer_request_inference" device="GPU" vmsize="1090726" vmpeak="1175922" vmrss="613813" vmhwm="726200" />
+        <model path="caffe/FP32/se_bn_inception/se_bn_inception.xml" test="create_exenetwork" device="CPU" vmsize="870022" vmpeak="924336" vmrss="179088" vmhwm="232892" />
+        <model path="caffe/FP32/se_bn_inception/se_bn_inception.xml" test="create_exenetwork" device="GPU" vmsize="1125753" vmpeak="1166344" vmrss="786666" vmhwm="827138" />
+        <model path="caffe/FP32/se_bn_inception/se_bn_inception.xml" test="infer_request_inference" device="CPU" vmsize="1137541" vmpeak="1137541" vmrss="184485" vmhwm="232949" />
+        <model path="caffe/FP32/se_bn_inception/se_bn_inception.xml" test="infer_request_inference" device="GPU" vmsize="955177" vmpeak="1040374" vmrss="477032" vmhwm="519178" />
+        <model path="caffe/FP32/se_resnext_50/se_resnext_50.xml" test="create_exenetwork" device="CPU" vmsize="1074985" vmpeak="1208168" vmrss="344406" vmhwm="477089" />
+        <model path="caffe/FP32/se_resnext_50/se_resnext_50.xml" test="create_exenetwork" device="GPU" vmsize="1383397" vmpeak="1496918" vmrss="980408" vmhwm="1092702" />
+        <model path="caffe/FP32/se_resnext_50/se_resnext_50.xml" test="infer_request_inference" device="CPU" vmsize="1168200" vmpeak="1253397" vmrss="374275" vmhwm="477698" />
+        <model path="caffe/FP32/se_resnext_50/se_resnext_50.xml" test="infer_request_inference" device="GPU" vmsize="1240657" vmpeak="1325854" vmrss="762725" vmhwm="854386" />
+        <model path="caffe/FP32/squeezenet_v1.0/squeezenet_v1.0.xml" test="create_exenetwork" device="CPU" vmsize="713351" vmpeak="787898" vmrss="52858" vmhwm="52858" />
+        <model path="caffe/FP32/squeezenet_v1.0/squeezenet_v1.0.xml" test="create_exenetwork" device="GPU" vmsize="719794" vmpeak="719794" vmrss="384508" vmhwm="384508" />
+        <model path="caffe/FP32/squeezenet_v1.0/squeezenet_v1.0.xml" test="infer_request_inference" device="CPU" vmsize="980522" vmpeak="980522" vmrss="59456" vmhwm="59456" />
+        <model path="caffe/FP32/squeezenet_v1.0/squeezenet_v1.0.xml" test="infer_request_inference" device="GPU" vmsize="686613" vmpeak="771810" vmrss="211426" vmhwm="211426" />
+        <model path="caffe/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="CPU" vmsize="705796" vmpeak="705796" vmrss="52405" vmhwm="52405" />
+        <model path="caffe/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="GPU" vmsize="724984" vmpeak="724984" vmrss="390031" vmhwm="390031" />
+        <model path="caffe/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="CPU" vmsize="791918" vmpeak="877115" vmrss="56269" vmhwm="56269" />
+        <model path="caffe/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="GPU" vmsize="674590" vmpeak="759787" vmrss="199139" vmhwm="199139" />
+        <model path="caffe/FP32/ssd_googlenet/ssd_googlenet.xml" test="create_exenetwork" device="CPU" vmsize="850278" vmpeak="901976" vmrss="168672" vmhwm="218660" />
+        <model path="caffe/FP32/ssd_googlenet/ssd_googlenet.xml" test="create_exenetwork" device="GPU" vmsize="1092353" vmpeak="1123298" vmrss="689566" vmhwm="762699" />
+        <model path="caffe/FP32/ssd_googlenet/ssd_googlenet.xml" test="infer_request_inference" device="CPU" vmsize="1118015" vmpeak="1118015" vmrss="177444" vmhwm="218670" />
+        <model path="caffe/FP32/ssd_googlenet/ssd_googlenet.xml" test="infer_request_inference" device="GPU" vmsize="944564" vmpeak="1029761" vmrss="467672" vmhwm="495326" />
+        <model path="caffe/FP32/ssd_mobilenet/ssd_mobilenet.xml" test="create_exenetwork" device="CPU" vmsize="740069" vmpeak="740069" vmrss="128315" vmhwm="128315" />
+        <model path="caffe/FP32/ssd_mobilenet/ssd_mobilenet.xml" test="create_exenetwork" device="GPU" vmsize="791986" vmpeak="791986" vmrss="456830" vmhwm="456830" />
+        <model path="caffe/FP32/ssd_mobilenet/ssd_mobilenet.xml" test="infer_request_inference" device="CPU" vmsize="1188891" vmpeak="1274088" vmrss="138252" vmhwm="138252" />
+        <model path="caffe/FP32/ssd_mobilenet/ssd_mobilenet.xml" test="infer_request_inference" device="GPU" vmsize="819218" vmpeak="904415" vmrss="342066" vmhwm="342066" />
+        <model path="caffe/FP32/ssd_squeezenet/ssd_squeezenet.xml" test="create_exenetwork" device="CPU" vmsize="740714" vmpeak="803946" vmrss="126521" vmhwm="126521" />
+        <model path="caffe/FP32/ssd_squeezenet/ssd_squeezenet.xml" test="create_exenetwork" device="GPU" vmsize="925225" vmpeak="925225" vmrss="519417" vmhwm="586206" />
+        <model path="caffe/FP32/ssd_squeezenet/ssd_squeezenet.xml" test="infer_request_inference" device="CPU" vmsize="1008446" vmpeak="1093643" vmrss="135714" vmhwm="135714" />
+        <model path="caffe/FP32/ssd_squeezenet/ssd_squeezenet.xml" test="infer_request_inference" device="GPU" vmsize="824470" vmpeak="909667" vmrss="348103" vmhwm="348103" />
+        <model path="caffe/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="create_exenetwork" device="CPU" vmsize="1046843" vmpeak="1178897" vmrss="308848" vmhwm="440377" />
+        <model path="caffe/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="create_exenetwork" device="GPU" vmsize="1151961" vmpeak="1168070" vmrss="815692" vmhwm="831932" />
+        <model path="caffe/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="infer_request_inference" device="CPU" vmsize="1321751" vmpeak="1321751" vmrss="373412" vmhwm="440299" />
+        <model path="caffe/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="infer_request_inference" device="GPU" vmsize="1200820" vmpeak="1286017" vmrss="725717" vmhwm="734500" />
+        <model path="caffe/FP32/ssd_vgg16_512/ssd_vgg16_512.xml" test="create_exenetwork" device="CPU" vmsize="1186697" vmpeak="1322895" vmrss="323164" vmhwm="457116" />
+        <model path="caffe/FP32/ssd_vgg16_512/ssd_vgg16_512.xml" test="create_exenetwork" device="GPU" vmsize="1522606" vmpeak="1522606" vmrss="1120277" vmhwm="1120277" />
+        <model path="caffe/FP32/ssd_vgg16_512/ssd_vgg16_512.xml" test="infer_request_inference" device="CPU" vmsize="1288424" vmpeak="1373621" vmrss="500370" vmhwm="500370" />
+        <model path="caffe/FP32/ssd_vgg16_512/ssd_vgg16_512.xml" test="infer_request_inference" device="GPU" vmsize="1449448" vmpeak="1534644" vmrss="973845" vmhwm="973845" />
+        <model path="caffe/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="CPU" vmsize="2133809" vmpeak="2836407" vmrss="1438444" vmhwm="2140850" />
+        <model path="caffe/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="GPU" vmsize="2707359" vmpeak="3834188" vmrss="2314816" vmhwm="3441464" />
+        <model path="caffe/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="CPU" vmsize="2401339" vmpeak="3101945" vmrss="1469098" vmhwm="2139987" />
+        <model path="caffe/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="GPU" vmsize="2792654" vmpeak="3834136" vmrss="2314577" vmhwm="3440408" />
+        <model path="caffe/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="CPU" vmsize="2188804" vmpeak="2918375" vmrss="1492623" vmhwm="2222001" />
+        <model path="caffe/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="GPU" vmsize="2898989" vmpeak="4025117" vmrss="2481081" vmhwm="3626459" />
+        <model path="caffe/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="CPU" vmsize="2275379" vmpeak="2918474" vmrss="1523834" vmhwm="2221715" />
+        <model path="caffe/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="GPU" vmsize="2876250" vmpeak="3944834" vmrss="2398682" vmhwm="3551002" />
+        <model path="caffe/FP32/vnect/vnect.xml" test="create_exenetwork" device="CPU" vmsize="873480" vmpeak="943924" vmrss="196320" vmhwm="266656" />
+        <model path="caffe/FP32/vnect/vnect.xml" test="create_exenetwork" device="GPU" vmsize="1067367" vmpeak="1101604" vmrss="730048" vmhwm="764051" />
+        <model path="caffe/FP32/vnect/vnect.xml" test="infer_request_inference" device="CPU" vmsize="961745" vmpeak="1046942" vmrss="212149" vmhwm="266546" />
+        <model path="caffe/FP32/vnect/vnect.xml" test="infer_request_inference" device="GPU" vmsize="976471" vmpeak="1061668" vmrss="499335" vmhwm="528736" />
+        <model path="caffe/FP32/wrn_50_2/wrn_50_2.xml" test="create_exenetwork" device="CPU" vmsize="1428580" vmpeak="1776923" vmrss="741670" vmhwm="1089587" />
+        <model path="caffe/FP32/wrn_50_2/wrn_50_2.xml" test="create_exenetwork" device="GPU" vmsize="1842729" vmpeak="2177494" vmrss="1452183" vmhwm="1785934" />
+        <model path="caffe/FP32/wrn_50_2/wrn_50_2.xml" test="infer_request_inference" device="CPU" vmsize="1514890" vmpeak="1776834" vmrss="756730" vmhwm="1088464" />
+        <model path="caffe/FP32/wrn_50_2/wrn_50_2.xml" test="infer_request_inference" device="GPU" vmsize="1753476" vmpeak="2003045" vmrss="1275523" vmhwm="1608807" />
+        <model path="caffe/FP32/yolo_v1_full/yolo_v1_full.xml" test="create_exenetwork" device="CPU" vmsize="3478618" vmpeak="4858219" vmrss="2796794" vmhwm="4176062" />
+        <model path="caffe/FP32/yolo_v1_full/yolo_v1_full.xml" test="create_exenetwork" device="GPU" vmsize="4842442" vmpeak="6987687" vmrss="4397738" vmhwm="6544928" />
+        <model path="caffe/FP32/yolo_v1_full/yolo_v1_full.xml" test="infer_request_inference" device="CPU" vmsize="3567340" vmpeak="4858193" vmrss="2814666" vmhwm="4176177" />
+        <model path="caffe/FP32/yolo_v1_full/yolo_v1_full.xml" test="infer_request_inference" device="GPU" vmsize="4814217" vmpeak="6932785" vmrss="4335193" vmhwm="6538194" />
+        <model path="caffe/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="create_exenetwork" device="CPU" vmsize="998956" vmpeak="1136428" vmrss="307600" vmhwm="444735" />
+        <model path="caffe/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="create_exenetwork" device="GPU" vmsize="1052719" vmpeak="1232316" vmrss="717854" vmhwm="897540" />
+        <model path="caffe/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="infer_request_inference" device="CPU" vmsize="1258004" vmpeak="1258004" vmrss="326175" vmhwm="443996" />
+        <model path="caffe/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="infer_request_inference" device="GPU" vmsize="1059619" vmpeak="1138789" vmrss="582155" vmhwm="745664" />
+        <model path="caffe/FP32/yolo_v2/yolo_v2.xml" test="create_exenetwork" device="CPU" vmsize="1249211" vmpeak="1506304" vmrss="550752" vmhwm="807762" />
+        <model path="caffe/FP32/yolo_v2/yolo_v2.xml" test="create_exenetwork" device="GPU" vmsize="1492743" vmpeak="1714642" vmrss="1095354" vmhwm="1316988" />
+        <model path="caffe/FP32/yolo_v2/yolo_v2.xml" test="infer_request_inference" device="CPU" vmsize="1427483" vmpeak="1512680" vmrss="582514" vmhwm="806858" />
+        <model path="caffe/FP32/yolo_v2/yolo_v2.xml" test="infer_request_inference" device="GPU" vmsize="1456343" vmpeak="1595287" vmrss="978369" vmhwm="1201579" />
+        <model path="caffe/FP32/yolo_v2_tiny/yolo_v2_tiny.xml" test="create_exenetwork" device="CPU" vmsize="871930" vmpeak="952359" vmrss="193388" vmhwm="273634" />
+        <model path="caffe/FP32/yolo_v2_tiny/yolo_v2_tiny.xml" test="create_exenetwork" device="GPU" vmsize="878768" vmpeak="973180" vmrss="533348" vmhwm="627848" />
+        <model path="caffe/FP32/yolo_v2_tiny/yolo_v2_tiny.xml" test="infer_request_inference" device="CPU" vmsize="959909" vmpeak="1045106" vmrss="208156" vmhwm="273530" />
+        <model path="caffe/FP32/yolo_v2_tiny/yolo_v2_tiny.xml" test="infer_request_inference" device="GPU" vmsize="883818" vmpeak="969014" vmrss="406442" vmhwm="476595" />
+        <model path="caffe/FP32/yolo_v3/yolo_v3.xml" test="create_exenetwork" device="CPU" vmsize="1388405" vmpeak="1700311" vmrss="680352" vmhwm="991998" />
+        <model path="caffe/FP32/yolo_v3/yolo_v3.xml" test="create_exenetwork" device="GPU" vmsize="1970503" vmpeak="2164422" vmrss="1583935" vmhwm="1777209" />
+        <model path="caffe/FP32/yolo_v3/yolo_v3.xml" test="infer_request_inference" device="CPU" vmsize="1661649" vmpeak="1746846" vmrss="723148" vmhwm="991354" />
+        <model path="caffe/FP32/yolo_v3/yolo_v3.xml" test="infer_request_inference" device="GPU" vmsize="1812694" vmpeak="1917910" vmrss="1335609" vmhwm="1524931" />
+        <model path="mxnet/FP32/caffenet/caffenet.xml" test="create_exenetwork" device="CPU" vmsize="1321320" vmpeak="1630896" vmrss="658730" vmhwm="968125" />
+        <model path="mxnet/FP32/caffenet/caffenet.xml" test="create_exenetwork" device="GPU" vmsize="1563660" vmpeak="2064852" vmrss="1226097" vmhwm="1727050" />
+        <model path="mxnet/FP32/caffenet/caffenet.xml" test="infer_request_inference" device="CPU" vmsize="1679251" vmpeak="1849645" vmrss="659406" vmhwm="966815" />
+        <model path="mxnet/FP32/caffenet/caffenet.xml" test="infer_request_inference" device="GPU" vmsize="1557181" vmpeak="1973176" vmrss="1079998" vmhwm="1579983" />
+        <model path="mxnet/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="CPU" vmsize="908549" vmpeak="908549" vmrss="180804" vmhwm="180804" />
+        <model path="mxnet/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="GPU" vmsize="1315620" vmpeak="1315620" vmrss="978213" vmhwm="978213" />
+        <model path="mxnet/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="CPU" vmsize="1170239" vmpeak="1255436" vmrss="189326" vmhwm="189326" />
+        <model path="mxnet/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="GPU" vmsize="1068553" vmpeak="1153750" vmrss="590298" vmhwm="590298" />
+        <model path="mxnet/FP32/densenet_161/densenet_161.xml" test="create_exenetwork" device="CPU" vmsize="1160718" vmpeak="1262736" vmrss="405376" vmhwm="507317" />
+        <model path="mxnet/FP32/densenet_161/densenet_161.xml" test="create_exenetwork" device="GPU" vmsize="1898410" vmpeak="1898410" vmrss="1560884" vmhwm="1560884" />
+        <model path="mxnet/FP32/densenet_161/densenet_161.xml" test="infer_request_inference" device="CPU" vmsize="1240917" vmpeak="1326114" vmrss="419094" vmhwm="507306" />
+        <model path="mxnet/FP32/densenet_161/densenet_161.xml" test="infer_request_inference" device="GPU" vmsize="1594502" vmpeak="1679698" vmrss="1116954" vmhwm="1116954" />
+        <model path="mxnet/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="CPU" vmsize="991671" vmpeak="1004291" vmrss="275397" vmhwm="287918" />
+        <model path="mxnet/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="GPU" vmsize="1616690" vmpeak="1618188" vmrss="1278908" vmhwm="1280494" />
+        <model path="mxnet/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="CPU" vmsize="1258623" vmpeak="1258623" vmrss="284320" vmhwm="287606" />
+        <model path="mxnet/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="GPU" vmsize="1303156" vmpeak="1388353" vmrss="824928" vmhwm="824928" />
+        <model path="mxnet/FP32/densenet_201/densenet_201.xml" test="create_exenetwork" device="CPU" vmsize="1134889" vmpeak="1188636" vmrss="367130" vmhwm="384935" />
+        <model path="mxnet/FP32/densenet_201/densenet_201.xml" test="create_exenetwork" device="GPU" vmsize="1865047" vmpeak="1865047" vmrss="1527947" vmhwm="1527947" />
+        <model path="mxnet/FP32/densenet_201/densenet_201.xml" test="infer_request_inference" device="CPU" vmsize="1220882" vmpeak="1306078" vmrss="376006" vmhwm="384217" />
+        <model path="mxnet/FP32/densenet_201/densenet_201.xml" test="infer_request_inference" device="GPU" vmsize="1551019" vmpeak="1636216" vmrss="1071928" vmhwm="1071928" />
+        <model path="mxnet/FP32/dpn_92/dpn_92.xml" test="create_exenetwork" device="CPU" vmsize="1255898" vmpeak="1437160" vmrss="461385" vmhwm="642049" />
+        <model path="mxnet/FP32/dpn_92/dpn_92.xml" test="create_exenetwork" device="GPU" vmsize="1800479" vmpeak="1945580" vmrss="1462780" vmhwm="1607470" />
+        <model path="mxnet/FP32/dpn_92/dpn_92.xml" test="infer_request_inference" device="CPU" vmsize="1530053" vmpeak="1530053" vmrss="505570" vmhwm="641368" />
+        <model path="mxnet/FP32/dpn_92/dpn_92.xml" test="infer_request_inference" device="GPU" vmsize="1561955" vmpeak="1619753" vmrss="1084324" vmhwm="1225473" />
+        <model path="mxnet/FP32/fcn8s_vgg16/fcn8s_vgg16.xml" test="create_exenetwork" device="CPU" vmsize="2833797" vmpeak="3516609" vmrss="1409798" vmhwm="2092417" />
+        <model path="mxnet/FP32/fcn8s_vgg16/fcn8s_vgg16.xml" test="create_exenetwork" device="GPU" vmsize="4293634" vmpeak="4293634" vmrss="3955525" vmhwm="3955525" />
+        <model path="mxnet/FP32/fcn8s_vgg16/fcn8s_vgg16.xml" test="infer_request_inference" device="CPU" vmsize="3022032" vmpeak="3516609" vmrss="2255333" vmhwm="2255333" />
+        <model path="mxnet/FP32/fcn8s_vgg16/fcn8s_vgg16.xml" test="infer_request_inference" device="GPU" vmsize="4277993" vmpeak="4363190" vmrss="3799333" vmhwm="3799333" />
+        <model path="mxnet/FP32/full_imagenet_network/full_imagenet_network.xml" test="create_exenetwork" device="CPU" vmsize="1066384" vmpeak="1233736" vmrss="390972" vmhwm="557528" />
+        <model path="mxnet/FP32/full_imagenet_network/full_imagenet_network.xml" test="create_exenetwork" device="GPU" vmsize="1358442" vmpeak="1615062" vmrss="1020947" vmhwm="1273121" />
+        <model path="mxnet/FP32/full_imagenet_network/full_imagenet_network.xml" test="infer_request_inference" device="CPU" vmsize="1243392" vmpeak="1328589" vmrss="398580" vmhwm="558469" />
+        <model path="mxnet/FP32/full_imagenet_network/full_imagenet_network.xml" test="infer_request_inference" device="GPU" vmsize="1256070" vmpeak="1398212" vmrss="778549" vmhwm="1001192" />
+        <model path="mxnet/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="CPU" vmsize="1437560" vmpeak="1625010" vmrss="754254" vmhwm="941142" />
+        <model path="mxnet/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="GPU" vmsize="2281713" vmpeak="2410668" vmrss="1943780" vmhwm="2072428" />
+        <model path="mxnet/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="CPU" vmsize="1524473" vmpeak="1625005" vmrss="763001" vmhwm="940264" />
+        <model path="mxnet/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="GPU" vmsize="2070671" vmpeak="2155868" vmrss="1593108" vmhwm="1719125" />
+        <model path="mxnet/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="CPU" vmsize="835629" vmpeak="889226" vmrss="164216" vmhwm="217245" />
+        <model path="mxnet/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="GPU" vmsize="983507" vmpeak="1024665" vmrss="645985" vmhwm="686930" />
+        <model path="mxnet/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="CPU" vmsize="927451" vmpeak="1012648" vmrss="168360" vmhwm="216569" />
+        <model path="mxnet/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="GPU" vmsize="900712" vmpeak="985909" vmrss="423519" vmhwm="463533" />
+        <model path="mxnet/FP32/inception_v3_no_batchnorm/inception_v3_no_batchnorm.xml" test="create_exenetwork" device="CPU" vmsize="980636" vmpeak="1099706" vmrss="296680" vmhwm="415194" />
+        <model path="mxnet/FP32/inception_v3_no_batchnorm/inception_v3_no_batchnorm.xml" test="create_exenetwork" device="GPU" vmsize="1326213" vmpeak="1409371" vmrss="988488" vmhwm="1071366" />
+        <model path="mxnet/FP32/inception_v3_no_batchnorm/inception_v3_no_batchnorm.xml" test="infer_request_inference" device="CPU" vmsize="1248691" vmpeak="1248691" vmrss="306857" vmhwm="414752" />
+        <model path="mxnet/FP32/inception_v3_no_batchnorm/inception_v3_no_batchnorm.xml" test="infer_request_inference" device="GPU" vmsize="1163032" vmpeak="1248228" vmrss="685843" vmhwm="765507" />
+        <model path="mxnet/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="CPU" vmsize="1189531" vmpeak="1393636" vmrss="513661" vmhwm="717204" />
+        <model path="mxnet/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="GPU" vmsize="1866176" vmpeak="2002847" vmrss="1528664" vmhwm="1664577" />
+        <model path="mxnet/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="CPU" vmsize="1457669" vmpeak="1457669" vmrss="523811" vmhwm="715837" />
+        <model path="mxnet/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="GPU" vmsize="1606243" vmpeak="1691440" vmrss="1129185" vmhwm="1262534" />
+        <model path="mxnet/FP32/location_net/location_net.xml" test="create_exenetwork" device="CPU" vmsize="1521920" vmpeak="1894167" vmrss="814210" vmhwm="1185704" />
+        <model path="mxnet/FP32/location_net/location_net.xml" test="create_exenetwork" device="GPU" vmsize="1961772" vmpeak="2317998" vmrss="1623268" vmhwm="1979062" />
+        <model path="mxnet/FP32/location_net/location_net.xml" test="infer_request_inference" device="CPU" vmsize="1789325" vmpeak="1894157" vmrss="828328" vmhwm="1185480" />
+        <model path="mxnet/FP32/location_net/location_net.xml" test="infer_request_inference" device="GPU" vmsize="1951877" vmpeak="2240295" vmrss="1479337" vmhwm="1843041" />
+        <model path="mxnet/FP32/lresnet100e/lresnet100e.xml" test="create_exenetwork" device="CPU" vmsize="1427384" vmpeak="1755920" vmrss="719097" vmhwm="1047295" />
+        <model path="mxnet/FP32/lresnet100e/lresnet100e.xml" test="create_exenetwork" device="GPU" vmsize="2059070" vmpeak="2371101" vmrss="1721616" vmhwm="2033194" />
+        <model path="mxnet/FP32/lresnet100e/lresnet100e.xml" test="infer_request_inference" device="CPU" vmsize="1694035" vmpeak="1779232" vmrss="732596" vmhwm="1046208" />
+        <model path="mxnet/FP32/lresnet100e/lresnet100e.xml" test="infer_request_inference" device="GPU" vmsize="1863825" vmpeak="2084664" vmrss="1386002" vmhwm="1691248" />
+        <model path="mxnet/FP32/mobilenet/mobilenet.xml" test="create_exenetwork" device="CPU" vmsize="720959" vmpeak="795839" vmrss="98898" vmhwm="98898" />
+        <model path="mxnet/FP32/mobilenet/mobilenet.xml" test="create_exenetwork" device="GPU" vmsize="749106" vmpeak="749106" vmrss="411049" vmhwm="411049" />
+        <model path="mxnet/FP32/mobilenet/mobilenet.xml" test="infer_request_inference" device="CPU" vmsize="806941" vmpeak="806941" vmrss="104702" vmhwm="104702" />
+        <model path="mxnet/FP32/mobilenet/mobilenet.xml" test="infer_request_inference" device="GPU" vmsize="727818" vmpeak="813014" vmrss="252787" vmhwm="252787" />
+        <model path="mxnet/FP32/mobilenet_v2/mobilenet_v2.xml" test="create_exenetwork" device="CPU" vmsize="727116" vmpeak="793010" vmrss="92508" vmhwm="92508" />
+        <model path="mxnet/FP32/mobilenet_v2/mobilenet_v2.xml" test="create_exenetwork" device="GPU" vmsize="817554" vmpeak="817554" vmrss="479762" vmhwm="479762" />
+        <model path="mxnet/FP32/mobilenet_v2/mobilenet_v2.xml" test="infer_request_inference" device="CPU" vmsize="813108" vmpeak="898305" vmrss="99481" vmhwm="99481" />
+        <model path="mxnet/FP32/mobilenet_v2/mobilenet_v2.xml" test="infer_request_inference" device="GPU" vmsize="765070" vmpeak="850267" vmrss="290040" vmhwm="290040" />
+        <model path="mxnet/FP32/mtcnn_o/mtcnn_o.xml" test="create_exenetwork" device="CPU" vmsize="694023" vmpeak="694023" vmrss="34377" vmhwm="34377" />
+        <model path="mxnet/FP32/mtcnn_o/mtcnn_o.xml" test="create_exenetwork" device="GPU" vmsize="631919" vmpeak="631919" vmrss="294070" vmhwm="294070" />
+        <model path="mxnet/FP32/mtcnn_o/mtcnn_o.xml" test="infer_request_inference" device="CPU" vmsize="779532" vmpeak="864728" vmrss="36524" vmhwm="36524" />
+        <model path="mxnet/FP32/mtcnn_o/mtcnn_o.xml" test="infer_request_inference" device="GPU" vmsize="618586" vmpeak="703783" vmrss="140582" vmhwm="140582" />
+        <model path="mxnet/FP32/mtcnn_p/mtcnn_p.xml" test="create_exenetwork" device="CPU" vmsize="783447" vmpeak="783447" vmrss="42936" vmhwm="42936" />
+        <model path="mxnet/FP32/mtcnn_p/mtcnn_p.xml" test="create_exenetwork" device="GPU" vmsize="724302" vmpeak="724302" vmrss="386261" vmhwm="386339" />
+        <model path="mxnet/FP32/mtcnn_p/mtcnn_p.xml" test="infer_request_inference" device="CPU" vmsize="1070524" vmpeak="1155720" vmrss="129376" vmhwm="129376" />
+        <model path="mxnet/FP32/mtcnn_p/mtcnn_p.xml" test="infer_request_inference" device="GPU" vmsize="762933" vmpeak="848130" vmrss="284216" vmhwm="284216" />
+        <model path="mxnet/FP32/mtcnn_r/mtcnn_r.xml" test="create_exenetwork" device="CPU" vmsize="691485" vmpeak="691485" vmrss="30700" vmhwm="30700" />
+        <model path="mxnet/FP32/mtcnn_r/mtcnn_r.xml" test="create_exenetwork" device="GPU" vmsize="588270" vmpeak="610240" vmrss="250692" vmhwm="269453" />
+        <model path="mxnet/FP32/mtcnn_r/mtcnn_r.xml" test="infer_request_inference" device="CPU" vmsize="958042" vmpeak="958042" vmrss="30908" vmhwm="30908" />
+        <model path="mxnet/FP32/mtcnn_r/mtcnn_r.xml" test="infer_request_inference" device="GPU" vmsize="605176" vmpeak="690372" vmrss="127602" vmhwm="129365" />
+        <model path="mxnet/FP32/nin/nin.xml" test="create_exenetwork" device="CPU" vmsize="732747" vmpeak="732747" vmrss="146874" vmhwm="146874" />
+        <model path="mxnet/FP32/nin/nin.xml" test="create_exenetwork" device="GPU" vmsize="778096" vmpeak="778096" vmrss="439654" vmhwm="439654" />
+        <model path="mxnet/FP32/nin/nin.xml" test="infer_request_inference" device="CPU" vmsize="818864" vmpeak="904061" vmrss="148220" vmhwm="148220" />
+        <model path="mxnet/FP32/nin/nin.xml" test="infer_request_inference" device="GPU" vmsize="781279" vmpeak="866476" vmrss="323528" vmhwm="323528" />
+        <model path="mxnet/FP32/nst_vgg19/nst_vgg19.xml" test="create_exenetwork" device="CPU" vmsize="739559" vmpeak="739559" vmrss="67152" vmhwm="67152" />
+        <model path="mxnet/FP32/nst_vgg19/nst_vgg19.xml" test="create_exenetwork" device="GPU" vmsize="769938" vmpeak="769938" vmrss="431922" vmhwm="431922" />
+        <model path="mxnet/FP32/nst_vgg19/nst_vgg19.xml" test="infer_request_inference" device="CPU" vmsize="1007323" vmpeak="1007323" vmrss="99127" vmhwm="99127" />
+        <model path="mxnet/FP32/nst_vgg19/nst_vgg19.xml" test="infer_request_inference" device="GPU" vmsize="760047" vmpeak="845244" vmrss="281866" vmhwm="281866" />
+        <model path="mxnet/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="CPU" vmsize="1219296" vmpeak="1440462" vmrss="513271" vmhwm="733850" />
+        <model path="mxnet/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="GPU" vmsize="1693062" vmpeak="1898192" vmrss="1355270" vmhwm="1559838" />
+        <model path="mxnet/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="CPU" vmsize="1305881" vmpeak="1440556" vmrss="527399" vmhwm="732924" />
+        <model path="mxnet/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="GPU" vmsize="1500881" vmpeak="1620819" vmrss="1022845" vmhwm="1226721" />
+        <model path="mxnet/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="CPU" vmsize="1406802" vmpeak="1704736" vmrss="687445" vmhwm="984760" />
+        <model path="mxnet/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="GPU" vmsize="2147516" vmpeak="2429642" vmrss="1810073" vmhwm="2091382" />
+        <model path="mxnet/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="CPU" vmsize="1674363" vmpeak="1759560" vmrss="702972" vmhwm="984744" />
+        <model path="mxnet/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="GPU" vmsize="1849614" vmpeak="2046543" vmrss="1371458" vmhwm="1652222" />
+        <model path="mxnet/FP32/resnet_v2_101/resnet_v2_101.xml" test="create_exenetwork" device="CPU" vmsize="1218568" vmpeak="1439734" vmrss="513505" vmhwm="734136" />
+        <model path="mxnet/FP32/resnet_v2_101/resnet_v2_101.xml" test="create_exenetwork" device="GPU" vmsize="1688476" vmpeak="1897693" vmrss="1350502" vmhwm="1559168" />
+        <model path="mxnet/FP32/resnet_v2_101/resnet_v2_101.xml" test="infer_request_inference" device="CPU" vmsize="1305106" vmpeak="1439828" vmrss="526188" vmhwm="732721" />
+        <model path="mxnet/FP32/resnet_v2_101/resnet_v2_101.xml" test="infer_request_inference" device="GPU" vmsize="1498400" vmpeak="1619649" vmrss="1021170" vmhwm="1226201" />
+        <model path="mxnet/FP32/resnet_v2_152/resnet_v2_152.xml" test="create_exenetwork" device="CPU" vmsize="1406007" vmpeak="1703941" vmrss="687798" vmhwm="985082" />
+        <model path="mxnet/FP32/resnet_v2_152/resnet_v2_152.xml" test="create_exenetwork" device="GPU" vmsize="2132431" vmpeak="2419976" vmrss="1795331" vmhwm="2082298" />
+        <model path="mxnet/FP32/resnet_v2_152/resnet_v2_152.xml" test="infer_request_inference" device="CPU" vmsize="1673562" vmpeak="1758759" vmrss="702202" vmhwm="984557" />
+        <model path="mxnet/FP32/resnet_v2_152/resnet_v2_152.xml" test="infer_request_inference" device="GPU" vmsize="1852832" vmpeak="2055175" vmrss="1375025" vmhwm="1661046" />
+        <model path="mxnet/FP32/resnext_101/resnext_101.xml" test="create_exenetwork" device="CPU" vmsize="1214486" vmpeak="1422704" vmrss="531008" vmhwm="738576" />
+        <model path="mxnet/FP32/resnext_101/resnext_101.xml" test="create_exenetwork" device="GPU" vmsize="1653386" vmpeak="1850721" vmrss="1316047" vmhwm="1513090" />
+        <model path="mxnet/FP32/resnext_101/resnext_101.xml" test="infer_request_inference" device="CPU" vmsize="1307545" vmpeak="1422720" vmrss="553290" vmhwm="739018" />
+        <model path="mxnet/FP32/resnext_101/resnext_101.xml" test="infer_request_inference" device="GPU" vmsize="1505826" vmpeak="1597455" vmrss="1028154" vmhwm="1203888" />
+        <model path="mxnet/FP32/resnext_101_64x4d/resnext_101_64x4d.xml" test="create_exenetwork" device="CPU" vmsize="1639840" vmpeak="2058960" vmrss="933025" vmhwm="1351495" />
+        <model path="mxnet/FP32/resnext_101_64x4d/resnext_101_64x4d.xml" test="create_exenetwork" device="GPU" vmsize="2290340" vmpeak="2674006" vmrss="1952048" vmhwm="2335455" />
+        <model path="mxnet/FP32/resnext_101_64x4d/resnext_101_64x4d.xml" test="infer_request_inference" device="CPU" vmsize="1914021" vmpeak="2149482" vmrss="959363" vmhwm="1351006" />
+        <model path="mxnet/FP32/resnext_101_64x4d/resnext_101_64x4d.xml" test="infer_request_inference" device="GPU" vmsize="2119436" vmpeak="2416320" vmrss="1662554" vmhwm="2022462" />
+        <model path="mxnet/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="CPU" vmsize="705806" vmpeak="780353" vmrss="52806" vmhwm="52806" />
+        <model path="mxnet/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="GPU" vmsize="700835" vmpeak="700835" vmrss="362949" vmhwm="362949" />
+        <model path="mxnet/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="CPU" vmsize="791934" vmpeak="791934" vmrss="56794" vmhwm="56794" />
+        <model path="mxnet/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="GPU" vmsize="674611" vmpeak="759808" vmrss="198120" vmhwm="198120" />
+        <model path="mxnet/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="create_exenetwork" device="CPU" vmsize="1046858" vmpeak="1178912" vmrss="308542" vmhwm="439483" />
+        <model path="mxnet/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="create_exenetwork" device="GPU" vmsize="1226248" vmpeak="1247022" vmrss="889018" vmhwm="909454" />
+        <model path="mxnet/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="infer_request_inference" device="CPU" vmsize="1140729" vmpeak="1225926" vmrss="372574" vmhwm="439826" />
+        <model path="mxnet/FP32/ssd_vgg16_300/ssd_vgg16_300.xml" test="infer_request_inference" device="GPU" vmsize="1199894" vmpeak="1285091" vmrss="724178" vmhwm="734505" />
+        <model path="mxnet/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="CPU" vmsize="2134158" vmpeak="2836756" vmrss="1438309" vmhwm="2140715" />
+        <model path="mxnet/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="GPU" vmsize="2781932" vmpeak="3912818" vmrss="2443178" vmhwm="3574105" />
+        <model path="mxnet/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="CPU" vmsize="2220634" vmpeak="2836865" vmrss="1468797" vmhwm="2139722" />
+        <model path="mxnet/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="GPU" vmsize="2790174" vmpeak="3834277" vmrss="2311826" vmhwm="3439888" />
+        <model path="mxnet/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="CPU" vmsize="2189153" vmpeak="2918723" vmrss="1491048" vmhwm="2220868" />
+        <model path="mxnet/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="GPU" vmsize="2869105" vmpeak="4001228" vmrss="2531100" vmhwm="3662869" />
+        <model path="mxnet/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="CPU" vmsize="2366254" vmpeak="2918817" vmrss="1523605" vmhwm="2221388" />
+        <model path="mxnet/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="GPU" vmsize="2877716" vmpeak="3944751" vmrss="2400091" vmhwm="3551449" />
+        <model path="mxnet/FP32/yolo_v1_full/yolo_v1_full.xml" test="create_exenetwork" device="CPU" vmsize="3569482" vmpeak="4949084" vmrss="2797106" vmhwm="4176364" />
+        <model path="mxnet/FP32/yolo_v1_full/yolo_v1_full.xml" test="create_exenetwork" device="GPU" vmsize="4819713" vmpeak="6984764" vmrss="4481042" vmhwm="6645126" />
+        <model path="mxnet/FP32/yolo_v1_full/yolo_v1_full.xml" test="infer_request_inference" device="CPU" vmsize="3929790" vmpeak="4858536" vmrss="2814931" vmhwm="4176198" />
+        <model path="mxnet/FP32/yolo_v1_full/yolo_v1_full.xml" test="infer_request_inference" device="GPU" vmsize="4816962" vmpeak="6932770" vmrss="4337715" vmhwm="6538006" />
+        <model path="mxnet/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="create_exenetwork" device="CPU" vmsize="1172662" vmpeak="1401509" vmrss="491966" vmhwm="720564" />
+        <model path="mxnet/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="create_exenetwork" device="GPU" vmsize="1345822" vmpeak="1585391" vmrss="1008384" vmhwm="1247916" />
+        <model path="mxnet/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="infer_request_inference" device="CPU" vmsize="1442381" vmpeak="1442381" vmrss="510697" vmhwm="720267" />
+        <model path="mxnet/FP32/yolo_v1_tiny/yolo_v1_tiny.xml" test="infer_request_inference" device="GPU" vmsize="1348219" vmpeak="1513917" vmrss="870485" vmhwm="1120215" />
+        <model path="onnx/FP32/ssd_resnet34/ssd_resnet34.xml" test="create_exenetwork" device="CPU" vmsize="1106159" vmpeak="1204460" vmrss="268408" vmhwm="366470" />
+        <model path="onnx/FP32/ssd_resnet34/ssd_resnet34.xml" test="create_exenetwork" device="GPU" vmsize="1568190" vmpeak="1568190" vmrss="1230538" vmhwm="1230538" />
+        <model path="onnx/FP32/ssd_resnet34/ssd_resnet34.xml" test="infer_request_inference" device="CPU" vmsize="1395617" vmpeak="1395617" vmrss="399692" vmhwm="399692" />
+        <model path="onnx/FP32/ssd_resnet34/ssd_resnet34.xml" test="infer_request_inference" device="GPU" vmsize="1513621" vmpeak="1598818" vmrss="1035897" vmhwm="1035897" />
+        <model path="onnx/FP32/ssd_resnet34_new/ssd_resnet34_new.xml" test="create_exenetwork" device="CPU" vmsize="1108187" vmpeak="1206488" vmrss="271648" vmhwm="369590" />
+        <model path="onnx/FP32/ssd_resnet34_new/ssd_resnet34_new.xml" test="create_exenetwork" device="GPU" vmsize="2870816" vmpeak="2870816" vmrss="1290972" vmhwm="1290972" />
+        <model path="onnx/FP32/ssd_resnet34_new/ssd_resnet34_new.xml" test="infer_request_inference" device="CPU" vmsize="1396408" vmpeak="1396408" vmrss="396172" vmhwm="396172" />
+        <model path="onnx/FP32/ssd_resnet34_new/ssd_resnet34_new.xml" test="infer_request_inference" device="GPU" vmsize="2778490" vmpeak="2863686" vmrss="2307058" vmhwm="2307058" />
+        <model path="pytorch/FP32/inceptionv3_pretrained/inceptionv3_pretrained.xml" test="create_exenetwork" device="CPU" vmsize="979706" vmpeak="1098692" vmrss="295682" vmhwm="414247" />
+        <model path="pytorch/FP32/inceptionv3_pretrained/inceptionv3_pretrained.xml" test="create_exenetwork" device="GPU" vmsize="1303499" vmpeak="1390069" vmrss="965224" vmhwm="1051580" />
+        <model path="pytorch/FP32/inceptionv3_pretrained/inceptionv3_pretrained.xml" test="infer_request_inference" device="CPU" vmsize="1247750" vmpeak="1247750" vmrss="307928" vmhwm="415266" />
+        <model path="pytorch/FP32/inceptionv3_pretrained/inceptionv3_pretrained.xml" test="infer_request_inference" device="GPU" vmsize="1160265" vmpeak="1245462" vmrss="682354" vmhwm="766100" />
+        <model path="pytorch/FP32/resnet50_pretrained/resnet50_pretrained.xml" test="create_exenetwork" device="CPU" vmsize="985660" vmpeak="1111723" vmrss="304610" vmhwm="430336" />
+        <model path="pytorch/FP32/resnet50_pretrained/resnet50_pretrained.xml" test="create_exenetwork" device="GPU" vmsize="1170265" vmpeak="1281675" vmrss="833180" vmhwm="944299" />
+        <model path="pytorch/FP32/resnet50_pretrained/resnet50_pretrained.xml" test="infer_request_inference" device="CPU" vmsize="1253189" vmpeak="1253189" vmrss="316373" vmhwm="429618" />
+        <model path="pytorch/FP32/resnet50_pretrained/resnet50_pretrained.xml" test="infer_request_inference" device="GPU" vmsize="1091214" vmpeak="1176411" vmrss="613095" vmhwm="724110" />
+        <model path="pytorch/FP32/resnet50_torchvision/resnet50_torchvision.xml" test="create_exenetwork" device="CPU" vmsize="985660" vmpeak="1111723" vmrss="304772" vmhwm="430414" />
+        <model path="pytorch/FP32/resnet50_torchvision/resnet50_torchvision.xml" test="create_exenetwork" device="GPU" vmsize="1150806" vmpeak="1261878" vmrss="813394" vmhwm="924123" />
+        <model path="pytorch/FP32/resnet50_torchvision/resnet50_torchvision.xml" test="infer_request_inference" device="CPU" vmsize="1253194" vmpeak="1253194" vmrss="315463" vmhwm="428974" />
+        <model path="pytorch/FP32/resnet50_torchvision/resnet50_torchvision.xml" test="infer_request_inference" device="GPU" vmsize="1090070" vmpeak="1175267" vmrss="612274" vmhwm="722924" />
+        <model path="pytorch/FP32/squeezenet_v1.1_pretrained/squeezenet_v1.1_pretrained.xml" test="create_exenetwork" device="CPU" vmsize="705577" vmpeak="780457" vmrss="53320" vmhwm="53320" />
+        <model path="pytorch/FP32/squeezenet_v1.1_pretrained/squeezenet_v1.1_pretrained.xml" test="create_exenetwork" device="GPU" vmsize="716476" vmpeak="716476" vmrss="378487" vmhwm="378487" />
+        <model path="pytorch/FP32/squeezenet_v1.1_pretrained/squeezenet_v1.1_pretrained.xml" test="infer_request_inference" device="CPU" vmsize="972613" vmpeak="1057810" vmrss="57033" vmhwm="57033" />
+        <model path="pytorch/FP32/squeezenet_v1.1_pretrained/squeezenet_v1.1_pretrained.xml" test="infer_request_inference" device="GPU" vmsize="672594" vmpeak="757790" vmrss="194183" vmhwm="194183" />
+        <model path="tf/1.14.0/FP32/bert_base_uncased/bert_base_uncased.xml" test="create_exenetwork" device="CPU" vmsize="1863586" vmpeak="2298270" vmrss="1166578" vmhwm="1601236" />
+        <model path="tf/1.14.0/FP32/bert_base_uncased/bert_base_uncased.xml" test="create_exenetwork" device="GPU" vmsize="3438385" vmpeak="3992487" vmrss="3100890" vmhwm="3654268" />
+        <model path="tf/1.14.0/FP32/bert_base_uncased/bert_base_uncased.xml" test="infer_request_inference" device="CPU" vmsize="2136893" vmpeak="2298270" vmrss="1177888" vmhwm="1601350" />
+        <model path="tf/1.14.0/FP32/bert_base_uncased/bert_base_uncased.xml" test="infer_request_inference" device="GPU" vmsize="2866156" vmpeak="3332056" vmrss="2390778" vmhwm="2939315" />
+        <model path="tf/1.14.0/FP32/bert_xnli/bert_xnli.xml" test="create_exenetwork" device="CPU" vmsize="1795970" vmpeak="2230654" vmrss="1095978" vmhwm="1530557" />
+        <model path="tf/1.14.0/FP32/bert_xnli/bert_xnli.xml" test="create_exenetwork" device="GPU" vmsize="3373229" vmpeak="3883687" vmrss="3035104" vmhwm="3545068" />
+        <model path="tf/1.14.0/FP32/bert_xnli/bert_xnli.xml" test="infer_request_inference" device="CPU" vmsize="2069298" vmpeak="2230675" vmrss="1108967" vmhwm="1530178" />
+        <model path="tf/1.14.0/FP32/bert_xnli/bert_xnli.xml" test="infer_request_inference" device="GPU" vmsize="2783367" vmpeak="3206626" vmrss="2308222" vmhwm="2813283" />
+        <model path="tf/1.14.0/FP32/cmu/cmu.xml" test="create_exenetwork" device="CPU" vmsize="1389767" vmpeak="1653657" vmrss="587459" vmhwm="851136" />
+        <model path="tf/1.14.0/FP32/cmu/cmu.xml" test="create_exenetwork" device="GPU" vmsize="1997091" vmpeak="1999374" vmrss="1659538" vmhwm="1661498" />
+        <model path="tf/1.14.0/FP32/cmu/cmu.xml" test="infer_request_inference" device="CPU" vmsize="1660250" vmpeak="1660250" vmrss="717350" vmhwm="850948" />
+        <model path="tf/1.14.0/FP32/cmu/cmu.xml" test="infer_request_inference" device="GPU" vmsize="1842703" vmpeak="1927900" vmrss="1363991" vmhwm="1363991" />
+        <model path="tf/1.14.0/FP32/deeplab_v3/deeplab_v3.xml" test="create_exenetwork" device="CPU" vmsize="783562" vmpeak="783562" vmrss="74089" vmhwm="74089" />
+        <model path="tf/1.14.0/FP32/deeplab_v3/deeplab_v3.xml" test="create_exenetwork" device="GPU" vmsize="976300" vmpeak="976300" vmrss="639132" vmhwm="639132" />
+        <model path="tf/1.14.0/FP32/deeplab_v3/deeplab_v3.xml" test="infer_request_inference" device="CPU" vmsize="1055204" vmpeak="1140401" vmrss="135018" vmhwm="135018" />
+        <model path="tf/1.14.0/FP32/deeplab_v3/deeplab_v3.xml" test="infer_request_inference" device="GPU" vmsize="895616" vmpeak="980813" vmrss="418631" vmhwm="418631" />
+        <model path="tf/1.14.0/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="CPU" vmsize="903520" vmpeak="903520" vmrss="182405" vmhwm="182405" />
+        <model path="tf/1.14.0/FP32/densenet_121/densenet_121.xml" test="create_exenetwork" device="GPU" vmsize="1300780" vmpeak="1300780" vmrss="963144" vmhwm="963144" />
+        <model path="tf/1.14.0/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="CPU" vmsize="1261171" vmpeak="1346368" vmrss="191354" vmhwm="191354" />
+        <model path="tf/1.14.0/FP32/densenet_121/densenet_121.xml" test="infer_request_inference" device="GPU" vmsize="1066088" vmpeak="1151285" vmrss="588608" vmhwm="588608" />
+        <model path="tf/1.14.0/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="CPU" vmsize="992097" vmpeak="1004718" vmrss="276021" vmhwm="288532" />
+        <model path="tf/1.14.0/FP32/densenet_169/densenet_169.xml" test="create_exenetwork" device="GPU" vmsize="1673510" vmpeak="1686178" vmrss="1335256" vmhwm="1346415" />
+        <model path="tf/1.14.0/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="CPU" vmsize="1259304" vmpeak="1259304" vmrss="285667" vmhwm="288584" />
+        <model path="tf/1.14.0/FP32/densenet_169/densenet_169.xml" test="infer_request_inference" device="GPU" vmsize="1318803" vmpeak="1404000" vmrss="840652" vmhwm="840652" />
+        <model path="tf/1.14.0/FP32/dssd_avigilon/dssd_avigilon.xml" test="create_exenetwork" device="CPU" vmsize="742190" vmpeak="801429" vmrss="120036" vmhwm="120036" />
+        <model path="tf/1.14.0/FP32/dssd_avigilon/dssd_avigilon.xml" test="create_exenetwork" device="GPU" vmsize="917155" vmpeak="917155" vmrss="580470" vmhwm="580470" />
+        <model path="tf/1.14.0/FP32/dssd_avigilon/dssd_avigilon.xml" test="infer_request_inference" device="CPU" vmsize="828079" vmpeak="828079" vmrss="124950" vmhwm="124950" />
+        <model path="tf/1.14.0/FP32/dssd_avigilon/dssd_avigilon.xml" test="infer_request_inference" device="GPU" vmsize="798803" vmpeak="884000" vmrss="322223" vmhwm="322223" />
+        <model path="tf/1.14.0/FP32/facenet/facenet.xml" test="create_exenetwork" device="CPU" vmsize="1036542" vmpeak="1123340" vmrss="332675" vmhwm="418984" />
+        <model path="tf/1.14.0/FP32/facenet/facenet.xml" test="create_exenetwork" device="GPU" vmsize="1419095" vmpeak="1503018" vmrss="1081142" vmhwm="1164966" />
+        <model path="tf/1.14.0/FP32/facenet/facenet.xml" test="infer_request_inference" device="CPU" vmsize="1122513" vmpeak="1207710" vmrss="333564" vmhwm="417877" />
+        <model path="tf/1.14.0/FP32/facenet/facenet.xml" test="infer_request_inference" device="GPU" vmsize="1206654" vmpeak="1291851" vmrss="729799" vmhwm="812141" />
+        <model path="tf/1.14.0/FP32/faster_rcnn_inception_resnet_v2_atrous_coco/faster_rcnn_inception_resnet_v2_atrous_coco.xml" test="create_exenetwork" device="CPU" vmsize="2502557" vmpeak="2710479" vmrss="803394" vmhwm="1011098" />
+        <model path="tf/1.14.0/FP32/faster_rcnn_inception_resnet_v2_atrous_coco/faster_rcnn_inception_resnet_v2_atrous_coco.xml" test="create_exenetwork" device="GPU" vmsize="4844647" vmpeak="4844647" vmrss="4505820" vmhwm="4505820" />
+        <model path="tf/1.14.0/FP32/faster_rcnn_inception_v2_coco/faster_rcnn_inception_v2_coco.xml" test="create_exenetwork" device="CPU" vmsize="927518" vmpeak="990735" vmrss="192327" vmhwm="255424" />
+        <model path="tf/1.14.0/FP32/faster_rcnn_inception_v2_coco/faster_rcnn_inception_v2_coco.xml" test="create_exenetwork" device="GPU" vmsize="1410156" vmpeak="1410156" vmrss="1071818" vmhwm="1071818" />
+        <model path="tf/1.14.0/FP32/faster_rcnn_resnet101_coco/faster_rcnn_resnet101_coco.xml" test="create_exenetwork" device="CPU" vmsize="1348308" vmpeak="1587736" vmrss="555162" vmhwm="794456" />
+        <model path="tf/1.14.0/FP32/faster_rcnn_resnet101_coco/faster_rcnn_resnet101_coco.xml" test="create_exenetwork" device="GPU" vmsize="2073328" vmpeak="2139914" vmrss="1735650" vmhwm="1801794" />
+        <model path="tf/1.14.0/FP32/faster_rcnn_resnet50_coco/faster_rcnn_resnet50_coco.xml" test="create_exenetwork" device="CPU" vmsize="1137926" vmpeak="1282252" vmrss="347172" vmhwm="491384" />
+        <model path="tf/1.14.0/FP32/faster_rcnn_resnet50_coco/faster_rcnn_resnet50_coco.xml" test="create_exenetwork" device="GPU" vmsize="1528581" vmpeak="1558133" vmrss="1191273" vmhwm="1220918" />
+        <model path="tf/1.14.0/FP32/i3d_rgb/i3d_rgb.xml" test="create_exenetwork" device="CPU" vmsize="1064445" vmpeak="1124276" vmrss="233131" vmhwm="292728" />
+        <model path="tf/1.14.0/FP32/i3d_rgb/i3d_rgb.xml" test="create_exenetwork" device="GPU" vmsize="1608666" vmpeak="1608666" vmrss="1270744" vmhwm="1270744" />
+        <model path="tf/1.14.0/FP32/i3d_rgb/i3d_rgb.xml" test="infer_request_inference" device="CPU" vmsize="1209941" vmpeak="1295138" vmrss="396422" vmhwm="396422" />
+        <model path="tf/1.14.0/FP32/i3d_rgb/i3d_rgb.xml" test="infer_request_inference" device="GPU" vmsize="1593238" vmpeak="1678435" vmrss="1137583" vmhwm="1257484" />
+        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.0/icv_squeezenet_v1.0.xml" test="create_exenetwork" device="CPU" vmsize="713814" vmpeak="788028" vmrss="53034" vmhwm="53034" />
+        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.0/icv_squeezenet_v1.0.xml" test="create_exenetwork" device="GPU" vmsize="701729" vmpeak="701729" vmrss="363578" vmhwm="363578" />
+        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.0/icv_squeezenet_v1.0.xml" test="infer_request_inference" device="CPU" vmsize="799869" vmpeak="885066" vmrss="59810" vmhwm="59810" />
+        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.0/icv_squeezenet_v1.0.xml" test="infer_request_inference" device="GPU" vmsize="687694" vmpeak="772891" vmrss="209248" vmhwm="209248" />
+        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.1/icv_squeezenet_v1.1.xml" test="create_exenetwork" device="CPU" vmsize="706258" vmpeak="780140" vmrss="52884" vmhwm="52884" />
+        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.1/icv_squeezenet_v1.1.xml" test="create_exenetwork" device="GPU" vmsize="705052" vmpeak="705052" vmrss="367395" vmhwm="367395" />
+        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.1/icv_squeezenet_v1.1.xml" test="infer_request_inference" device="CPU" vmsize="973367" vmpeak="1058564" vmrss="56414" vmhwm="56414" />
+        <model path="tf/1.14.0/FP32/icv_squeezenet_v1.1/icv_squeezenet_v1.1.xml" test="infer_request_inference" device="GPU" vmsize="677320" vmpeak="762517" vmrss="198619" vmhwm="198619" />
+        <model path="tf/1.14.0/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="CPU" vmsize="1437061" vmpeak="1624516" vmrss="755024" vmhwm="942141" />
+        <model path="tf/1.14.0/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="create_exenetwork" device="GPU" vmsize="2478034" vmpeak="2597150" vmrss="2139680" vmhwm="2258219" />
+        <model path="tf/1.14.0/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="CPU" vmsize="1524120" vmpeak="1624521" vmrss="762559" vmhwm="940914" />
+        <model path="tf/1.14.0/FP32/inception_resnet_v2/inception_resnet_v2.xml" test="infer_request_inference" device="GPU" vmsize="2100274" vmpeak="2185471" vmrss="1622847" vmhwm="1739566" />
+        <model path="tf/1.14.0/FP32/inception_v1/inception_v1.xml" test="create_exenetwork" device="CPU" vmsize="748534" vmpeak="809437" vmrss="143514" vmhwm="143514" />
+        <model path="tf/1.14.0/FP32/inception_v1/inception_v1.xml" test="create_exenetwork" device="GPU" vmsize="943758" vmpeak="943758" vmrss="606392" vmhwm="606392" />
+        <model path="tf/1.14.0/FP32/inception_v1/inception_v1.xml" test="infer_request_inference" device="CPU" vmsize="1015783" vmpeak="1015783" vmrss="147118" vmhwm="147118" />
+        <model path="tf/1.14.0/FP32/inception_v1/inception_v1.xml" test="infer_request_inference" device="GPU" vmsize="835073" vmpeak="920270" vmrss="357146" vmhwm="357146" />
+        <model path="tf/1.14.0/FP32/inception_v2/inception_v2.xml" test="create_exenetwork" device="CPU" vmsize="834953" vmpeak="887541" vmrss="164626" vmhwm="217001" />
+        <model path="tf/1.14.0/FP32/inception_v2/inception_v2.xml" test="create_exenetwork" device="GPU" vmsize="1034649" vmpeak="1064835" vmrss="696592" vmhwm="726694" />
+        <model path="tf/1.14.0/FP32/inception_v2/inception_v2.xml" test="infer_request_inference" device="CPU" vmsize="921081" vmpeak="1006278" vmrss="167502" vmhwm="215597" />
+        <model path="tf/1.14.0/FP32/inception_v2/inception_v2.xml" test="infer_request_inference" device="GPU" vmsize="911310" vmpeak="996507" vmrss="433617" vmhwm="464682" />
+        <model path="tf/1.14.0/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="CPU" vmsize="971453" vmpeak="1081683" vmrss="305390" vmhwm="415204" />
+        <model path="tf/1.14.0/FP32/inception_v3/inception_v3.xml" test="create_exenetwork" device="GPU" vmsize="1332598" vmpeak="1413375" vmrss="995165" vmhwm="1075859" />
+        <model path="tf/1.14.0/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="CPU" vmsize="1148685" vmpeak="1233882" vmrss="314220" vmhwm="414882" />
+        <model path="tf/1.14.0/FP32/inception_v3/inception_v3.xml" test="infer_request_inference" device="GPU" vmsize="1167634" vmpeak="1252830" vmrss="689416" vmhwm="769002" />
+        <model path="tf/1.14.0/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="CPU" vmsize="1189630" vmpeak="1393740" vmrss="511908" vmhwm="715540" />
+        <model path="tf/1.14.0/FP32/inception_v4/inception_v4.xml" test="create_exenetwork" device="GPU" vmsize="1867418" vmpeak="2007080" vmrss="1529990" vmhwm="1668929" />
+        <model path="tf/1.14.0/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="CPU" vmsize="1367256" vmpeak="1452453" vmrss="523946" vmhwm="715577" />
+        <model path="tf/1.14.0/FP32/inception_v4/inception_v4.xml" test="infer_request_inference" device="GPU" vmsize="1611350" vmpeak="1696546" vmrss="1133615" vmhwm="1270427" />
+        <model path="tf/1.14.0/FP32/mask_rcnn_resnet101_atrous_coco/mask_rcnn_resnet101_atrous_coco.xml" test="create_exenetwork" device="CPU" vmsize="2715268" vmpeak="3061650" vmrss="776375" vmhwm="1122695" />
+        <model path="tf/1.14.0/FP32/mask_rcnn_resnet101_atrous_coco/mask_rcnn_resnet101_atrous_coco.xml" test="create_exenetwork" device="GPU" vmsize="4160156" vmpeak="4971210" vmrss="3823164" vmhwm="4634151" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_0.25_128/mobilenet_v1_0.25_128.xml" test="create_exenetwork" device="CPU" vmsize="701350" vmpeak="776562" vmrss="42281" vmhwm="42281" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_0.25_128/mobilenet_v1_0.25_128.xml" test="create_exenetwork" device="GPU" vmsize="717771" vmpeak="717771" vmrss="379501" vmhwm="379501" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_0.25_128/mobilenet_v1_0.25_128.xml" test="infer_request_inference" device="CPU" vmsize="786552" vmpeak="786552" vmrss="42406" vmhwm="42406" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_0.25_128/mobilenet_v1_0.25_128.xml" test="infer_request_inference" device="GPU" vmsize="656084" vmpeak="741280" vmrss="177543" vmhwm="177543" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_0.5_160/mobilenet_v1_0.5_160.xml" test="create_exenetwork" device="CPU" vmsize="705936" vmpeak="781149" vmrss="55619" vmhwm="55619" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_0.5_160/mobilenet_v1_0.5_160.xml" test="create_exenetwork" device="GPU" vmsize="724765" vmpeak="724765" vmrss="386458" vmhwm="386458" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_0.5_160/mobilenet_v1_0.5_160.xml" test="infer_request_inference" device="CPU" vmsize="791554" vmpeak="791554" vmrss="55582" vmhwm="55582" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_0.5_160/mobilenet_v1_0.5_160.xml" test="infer_request_inference" device="GPU" vmsize="670987" vmpeak="756184" vmrss="193029" vmhwm="193029" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_1.0_224/mobilenet_v1_1.0_224.xml" test="create_exenetwork" device="CPU" vmsize="720673" vmpeak="720673" vmrss="99512" vmhwm="99512" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_1.0_224/mobilenet_v1_1.0_224.xml" test="create_exenetwork" device="GPU" vmsize="771253" vmpeak="771253" vmrss="433087" vmhwm="433087" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_1.0_224/mobilenet_v1_1.0_224.xml" test="infer_request_inference" device="CPU" vmsize="987828" vmpeak="1073025" vmrss="104005" vmhwm="104005" />
+        <model path="tf/1.14.0/FP32/mobilenet_v1_1.0_224/mobilenet_v1_1.0_224.xml" test="infer_request_inference" device="GPU" vmsize="726986" vmpeak="812182" vmrss="248450" vmhwm="248450" />
+        <model path="tf/1.14.0/FP32/mobilenet_v2_1.0_224/mobilenet_v2_1.0_224.xml" test="create_exenetwork" device="CPU" vmsize="726554" vmpeak="793447" vmrss="91452" vmhwm="91452" />
+        <model path="tf/1.14.0/FP32/mobilenet_v2_1.0_224/mobilenet_v2_1.0_224.xml" test="create_exenetwork" device="GPU" vmsize="857027" vmpeak="857027" vmrss="519630" vmhwm="519630" />
+        <model path="tf/1.14.0/FP32/mobilenet_v2_1.0_224/mobilenet_v2_1.0_224.xml" test="infer_request_inference" device="CPU" vmsize="812619" vmpeak="897816" vmrss="100895" vmhwm="100895" />
+        <model path="tf/1.14.0/FP32/mobilenet_v2_1.0_224/mobilenet_v2_1.0_224.xml" test="infer_request_inference" device="GPU" vmsize="764800" vmpeak="849997" vmrss="287019" vmhwm="287019" />
+        <model path="tf/1.14.0/FP32/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224.xml" test="create_exenetwork" device="CPU" vmsize="739960" vmpeak="739960" vmrss="134924" vmhwm="134924" />
+        <model path="tf/1.14.0/FP32/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224.xml" test="create_exenetwork" device="GPU" vmsize="905439" vmpeak="905439" vmrss="567876" vmhwm="567876" />
+        <model path="tf/1.14.0/FP32/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224.xml" test="infer_request_inference" device="CPU" vmsize="825988" vmpeak="891722" vmrss="144684" vmhwm="144684" />
+        <model path="tf/1.14.0/FP32/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224.xml" test="infer_request_inference" device="GPU" vmsize="821251" vmpeak="906448" vmrss="343085" vmhwm="343085" />
+        <model path="tf/1.14.0/FP32/ncf/ncf.xml" test="create_exenetwork" device="CPU" vmsize="1026407" vmpeak="1026407" vmrss="351535" vmhwm="351535" />
+        <model path="tf/1.14.0/FP32/ncf/ncf.xml" test="create_exenetwork" device="GPU" vmsize="1104485" vmpeak="1149496" vmrss="766740" vmhwm="811642" />
+        <model path="tf/1.14.0/FP32/ncf/ncf.xml" test="infer_request_inference" device="CPU" vmsize="1209280" vmpeak="1209280" vmrss="362325" vmhwm="362325" />
+        <model path="tf/1.14.0/FP32/ncf/ncf.xml" test="infer_request_inference" device="GPU" vmsize="1105275" vmpeak="1190472" vmrss="627822" vmhwm="671450" />
+        <model path="tf/1.14.0/FP32/resnet_v1.5_50/resnet_v1.5_50.xml" test="create_exenetwork" device="CPU" vmsize="988072" vmpeak="1114146" vmrss="304798" vmhwm="430279" />
+        <model path="tf/1.14.0/FP32/resnet_v1.5_50/resnet_v1.5_50.xml" test="create_exenetwork" device="GPU" vmsize="1171383" vmpeak="1282325" vmrss="833705" vmhwm="944476" />
+        <model path="tf/1.14.0/FP32/resnet_v1.5_50/resnet_v1.5_50.xml" test="infer_request_inference" device="CPU" vmsize="1164982" vmpeak="1250178" vmrss="319394" vmhwm="429904" />
+        <model path="tf/1.14.0/FP32/resnet_v1.5_50/resnet_v1.5_50.xml" test="infer_request_inference" device="GPU" vmsize="1090481" vmpeak="1115056" vmrss="613485" vmhwm="722176" />
+        <model path="tf/1.14.0/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="CPU" vmsize="1185163" vmpeak="1406329" vmrss="511669" vmhwm="732674" />
+        <model path="tf/1.14.0/FP32/resnet_v1_101/resnet_v1_101.xml" test="create_exenetwork" device="GPU" vmsize="1646897" vmpeak="1857653" vmrss="1308538" vmhwm="1518940" />
+        <model path="tf/1.14.0/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="CPU" vmsize="1361906" vmpeak="1447102" vmrss="515138" vmhwm="731073" />
+        <model path="tf/1.14.0/FP32/resnet_v1_101/resnet_v1_101.xml" test="infer_request_inference" device="GPU" vmsize="1486612" vmpeak="1612171" vmrss="1008602" vmhwm="1218973" />
+        <model path="tf/1.14.0/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="CPU" vmsize="1361328" vmpeak="1659262" vmrss="685287" vmhwm="983091" />
+        <model path="tf/1.14.0/FP32/resnet_v1_152/resnet_v1_152.xml" test="create_exenetwork" device="GPU" vmsize="2053204" vmpeak="2340951" vmrss="1714788" vmhwm="2002072" />
+        <model path="tf/1.14.0/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="CPU" vmsize="1628504" vmpeak="1713701" vmrss="690892" vmhwm="983257" />
+        <model path="tf/1.14.0/FP32/resnet_v1_152/resnet_v1_152.xml" test="infer_request_inference" device="GPU" vmsize="1817290" vmpeak="2019841" vmrss="1338792" vmhwm="1625405" />
+        <model path="tf/1.14.0/FP32/resnet_v1_50/resnet_v1_50.xml" test="create_exenetwork" device="CPU" vmsize="980148" vmpeak="1106211" vmrss="304340" vmhwm="430242" />
+        <model path="tf/1.14.0/FP32/resnet_v1_50/resnet_v1_50.xml" test="create_exenetwork" device="GPU" vmsize="1177410" vmpeak="1291040" vmrss="839217" vmhwm="952868" />
+        <model path="tf/1.14.0/FP32/resnet_v1_50/resnet_v1_50.xml" test="infer_request_inference" device="CPU" vmsize="1060997" vmpeak="1146194" vmrss="308906" vmhwm="429811" />
+        <model path="tf/1.14.0/FP32/resnet_v1_50/resnet_v1_50.xml" test="infer_request_inference" device="GPU" vmsize="1094189" vmpeak="1123038" vmrss="616548" vmhwm="730298" />
+        <model path="tf/1.14.0/FP32/resnet_v2_101/resnet_v2_101.xml" test="create_exenetwork" device="CPU" vmsize="1217086" vmpeak="1438262" vmrss="515611" vmhwm="736502" />
+        <model path="tf/1.14.0/FP32/resnet_v2_101/resnet_v2_101.xml" test="create_exenetwork" device="GPU" vmsize="1721532" vmpeak="1922648" vmrss="1383304" vmhwm="1584195" />
+        <model path="tf/1.14.0/FP32/resnet_v2_101/resnet_v2_101.xml" test="infer_request_inference" device="CPU" vmsize="1394296" vmpeak="1479493" vmrss="530197" vmhwm="735883" />
+        <model path="tf/1.14.0/FP32/resnet_v2_101/resnet_v2_101.xml" test="infer_request_inference" device="GPU" vmsize="1533625" vmpeak="1649492" vmrss="1055813" vmhwm="1256236" />
+        <model path="tf/1.14.0/FP32/resnet_v2_152/resnet_v2_152.xml" test="create_exenetwork" device="CPU" vmsize="1664005" vmpeak="1929070" vmrss="791611" vmhwm="988280" />
+        <model path="tf/1.14.0/FP32/resnet_v2_152/resnet_v2_152.xml" test="create_exenetwork" device="GPU" vmsize="2054062" vmpeak="2324472" vmrss="1715776" vmhwm="1985344" />
+        <model path="tf/1.14.0/FP32/resnet_v2_152/resnet_v2_152.xml" test="infer_request_inference" device="CPU" vmsize="1750642" vmpeak="1750642" vmrss="806811" vmhwm="988041" />
+        <model path="tf/1.14.0/FP32/resnet_v2_152/resnet_v2_152.xml" test="infer_request_inference" device="GPU" vmsize="1905020" vmpeak="2088814" vmrss="1426682" vmhwm="1694347" />
+        <model path="tf/1.14.0/FP32/resnet_v2_50/resnet_v2_50.xml" test="create_exenetwork" device="CPU" vmsize="994541" vmpeak="1120615" vmrss="307034" vmhwm="432806" />
+        <model path="tf/1.14.0/FP32/resnet_v2_50/resnet_v2_50.xml" test="create_exenetwork" device="GPU" vmsize="1212042" vmpeak="1312194" vmrss="874780" vmhwm="974438" />
+        <model path="tf/1.14.0/FP32/resnet_v2_50/resnet_v2_50.xml" test="infer_request_inference" device="CPU" vmsize="1081334" vmpeak="1166531" vmrss="322436" vmhwm="432702" />
+        <model path="tf/1.14.0/FP32/resnet_v2_50/resnet_v2_50.xml" test="infer_request_inference" device="GPU" vmsize="1116720" vmpeak="1132315" vmrss="638097" vmhwm="738348" />
+        <model path="tf/1.14.0/FP32/rfcn_resnet101_coco/rfcn_resnet101_coco.xml" test="create_exenetwork" device="CPU" vmsize="1467762" vmpeak="1671108" vmrss="691412" vmhwm="894509" />
+        <model path="tf/1.14.0/FP32/rfcn_resnet101_coco/rfcn_resnet101_coco.xml" test="create_exenetwork" device="GPU" vmsize="2625381" vmpeak="2732168" vmrss="2288915" vmhwm="2392494" />
+        <model path="tf/1.14.0/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="CPU" vmsize="713590" vmpeak="788138" vmrss="53216" vmhwm="53216" />
+        <model path="tf/1.14.0/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="create_exenetwork" device="GPU" vmsize="724427" vmpeak="724427" vmrss="386354" vmhwm="386354" />
+        <model path="tf/1.14.0/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="CPU" vmsize="799604" vmpeak="799604" vmrss="59534" vmhwm="59534" />
+        <model path="tf/1.14.0/FP32/squeezenet_v1.1/squeezenet_v1.1.xml" test="infer_request_inference" device="GPU" vmsize="685677" vmpeak="770874" vmrss="206845" vmhwm="206845" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_coco/ssd_mobilenet_v1_coco.xml" test="create_exenetwork" device="CPU" vmsize="832010" vmpeak="832010" vmrss="144367" vmhwm="144367" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_coco/ssd_mobilenet_v1_coco.xml" test="create_exenetwork" device="GPU" vmsize="920249" vmpeak="920249" vmrss="582769" vmhwm="582769" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_coco/ssd_mobilenet_v1_coco.xml" test="infer_request_inference" device="CPU" vmsize="1009200" vmpeak="1094397" vmrss="156052" vmhwm="156052" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_coco/ssd_mobilenet_v1_coco.xml" test="infer_request_inference" device="GPU" vmsize="851666" vmpeak="936863" vmrss="374660" vmhwm="374660" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco/ssd_mobilenet_v1_fpn_coco.xml" test="create_exenetwork" device="CPU" vmsize="1357855" vmpeak="1537842" vmrss="428038" vmhwm="602841" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco/ssd_mobilenet_v1_fpn_coco.xml" test="create_exenetwork" device="GPU" vmsize="1748255" vmpeak="1748255" vmrss="1410474" vmhwm="1410474" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco/ssd_mobilenet_v1_fpn_coco.xml" test="infer_request_inference" device="CPU" vmsize="1539933" vmpeak="1625130" vmrss="506157" vmhwm="602326" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco/ssd_mobilenet_v1_fpn_coco.xml" test="infer_request_inference" device="GPU" vmsize="1597762" vmpeak="1597762" vmrss="1125956" vmhwm="1125956" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco_602x602/ssd_mobilenet_v1_fpn_coco_602x602.xml" test="create_exenetwork" device="CPU" vmsize="1508566" vmpeak="1688554" vmrss="427086" vmhwm="602414" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco_602x602/ssd_mobilenet_v1_fpn_coco_602x602.xml" test="create_exenetwork" device="GPU" vmsize="1694071" vmpeak="1694071" vmrss="1356300" vmhwm="1356300" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco_602x602/ssd_mobilenet_v1_fpn_coco_602x602.xml" test="infer_request_inference" device="CPU" vmsize="1418346" vmpeak="1507495" vmrss="498206" vmhwm="602238" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v1_fpn_coco_602x602/ssd_mobilenet_v1_fpn_coco_602x602.xml" test="infer_request_inference" device="GPU" vmsize="1533370" vmpeak="1618567" vmrss="1062006" vmhwm="1062006" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v2_coco/ssd_mobilenet_v2_coco.xml" test="create_exenetwork" device="CPU" vmsize="912147" vmpeak="990698" vmrss="224068" vmhwm="302484" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v2_coco/ssd_mobilenet_v2_coco.xml" test="create_exenetwork" device="GPU" vmsize="1144707" vmpeak="1222395" vmrss="807570" vmhwm="885076" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v2_coco/ssd_mobilenet_v2_coco.xml" test="infer_request_inference" device="CPU" vmsize="998842" vmpeak="1048663" vmrss="239059" vmhwm="302291" />
+        <model path="tf/1.14.0/FP32/ssd_mobilenet_v2_coco/ssd_mobilenet_v2_coco.xml" test="infer_request_inference" device="GPU" vmsize="1054336" vmpeak="1139533" vmrss="577106" vmhwm="651913" />
+        <model path="tf/1.14.0/FP32/unet2d/unet2d.xml" test="create_exenetwork" device="CPU" vmsize="1046905" vmpeak="1206301" vmrss="351400" vmhwm="510603" />
+        <model path="tf/1.14.0/FP32/unet2d/unet2d.xml" test="create_exenetwork" device="GPU" vmsize="1199005" vmpeak="1333363" vmrss="861400" vmhwm="995815" />
+        <model path="tf/1.14.0/FP32/unet2d/unet2d.xml" test="infer_request_inference" device="CPU" vmsize="1132003" vmpeak="1217200" vmrss="380998" vmhwm="509615" />
+        <model path="tf/1.14.0/FP32/unet2d/unet2d.xml" test="infer_request_inference" device="GPU" vmsize="1174336" vmpeak="1259533" vmrss="696300" vmhwm="857849" />
+        <model path="tf/1.14.0/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="CPU" vmsize="2133768" vmpeak="2836366" vmrss="1437966" vmhwm="2140403" />
+        <model path="tf/1.14.0/FP32/vgg16/vgg16.xml" test="create_exenetwork" device="GPU" vmsize="2803710" vmpeak="3934762" vmrss="2464961" vmhwm="3596054" />
+        <model path="tf/1.14.0/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="CPU" vmsize="2400741" vmpeak="2836230" vmrss="1468438" vmhwm="2139410" />
+        <model path="tf/1.14.0/FP32/vgg16/vgg16.xml" test="infer_request_inference" device="GPU" vmsize="2793221" vmpeak="3855737" vmrss="2313766" vmhwm="3461135" />
+        <model path="tf/1.14.0/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="CPU" vmsize="2188924" vmpeak="2918494" vmrss="1491630" vmhwm="2221008" />
+        <model path="tf/1.14.0/FP32/vgg19/vgg19.xml" test="create_exenetwork" device="GPU" vmsize="2899624" vmpeak="4031731" vmrss="2561410" vmhwm="3693086" />
+        <model path="tf/1.14.0/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="CPU" vmsize="2274792" vmpeak="2918401" vmrss="1523438" vmhwm="2221039" />
+        <model path="tf/1.14.0/FP32/vgg19/vgg19.xml" test="infer_request_inference" device="GPU" vmsize="2877160" vmpeak="3966222" vmrss="2398546" vmhwm="3572186" />
+        <model path="tf/1.14.0/FP32/yolo_v2/yolo_v2.xml" test="create_exenetwork" device="CPU" vmsize="1252357" vmpeak="1511010" vmrss="552931" vmhwm="811361" />
+        <model path="tf/1.14.0/FP32/yolo_v2/yolo_v2.xml" test="create_exenetwork" device="GPU" vmsize="1481464" vmpeak="1701512" vmrss="1144072" vmhwm="1363939" />
+        <model path="tf/1.14.0/FP32/yolo_v2/yolo_v2.xml" test="infer_request_inference" device="CPU" vmsize="1340471" vmpeak="1510438" vmrss="585192" vmhwm="810186" />
+        <model path="tf/1.14.0/FP32/yolo_v2/yolo_v2.xml" test="infer_request_inference" device="GPU" vmsize="1465339" vmpeak="1601189" vmrss="987604" vmhwm="1207902" />
+        <model path="tf/1.14.0/FP32/yolo_v2_tiny_voc/yolo_v2_tiny_voc.xml" test="create_exenetwork" device="CPU" vmsize="872019" vmpeak="952447" vmrss="192904" vmhwm="272953" />
+        <model path="tf/1.14.0/FP32/yolo_v2_tiny_voc/yolo_v2_tiny_voc.xml" test="create_exenetwork" device="GPU" vmsize="876340" vmpeak="970054" vmrss="538460" vmhwm="632299" />
+        <model path="tf/1.14.0/FP32/yolo_v2_tiny_voc/yolo_v2_tiny_voc.xml" test="infer_request_inference" device="CPU" vmsize="959992" vmpeak="1045189" vmrss="207662" vmhwm="273093" />
+        <model path="tf/1.14.0/FP32/yolo_v2_tiny_voc/yolo_v2_tiny_voc.xml" test="infer_request_inference" device="GPU" vmsize="883292" vmpeak="968489" vmrss="405891" vmhwm="476907" />
+        <model path="tf/1.14.0/FP32/yolo_v2_voc/yolo_v2_voc.xml" test="create_exenetwork" device="CPU" vmsize="1248988" vmpeak="1505738" vmrss="549031" vmhwm="805745" />
+        <model path="tf/1.14.0/FP32/yolo_v2_voc/yolo_v2_voc.xml" test="create_exenetwork" device="GPU" vmsize="1459816" vmpeak="1681716" vmrss="1121952" vmhwm="1343638" />
+        <model path="tf/1.14.0/FP32/yolo_v2_voc/yolo_v2_voc.xml" test="infer_request_inference" device="CPU" vmsize="1337055" vmpeak="1506221" vmrss="582212" vmhwm="806447" />
+        <model path="tf/1.14.0/FP32/yolo_v2_voc/yolo_v2_voc.xml" test="infer_request_inference" device="GPU" vmsize="1456322" vmpeak="1589104" vmrss="977688" vmhwm="1194798" />
+        <model path="tf/1.14.0/FP32/yolo_v3/yolo_v3.xml" test="create_exenetwork" device="CPU" vmsize="1388498" vmpeak="1700405" vmrss="680981" vmhwm="992706" />
+        <model path="tf/1.14.0/FP32/yolo_v3/yolo_v3.xml" test="create_exenetwork" device="GPU" vmsize="1904952" vmpeak="2102276" vmrss="1567898" vmhwm="1764921" />
+        <model path="tf/1.14.0/FP32/yolo_v3/yolo_v3.xml" test="infer_request_inference" device="CPU" vmsize="1486066" vmpeak="1705636" vmrss="724443" vmhwm="992409" />
+        <model path="tf/1.14.0/FP32/yolo_v3/yolo_v3.xml" test="infer_request_inference" device="GPU" vmsize="1809121" vmpeak="1916995" vmrss="1331512" vmhwm="1523137" />
+        <model path="tf/1.14.0/FP32/yolo_v3_tiny/yolo_v3_tiny.xml" test="create_exenetwork" device="CPU" vmsize="803400" vmpeak="848244" vmrss="123765" vmhwm="168360" />
+        <model path="tf/1.14.0/FP32/yolo_v3_tiny/yolo_v3_tiny.xml" test="create_exenetwork" device="GPU" vmsize="795683" vmpeak="825796" vmrss="458718" vmhwm="488498" />
+        <model path="tf/1.14.0/FP32/yolo_v3_tiny/yolo_v3_tiny.xml" test="infer_request_inference" device="CPU" vmsize="892273" vmpeak="977470" vmrss="139048" vmhwm="168292" />
+        <model path="tf/1.14.0/FP32/yolo_v3_tiny/yolo_v3_tiny.xml" test="infer_request_inference" device="GPU" vmsize="789438" vmpeak="874634" vmrss="312400" vmhwm="338832" />
     </models>
 </attributes>
\ No newline at end of file
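The four attributes recorded for each reference entry above are per-process memory counters in kilobytes: vmsize (current virtual memory), vmpeak (peak virtual memory), vmrss (current resident set size) and vmhwm (resident high-water mark). On Linux the stress-test utilities sample them from /proc/self/status through helpers such as getVmSizeInKB() and getVmRSSInKB() (see the common/utils.cpp hunk further down). A minimal sketch of how one such counter can be read; readStatusFieldKB is a hypothetical name standing in for the getVirtualMemoryInKB() helper:

    // Minimal sketch, assuming Linux /proc is available.
    #include <cstdio>
    #include <cstring>

    size_t readStatusFieldKB(const char *field) {  // e.g. "VmRSS:" or "VmHWM:"
        FILE *status = fopen("/proc/self/status", "r");
        if (!status) return 0;
        char line[256];
        size_t value_kb = 0;
        while (fgets(line, sizeof(line), status)) {
            if (strncmp(line, field, strlen(field)) == 0) {
                sscanf(line + strlen(field), "%zu", &value_kb);  // /proc reports kB
                break;
            }
        }
        fclose(status);
        return value_kb;
    }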
diff --git a/tests/stress_tests/.automation/memleaks_tests/precommit_configs/desktop_env_config.xml b/tests/stress_tests/.automation/memleaks_tests/precommit_configs/desktop_env_config.xml
new file mode 100644 (file)
index 0000000..7e137c7
--- /dev/null
@@ -0,0 +1,6 @@
+<?xml version="1.0"?>
+<attributes>
+    <irs_path>
+        <value>${STRESS_IRS_PATH}</value>
+    </irs_path>
+</attributes>
diff --git a/tests/stress_tests/.automation/memleaks_tests/precommit_configs/desktop_test_config.xml b/tests/stress_tests/.automation/memleaks_tests/precommit_configs/desktop_test_config.xml
new file mode 100644 (file)
index 0000000..a27bcc9
--- /dev/null
@@ -0,0 +1,20 @@
+<?xml version="1.0"?>
+<attributes>
+<!--[ WARNING ] Use of the "processes" attribute from the config is not implemented yet; it will be ignored.-->
+    <processes>
+        <value>1</value>
+    </processes>
+    <threads>
+        <value>1</value>
+    </threads>
+    <iterations>
+        <value>30</value>
+    </iterations>
+    <devices>
+        <value>CPU</value>
+<!--        <value>GPU</value>-->
+    </devices>
+    <models>
+        <value>public/mobilenet-ssd/FP32/mobilenet-ssd.xml</value>
+    </models>
+</attributes>
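This new config follows the <attributes><tag><value> layout that the stress tests parse with pugixml (the same access pattern appears in the tests_utils.cpp hunk below). A minimal reading sketch, assuming a local copy of desktop_test_config.xml; all names outside the config itself are illustrative:

    #include <pugixml.hpp>
    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
        pugi::xml_document config;
        if (!config.load_file("desktop_test_config.xml")) return 1;

        // Scalars sit under <attributes><tag><value>
        int iterations = config.child("attributes").child("iterations").child("value").text().as_int();

        // Repeated <value> nodes (devices, models) become lists
        std::vector<std::string> devices;
        for (pugi::xml_node value : config.child("attributes").child("devices").children("value"))
            devices.push_back(value.text().as_string());

        std::cout << iterations << " iterations on " << devices.size() << " device(s)" << std::endl;
    }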
index 74ddd50..a8f6ff5 100644 (file)
@@ -8,7 +8,7 @@
         <value>1</value>
     </threads>
     <iterations>
-        <value>10000</value>
+        <value>5000</value>
     </iterations>
     <devices>
         <value>CPU</value>
index 6c94f40..cab0ca2 100644 (file)
@@ -2,7 +2,7 @@
 <attributes>
     <processes>
         <value>1</value>
-        <value>2</value>
+        <!--<value>2</value>-->
     </processes>
     <threads>
         <value>1</value>
@@ -16,7 +16,7 @@
         <value>GPU</value>
     </devices>
     <models>
-        <value>caffe/FP32/alexnet/alexnet.xml</value>
+        <!--<value>caffe/FP32/alexnet/alexnet.xml</value>-->
         <value>tf/1.14.0/FP32/inception_v3/inception_v3.xml</value>
         <value>tf/1.14.0/FP32/ssd_mobilenet_v1_coco/ssd_mobilenet_v1_coco.xml</value>
     </models>
diff --git a/tests/stress_tests/.gitignore b/tests/stress_tests/.gitignore
new file mode 100644 (file)
index 0000000..cedcbdf
--- /dev/null
@@ -0,0 +1,2 @@
+# Name of virtualenv created by stress_tests/scripts/get_testdata.py
+.stress_venv
\ No newline at end of file
index c4d5e42..a9ce0a6 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #include "pipelines.h"
 #include "../utils.h"
 
index aaac1c3..38e967e 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #include <string>
 #include <functional>
 #include <inference_engine.hpp>
index 797432e..a649067 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #pragma once
 
 #include <iostream>
index 9157899..cf9f3e2 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #include "task_manager.h"
 
 #include <future>
index ed91e0f..0f5bfb6 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #include "tests_utils.h"
 
 #include <gtest/gtest.h>
@@ -27,6 +31,7 @@ std::vector<TestCase> generateTestsParams(std::initializer_list<std::string> fie
     const pugi::xml_document & test_config = Environment::Instance().getTestConfig();
     std::string models_path = Environment::Instance().getEnvConfig()
             .child("attributes").child("irs_path").child("value").text().as_string();
+    models_path = expand_env_vars(models_path);
 
     std::vector<int> processes;
     std::vector<int> threads;
index d2acf0b..65af35d 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #pragma once
 
 #include "utils.h"
index 607c3f7..56d5f71 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #include "utils.h"
 
 #include <string>
@@ -62,3 +66,25 @@ size_t getVmRSSInKB() {return getVirtualMemoryInKB((char*) "VmRSS:");}
 size_t getVmHWMInKB() {return getVirtualMemoryInKB((char*) "VmHWM:");}
 
 #endif
+
+void auto_expand_env_vars(std::string &input) {
+    const static std::string pattern1 = "${", pattern2 = "}";
+    size_t pattern1_pos, pattern2_pos, envvar_start_pos, envvar_finish_pos;
+    while ((pattern1_pos = input.find(pattern1)) != std::string::npos) {
+        envvar_start_pos = pattern1_pos + pattern1.length();
+        // Search for the closing brace only after the opening "${"
+        if ((pattern2_pos = input.find(pattern2, envvar_start_pos)) != std::string::npos) {
+            envvar_finish_pos = pattern2_pos - pattern2.length();
+            const std::string envvar_name = input.substr(envvar_start_pos, envvar_finish_pos - envvar_start_pos + 1);
+            const char *envvar_val = getenv(envvar_name.c_str());
+            if (envvar_val == NULL)
+                throw std::logic_error("Expected environment variable " + envvar_name + " is not set.");
+            const std::string envvar_val_s(envvar_val);
+            input.replace(pattern1_pos, pattern2_pos - pattern1_pos + 1, envvar_val_s);
+        } else {
+            break;  // no closing brace: avoid looping forever on malformed input
+        }
+    }
+}
+std::string expand_env_vars(const std::string &input) {
+    std::string _input = input;
+    auto_expand_env_vars(_input);
+    return _input;
+}
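The two helpers above replace every ${NAME} occurrence in a string with the value of the environment variable NAME; this is what resolves the ${STRESS_IRS_PATH} placeholder introduced in desktop_env_config.xml. A minimal usage sketch, assuming the declarations from common/utils.h; the variable value and model path are made up for illustration:

    #include <cstdlib>
    #include <iostream>
    #include <string>

    std::string expand_env_vars(const std::string &input);  // declared in common/utils.h

    int main() {
        setenv("STRESS_IRS_PATH", "/opt/irs", 1);  // illustrative value
        std::cout << expand_env_vars("${STRESS_IRS_PATH}/public/vgg16/FP32/vgg16.xml") << std::endl;
        // prints: /opt/irs/public/vgg16/FP32/vgg16.xml
        return 0;
    }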
index 7e82d12..5eb9448 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #pragma once
 
 #include <iostream>
@@ -70,3 +74,6 @@ inline void run_in_threads(const int &numthreads, Function const &function, Args
     }
     v.clear();
 }
+
+void auto_expand_env_vars(std::string &input);
+std::string expand_env_vars(const std::string &input);
index 9bd09da..27e8d7d 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #pragma once
 
 #include "../common/utils.h"
index 35b701b..c9e6e79 100644 (file)
@@ -1,21 +1,21 @@
 <?xml version="1.0"?>
 <attributes>
     <models>
-<model path="public/mobilenet-ssd/FP32/mobilenet-ssd.xml" test="create_exenetwork" device="CPU" vmsize="757218" vmpeak="901683" vmrss="73920" vmhwm="107866" />
-<model path="public/mobilenet-ssd/FP32/mobilenet-ssd.xml" test="create_exenetwork" device="GPU" vmsize="747815" vmpeak="860978" vmrss="401808" vmhwm="435358" />
-<model path="public/mobilenet-ssd/FP32/mobilenet-ssd.xml" test="infer_request_inference" device="CPU" vmsize="1001189" vmpeak="1001189" vmrss="116080" vmhwm="116080" />
-<model path="public/mobilenet-ssd/FP32/mobilenet-ssd.xml" test="infer_request_inference" device="GPU" vmsize="788752" vmpeak="860842" vmrss="435283" vmhwm="435283" />
-<model path="public/mtcnn-r/FP32/mtcnn-r.xml" test="create_exenetwork" device="CPU" vmsize="754806" vmpeak="803184" vmrss="15206" vmhwm="26532" />
-<model path="public/mtcnn-r/FP32/mtcnn-r.xml" test="create_exenetwork" device="GPU" vmsize="554650" vmpeak="644666" vmrss="207592" vmhwm="217720" />
-<model path="public/mtcnn-r/FP32/mtcnn-r.xml" test="infer_request_inference" device="CPU" vmsize="959257" vmpeak="959257" vmrss="26690" vmhwm="26690" />
-<model path="public/mtcnn-r/FP32/mtcnn-r.xml" test="infer_request_inference" device="GPU" vmsize="572576" vmpeak="644666" vmrss="215230" vmhwm="215230" />
-<model path="public/ssd300/FP32/ssd300.xml" test="create_exenetwork" device="CPU" vmsize="755224" vmpeak="1146142" vmrss="22246" vmhwm="370770" />
-<model path="public/ssd300/FP32/ssd300.xml" test="create_exenetwork" device="GPU" vmsize="747709" vmpeak="1031694" vmrss="401746" vmhwm="749962" />
-<model path="public/ssd300/FP32/ssd300.xml" test="infer_request_inference" device="CPU" vmsize="1343474" vmpeak="1415563" vmrss="314204" vmhwm="371131" />
-<model path="public/ssd300/FP32/ssd300.xml" test="infer_request_inference" device="GPU" vmsize="1088700" vmpeak="1160790" vmrss="739626" vmhwm="748008" />
-<model path="public/vgg16/FP32/vgg16.xml" test="create_exenetwork" device="CPU" vmsize="754050" vmpeak="2548532" vmrss="15593" vmhwm="1808765" />
-<model path="public/vgg16/FP32/vgg16.xml" test="create_exenetwork" device="GPU" vmsize="648912" vmpeak="3289101" vmrss="299327" vmhwm="3003457" />
-<model path="public/vgg16/FP32/vgg16.xml" test="infer_request_inference" device="CPU" vmsize="2257006" vmpeak="2548532" vmrss="1243448" vmhwm="1809143" />
-<model path="public/vgg16/FP32/vgg16.xml" test="infer_request_inference" device="GPU" vmsize="2413290" vmpeak="3289101" vmrss="2059780" vmhwm="3006845" />
+        <model path="public/mobilenet-ssd/FP32/mobilenet-ssd.xml" test="create_exenetwork" device="CPU" vmsize="740214" vmpeak="805110" vmrss="129308" vmhwm="129308" />
+        <model path="public/mobilenet-ssd/FP32/mobilenet-ssd.xml" test="create_exenetwork" device="GPU" vmsize="739154" vmpeak="739154" vmrss="346522" vmhwm="346522" />
+        <model path="public/mobilenet-ssd/FP32/mobilenet-ssd.xml" test="infer_request_inference" device="CPU" vmsize="1007890" vmpeak="1007890" vmrss="138652" vmhwm="138652" />
+        <model path="public/mobilenet-ssd/FP32/mobilenet-ssd.xml" test="infer_request_inference" device="GPU" vmsize="824366" vmpeak="909563" vmrss="347167" vmhwm="347167" />
+        <model path="public/mtcnn-r/FP32/mtcnn-r.xml" test="create_exenetwork" device="CPU" vmsize="691589" vmpeak="922864" vmrss="31054" vmhwm="31054" />
+        <model path="public/mtcnn-r/FP32/mtcnn-r.xml" test="create_exenetwork" device="GPU" vmsize="520530" vmpeak="522740" vmrss="127706" vmhwm="129630" />
+        <model path="public/mtcnn-r/FP32/mtcnn-r.xml" test="infer_request_inference" device="CPU" vmsize="958240" vmpeak="1043437" vmrss="31366" vmhwm="31366" />
+        <model path="public/mtcnn-r/FP32/mtcnn-r.xml" test="infer_request_inference" device="GPU" vmsize="605727" vmpeak="690924" vmrss="127753" vmhwm="129537" />
+        <model path="public/ssd300/FP32/ssd300.xml" test="create_exenetwork" device="CPU" vmsize="1046988" vmpeak="1179042" vmrss="307990" vmhwm="439457" />
+        <model path="public/ssd300/FP32/ssd300.xml" test="create_exenetwork" device="GPU" vmsize="1108775" vmpeak="1126985" vmrss="716341" vmhwm="734578" />
+        <model path="public/ssd300/FP32/ssd300.xml" test="infer_request_inference" device="CPU" vmsize="1321819" vmpeak="1321819" vmrss="374207" vmhwm="439748" />
+        <model path="public/ssd300/FP32/ssd300.xml" test="infer_request_inference" device="GPU" vmsize="1199957" vmpeak="1285154" vmrss="728046" vmhwm="734593" />
+        <model path="public/vgg16/FP32/vgg16.xml" test="create_exenetwork" device="CPU" vmsize="2133814" vmpeak="2836412" vmrss="1438049" vmhwm="2140533" />
+        <model path="public/vgg16/FP32/vgg16.xml" test="create_exenetwork" device="GPU" vmsize="2707988" vmpeak="3834209" vmrss="2313022" vmhwm="3439202" />
+        <model path="public/vgg16/FP32/vgg16.xml" test="infer_request_inference" device="CPU" vmsize="2401380" vmpeak="2836412" vmrss="1469832" vmhwm="2140377" />
+        <model path="public/vgg16/FP32/vgg16.xml" test="infer_request_inference" device="GPU" vmsize="2793211" vmpeak="3834235" vmrss="2314192" vmhwm="3439550" />
     </models>
 </attributes>
index 4d235d1..6d6d6c9 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #include "flags.h"
 #include "../common/utils.h"
 #include <tests_utils.h>
index 5383a32..b0293ac 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #include "tests_utils.h"
 #include "../common/tests_utils.h"
 #include "../common/managers/thread_manager.h"
index 58204a8..0b03380 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #include "tests_pipelines.h"
 
 #include <string>
@@ -6,7 +10,7 @@
 
 #include <inference_engine.hpp>
 
-#define REPORTING_THRESHOLD 1.1
+#define REPORTING_THRESHOLD 1.3
 
 using namespace InferenceEngine;
 
@@ -48,7 +52,9 @@ test_create_exenetwork(const std::string &model_name, const std::string &model_p
     vmsize_before_test = (long) getVmSizeInKB();
     vmrss_before_test = (long) getVmRSSInKB();
 
-    create_exenetwork(model_path, target_device)();
+    Core ie;
+    CNNNetwork cnnNetwork = ie.ReadNetwork(model_path);
+    ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
 
     getAlignedVmValues(test_cur_vmsize, test_cur_vmpeak, test_cur_vmrss, test_cur_vmhwm,
                        vmsize_before_test, vmrss_before_test);
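The hunk above inlines the measured pipeline: a Core reads the IR into a CNNNetwork and loads it into an ExecutableNetwork, with the Vm* counters sampled before and after. A condensed sketch of that flow against the Inference Engine API used in this diff; the final comparison only illustrates how REPORTING_THRESHOLD (now 1.3) is meant to gate reporting and is not a copy of the real logic:

    #include <inference_engine.hpp>
    #include <iostream>
    #include <string>

    size_t getVmRSSInKB();  // from common/utils.h

    void measure_create_exenetwork(const std::string &model_path, const std::string &device,
                                   long ref_vmrss) {  // reference value from the XML above
        long vmrss_before = (long) getVmRSSInKB();

        InferenceEngine::Core ie;
        InferenceEngine::CNNNetwork network = ie.ReadNetwork(model_path);
        InferenceEngine::ExecutableNetwork exeNetwork = ie.LoadNetwork(network, device);

        long vmrss_after = (long) getVmRSSInKB();
        if (vmrss_after - vmrss_before > 1.3 * ref_vmrss)  // REPORTING_THRESHOLD
            std::cout << "vmrss exceeded the reference by more than the threshold" << std::endl;
    }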
index 0712bca..c12f1c3 100644 (file)
@@ -1,8 +1,11 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #pragma once
 
 #include "../../common/tests_utils.h"
 #include "../../common/utils.h"
-#include "../../common/ie_pipelines/pipelines.h"
 
 #include <string>
 
index 32afff1..23d4398 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #include "../common/tests_utils.h"
 
 #include <pugixml.hpp>
@@ -34,6 +38,7 @@ public:
         // Parse RefsConfig from MemCheckEnvironment
         std::string models_path = Environment::Instance().getEnvConfig()
                 .child("attributes").child("irs_path").child("value").text().as_string();
+        models_path = expand_env_vars(models_path);
 
         const pugi::xml_document &refs_config = MemCheckEnvironment::Instance().getRefsConfig();
         auto values = refs_config.child("attributes").child("models");
@@ -69,4 +74,4 @@ public:
                         ref_vmhwm = vmhwm_v[i];
                     }
     }
-};
\ No newline at end of file
+};
index 9687797..6df324c 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #pragma once
 
 #include "../common/utils.h"
index 93752ab..2edf800 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #include "flags.h"
 #include "../common/utils.h"
 #include "../common/tests_utils.h"
index 99416be..eebef0c 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #include "../common/tests_utils.h"
 #include "../common/managers/thread_manager.h"
 #include "tests_pipelines/tests_pipelines.h"
index 49e60d3..dfbd865 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #include "tests_pipelines.h"
 
 #include <math.h>
index 2d144af..dd50968 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #pragma once
 
 #include "../../common/tests_utils.h"
index 0c73b26..b3c058f 100644 (file)
@@ -1,17 +1,30 @@
 #!/usr/bin/env python3
+# Copyright (C) 2020 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+
 """ Script to acquire model IRs for stress tests.
 Usage: ./scripts/get_testdata.py
 """
+# pylint:disable=line-too-long
+
 import argparse
+import logging as log
 import multiprocessing
 import os
 import shutil
 import subprocess
+import sys
 from inspect import getsourcefile
+from pathlib import Path
+
+log.basicConfig(format="{file}: [ %(levelname)s ] %(message)s".format(file=os.path.basename(__file__)),
+                level=log.INFO, stream=sys.stdout)
 
 # Parameters
+OMZ_NUM_ATTEMPTS = 6
 MODEL_NAMES = 'vgg16,mtcnn-r,mobilenet-ssd,ssd300'
-OMZ_VERSION = 'efd238d02035f8a5417b7b1e25cd4c997d44351f'
 
 
 def abs_path(relative_path):
@@ -21,6 +34,54 @@ def abs_path(relative_path):
         os.path.join(os.path.dirname(getsourcefile(lambda: 0)), relative_path))
 
 
+class VirtualEnv:
+    """Class implemented creation and use of virtual environment."""
+    is_created = False
+
+    def __init__(self, venv_dir):
+        self.venv_dir = Path() / venv_dir
+        if sys.platform.startswith('linux') or sys.platform == 'darwin':
+            self.venv_executable = self.venv_dir / "bin" / "python3"
+        else:
+            self.venv_executable = self.venv_dir / "Scripts" / "python3.exe"
+
+    def get_venv_executable(self):
+        """Returns path to executable from virtual environment."""
+        return str(self.venv_executable)
+
+    def get_venv_dir(self):
+        """Returns path to virtual environment root directory."""
+        return str(self.venv_dir)
+
+    def create(self):
+        """Creates virtual environment."""
+        cmd = '{executable} -m venv {venv}'.format(executable=sys.executable,
+                                                   venv=self.get_venv_dir())
+        run_in_subprocess(cmd)
+        self.is_created = True
+
+    def install_requirements(self, *requirements):
+        """Installs provided requirements. Creates virtual environment if it hasn't been created."""
+        if not self.is_created:
+            self.create()
+        cmd = '{executable} -m pip install --upgrade pip'.format(executable=self.get_venv_executable())
+        cmd += ' && {executable} -m pip install'.format(executable=self.get_venv_executable())
+        for req in requirements:
+            cmd += " -r {req} ".format(req=req)
+        run_in_subprocess(cmd)
+
+    def create_n_install_requirements(self, *requirements):
+        """Creates virtual environment and installs provided requirements in it."""
+        self.create()
+        self.install_requirements(*requirements)
+
+
+def run_in_subprocess(cmd):
+    """Runs provided command in attached subprocess."""
+    log.info(cmd)
+    subprocess.check_call(cmd, shell=True)
+
+
 def main():
     """Main entry point.
     """
@@ -28,29 +89,73 @@ def main():
         description='Acquire test data',
         formatter_class=argparse.ArgumentDefaultsHelpFormatter)
 
-    parser.add_argument('--output_dir', default=f'./_models', help='directory to put test data into')
-    parser.add_argument('--cache_dir', default=f'./_cache', help='directory with test data cache')
+    parser.add_argument('--omz_repo', required=False,
+                        help='Path to Open Model Zoo (OMZ) repository. It will be used to skip cloning step.')
+    parser.add_argument('--mo_tool', default='../../model-optimizer/mo.py',
+                        help='Path to Model Optimizer (MO) runner. Required for OMZ converter.py only.')
+    parser.add_argument('--omz_models_out_dir', default='../_omz_out/models',
+                        help='Directory to put downloaded models into. Required for OMZ downloader.py and converter.py.')
+    parser.add_argument('--omz_irs_out_dir', default='../_omz_out/irs',
+                        help='Directory to put generated IRs into. Required for OMZ converter.py only.')
+    parser.add_argument('--omz_cache_dir', default='../_omz_out/cache',
+                        help='Directory with test data cache. Required for OMZ downloader.py only.')
+    parser.add_argument('--no_venv', action="store_true",
+                        help='Skip preparation and use of virtual environment to convert models via OMZ converter.py.')
     args = parser.parse_args()
+    models_out_dir = Path(abs_path(args.omz_models_out_dir))
+    irs_out_dir = Path(abs_path(args.omz_irs_out_dir))
+    cache_dir = Path(abs_path(args.omz_cache_dir))
+    mo_tool = Path(args.mo_tool).resolve()
+
+    # Step 1: prepare Open Model Zoo
+    if args.omz_repo:
+        omz_path = Path(args.omz_repo).resolve()
+    else:
+        omz_path = Path(abs_path('../_open_model_zoo'))
+        # Clone Open Model Zoo into temporary path
+        if os.path.exists(str(omz_path)):
+            shutil.rmtree(str(omz_path))
+        cmd = 'git clone https://github.com/opencv/open_model_zoo {omz_path}'.format(omz_path=omz_path)
+        run_in_subprocess(cmd)
+
+    # Step 2: prepare models
+    downloader_path = omz_path / "tools" / "downloader" / "downloader.py"
+    cmd = '{downloader_path} --name "{MODEL_NAMES}"' \
+          ' --num_attempts {num_attempts}' \
+          ' --output_dir {models_dir}' \
+          ' --cache_dir {cache_dir}'.format(downloader_path=downloader_path, MODEL_NAMES=MODEL_NAMES,
+                                            num_attempts=OMZ_NUM_ATTEMPTS,
+                                            models_dir=models_out_dir,
+                                            cache_dir=cache_dir)
+    run_in_subprocess(cmd)
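One detail worth noting: the command above runs downloader.py directly, so it relies on the script's shebang and execute bit; a sketch of a more portable variant (an assumption, not part of this change) would route it through the current interpreter:

    # Hypothetical, more portable variant: launch downloader.py through the
    # current Python interpreter instead of relying on its shebang.
    cmd = '{exe} {downloader}'.format(exe=sys.executable, downloader=downloader_path)
    # ...followed by the same --name/--num_attempts/--output_dir/--cache_dir flags as above.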
+
+    # Step 3: prepare virtual environment and install requirements
+    python_executable = sys.executable
+    if not args.no_venv:
+        venv = VirtualEnv("./.stress_venv")
+        requirements = [
+            omz_path / "tools" / "downloader" / "requirements.in",
+            mo_tool.parent / "requirements.txt",
+            mo_tool.parent / "requirements_dev.txt",
+            # omz_path / "tools" / "downloader" / "requirements-caffe2.in",
+            # omz_path / "tools" / "downloader" / "requirements-pytorch.in"
+        ]
+        venv.create_n_install_requirements(*requirements)
+        python_executable = venv.get_venv_executable()
 
-    # Clone Open Model Zoo into temporary path
-    omz_path = './_open_model_zoo'
-    if os.path.exists(omz_path):
-        shutil.rmtree(omz_path)
-    subprocess.check_call(
-        f'git clone https://github.com/opencv/open_model_zoo {omz_path}' \
-        f' && cd {omz_path}'\
-        f' && git checkout {OMZ_VERSION}', shell=True)
-    # Acquire model IRs
-    mo_tool = abs_path('../../../model-optimizer/mo.py')
-    subprocess.check_call(
-        f'{omz_path}/tools/downloader/downloader.py --name "{MODEL_NAMES}"' \
-        f' --output_dir {args.output_dir}/{OMZ_VERSION}/models' \
-        f' --cache_dir {args.cache_dir}', shell=True)
-    subprocess.check_call(
-        f'{omz_path}/tools/downloader/converter.py --name "{MODEL_NAMES}"' \
-        f' --output_dir {args.output_dir}/{OMZ_VERSION}/IRs' \
-        f' --download_dir {args.output_dir}/{OMZ_VERSION}/models' \
-        f' --mo {mo_tool} --jobs {multiprocessing.cpu_count()}', shell=True)
+    # Step 4: convert models to IRs
+    converter_path = omz_path / "tools" / "downloader" / "converter.py"
+    # NOTE: remove --precision if both precisions (FP32 & FP16) are required
+    cmd = '{executable} {converter_path} --name "{MODEL_NAMES}"' \
+          ' -p {executable}' \
+          ' --precision=FP32' \
+          ' --output_dir {irs_dir}' \
+          ' --download_dir {models_dir}' \
+          ' --mo {mo_tool} --jobs {workers_num}'.format(executable=python_executable, converter_path=converter_path,
+                                                        MODEL_NAMES=MODEL_NAMES, irs_dir=irs_out_dir,
+                                                        models_dir=models_out_dir, mo_tool=mo_tool,
+                                                        workers_num=multiprocessing.cpu_count())
+    run_in_subprocess(cmd)
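As a quick sanity check after conversion, something along these lines could verify that IRs were actually produced (the exact layout under irs_out_dir is OMZ-defined and assumed here):

    # Hypothetical post-run check (not part of the patch): every generated
    # IR .xml should have a matching .bin with the weights next to it.
    produced = list(irs_out_dir.rglob('*.xml'))
    log.info('Generated {} IR(s)'.format(len(produced)))
    for xml in produced:
        assert xml.with_suffix('.bin').exists(), 'missing weights for {}'.format(xml)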
 
 
 if __name__ == "__main__":
index 7f4ff8a..eeba370 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #pragma once
 
 #include "../common/utils.h"
index 3d23a64..65ad080 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #include "flags.h"
 #include "../common/utils.h"
 #include "../common/tests_utils.h"
index f710d36..33c9a68 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #include "../common/tests_utils.h"
 #include "tests_pipelines/tests_pipelines.h"
 
index afa4845..9ed7893 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #include "tests_pipelines.h"
 
 #include <string>
index 7e5ef80..94d2470 100644 (file)
@@ -1,3 +1,7 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
 #pragma once
 
 #include "../../common/utils.h"
index f537b67..5cd8f2b 100644 (file)
@@ -54,11 +54,11 @@ def is_image_info(blob):
 def set_inputs(paths_to_input, batch_size, input_info, requests):
   requests_input_data = get_inputs(paths_to_input, batch_size, input_info, requests)
   for i in range(len(requests)):
-    inputs = requests[i].inputs
+    inputs = requests[i].input_blobs
     for k, v in requests_input_data[i].items():
         if k not in inputs.keys():
             raise Exception("No input with name {} found!".format(k))
-        inputs[k][:] = v
+        inputs[k].buffer[:] = v
 
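The two replaced lines track the newer Inference Engine Python API: InferRequest.input_blobs maps input names to Blob objects, and writing through their .buffer numpy view fills the underlying memory in place. A minimal sketch under that API (names are illustrative):

    # Illustrative only: 'data' and image are hypothetical; image is assumed
    # to be a numpy array already reshaped to the blob's dimensions.
    blob = request.input_blobs['data']
    blob.buffer[:] = image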
 def get_inputs(paths_to_input, batch_size, input_info, requests):
     input_image_sizes = {}